diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..6bc819c3cd --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,43 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +version: 2 +updates: + - package-ecosystem: gitsubmodule + directory: / + ignore: + # Libraries that track the latest tag have to be managed manually, + # as Dependabot otherwise suggests the latest unstable commit + # https://github.com/dependabot/dependabot-core/pull/13052 + - dependency-name: vendor/nim-bearssl + - dependency-name: vendor/nim-chronicles + - dependency-name: vendor/nim-chronos + - dependency-name: vendor/nim-eth + - dependency-name: vendor/nim-faststreams + - dependency-name: vendor/nim-json-rpc + - dependency-name: vendor/nim-json-serialization + - dependency-name: vendor/nim-libbacktrace + - dependency-name: vendor/nim-libp2p + - dependency-name: vendor/nim-metrics + - dependency-name: vendor/nim-normalize + - dependency-name: vendor/nim-presto + - dependency-name: vendor/nim-results + - dependency-name: vendor/nim-serialization + - dependency-name: vendor/nim-stew + - dependency-name: vendor/nim-stint + - dependency-name: vendor/nim-taskpools + - dependency-name: vendor/nim-testutils + - dependency-name: vendor/nim-toml-serialization + - dependency-name: vendor/nim-unicodedb + - dependency-name: vendor/nim-unittest2 + - dependency-name: vendor/nim-web3 + - dependency-name: vendor/nim-websock + - dependency-name: vendor/nimcrypto + - dependency-name: vendor/NimYAML + schedule: + interval: daily + target-branch: unstable diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index db1abdc7c9..7799ba932e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -54,10 +54,15 @@ jobs: builder: ['self-hosted','ubuntu-22.04'] - target: os: macos + cpu: amd64 builder: macos-13 + - target: + os: macos + cpu: arm64 + builder: macos-latest - target: os: windows - builder: windows-2019 + builder: windows-2022 defaults: run: @@ -65,7 +70,17 @@ jobs: name: ${{ matrix.target.os }}-${{ matrix.target.cpu }}${{ matrix.branch != '' && ' (Nim ' || '' }}${{ matrix.branch-short }}${{ matrix.branch != '' && ')' || '' }} runs-on: ${{ matrix.builder }} + steps: + - name: Fix nim cache conflicts + run: | + echo "XDG_CACHE_HOME=${{ runner.temp }}/.nim-cache" >> $GITHUB_ENV + echo "CI_CACHE=${{ runner.temp }}/.nbs-cache" >> $GITHUB_ENV + + - name: Clean workspace (very aggressive) + if: contains(matrix.builder, 'self-hosted') + run: rm -rf "$GITHUB_WORKSPACE"/* + - name: Checkout uses: actions/checkout@v4 @@ -89,23 +104,6 @@ jobs: 7z x -y "external/mingw-${{ matrix.target.cpu }}.zip" -oexternal/mingw-${{ matrix.target.cpu }}/ mv external/mingw-${{ matrix.target.cpu }}/**/* ./external/mingw-${{ matrix.target.cpu }} - - name: Restore Nim DLLs dependencies (Windows) from cache - if: runner.os == 'Windows' - id: windows-dlls-cache - uses: actions/cache@v4 - with: - path: external/dlls-${{ matrix.target.cpu }} - key: 'dlls-${{ matrix.target.cpu }}' - - - name: Install DLLs dependencies (Windows) - if: > - steps.windows-dlls-cache.outputs.cache-hit != 'true' && - runner.os == 
'Windows' - run: | - mkdir -p external - curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip - 7z x -y external/windeps.zip -oexternal/dlls-${{ matrix.target.cpu }} - - name: Path to cached dependencies (Windows) if: > runner.os == 'Windows' @@ -219,32 +217,40 @@ jobs: name: "Developer builds" runs-on: ['self-hosted','ubuntu-22.04'] steps: + - name: Fix nim cache conflicts + run: | + echo "XDG_CACHE_HOME=${{ runner.temp }}/.nim-cache" >> $GITHUB_ENV + echo "CI_CACHE=${{ runner.temp }}/.nbs-cache" >> $GITHUB_ENV + + - name: Clean workspace (very aggressive) + run: rm -rf "$GITHUB_WORKSPACE"/* + - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 1 - - name: Build with developer flags - run: | - make -j nimbus_beacon_node LOG_LEVEL=TRACE NIMFLAGS="-d:has_deposit_root_checks=1" - - name: Build files with isMainModule run: | - executables=( - "beacon_chain/el/deposit_contract" - "beacon_chain/fork_choice/fork_choice" - "beacon_chain/fork_choice/proto_array" - "beacon_chain/networking/network_metadata_downloads" - "beacon_chain/era_db" - "beacon_chain/trusted_node_sync" - "benchmarks/rest_api_benchmark" - "tests/mocking/mock_genesis" - ) + makefile_content=$(<./Makefile) + executables=() + while IFS= read -r file; do + base=$(basename "$file" ".nim") + if ! grep -qw "$base" <<< "$makefile_content"; then + executables+=("$file") + fi + done < <(grep -Rl --include="*.nim" --exclude-dir=vendor "when isMainModule:" .) + make -j update source env.sh for executable in "${executables[@]}"; do nim c --passC:-fsyntax-only --noLinking:on -d:chronicles_log_level=TRACE "${executable}" done + - name: Build with custom SECONDS_PER_SLOT + run: | + source env.sh + nim c --passC:-fsyntax-only --noLinking:on -d:chronicles_log_level=TRACE -d:SECONDS_PER_SLOT=1 beacon_chain/nimbus_beacon_node + lint: name: "Lint" runs-on: ubuntu-latest @@ -259,7 +265,7 @@ jobs: if: ${{ !cancelled() }} && github.event_name == 'pull_request' run: | excluded_files="config.yaml|config.nims|beacon_chain.nimble" - excluded_extensions="ans|bin|cfg|yml|json|json\\.template|md|png|service|ssz|tpl|txt|lock|nix|gitignore|envrc" + excluded_extensions="ans|bin|cfg|yml|json|json\\.template|md|png|service|ssz|tpl|txt|lock|nix|gitignore|envrc|sh" current_year=$(date +"%Y") problematic_files=() @@ -282,13 +288,13 @@ jobs: run: | problematic_files=() while read -r file; do - if ! grep -qE '^{\.push raises: \[\]\.}$' "$file"; then + if ! 
grep -qE '^{\.push raises: \[\](, gcsafe)?\.}$' "$file"; then problematic_files+=("$file") fi done < <(git diff --name-only --diff-filter=AM --ignore-submodules HEAD^ HEAD | grep -E '\.nim$' || true) if (( ${#problematic_files[@]} )); then - echo "The following files do not have '{.push raises: [].}':" + echo "The following files do not have '{.push raises: [], gcsafe.}' (gcsafe optional):" for file in "${problematic_files[@]}"; do echo "- $file" done diff --git a/.github/workflows/nightly_build.yml b/.github/workflows/nightly_build.yml index b6d2a4c852..d85a786915 100644 --- a/.github/workflows/nightly_build.yml +++ b/.github/workflows/nightly_build.yml @@ -21,6 +21,11 @@ jobs: name: Linux AMD64 release asset runs-on: ubuntu-latest steps: + - name: Fix nim cache conflicts + run: | + echo "XDG_CACHE_HOME=${{ runner.temp }}/.nim-cache" >> $GITHUB_ENV + echo "CI_CACHE=${{ runner.temp }}/.nbs-cache" >> $GITHUB_ENV + - name: Checkout code uses: actions/checkout@v4 with: @@ -52,14 +57,14 @@ jobs: - name: Upload BN checksum artefact uses: actions/upload-artifact@v4 with: - name: Linux_amd64_checksum + name: Linux_amd64_checksum_bn path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_beacon_node.sha512sum retention-days: 2 - name: Upload VC checksum artefact uses: actions/upload-artifact@v4 with: - name: Linux_amd64_checksum + name: Linux_amd64_checksum_vc path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_validator_client.sha512sum retention-days: 2 @@ -67,6 +72,11 @@ jobs: name: Linux ARM64 release asset runs-on: ubuntu-latest steps: + - name: Fix nim cache conflicts + run: | + echo "XDG_CACHE_HOME=${{ runner.temp }}/.nim-cache" >> $GITHUB_ENV + echo "CI_CACHE=${{ runner.temp }}/.nbs-cache" >> $GITHUB_ENV + - name: Install packages env: DEBIAN_FRONTEND: "noninteractive" @@ -106,14 +116,14 @@ jobs: - name: Upload BN checksum artefact uses: actions/upload-artifact@v4 with: - name: Linux_arm64_checksum + name: Linux_arm64_checksum_bn path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_beacon_node.sha512sum retention-days: 2 - name: Upload VC checksum artefact uses: actions/upload-artifact@v4 with: - name: Linux_arm64_checksum + name: Linux_arm64_checksum_vc path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_validator_client.sha512sum retention-days: 2 @@ -121,6 +131,11 @@ jobs: name: Linux ARM release asset runs-on: ubuntu-latest steps: + - name: Fix nim cache conflicts + run: | + echo "XDG_CACHE_HOME=${{ runner.temp }}/.nim-cache" >> $GITHUB_ENV + echo "CI_CACHE=${{ runner.temp }}/.nbs-cache" >> $GITHUB_ENV + - name: Install packages env: DEBIAN_FRONTEND: "noninteractive" @@ -160,14 +175,14 @@ jobs: - name: Upload BN checksum artefact uses: actions/upload-artifact@v4 with: - name: Linux_arm_checksum + name: Linux_arm_checksum_bn path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_beacon_node.sha512sum retention-days: 2 - name: Upload VC checksum artefact uses: actions/upload-artifact@v4 with: - name: Linux_arm_checksum + name: Linux_arm_checksum_vc path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_validator_client.sha512sum retention-days: 2 @@ -175,6 +190,11 @@ jobs: name: Windows AMD64 release asset runs-on: ubuntu-latest steps: + - name: Fix nim cache conflicts + run: | + echo "XDG_CACHE_HOME=${{ runner.temp }}/.nim-cache" >> $GITHUB_ENV + echo "CI_CACHE=${{ runner.temp }}/.nbs-cache" >> $GITHUB_ENV + - name: Checkout code uses: actions/checkout@v4 with: @@ -206,14 +226,14 @@ jobs: - name: 
Upload BN checksum artefact uses: actions/upload-artifact@v4 with: - name: Windows_amd64_checksum + name: Windows_amd64_checksum_bn path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_beacon_node.sha512sum retention-days: 2 - name: Upload VC checksum artefact uses: actions/upload-artifact@v4 with: - name: Windows_amd64_checksum + name: Windows_amd64_checksum_vc path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_validator_client.sha512sum retention-days: 2 @@ -221,6 +241,11 @@ jobs: name: macOS AMD64 release asset runs-on: ubuntu-latest steps: + - name: Fix nim cache conflicts + run: | + echo "XDG_CACHE_HOME=${{ runner.temp }}/.nim-cache" >> $GITHUB_ENV + echo "CI_CACHE=${{ runner.temp }}/.nbs-cache" >> $GITHUB_ENV + - name: Checkout code uses: actions/checkout@v4 with: @@ -252,14 +277,14 @@ jobs: - name: Upload BN checksum artefact uses: actions/upload-artifact@v4 with: - name: macOS_amd64_checksum + name: macOS_amd64_checksum_bn path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_beacon_node.sha512sum retention-days: 2 - name: Upload VC checksum artefact uses: actions/upload-artifact@v4 with: - name: macOS_amd64_checksum + name: macOS_amd64_checksum_vc path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_validator_client.sha512sum retention-days: 2 @@ -267,6 +292,11 @@ jobs: name: macOS ARM64 release asset runs-on: ubuntu-latest steps: + - name: Fix nim cache conflicts + run: | + echo "XDG_CACHE_HOME=${{ runner.temp }}/.nim-cache" >> $GITHUB_ENV + echo "CI_CACHE=${{ runner.temp }}/.nbs-cache" >> $GITHUB_ENV + - name: Checkout code uses: actions/checkout@v4 with: @@ -298,14 +328,14 @@ jobs: - name: Upload BN checksum artefact uses: actions/upload-artifact@v4 with: - name: macOS_arm64_checksum + name: macOS_arm64_checksum_bn path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_beacon_node.sha512sum retention-days: 2 - name: Upload VC checksum artefact uses: actions/upload-artifact@v4 with: - name: macOS_arm64_checksum + name: macOS_arm64_checksum_vc path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_validator_client.sha512sum retention-days: 2 @@ -330,17 +360,23 @@ jobs: \`\`\`text EOF echo '# Linux AMD64' >> release_notes.md - cat Linux_amd64_checksum/* >> release_notes.md + cat Linux_amd64_checksum_bn/* >> release_notes.md + cat Linux_amd64_checksum_vc/* >> release_notes.md echo '# Linux ARM64' >> release_notes.md - cat Linux_arm64_checksum/* >> release_notes.md + cat Linux_arm64_checksum_bn/* >> release_notes.md + cat Linux_arm64_checksum_vc/* >> release_notes.md echo '# Linux ARM' >> release_notes.md - cat Linux_arm_checksum/* >> release_notes.md + cat Linux_arm_checksum_bn/* >> release_notes.md + cat Linux_arm_checksum_vc/* >> release_notes.md echo '# Windows AMD64' >> release_notes.md - cat Windows_amd64_checksum/* >> release_notes.md + cat Windows_amd64_checksum_bn/* >> release_notes.md + cat Windows_amd64_checksum_vc/* >> release_notes.md echo '# macOS AMD64' >> release_notes.md - cat macOS_amd64_checksum/* >> release_notes.md + cat macOS_amd64_checksum_bn/* >> release_notes.md + cat macOS_amd64_checksum_vc/* >> release_notes.md echo '# macOS ARM64' >> release_notes.md - cat macOS_arm64_checksum/* >> release_notes.md + cat macOS_arm64_checksum_bn/* >> release_notes.md + cat macOS_arm64_checksum_vc/* >> release_notes.md echo '```' >> release_notes.md - name: Delete tag @@ -370,14 +406,20 @@ jobs: failOnError: false name: | Linux_amd64_archive - Linux_amd64_checksum + 
Linux_amd64_checksum_bn + Linux_amd64_checksum_vc Linux_arm64_archive - Linux_arm64_checksum + Linux_arm64_checksum_bn + Linux_arm64_checksum_vc Linux_arm_archive - Linux_arm_checksum + Linux_arm_checksum_bn + Linux_arm_checksum_vc Windows_amd64_archive - Windows_amd64_checksum + Windows_amd64_checksum_bn + Windows_amd64_checksum_vc macOS_amd64_archive - macOS_amd64_checksum + macOS_amd64_checksum_bn + macOS_amd64_checksum_vc macOS_arm64_archive - macOS_arm64_checksum + macOS_arm64_checksum_bn + macOS_arm64_checksum_vc diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index bf4dc5685c..d3e4a0481f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2020-2024 Status Research & Development GmbH +# Copyright (c) 2020-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -432,20 +432,26 @@ jobs: failOnError: false name: | Linux_amd64_archive - Linux_amd64_checksum + Linux_amd64_checksum_bn + Linux_amd64_checksum_vc Linux_amd64_packages Linux_arm64_archive - Linux_arm64_checksum + Linux_arm64_checksum_bn + Linux_arm64_checksum_vc Linux_arm64_packages Linux_arm_archive - Linux_arm_checksum + Linux_arm_checksum_bn + Linux_arm_checksum_vc Linux_arm_packages Windows_amd64_archive - Windows_amd64_checksum + Windows_amd64_checksum_bn + Windows_amd64_checksum_vc macOS_amd64_archive - macOS_amd64_checksum + macOS_amd64_checksum_bn + macOS_amd64_checksum_vc macOS_arm64_archive - macOS_arm64_checksum + macOS_arm64_checksum_bn + macOS_arm64_checksum_vc - name: Login to Docker Hub uses: docker/login-action@v1 diff --git a/.gitmodules b/.gitmodules index 45f88f8e32..cb03d83b6c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -225,11 +225,6 @@ url = https://github.com/eth-clients/holesky ignore = untracked branch = main -[submodule "vendor/EIPs"] - path = vendor/EIPs - url = https://github.com/ethereum/EIPs - ignore = untracked - branch = master [submodule "vendor/nim-minilru"] path = vendor/nim-minilru url = https://github.com/status-im/nim-minilru.git diff --git a/AllTests-mainnet.md b/AllTests-mainnet.md index e283cefe9c..77afe9cf9a 100644 --- a/AllTests-mainnet.md +++ b/AllTests-mainnet.md @@ -8,26 +8,21 @@ AllTests-mainnet ```diff + Aggregated attestations with disjoint comittee bits into a single on-chain aggregate [Pres OK + Aggregating across committees [Preset: mainnet] OK -+ Attestations with disjoint comittee bits and equal data into single on-chain aggregate [Pr OK -+ Cache coherence on chain aggregates [Preset: mainnet] OK -+ Can add and retrieve simple electra attestations [Preset: mainnet] OK -+ Simple add and get with electra nonzero committee [Preset: mainnet] OK -+ Working with electra aggregates [Preset: mainnet] OK -``` -## Attestation pool processing [Preset: mainnet] -```diff + Attestation from different branch [Preset: mainnet] OK + Attestations may arrive in any order [Preset: mainnet] OK + Attestations may overlap, bigger first [Preset: mainnet] OK + Attestations may overlap, smaller first [Preset: mainnet] OK + Attestations should be combined [Preset: mainnet] OK -+ Can add and retrieve simple attestations [Preset: mainnet] OK ++ Attestations with disjoint comittee bits and equal data into single on-chain aggregate [Pr OK ++ Cache coherence on chain 
aggregates [Preset: mainnet] OK ++ Can add and retrieve simple electra attestations [Preset: mainnet] OK + Everyone voting for something different [Preset: mainnet] OK + Fork choice returns block with attestation OK + Fork choice returns latest block with no attestations OK ++ Simple add and get with electra nonzero committee [Preset: mainnet] OK + Trying to add a block twice tags the second as an error OK + Trying to add a duplicate block from an old pruned epoch is tagged as an error OK -+ Working with aggregates [Preset: mainnet] OK ++ Working with electra aggregates [Preset: mainnet] OK ``` ## Backfill ```diff @@ -40,37 +35,41 @@ AllTests-mainnet ```diff + empty database [Preset: mainnet] OK + find ancestors [Preset: mainnet] OK -+ sanity check Altair and cross-fork getState rollback [Preset: mainnet] OK -+ sanity check Altair blocks [Preset: mainnet] OK -+ sanity check Altair states [Preset: mainnet] OK -+ sanity check Altair states, reusing buffers [Preset: mainnet] OK -+ sanity check Bellatrix and cross-fork getState rollback [Preset: mainnet] OK -+ sanity check Bellatrix blocks [Preset: mainnet] OK -+ sanity check Bellatrix states [Preset: mainnet] OK -+ sanity check Bellatrix states, reusing buffers [Preset: mainnet] OK -+ sanity check Capella and cross-fork getState rollback [Preset: mainnet] OK -+ sanity check Capella blocks [Preset: mainnet] OK -+ sanity check Capella states [Preset: mainnet] OK -+ sanity check Capella states, reusing buffers [Preset: mainnet] OK -+ sanity check Deneb and cross-fork getState rollback [Preset: mainnet] OK -+ sanity check Deneb blocks [Preset: mainnet] OK -+ sanity check Deneb states [Preset: mainnet] OK -+ sanity check Deneb states, reusing buffers [Preset: mainnet] OK -+ sanity check Electra and cross-fork getState rollback [Preset: mainnet] OK -+ sanity check Electra blocks [Preset: mainnet] OK -+ sanity check Electra states [Preset: mainnet] OK -+ sanity check Electra states, reusing buffers [Preset: mainnet] OK -+ sanity check Fulu and cross-fork getState rollback [Preset: mainnet] OK -+ sanity check Fulu blocks [Preset: mainnet] OK -+ sanity check Fulu states [Preset: mainnet] OK -+ sanity check Fulu states, reusing buffers [Preset: mainnet] OK ++ sanity check altair and cross-fork getState rollback [Preset: mainnet] OK ++ sanity check altair blocks [Preset: mainnet] OK ++ sanity check altair states [Preset: mainnet] OK ++ sanity check altair states, reusing buffers [Preset: mainnet] OK ++ sanity check bellatrix and cross-fork getState rollback [Preset: mainnet] OK ++ sanity check bellatrix blocks [Preset: mainnet] OK ++ sanity check bellatrix states [Preset: mainnet] OK ++ sanity check bellatrix states, reusing buffers [Preset: mainnet] OK + sanity check blobs [Preset: mainnet] OK ++ sanity check capella and cross-fork getState rollback [Preset: mainnet] OK ++ sanity check capella blocks [Preset: mainnet] OK ++ sanity check capella states [Preset: mainnet] OK ++ sanity check capella states, reusing buffers [Preset: mainnet] OK + sanity check data columns [Preset: mainnet] OK ++ sanity check deneb and cross-fork getState rollback [Preset: mainnet] OK ++ sanity check deneb blocks [Preset: mainnet] OK ++ sanity check deneb states [Preset: mainnet] OK ++ sanity check deneb states, reusing buffers [Preset: mainnet] OK ++ sanity check electra and cross-fork getState rollback [Preset: mainnet] OK ++ sanity check electra blocks [Preset: mainnet] OK ++ sanity check electra states [Preset: mainnet] OK ++ sanity check electra states, reusing buffers 
[Preset: mainnet] OK ++ sanity check fulu and cross-fork getState rollback [Preset: mainnet] OK ++ sanity check fulu blocks [Preset: mainnet] OK ++ sanity check fulu states [Preset: mainnet] OK ++ sanity check fulu states, reusing buffers [Preset: mainnet] OK + sanity check genesis roundtrip [Preset: mainnet] OK -+ sanity check phase 0 blocks [Preset: mainnet] OK -+ sanity check phase 0 getState rollback [Preset: mainnet] OK -+ sanity check phase 0 states [Preset: mainnet] OK -+ sanity check phase 0 states, reusing buffers [Preset: mainnet] OK + sanity check gloas and cross-fork getState rollback [Preset: mainnet] Skip + sanity check gloas blocks [Preset: mainnet] Skip + sanity check gloas states [Preset: mainnet] Skip + sanity check gloas states, reusing buffers [Preset: mainnet] Skip ++ sanity check phase0 blocks [Preset: mainnet] OK ++ sanity check phase0 getState rollback [Preset: mainnet] OK ++ sanity check phase0 states [Preset: mainnet] OK ++ sanity check phase0 states, reusing buffers [Preset: mainnet] OK + sanity check state diff roundtrip [Preset: mainnet] OK ``` ## Beacon chain file test suite @@ -88,7 +87,6 @@ AllTests-mainnet + dependent_root OK + get_beacon_proposer_index OK + latest_block_root OK -+ merklizer state roundtrip OK + process_slots OK ``` ## Beacon time @@ -102,11 +100,23 @@ AllTests-mainnet ``` ## Blinded block conversions ```diff -+ Bellatrix toSignedBlindedBeaconBlock OK -+ Capella toSignedBlindedBeaconBlock OK -+ Deneb toSignedBlindedBeaconBlock OK -+ Electra toSignedBlindedBeaconBlock OK -+ Fulu toSignedBlindedBeaconBlock OK ++ bellatrix toSignedBlindedBeaconBlock OK ++ capella toSignedBlindedBeaconBlock OK ++ deneb toSignedBlindedBeaconBlock OK ++ electra toSignedBlindedBeaconBlock OK ++ fulu toSignedBlindedBeaconBlock OK +``` +## BlobQuarantine data structure test suite [Preset: mainnet] +```diff ++ database and memory overfill protection and pruning test OK ++ database unload/load test OK ++ overfill protection test OK ++ popSidecars()/hasSidecars() return []/true on block without blobs OK ++ pruneAfterFinalization() test OK ++ put() duplicate items should not affect counters OK ++ put()/fetchMissingSidecars/remove test OK ++ put()/hasSidecar(index, slot, proposer_index)/remove() test OK ++ put(sidecar)/put([sidecars])/hasSidecars/popSidecars/remove() test OK ``` ## Block pool altair processing [Preset: mainnet] ```diff @@ -122,6 +132,7 @@ AllTests-mainnet ``` ## Block processor [Preset: mainnet] ```diff ++ Invalidate block root [Preset: mainnet] OK + Reverse order block add & get [Preset: mainnet] OK ``` ## Block quarantine @@ -146,6 +157,24 @@ AllTests-mainnet + atSlot sanity OK + parent sanity OK ``` +## ColumnMap test suite +```diff ++ ColumnMap test OK +``` +## ColumnQuarantine data structure test suite [Preset: mainnet] +```diff ++ database and memory overfill protection and pruning test OK ++ database unload/load test OK ++ overfill protection test OK ++ popSidecars()/hasSidecars() return []/true on block without columns OK ++ pruneAfterFinalization() test OK ++ put() duplicate items should not affect counters OK ++ put()/fetchMissingSidecars/remove test [node] OK ++ put()/fetchMissingSidecars/remove test [supernode] OK ++ put()/hasSidecar(index, slot, proposer_index)/remove() test OK ++ put(sidecar)/put([sidecars])/hasSidecars/popSidecars/remove() [node] test OK ++ put(sidecar)/put([sidecars])/hasSidecars/popSidecars/remove() [supernode] test OK +``` ## Combined scenarios [Beacon Node] [Preset: mainnet] ```diff + ImportKeystores should not be 
blocked by fee recipient setting [Beacon Node] [Preset: main OK @@ -168,13 +197,6 @@ AllTests-mainnet + Invalid Authorization Token [Beacon Node] [Preset: mainnet] OK + Missing Authorization header [Beacon Node] [Preset: mainnet] OK ``` -## DepositContractSnapshot -```diff -+ Migration OK -+ SSZ OK -+ depositCount OK -+ isValid OK -``` ## Discovery fork ID ```diff + Expected fork IDs OK @@ -184,128 +206,135 @@ AllTests-mainnet + Non-tail block in common OK + Tail block only in common OK ``` +## EF - Fulu - BPO forkdigests +```diff ++ Different fork versions OK ++ Different genesis validators roots OK ++ Different lengths and blob limits OK ++ Fusaka devnet-2 OK +``` ## EF - KZG ```diff -+ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_invalid_blob_59d64ff6b4648fad OK -+ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_invalid_blob_635fb2de5b0dc429 OK -+ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_invalid_blob_a3b9ff28507767f8 OK -+ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_invalid_blob_d3afbd98123a3434 OK -+ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_0951cfd9ab47a8d3 OK -+ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_19b3f3f8c98ea31e OK -+ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_84d8089232bc23a8 OK -+ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_a87a4e636e0f58fb OK -+ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_c40b9b515df8721b OK -+ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_cdb3e6d49eb12307 OK -+ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_fb324bc819407148 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_blob_59d64ff6b4648fad OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_blob_635fb2de5b0dc429 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_blob_a3b9ff28507767f8 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_blob_d3afbd98123a3434 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_z_03265c1605637b1f OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_z_881cc19564a97501 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_z_8e021fdb13259641 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_z_9683af102559ddf0 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_z_9df8c89b61183887 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_z_b30d81e81c1262b6 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_02e696ada7d4631d OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_05c1f3685f3393f0 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_08f9e2f1cb3d39db OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_0cf79b17cb5f4ea2 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_177b58dc7a46b08f OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_1ce8e4f69d5df899 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_26b753dec0560daa OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_2b76dc9e3abf42f3 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_31ebd010e6098750 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_3208425794224c3f OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_36817bfd67de97a8 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_392169c16a2e5ef6 OK 
-+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_395cf6d697d1a743 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_3ac8dc31e9aa6a70 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_3c1e8b38219e3e12 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_3c87ec986c2656c2 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_3cd183d0bab85fb7 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_420f2a187ce77035 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_444b73ff54a19b44 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_53a9bdf4f75196da OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_585454b31673dd62 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_7db4f140a955dd1a OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_83e53423a2dd93fe OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_9b24f8997145435c OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_9b754afb690c47e1 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_a0be66af9a97ea52 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_af669445747d2585 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_af8b75f664ed7d43 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_b6cb6698327d9835 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_b6ec3736f9ff2c62 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_becf2e1641bbd4e6 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_c3d4322ec17fe7cd OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_c5e1490d672d026d OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_cae5d3491190b777 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_d0992bc0387790a4 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_d736268229bd87ec OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_e68d7111a2364a49 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_ed6b180ec759bcf6 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_f0ed3dc11cdeb130 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_f47eb9fc139f6bfd OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_f7f44e1e864aa967 OK -+ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_ffa6e97b97146517 OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_blob_59d64ff6b4648fad OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_blob_635fb2de5b0dc429 OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_blob_a3b9ff28507767f8 OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_blob_d3afbd98123a3434 OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_commitment_1a68c47b6814 OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_commitment_24b932fb4dec OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_commitment_3a6eb616efae OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_commitment_d070689c3e15 OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_0951cfd9ab47a8d3 OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_19b3f3f8c98ea31e OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_84d8089232bc23a8 OK -+ KZG - Compute 
blob KZG proof - compute_blob_kzg_proof_case_valid_blob_a87a4e636e0f58fb OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_c40b9b515df8721b OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_cdb3e6d49eb12307 OK -+ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_fb324bc819407148 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_02e696ada7d4631d OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_05c1f3685f3393f0 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_08f9e2f1cb3d39db OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_0cf79b17cb5f4ea2 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_177b58dc7a46b08f OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_1ce8e4f69d5df899 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_26b753dec0560daa OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_2b76dc9e3abf42f3 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_31ebd010e6098750 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_3208425794224c3f OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_36817bfd67de97a8 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_392169c16a2e5ef6 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_395cf6d697d1a743 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_3ac8dc31e9aa6a70 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_3c1e8b38219e3e12 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_3c87ec986c2656c2 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_3cd183d0bab85fb7 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_420f2a187ce77035 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_444b73ff54a19b44 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_53a9bdf4f75196da OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_585454b31673dd62 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_7db4f140a955dd1a OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_83e53423a2dd93fe OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_9b24f8997145435c OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_9b754afb690c47e1 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_a0be66af9a97ea52 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_af669445747d2585 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_af8b75f664ed7d43 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_b6cb6698327d9835 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_b6ec3736f9ff2c62 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_becf2e1641bbd4e6 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_c3d4322ec17fe7cd OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_c5e1490d672d026d OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_cae5d3491190b777 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_d0992bc0387790a4 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_d736268229bd87ec OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_e68d7111a2364a49 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_ed6b180ec759bcf6 OK -+ KZG 
- Verify KZG proof - verify_kzg_proof_case_correct_proof_f0ed3dc11cdeb130 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_f47eb9fc139f6bfd OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_f7f44e1e864aa967 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_ffa6e97b97146517 OK ++ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_invalid_blob_0 OK ++ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_invalid_blob_1 OK ++ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_invalid_blob_2 OK ++ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_invalid_blob_3 OK ++ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_0 OK ++ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_1 OK ++ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_2 OK ++ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_3 OK ++ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_4 OK ++ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_5 OK ++ KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_valid_blob_6 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_blob_0 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_blob_1 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_blob_2 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_blob_3 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_z_0 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_z_1 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_z_2 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_z_3 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_z_4 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_invalid_z_5 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_0_0 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_0_1 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_0_2 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_0_3 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_0_4 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_0_5 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_1_0 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_1_1 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_1_2 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_1_3 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_1_4 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_1_5 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_2_0 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_2_1 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_2_2 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_2_3 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_2_4 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_2_5 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_3_0 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_3_1 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_3_2 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_3_3 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_3_4 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_3_5 
OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_4_0 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_4_1 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_4_2 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_4_3 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_4_4 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_4_5 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_5_0 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_5_1 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_5_2 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_5_3 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_5_4 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_5_5 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_6_0 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_6_1 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_6_2 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_6_3 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_6_4 OK ++ KZG - Compute KZG proof - compute_kzg_proof_case_valid_blob_6_5 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_blob_0 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_blob_1 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_blob_2 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_blob_3 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_commitment_0 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_commitment_1 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_commitment_2 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_invalid_commitment_3 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_0 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_1 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_2 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_3 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_4 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_5 OK ++ KZG - Compute blob KZG proof - compute_blob_kzg_proof_case_valid_blob_6 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_0_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_0_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_0_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_0_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_0_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_0_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_1_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_1_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_1_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_1_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_1_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_1_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_2_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_2_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_2_2 OK ++ KZG - Verify 
KZG proof - verify_kzg_proof_case_correct_proof_2_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_2_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_2_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_3_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_3_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_3_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_3_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_3_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_3_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_4_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_4_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_4_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_4_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_4_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_4_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_5_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_5_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_5_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_5_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_5_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_5_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_6_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_6_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_6_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_6_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_6_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_6_5 OK + KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_point_at_infinity_for_twos_po OK + KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_point_at_infinity_for_twos_po OK + KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_point_at_infinity_for_twos_po OK @@ -318,197 +347,204 @@ AllTests-mainnet + KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_point_at_infinity_for_zero_po OK + KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_point_at_infinity_for_zero_po OK + KZG - Verify KZG proof - verify_kzg_proof_case_correct_proof_point_at_infinity_for_zero_po OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_02e696ada7d4631d OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_05c1f3685f3393f0 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_08f9e2f1cb3d39db OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_0cf79b17cb5f4ea2 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_177b58dc7a46b08f OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_1ce8e4f69d5df899 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_26b753dec0560daa OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_2b76dc9e3abf42f3 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_31ebd010e6098750 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_3208425794224c3f OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_36817bfd67de97a8 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_392169c16a2e5ef6 OK -+ 
KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_395cf6d697d1a743 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_3ac8dc31e9aa6a70 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_3c1e8b38219e3e12 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_3c87ec986c2656c2 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_3cd183d0bab85fb7 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_420f2a187ce77035 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_444b73ff54a19b44 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_53a9bdf4f75196da OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_585454b31673dd62 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_7db4f140a955dd1a OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_83e53423a2dd93fe OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_9b24f8997145435c OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_9b754afb690c47e1 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_a0be66af9a97ea52 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_af669445747d2585 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_af8b75f664ed7d43 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_b6cb6698327d9835 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_b6ec3736f9ff2c62 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_becf2e1641bbd4e6 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_c3d4322ec17fe7cd OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_c5e1490d672d026d OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_cae5d3491190b777 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_d0992bc0387790a4 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_d736268229bd87ec OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_e68d7111a2364a49 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_ed6b180ec759bcf6 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_f0ed3dc11cdeb130 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_f47eb9fc139f6bfd OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_f7f44e1e864aa967 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_ffa6e97b97146517 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_point_at_infinity_392169c16 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_point_at_infinity_3c1e8b382 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_point_at_infinity_3c87ec986 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_point_at_infinity_420f2a187 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_point_at_infinity_83e53423a OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_point_at_infinity_ed6b180ec OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_commitment_1b44e341d56c757d OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_commitment_32afa9561a4b3b91 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_commitment_3e55802a5ed3c757 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_commitment_e9d3e9ec16fbc15f OK -+ KZG - Verify KZG proof - 
verify_kzg_proof_case_invalid_proof_1b44e341d56c757d OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_proof_32afa9561a4b3b91 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_proof_3e55802a5ed3c757 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_proof_e9d3e9ec16fbc15f OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_y_35d08d612aad2197 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_y_4aa6def8c35c9097 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_y_4e51cef08a61606f OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_y_64b9ff2b8f7dddee OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_y_b358a2e763727b70 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_y_eb0601fec84cc5e9 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_z_35d08d612aad2197 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_z_4aa6def8c35c9097 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_z_4e51cef08a61606f OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_z_64b9ff2b8f7dddee OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_z_b358a2e763727b70 OK -+ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_z_eb0601fec84cc5e9 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_0951cfd9ab47a8d3 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_19b3f3f8c98ea31e OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_84d8089232bc23a8 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_a87a4e636e0f58fb OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_c40b9b515df8721b OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_cdb3e6d49eb12307 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_fb324bc819407148 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_0_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_0_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_0_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_0_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_0_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_0_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_1_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_1_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_1_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_1_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_1_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_1_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_2_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_2_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_2_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_2_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_2_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_2_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_3_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_3_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_3_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_3_3 OK ++ KZG - Verify KZG 
proof - verify_kzg_proof_case_incorrect_proof_3_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_3_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_4_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_4_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_4_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_4_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_4_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_4_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_5_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_5_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_5_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_5_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_5_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_5_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_6_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_6_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_6_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_6_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_6_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_6_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_point_at_infinity_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_point_at_infinity_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_point_at_infinity_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_point_at_infinity_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_point_at_infinity_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_incorrect_proof_point_at_infinity_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_commitment_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_commitment_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_commitment_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_commitment_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_proof_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_proof_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_proof_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_proof_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_y_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_y_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_y_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_y_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_y_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_y_5 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_z_0 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_z_1 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_z_2 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_z_3 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_z_4 OK ++ KZG - Verify KZG proof - verify_kzg_proof_case_invalid_z_5 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_0 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_1 OK ++ KZG - Verify blob KZG proof - 
verify_blob_kzg_proof_case_correct_proof_2 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_3 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_4 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_5 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_6 OK + KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_point_at_infinity_f OK + KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_correct_proof_point_at_infinity_f OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_0951cfd9ab47a8d3 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_19b3f3f8c98ea31e OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_84d8089232bc23a8 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_a87a4e636e0f58fb OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_c40b9b515df8721b OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_cdb3e6d49eb12307 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_fb324bc819407148 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_0 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_1 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_2 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_3 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_4 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_5 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_6 OK + KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_incorrect_proof_point_at_infinity OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_blob_59d64ff6b4648fad OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_blob_635fb2de5b0dc429 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_blob_a3b9ff28507767f8 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_blob_d3afbd98123a3434 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_commitment_1a68c47b68148e OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_commitment_24b932fb4dec5b OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_commitment_3a6eb616efae06 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_commitment_d070689c3e1544 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_proof_1a68c47b68148e78 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_proof_24b932fb4dec5b2d OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_proof_3a6eb616efae0627 OK -+ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_proof_d070689c3e15444c OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_0951cfd9ab47a8d3 OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_0f3f1d3f48f71495 OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_12c097d7ca0261e3 OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_2ef482373a81e34e OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_a271b78b8e869d69 OK ++ KZG - Verify blob KZG proof - 
verify_blob_kzg_proof_case_invalid_blob_0 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_blob_1 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_blob_2 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_blob_3 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_commitment_0 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_commitment_1 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_commitment_2 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_commitment_3 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_proof_0 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_proof_1 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_proof_2 OK ++ KZG - Verify blob KZG proof - verify_blob_kzg_proof_case_invalid_proof_3 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_0 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_1 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_2 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_3 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_4 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_5 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_6 OK + KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_blob_length_different OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_cb3c3279a1afddcf OK + KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_commitment_length_dif OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_e61aafba051ddf79 OK + KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_incorrect_proof_add_o OK + KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_incorrect_proof_point OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_blob_59d64ff6 OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_blob_635fb2de OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_blob_a3b9ff28 OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_blob_d3afbd98 OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_commitment_1a OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_commitment_24 OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_commitment_3a OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_commitment_d0 OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_proof_1a68c47 OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_proof_24b932f OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_proof_3a6eb61 OK -+ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_proof_d070689 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_blob_0 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_blob_1 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_blob_2 OK ++ KZG - Verify blob KZG proof batch - 
verify_blob_kzg_proof_batch_case_invalid_blob_3 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_commitment_0 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_commitment_1 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_commitment_2 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_commitment_3 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_proof_0 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_proof_1 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_proof_2 OK ++ KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_proof_3 OK + KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_proof_length_differen OK ``` ## EF - KZG - PeerDAS ```diff -+ KZG - Compute Cells - compute_cells_case_valid_419245fbfe69f145 OK -+ KZG - Compute Cells - compute_cells_case_valid_4aedd1a2a3933c3e OK -+ KZG - Compute Cells - compute_cells_case_valid_6e773f256383918c OK -+ KZG - Compute Cells - compute_cells_case_valid_b0731ef77b166ca8 OK -+ KZG - Compute Cells - compute_cells_case_valid_b81d309b22788820 OK -+ KZG - Compute Cells - compute_cells_case_valid_ed8b5001151417d5 OK -+ KZG - Compute Cells - compute_cells_case_valid_edeb8500a6507818 OK -+ KZG - Compute Cells - compute_cells_invalid_blob_26555bdcbf18a267 OK -+ KZG - Compute Cells - compute_cells_invalid_blob_79fb3cb1ef585a86 OK -+ KZG - Compute Cells - compute_cells_invalid_blob_7e99dea8893c104a OK -+ KZG - Compute Cells - compute_cells_invalid_blob_9d88c33852eb782d OK -+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_26555bdcbf OK -+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_79fb3cb1ef OK -+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_7e99dea889 OK -+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_9d88c33852 OK -+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_419245fbfe69f145 OK -+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_4aedd1a2a3933c3e OK -+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_6e773f256383918c OK -+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_b0731ef77b166ca8 OK -+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_b81d309b22788820 OK -+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_ed8b5001151417d5 OK -+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_edeb8500a6507818 OK ++ KZG - Compute Cells - compute_cells_case_valid_0 OK ++ KZG - Compute Cells - compute_cells_case_valid_1 OK ++ KZG - Compute Cells - compute_cells_case_valid_2 OK ++ KZG - Compute Cells - compute_cells_case_valid_3 OK ++ KZG - Compute Cells - compute_cells_case_valid_4 OK ++ KZG - Compute Cells - compute_cells_case_valid_5 OK ++ KZG - Compute Cells - compute_cells_case_valid_6 OK ++ KZG - Compute Cells - compute_cells_invalid_blob_0 OK ++ KZG - Compute Cells - compute_cells_invalid_blob_1 OK ++ KZG - Compute Cells - compute_cells_invalid_blob_2 OK ++ KZG - Compute Cells - compute_cells_invalid_blob_3 OK ++ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_0 OK ++ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_1 
OK ++ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_2 OK ++ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_3 OK ++ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_0 OK ++ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_1 OK ++ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_2 OK ++ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_3 OK ++ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_4 OK ++ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_5 OK ++ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_6 OK + KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_all_cells_a OK -+ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_cell_047ee7 OK -+ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_cell_76ab46 OK -+ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_cell_77b669 OK -+ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_cell_c8e2ca OK -+ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_cell_index_ OK ++ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_cell_0 OK ++ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_cell_1 OK ++ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_cell_2 OK ++ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_cell_3 OK ++ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_cell_index OK + KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_duplicate_c OK + KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_more_cell_i OK + KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_more_cells_ OK + KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_more_cells_ OK + KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_more_than_h OK ++ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_shuffled_ha OK ++ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_shuffled_no OK ++ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_invalid_shuffled_on OK + KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_valid_half_missing_ OK + KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_valid_half_missing_ OK + KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_valid_half_missing_ OK -+ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_valid_no_missing_a1 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_incorrect_cell_48bcbf OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_incorrect_commitment_ OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_incorrect_proof_ba29f OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_bcb1b35c OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_d89304ce OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_d939faf6 OK -+ KZG - Verify Cell Kzg 
Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_ef6ac828 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_index_5d OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_commitment_4b OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_commitment_53 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_commitment_68 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_commitment_d3 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_missing_cell_ OK ++ KZG - Recover Cells And Kzg Proofs - recover_cells_and_kzg_proofs_case_valid_no_missing OK ++ KZG - Recover Cells And Kzg Proofs Parallel - invalid OK ++ KZG - Recover Cells And Kzg Proofs Parallel - valid OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_incorrect_cell OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_incorrect_commitment OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_incorrect_proof OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_0 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_1 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_2 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_3 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_index OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_commitment_0 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_commitment_1 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_commitment_2 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_commitment_3 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_missing_cell OK + KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_missing_cell_ OK + KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_missing_commi OK + KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_missing_proof OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_proof_0424858 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_proof_48fa9d1 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_proof_8feaf47 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_proof_a9d14f0 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_0cfba0f22152206 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_3073caf43016db4 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_5211d9e9ff34c00 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_92c0b5242fa34ae OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_9fb9bff6fe1fb6b OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_d3f60d6d484ddb6 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_fd341ee5517e590 OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_multiple_blobs_ OK ++ KZG - Verify Cell Kzg 
Proof Batch - verify_cell_kzg_proof_batch_case_invalid_proof_0 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_proof_1 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_proof_2 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_proof_3 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_0 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_1 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_2 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_3 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_4 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_5 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_6 OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_multiple_blobs OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_not_sorted OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_regression1 OK + KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_same_cell_multi OK -+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_zero_cells_fbbd OK ++ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_zero_cells OK ``` ## EF - PeerDAS - Networking [Preset: mainnet] ```diff @@ -526,29 +562,8 @@ AllTests-mainnet + Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK + Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK + Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK -``` -## EF - SSZ generic types -```diff - Testing basic_vector inputs - invalid Skip -+ Testing basic_vector inputs - valid OK -+ Testing bitlist inputs - invalid OK -+ Testing bitlist inputs - valid OK - Testing bitvector inputs - invalid Skip -+ Testing bitvector inputs - valid OK -+ Testing boolean inputs - invalid OK -+ Testing boolean inputs - valid OK -+ Testing containers inputs - invalid - skipping BitsStruct OK -+ Testing containers inputs - valid - skipping BitsStruct OK -+ Testing uints inputs - invalid OK -+ Testing uints inputs - valid OK -``` -## EIP-4881 -```diff -+ deposit_cases OK -+ empty_root OK -+ finalization OK -+ invalid_snapshot OK -+ snapshot_cases OK ++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK ++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK ``` ## EIP-7594 Unit Tests ```diff @@ -571,7 +586,6 @@ AllTests-mainnet ``` ## Eth1 monitor ```diff -+ Deposits chain OK + Rewrite URLs OK ``` ## Eth2 specific discovery tests @@ -661,6 +675,7 @@ AllTests-mainnet + Stability subnets OK + isNearSyncCommitteePeriod OK + is_aggregator OK ++ nextForkEpochAtEpoch with BPOs OK ``` ## ImportKeystores requests [Beacon Node] [Preset: mainnet] ```diff @@ -759,6 +774,9 @@ AllTests-mainnet + Sync committee selection proof signatures OK + Sync committee signed contribution and proof signatures OK + Voluntary exit signatures OK ++ execution payload bid signatures OK ++ execution payload envelope signatures OK ++ payload attestation message signatures OK ``` ## Network metadata ```diff @@ -767,7 +785,6 @@ AllTests-mainnet ``` ## Nimbus remote signer/signing 
test (verifying-web3signer) ```diff -+ Signing BeaconBlock (getBlockSignature(deneb)) OK + Signing BeaconBlock (getBlockSignature(electra)) OK + Waiting for signing node (/upcheck) test OK ``` @@ -776,7 +793,6 @@ AllTests-mainnet + Connection timeout test OK + Public keys enumeration (/api/v1/eth2/publicKeys) test OK + Public keys reload (/reload) test OK -+ Signing BeaconBlock (getBlockSignature(deneb)) OK + Signing BeaconBlock (getBlockSignature(electra)) OK + Signing SC contribution and proof (getContributionAndProofSignature()) OK + Signing SC message (getSyncCommitteeMessage()) OK @@ -795,10 +811,6 @@ AllTests-mainnet ```diff + pre-1.1.0 OK ``` -## PeerDAS Sampling Tests -```diff -+ PeerDAS: Extended Sample Count OK -``` ## PeerPool testing suite ```diff + Access peers by key test OK @@ -819,13 +831,41 @@ AllTests-mainnet ```diff + prune states OK ``` -## REST JSON encoding and decoding +## Quarantine [Preset: mainnet] +```diff ++ put/iterate/remove test [BlobSidecars] OK ++ put/iterate/remove test [DataColumnSidecar] OK +``` +## REST encoding and decoding ```diff + Blob OK + DenebSignedBlockContents decoding OK + KzgCommitment OK + KzgProof OK ++ RestErrorMessage parser tests OK ++ RestErrorMessage writer tests OK + Validator pubkey hack OK ++ remote signing example AGGREGATE_AND_PROOF (DEPRECATED) OK ++ remote signing example AGGREGATE_AND_PROOF_V2 (ALTAIR) OK ++ remote signing example AGGREGATE_AND_PROOF_V2 (BELLATRIX) OK ++ remote signing example AGGREGATE_AND_PROOF_V2 (CAPELLA) OK ++ remote signing example AGGREGATE_AND_PROOF_V2 (DENEB) OK ++ remote signing example AGGREGATE_AND_PROOF_V2 (ELECTRA) OK ++ remote signing example AGGREGATE_AND_PROOF_V2 (PHASE 0) OK ++ remote signing example AGGREGATION_SLOT OK ++ remote signing example ATTESTATION OK ++ remote signing example BLOCK_V2 (BELLATRIX) OK ++ remote signing example BLOCK_V2 (CAPELLA) OK ++ remote signing example BLOCK_V2 (DENEB) OK ++ remote signing example BLOCK_V2 (ELECTRA) OK ++ remote signing example DEPOSIT OK ++ remote signing example RANDAO_REVEAL OK ++ remote signing example SYNC_COMMITTEE_CONTRIBUTION_AND_PROOF OK ++ remote signing example SYNC_COMMITTEE_MESSAGE OK ++ remote signing example SYNC_COMMITTEE_SELECTION_PROOF OK ++ remote signing example VALIDATOR_REGISTRATION OK ++ remote signing example VOLUNTARY_EXIT OK ++ strictParse(Stuint) tests OK ``` ## Remove keystore testing suite ```diff @@ -839,12 +879,6 @@ AllTests-mainnet ```diff + Deserialization test vectors OK ``` -## Serialization/deserialization test suite -```diff -+ RestErrorMessage parser tests OK -+ RestErrorMessage writer tests OK -+ strictParse(Stuint) tests OK -``` ## Shufflings ```diff + Accelerated shuffling computation OK @@ -922,7 +956,6 @@ AllTests-mainnet ## Spec helpers ```diff + build_proof - BeaconState OK -+ hypergeom_cdf OK + integer_squareroot OK ``` ## Specific field types @@ -952,17 +985,23 @@ AllTests-mainnet ```diff + [SyncManager] groupBlobs() test OK + [SyncQueue# & Backward] Combination of missing parent and good blocks [3 peers] test OK ++ [SyncQueue# & Backward] Empty responses should not advance queue until other peers will no OK ++ [SyncQueue# & Backward] Empty responses should not be accounted [3 peers] test OK + [SyncQueue# & Backward] Failure request push test OK + [SyncQueue# & Backward] Invalid block [3 peers] test OK + [SyncQueue# & Backward] Smoke [3 peers] test OK + [SyncQueue# & Backward] Smoke [single peer] test OK + [SyncQueue# & Backward] Unviable block [3 peers] test OK ++ [SyncQueue# & Backward] 
epochFilter() test OK + [SyncQueue# & Forward] Combination of missing parent and good blocks [3 peers] test OK ++ [SyncQueue# & Forward] Empty responses should not advance queue until other peers will not OK ++ [SyncQueue# & Forward] Empty responses should not be accounted [3 peers] test OK + [SyncQueue# & Forward] Failure request push test OK + [SyncQueue# & Forward] Invalid block [3 peers] test OK + [SyncQueue# & Forward] Smoke [3 peers] test OK + [SyncQueue# & Forward] Smoke [single peer] test OK + [SyncQueue# & Forward] Unviable block [3 peers] test OK ++ [SyncQueue# & Forward] epochFilter() test OK + [SyncQueue#Backward] Missing parent and exponential rewind [3 peers] test OK + [SyncQueue#Backward] getRewindPoint() test OK + [SyncQueue#Forward] Missing parent and exponential rewind [3 peers] test OK @@ -979,7 +1018,8 @@ AllTests-mainnet ```diff + /eth/v1/validator/beacon_committee_selections serialization/deserialization test OK + /eth/v1/validator/sync_committee_selections serialization/deserialization test OK -+ bestSuccess() API timeout test OK ++ bestSuccess() API hard timeout test OK ++ bestSuccess() API soft timeout test OK + firstSuccessParallel() API timeout test OK + getAggregatedAttestationDataScore() default test OK + getAggregatedAttestationDataScore() test vectors OK @@ -1083,6 +1123,4611 @@ AllTests-mainnet ```diff + should register stability subnets on attester duties OK + should register sync committee duties OK ++ should subscribe to all subnets when flag is enabled OK +``` +## test_fixture_ssz_generic_types.nim +```diff ++ basic_progressive_list - invalid - proglist_bool_0_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_0_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_0_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_0_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_0_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_0_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_0_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_0_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_1365_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_1365_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_1365_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_1365_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_1365_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_1365_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_1365_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_1365_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_1366_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_1366_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_1366_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_1366_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_1366_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_1366_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_1366_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_1366_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_1_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_1_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_1_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_1_max_rev_nibble OK ++ basic_progressive_list - invalid - 
proglist_bool_1_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_1_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_1_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_1_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_20_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_20_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_20_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_20_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_20_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_20_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_20_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_20_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_21_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_21_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_21_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_21_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_21_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_21_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_21_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_21_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_22_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_22_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_22_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_22_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_22_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_22_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_22_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_22_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_2_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_2_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_2_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_2_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_2_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_2_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_2_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_2_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_341_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_341_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_341_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_341_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_341_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_341_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_341_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_341_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_342_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_342_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_342_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_342_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_342_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_342_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_342_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_342_zero_rev_nibble OK ++ basic_progressive_list - invalid - 
proglist_bool_3_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_3_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_3_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_3_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_3_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_3_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_3_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_3_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_4_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_4_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_4_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_4_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_4_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_4_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_4_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_4_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_5_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_5_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_5_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_5_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_5_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_5_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_5_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_5_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_85_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_85_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_85_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_85_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_85_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_85_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_85_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_85_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_86_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_86_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_86_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_86_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_86_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_86_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_86_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_86_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_8_max_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_8_max_0xff OK ++ basic_progressive_list - invalid - proglist_bool_8_max_2 OK ++ basic_progressive_list - invalid - proglist_bool_8_max_rev_nibble OK ++ basic_progressive_list - invalid - proglist_bool_8_zero_0x80 OK ++ basic_progressive_list - invalid - proglist_bool_8_zero_0xff OK ++ basic_progressive_list - invalid - proglist_bool_8_zero_2 OK ++ basic_progressive_list - invalid - proglist_bool_8_zero_rev_nibble OK ++ basic_progressive_list - invalid - proglist_uint128_0_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_0_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_0_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_0_random_one_byte_more OK ++ basic_progressive_list - 
invalid - proglist_uint128_0_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_0_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_1365_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_1365_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_1365_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_1365_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_1365_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_1365_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_1366_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_1366_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_1366_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_1366_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_1366_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_1366_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_1_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_1_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_1_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_1_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_1_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_1_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_20_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_20_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_20_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_20_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_20_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_20_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_21_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_21_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_21_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_21_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_21_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_21_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_22_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_22_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_22_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_22_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_22_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_22_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_2_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_2_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_2_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_2_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_2_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_2_zero_one_byte_more OK ++ basic_progressive_list - invalid - 
proglist_uint128_341_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_341_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_341_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_341_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_341_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_341_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_342_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_342_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_342_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_342_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_342_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_342_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_3_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_3_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_3_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_3_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_3_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_3_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_4_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_4_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_4_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_4_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_4_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_4_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_5_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_5_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_5_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_5_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_5_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_5_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_85_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_85_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_85_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_85_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_85_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_85_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_86_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_86_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_86_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_86_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_86_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_86_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_8_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_8_max_one_byte_more OK ++ basic_progressive_list - invalid - 
proglist_uint128_8_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_8_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint128_8_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint128_8_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_0_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_0_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_0_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_0_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_0_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_0_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_1365_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_1365_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_1365_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_1365_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_1365_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_1365_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_1366_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_1366_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_1366_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_1366_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_1366_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_1366_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_1_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_1_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_1_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_1_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_1_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_1_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_20_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_20_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_20_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_20_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_20_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_20_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_21_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_21_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_21_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_21_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_21_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_21_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_22_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_22_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_22_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_22_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_22_zero_one_byte_less OK ++ 
basic_progressive_list - invalid - proglist_uint16_22_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_2_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_2_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_2_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_2_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_2_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_2_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_341_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_341_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_341_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_341_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_341_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_341_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_342_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_342_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_342_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_342_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_342_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_342_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_3_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_3_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_3_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_3_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_3_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_3_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_4_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_4_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_4_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_4_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_4_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_4_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_5_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_5_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_5_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_5_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_5_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_5_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_85_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_85_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_85_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_85_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_85_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_85_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_86_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_86_max_one_byte_more OK ++ 
basic_progressive_list - invalid - proglist_uint16_86_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_86_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_86_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_86_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_8_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_8_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_8_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_8_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint16_8_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint16_8_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_0_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_0_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_0_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_0_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_0_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_0_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_1365_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_1365_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_1365_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_1365_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_1365_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_1365_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_1366_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_1366_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_1366_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_1366_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_1366_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_1366_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_1_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_1_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_1_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_1_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_1_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_1_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_20_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_20_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_20_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_20_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_20_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_20_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_21_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_21_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_21_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_21_random_one_byte_more OK ++ 
basic_progressive_list - invalid - proglist_uint256_21_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_21_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_22_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_22_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_22_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_22_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_22_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_22_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_2_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_2_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_2_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_2_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_2_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_2_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_341_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_341_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_341_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_341_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_341_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_341_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_342_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_342_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_342_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_342_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_342_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_342_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_3_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_3_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_3_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_3_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_3_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_3_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_4_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_4_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_4_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_4_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_4_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_4_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_5_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_5_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_5_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_5_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_5_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_5_zero_one_byte_more OK ++ basic_progressive_list - invalid 
- proglist_uint256_85_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_85_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_85_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_85_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_85_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_85_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_86_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_86_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_86_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_86_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_86_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_86_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_8_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_8_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_8_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_8_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint256_8_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint256_8_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_0_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_0_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_0_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_0_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_0_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_0_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_1365_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_1365_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_1365_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_1365_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_1365_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_1365_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_1366_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_1366_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_1366_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_1366_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_1366_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_1366_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_1_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_1_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_1_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_1_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_1_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_1_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_20_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_20_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_20_random_one_byte_less 
OK ++ basic_progressive_list - invalid - proglist_uint32_20_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_20_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_20_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_21_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_21_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_21_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_21_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_21_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_21_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_22_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_22_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_22_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_22_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_22_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_22_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_2_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_2_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_2_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_2_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_2_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_2_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_341_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_341_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_341_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_341_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_341_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_341_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_342_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_342_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_342_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_342_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_342_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_342_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_3_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_3_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_3_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_3_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_3_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_3_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_4_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_4_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_4_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_4_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_4_zero_one_byte_less OK ++ basic_progressive_list - invalid - 
proglist_uint32_4_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_5_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_5_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_5_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_5_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_5_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_5_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_85_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_85_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_85_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_85_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_85_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_85_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_86_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_86_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_86_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_86_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_86_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_86_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_8_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_8_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_8_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_8_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint32_8_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint32_8_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_0_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_0_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_0_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_0_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_0_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_0_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_1365_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_1365_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_1365_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_1365_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_1365_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_1365_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_1366_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_1366_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_1366_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_1366_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_1366_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_1366_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_1_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_1_max_one_byte_more OK ++ 
basic_progressive_list - invalid - proglist_uint64_1_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_1_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_1_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_1_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_20_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_20_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_20_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_20_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_20_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_20_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_21_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_21_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_21_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_21_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_21_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_21_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_22_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_22_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_22_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_22_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_22_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_22_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_2_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_2_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_2_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_2_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_2_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_2_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_341_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_341_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_341_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_341_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_341_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_341_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_342_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_342_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_342_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_342_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_342_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_342_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_3_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_3_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_3_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_3_random_one_byte_more OK ++ basic_progressive_list - invalid - 
proglist_uint64_3_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_3_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_4_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_4_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_4_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_4_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_4_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_4_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_5_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_5_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_5_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_5_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_5_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_5_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_85_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_85_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_85_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_85_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_85_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_85_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_86_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_86_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_86_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_86_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_86_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_86_zero_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_8_max_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_8_max_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_8_random_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_8_random_one_byte_more OK ++ basic_progressive_list - invalid - proglist_uint64_8_zero_one_byte_less OK ++ basic_progressive_list - invalid - proglist_uint64_8_zero_one_byte_more OK ++ basic_progressive_list - valid - proglist_bool_max_0 OK ++ basic_progressive_list - valid - proglist_bool_max_1 OK ++ basic_progressive_list - valid - proglist_bool_max_1365 OK ++ basic_progressive_list - valid - proglist_bool_max_1366 OK ++ basic_progressive_list - valid - proglist_bool_max_2 OK ++ basic_progressive_list - valid - proglist_bool_max_20 OK ++ basic_progressive_list - valid - proglist_bool_max_21 OK ++ basic_progressive_list - valid - proglist_bool_max_22 OK ++ basic_progressive_list - valid - proglist_bool_max_3 OK ++ basic_progressive_list - valid - proglist_bool_max_341 OK ++ basic_progressive_list - valid - proglist_bool_max_342 OK ++ basic_progressive_list - valid - proglist_bool_max_4 OK ++ basic_progressive_list - valid - proglist_bool_max_5 OK ++ basic_progressive_list - valid - proglist_bool_max_8 OK ++ basic_progressive_list - valid - proglist_bool_max_85 OK ++ basic_progressive_list - valid - proglist_bool_max_86 OK ++ basic_progressive_list - valid - proglist_bool_zero_0 OK ++ basic_progressive_list - valid - 
proglist_bool_zero_1 OK ++ basic_progressive_list - valid - proglist_bool_zero_1365 OK ++ basic_progressive_list - valid - proglist_bool_zero_1366 OK ++ basic_progressive_list - valid - proglist_bool_zero_2 OK ++ basic_progressive_list - valid - proglist_bool_zero_20 OK ++ basic_progressive_list - valid - proglist_bool_zero_21 OK ++ basic_progressive_list - valid - proglist_bool_zero_22 OK ++ basic_progressive_list - valid - proglist_bool_zero_3 OK ++ basic_progressive_list - valid - proglist_bool_zero_341 OK ++ basic_progressive_list - valid - proglist_bool_zero_342 OK ++ basic_progressive_list - valid - proglist_bool_zero_4 OK ++ basic_progressive_list - valid - proglist_bool_zero_5 OK ++ basic_progressive_list - valid - proglist_bool_zero_8 OK ++ basic_progressive_list - valid - proglist_bool_zero_85 OK ++ basic_progressive_list - valid - proglist_bool_zero_86 OK ++ basic_progressive_list - valid - proglist_uint128_max_0 OK ++ basic_progressive_list - valid - proglist_uint128_max_1 OK ++ basic_progressive_list - valid - proglist_uint128_max_1365 OK ++ basic_progressive_list - valid - proglist_uint128_max_1366 OK ++ basic_progressive_list - valid - proglist_uint128_max_2 OK ++ basic_progressive_list - valid - proglist_uint128_max_20 OK ++ basic_progressive_list - valid - proglist_uint128_max_21 OK ++ basic_progressive_list - valid - proglist_uint128_max_22 OK ++ basic_progressive_list - valid - proglist_uint128_max_3 OK ++ basic_progressive_list - valid - proglist_uint128_max_341 OK ++ basic_progressive_list - valid - proglist_uint128_max_342 OK ++ basic_progressive_list - valid - proglist_uint128_max_4 OK ++ basic_progressive_list - valid - proglist_uint128_max_5 OK ++ basic_progressive_list - valid - proglist_uint128_max_8 OK ++ basic_progressive_list - valid - proglist_uint128_max_85 OK ++ basic_progressive_list - valid - proglist_uint128_max_86 OK ++ basic_progressive_list - valid - proglist_uint128_random_0 OK ++ basic_progressive_list - valid - proglist_uint128_random_1 OK ++ basic_progressive_list - valid - proglist_uint128_random_1365 OK ++ basic_progressive_list - valid - proglist_uint128_random_1366 OK ++ basic_progressive_list - valid - proglist_uint128_random_2 OK ++ basic_progressive_list - valid - proglist_uint128_random_20 OK ++ basic_progressive_list - valid - proglist_uint128_random_21 OK ++ basic_progressive_list - valid - proglist_uint128_random_22 OK ++ basic_progressive_list - valid - proglist_uint128_random_3 OK ++ basic_progressive_list - valid - proglist_uint128_random_341 OK ++ basic_progressive_list - valid - proglist_uint128_random_342 OK ++ basic_progressive_list - valid - proglist_uint128_random_4 OK ++ basic_progressive_list - valid - proglist_uint128_random_5 OK ++ basic_progressive_list - valid - proglist_uint128_random_8 OK ++ basic_progressive_list - valid - proglist_uint128_random_85 OK ++ basic_progressive_list - valid - proglist_uint128_random_86 OK ++ basic_progressive_list - valid - proglist_uint128_zero_0 OK ++ basic_progressive_list - valid - proglist_uint128_zero_1 OK ++ basic_progressive_list - valid - proglist_uint128_zero_1365 OK ++ basic_progressive_list - valid - proglist_uint128_zero_1366 OK ++ basic_progressive_list - valid - proglist_uint128_zero_2 OK ++ basic_progressive_list - valid - proglist_uint128_zero_20 OK ++ basic_progressive_list - valid - proglist_uint128_zero_21 OK ++ basic_progressive_list - valid - proglist_uint128_zero_22 OK ++ basic_progressive_list - valid - proglist_uint128_zero_3 OK ++ basic_progressive_list - valid - 
proglist_uint128_zero_341 OK ++ basic_progressive_list - valid - proglist_uint128_zero_342 OK ++ basic_progressive_list - valid - proglist_uint128_zero_4 OK ++ basic_progressive_list - valid - proglist_uint128_zero_5 OK ++ basic_progressive_list - valid - proglist_uint128_zero_8 OK ++ basic_progressive_list - valid - proglist_uint128_zero_85 OK ++ basic_progressive_list - valid - proglist_uint128_zero_86 OK ++ basic_progressive_list - valid - proglist_uint16_max_0 OK ++ basic_progressive_list - valid - proglist_uint16_max_1 OK ++ basic_progressive_list - valid - proglist_uint16_max_1365 OK ++ basic_progressive_list - valid - proglist_uint16_max_1366 OK ++ basic_progressive_list - valid - proglist_uint16_max_2 OK ++ basic_progressive_list - valid - proglist_uint16_max_20 OK ++ basic_progressive_list - valid - proglist_uint16_max_21 OK ++ basic_progressive_list - valid - proglist_uint16_max_22 OK ++ basic_progressive_list - valid - proglist_uint16_max_3 OK ++ basic_progressive_list - valid - proglist_uint16_max_341 OK ++ basic_progressive_list - valid - proglist_uint16_max_342 OK ++ basic_progressive_list - valid - proglist_uint16_max_4 OK ++ basic_progressive_list - valid - proglist_uint16_max_5 OK ++ basic_progressive_list - valid - proglist_uint16_max_8 OK ++ basic_progressive_list - valid - proglist_uint16_max_85 OK ++ basic_progressive_list - valid - proglist_uint16_max_86 OK ++ basic_progressive_list - valid - proglist_uint16_random_0 OK ++ basic_progressive_list - valid - proglist_uint16_random_1 OK ++ basic_progressive_list - valid - proglist_uint16_random_1365 OK ++ basic_progressive_list - valid - proglist_uint16_random_1366 OK ++ basic_progressive_list - valid - proglist_uint16_random_2 OK ++ basic_progressive_list - valid - proglist_uint16_random_20 OK ++ basic_progressive_list - valid - proglist_uint16_random_21 OK ++ basic_progressive_list - valid - proglist_uint16_random_22 OK ++ basic_progressive_list - valid - proglist_uint16_random_3 OK ++ basic_progressive_list - valid - proglist_uint16_random_341 OK ++ basic_progressive_list - valid - proglist_uint16_random_342 OK ++ basic_progressive_list - valid - proglist_uint16_random_4 OK ++ basic_progressive_list - valid - proglist_uint16_random_5 OK ++ basic_progressive_list - valid - proglist_uint16_random_8 OK ++ basic_progressive_list - valid - proglist_uint16_random_85 OK ++ basic_progressive_list - valid - proglist_uint16_random_86 OK ++ basic_progressive_list - valid - proglist_uint16_zero_0 OK ++ basic_progressive_list - valid - proglist_uint16_zero_1 OK ++ basic_progressive_list - valid - proglist_uint16_zero_1365 OK ++ basic_progressive_list - valid - proglist_uint16_zero_1366 OK ++ basic_progressive_list - valid - proglist_uint16_zero_2 OK ++ basic_progressive_list - valid - proglist_uint16_zero_20 OK ++ basic_progressive_list - valid - proglist_uint16_zero_21 OK ++ basic_progressive_list - valid - proglist_uint16_zero_22 OK ++ basic_progressive_list - valid - proglist_uint16_zero_3 OK ++ basic_progressive_list - valid - proglist_uint16_zero_341 OK ++ basic_progressive_list - valid - proglist_uint16_zero_342 OK ++ basic_progressive_list - valid - proglist_uint16_zero_4 OK ++ basic_progressive_list - valid - proglist_uint16_zero_5 OK ++ basic_progressive_list - valid - proglist_uint16_zero_8 OK ++ basic_progressive_list - valid - proglist_uint16_zero_85 OK ++ basic_progressive_list - valid - proglist_uint16_zero_86 OK ++ basic_progressive_list - valid - proglist_uint256_max_0 OK ++ basic_progressive_list - valid - 
proglist_uint256_max_1 OK ++ basic_progressive_list - valid - proglist_uint256_max_1365 OK ++ basic_progressive_list - valid - proglist_uint256_max_1366 OK ++ basic_progressive_list - valid - proglist_uint256_max_2 OK ++ basic_progressive_list - valid - proglist_uint256_max_20 OK ++ basic_progressive_list - valid - proglist_uint256_max_21 OK ++ basic_progressive_list - valid - proglist_uint256_max_22 OK ++ basic_progressive_list - valid - proglist_uint256_max_3 OK ++ basic_progressive_list - valid - proglist_uint256_max_341 OK ++ basic_progressive_list - valid - proglist_uint256_max_342 OK ++ basic_progressive_list - valid - proglist_uint256_max_4 OK ++ basic_progressive_list - valid - proglist_uint256_max_5 OK ++ basic_progressive_list - valid - proglist_uint256_max_8 OK ++ basic_progressive_list - valid - proglist_uint256_max_85 OK ++ basic_progressive_list - valid - proglist_uint256_max_86 OK ++ basic_progressive_list - valid - proglist_uint256_random_0 OK ++ basic_progressive_list - valid - proglist_uint256_random_1 OK ++ basic_progressive_list - valid - proglist_uint256_random_1365 OK ++ basic_progressive_list - valid - proglist_uint256_random_1366 OK ++ basic_progressive_list - valid - proglist_uint256_random_2 OK ++ basic_progressive_list - valid - proglist_uint256_random_20 OK ++ basic_progressive_list - valid - proglist_uint256_random_21 OK ++ basic_progressive_list - valid - proglist_uint256_random_22 OK ++ basic_progressive_list - valid - proglist_uint256_random_3 OK ++ basic_progressive_list - valid - proglist_uint256_random_341 OK ++ basic_progressive_list - valid - proglist_uint256_random_342 OK ++ basic_progressive_list - valid - proglist_uint256_random_4 OK ++ basic_progressive_list - valid - proglist_uint256_random_5 OK ++ basic_progressive_list - valid - proglist_uint256_random_8 OK ++ basic_progressive_list - valid - proglist_uint256_random_85 OK ++ basic_progressive_list - valid - proglist_uint256_random_86 OK ++ basic_progressive_list - valid - proglist_uint256_zero_0 OK ++ basic_progressive_list - valid - proglist_uint256_zero_1 OK ++ basic_progressive_list - valid - proglist_uint256_zero_1365 OK ++ basic_progressive_list - valid - proglist_uint256_zero_1366 OK ++ basic_progressive_list - valid - proglist_uint256_zero_2 OK ++ basic_progressive_list - valid - proglist_uint256_zero_20 OK ++ basic_progressive_list - valid - proglist_uint256_zero_21 OK ++ basic_progressive_list - valid - proglist_uint256_zero_22 OK ++ basic_progressive_list - valid - proglist_uint256_zero_3 OK ++ basic_progressive_list - valid - proglist_uint256_zero_341 OK ++ basic_progressive_list - valid - proglist_uint256_zero_342 OK ++ basic_progressive_list - valid - proglist_uint256_zero_4 OK ++ basic_progressive_list - valid - proglist_uint256_zero_5 OK ++ basic_progressive_list - valid - proglist_uint256_zero_8 OK ++ basic_progressive_list - valid - proglist_uint256_zero_85 OK ++ basic_progressive_list - valid - proglist_uint256_zero_86 OK ++ basic_progressive_list - valid - proglist_uint32_max_0 OK ++ basic_progressive_list - valid - proglist_uint32_max_1 OK ++ basic_progressive_list - valid - proglist_uint32_max_1365 OK ++ basic_progressive_list - valid - proglist_uint32_max_1366 OK ++ basic_progressive_list - valid - proglist_uint32_max_2 OK ++ basic_progressive_list - valid - proglist_uint32_max_20 OK ++ basic_progressive_list - valid - proglist_uint32_max_21 OK ++ basic_progressive_list - valid - proglist_uint32_max_22 OK ++ basic_progressive_list - valid - proglist_uint32_max_3 OK ++ 
basic_progressive_list - valid - proglist_uint32_max_341 OK ++ basic_progressive_list - valid - proglist_uint32_max_342 OK ++ basic_progressive_list - valid - proglist_uint32_max_4 OK ++ basic_progressive_list - valid - proglist_uint32_max_5 OK ++ basic_progressive_list - valid - proglist_uint32_max_8 OK ++ basic_progressive_list - valid - proglist_uint32_max_85 OK ++ basic_progressive_list - valid - proglist_uint32_max_86 OK ++ basic_progressive_list - valid - proglist_uint32_random_0 OK ++ basic_progressive_list - valid - proglist_uint32_random_1 OK ++ basic_progressive_list - valid - proglist_uint32_random_1365 OK ++ basic_progressive_list - valid - proglist_uint32_random_1366 OK ++ basic_progressive_list - valid - proglist_uint32_random_2 OK ++ basic_progressive_list - valid - proglist_uint32_random_20 OK ++ basic_progressive_list - valid - proglist_uint32_random_21 OK ++ basic_progressive_list - valid - proglist_uint32_random_22 OK ++ basic_progressive_list - valid - proglist_uint32_random_3 OK ++ basic_progressive_list - valid - proglist_uint32_random_341 OK ++ basic_progressive_list - valid - proglist_uint32_random_342 OK ++ basic_progressive_list - valid - proglist_uint32_random_4 OK ++ basic_progressive_list - valid - proglist_uint32_random_5 OK ++ basic_progressive_list - valid - proglist_uint32_random_8 OK ++ basic_progressive_list - valid - proglist_uint32_random_85 OK ++ basic_progressive_list - valid - proglist_uint32_random_86 OK ++ basic_progressive_list - valid - proglist_uint32_zero_0 OK ++ basic_progressive_list - valid - proglist_uint32_zero_1 OK ++ basic_progressive_list - valid - proglist_uint32_zero_1365 OK ++ basic_progressive_list - valid - proglist_uint32_zero_1366 OK ++ basic_progressive_list - valid - proglist_uint32_zero_2 OK ++ basic_progressive_list - valid - proglist_uint32_zero_20 OK ++ basic_progressive_list - valid - proglist_uint32_zero_21 OK ++ basic_progressive_list - valid - proglist_uint32_zero_22 OK ++ basic_progressive_list - valid - proglist_uint32_zero_3 OK ++ basic_progressive_list - valid - proglist_uint32_zero_341 OK ++ basic_progressive_list - valid - proglist_uint32_zero_342 OK ++ basic_progressive_list - valid - proglist_uint32_zero_4 OK ++ basic_progressive_list - valid - proglist_uint32_zero_5 OK ++ basic_progressive_list - valid - proglist_uint32_zero_8 OK ++ basic_progressive_list - valid - proglist_uint32_zero_85 OK ++ basic_progressive_list - valid - proglist_uint32_zero_86 OK ++ basic_progressive_list - valid - proglist_uint64_max_0 OK ++ basic_progressive_list - valid - proglist_uint64_max_1 OK ++ basic_progressive_list - valid - proglist_uint64_max_1365 OK ++ basic_progressive_list - valid - proglist_uint64_max_1366 OK ++ basic_progressive_list - valid - proglist_uint64_max_2 OK ++ basic_progressive_list - valid - proglist_uint64_max_20 OK ++ basic_progressive_list - valid - proglist_uint64_max_21 OK ++ basic_progressive_list - valid - proglist_uint64_max_22 OK ++ basic_progressive_list - valid - proglist_uint64_max_3 OK ++ basic_progressive_list - valid - proglist_uint64_max_341 OK ++ basic_progressive_list - valid - proglist_uint64_max_342 OK ++ basic_progressive_list - valid - proglist_uint64_max_4 OK ++ basic_progressive_list - valid - proglist_uint64_max_5 OK ++ basic_progressive_list - valid - proglist_uint64_max_8 OK ++ basic_progressive_list - valid - proglist_uint64_max_85 OK ++ basic_progressive_list - valid - proglist_uint64_max_86 OK ++ basic_progressive_list - valid - proglist_uint64_random_0 OK ++ 
basic_progressive_list - valid - proglist_uint64_random_1 OK ++ basic_progressive_list - valid - proglist_uint64_random_1365 OK ++ basic_progressive_list - valid - proglist_uint64_random_1366 OK ++ basic_progressive_list - valid - proglist_uint64_random_2 OK ++ basic_progressive_list - valid - proglist_uint64_random_20 OK ++ basic_progressive_list - valid - proglist_uint64_random_21 OK ++ basic_progressive_list - valid - proglist_uint64_random_22 OK ++ basic_progressive_list - valid - proglist_uint64_random_3 OK ++ basic_progressive_list - valid - proglist_uint64_random_341 OK ++ basic_progressive_list - valid - proglist_uint64_random_342 OK ++ basic_progressive_list - valid - proglist_uint64_random_4 OK ++ basic_progressive_list - valid - proglist_uint64_random_5 OK ++ basic_progressive_list - valid - proglist_uint64_random_8 OK ++ basic_progressive_list - valid - proglist_uint64_random_85 OK ++ basic_progressive_list - valid - proglist_uint64_random_86 OK ++ basic_progressive_list - valid - proglist_uint64_zero_0 OK ++ basic_progressive_list - valid - proglist_uint64_zero_1 OK ++ basic_progressive_list - valid - proglist_uint64_zero_1365 OK ++ basic_progressive_list - valid - proglist_uint64_zero_1366 OK ++ basic_progressive_list - valid - proglist_uint64_zero_2 OK ++ basic_progressive_list - valid - proglist_uint64_zero_20 OK ++ basic_progressive_list - valid - proglist_uint64_zero_21 OK ++ basic_progressive_list - valid - proglist_uint64_zero_22 OK ++ basic_progressive_list - valid - proglist_uint64_zero_3 OK ++ basic_progressive_list - valid - proglist_uint64_zero_341 OK ++ basic_progressive_list - valid - proglist_uint64_zero_342 OK ++ basic_progressive_list - valid - proglist_uint64_zero_4 OK ++ basic_progressive_list - valid - proglist_uint64_zero_5 OK ++ basic_progressive_list - valid - proglist_uint64_zero_8 OK ++ basic_progressive_list - valid - proglist_uint64_zero_85 OK ++ basic_progressive_list - valid - proglist_uint64_zero_86 OK ++ basic_progressive_list - valid - proglist_uint8_max_0 OK ++ basic_progressive_list - valid - proglist_uint8_max_1 OK ++ basic_progressive_list - valid - proglist_uint8_max_1365 OK ++ basic_progressive_list - valid - proglist_uint8_max_1366 OK ++ basic_progressive_list - valid - proglist_uint8_max_2 OK ++ basic_progressive_list - valid - proglist_uint8_max_20 OK ++ basic_progressive_list - valid - proglist_uint8_max_21 OK ++ basic_progressive_list - valid - proglist_uint8_max_22 OK ++ basic_progressive_list - valid - proglist_uint8_max_3 OK ++ basic_progressive_list - valid - proglist_uint8_max_341 OK ++ basic_progressive_list - valid - proglist_uint8_max_342 OK ++ basic_progressive_list - valid - proglist_uint8_max_4 OK ++ basic_progressive_list - valid - proglist_uint8_max_5 OK ++ basic_progressive_list - valid - proglist_uint8_max_8 OK ++ basic_progressive_list - valid - proglist_uint8_max_85 OK ++ basic_progressive_list - valid - proglist_uint8_max_86 OK ++ basic_progressive_list - valid - proglist_uint8_random_0 OK ++ basic_progressive_list - valid - proglist_uint8_random_1 OK ++ basic_progressive_list - valid - proglist_uint8_random_1365 OK ++ basic_progressive_list - valid - proglist_uint8_random_1366 OK ++ basic_progressive_list - valid - proglist_uint8_random_2 OK ++ basic_progressive_list - valid - proglist_uint8_random_20 OK ++ basic_progressive_list - valid - proglist_uint8_random_21 OK ++ basic_progressive_list - valid - proglist_uint8_random_22 OK ++ basic_progressive_list - valid - proglist_uint8_random_3 OK ++ 
basic_progressive_list - valid - proglist_uint8_random_341 OK ++ basic_progressive_list - valid - proglist_uint8_random_342 OK ++ basic_progressive_list - valid - proglist_uint8_random_4 OK ++ basic_progressive_list - valid - proglist_uint8_random_5 OK ++ basic_progressive_list - valid - proglist_uint8_random_8 OK ++ basic_progressive_list - valid - proglist_uint8_random_85 OK ++ basic_progressive_list - valid - proglist_uint8_random_86 OK ++ basic_progressive_list - valid - proglist_uint8_zero_0 OK ++ basic_progressive_list - valid - proglist_uint8_zero_1 OK ++ basic_progressive_list - valid - proglist_uint8_zero_1365 OK ++ basic_progressive_list - valid - proglist_uint8_zero_1366 OK ++ basic_progressive_list - valid - proglist_uint8_zero_2 OK ++ basic_progressive_list - valid - proglist_uint8_zero_20 OK ++ basic_progressive_list - valid - proglist_uint8_zero_21 OK ++ basic_progressive_list - valid - proglist_uint8_zero_22 OK ++ basic_progressive_list - valid - proglist_uint8_zero_3 OK ++ basic_progressive_list - valid - proglist_uint8_zero_341 OK ++ basic_progressive_list - valid - proglist_uint8_zero_342 OK ++ basic_progressive_list - valid - proglist_uint8_zero_4 OK ++ basic_progressive_list - valid - proglist_uint8_zero_5 OK ++ basic_progressive_list - valid - proglist_uint8_zero_8 OK ++ basic_progressive_list - valid - proglist_uint8_zero_85 OK ++ basic_progressive_list - valid - proglist_uint8_zero_86 OK + basic_vector - invalid - vec_bool_0 Skip ++ basic_vector - invalid - vec_bool_16_max_0x80 OK ++ basic_vector - invalid - vec_bool_16_max_0xff OK ++ basic_vector - invalid - vec_bool_16_max_2 OK ++ basic_vector - invalid - vec_bool_16_max_one_byte_less OK ++ basic_vector - invalid - vec_bool_16_max_one_byte_more OK ++ basic_vector - invalid - vec_bool_16_max_one_less OK ++ basic_vector - invalid - vec_bool_16_max_one_more OK ++ basic_vector - invalid - vec_bool_16_max_rev_nibble OK ++ basic_vector - invalid - vec_bool_16_nil OK ++ basic_vector - invalid - vec_bool_16_zero_0x80 OK ++ basic_vector - invalid - vec_bool_16_zero_0xff OK ++ basic_vector - invalid - vec_bool_16_zero_2 OK ++ basic_vector - invalid - vec_bool_16_zero_one_byte_less OK ++ basic_vector - invalid - vec_bool_16_zero_one_byte_more OK ++ basic_vector - invalid - vec_bool_16_zero_one_less OK ++ basic_vector - invalid - vec_bool_16_zero_one_more OK ++ basic_vector - invalid - vec_bool_16_zero_rev_nibble OK ++ basic_vector - invalid - vec_bool_1_max_0x80 OK ++ basic_vector - invalid - vec_bool_1_max_0xff OK ++ basic_vector - invalid - vec_bool_1_max_2 OK ++ basic_vector - invalid - vec_bool_1_max_one_byte_less OK ++ basic_vector - invalid - vec_bool_1_max_one_byte_more OK ++ basic_vector - invalid - vec_bool_1_max_one_less OK ++ basic_vector - invalid - vec_bool_1_max_one_more OK ++ basic_vector - invalid - vec_bool_1_max_rev_nibble OK ++ basic_vector - invalid - vec_bool_1_nil OK ++ basic_vector - invalid - vec_bool_1_zero_0x80 OK ++ basic_vector - invalid - vec_bool_1_zero_0xff OK ++ basic_vector - invalid - vec_bool_1_zero_2 OK ++ basic_vector - invalid - vec_bool_1_zero_one_byte_less OK ++ basic_vector - invalid - vec_bool_1_zero_one_byte_more OK ++ basic_vector - invalid - vec_bool_1_zero_one_less OK ++ basic_vector - invalid - vec_bool_1_zero_one_more OK ++ basic_vector - invalid - vec_bool_1_zero_rev_nibble OK ++ basic_vector - invalid - vec_bool_2_max_0x80 OK ++ basic_vector - invalid - vec_bool_2_max_0xff OK ++ basic_vector - invalid - vec_bool_2_max_2 OK ++ basic_vector - invalid - 
vec_bool_2_max_one_byte_less OK ++ basic_vector - invalid - vec_bool_2_max_one_byte_more OK ++ basic_vector - invalid - vec_bool_2_max_one_less OK ++ basic_vector - invalid - vec_bool_2_max_one_more OK ++ basic_vector - invalid - vec_bool_2_max_rev_nibble OK ++ basic_vector - invalid - vec_bool_2_nil OK ++ basic_vector - invalid - vec_bool_2_zero_0x80 OK ++ basic_vector - invalid - vec_bool_2_zero_0xff OK ++ basic_vector - invalid - vec_bool_2_zero_2 OK ++ basic_vector - invalid - vec_bool_2_zero_one_byte_less OK ++ basic_vector - invalid - vec_bool_2_zero_one_byte_more OK ++ basic_vector - invalid - vec_bool_2_zero_one_less OK ++ basic_vector - invalid - vec_bool_2_zero_one_more OK ++ basic_vector - invalid - vec_bool_2_zero_rev_nibble OK ++ basic_vector - invalid - vec_bool_31_max_0x80 OK ++ basic_vector - invalid - vec_bool_31_max_0xff OK ++ basic_vector - invalid - vec_bool_31_max_2 OK ++ basic_vector - invalid - vec_bool_31_max_one_byte_less OK ++ basic_vector - invalid - vec_bool_31_max_one_byte_more OK ++ basic_vector - invalid - vec_bool_31_max_one_less OK ++ basic_vector - invalid - vec_bool_31_max_one_more OK ++ basic_vector - invalid - vec_bool_31_max_rev_nibble OK ++ basic_vector - invalid - vec_bool_31_nil OK ++ basic_vector - invalid - vec_bool_31_zero_0x80 OK ++ basic_vector - invalid - vec_bool_31_zero_0xff OK ++ basic_vector - invalid - vec_bool_31_zero_2 OK ++ basic_vector - invalid - vec_bool_31_zero_one_byte_less OK ++ basic_vector - invalid - vec_bool_31_zero_one_byte_more OK ++ basic_vector - invalid - vec_bool_31_zero_one_less OK ++ basic_vector - invalid - vec_bool_31_zero_one_more OK ++ basic_vector - invalid - vec_bool_31_zero_rev_nibble OK ++ basic_vector - invalid - vec_bool_3_max_0x80 OK ++ basic_vector - invalid - vec_bool_3_max_0xff OK ++ basic_vector - invalid - vec_bool_3_max_2 OK ++ basic_vector - invalid - vec_bool_3_max_one_byte_less OK ++ basic_vector - invalid - vec_bool_3_max_one_byte_more OK ++ basic_vector - invalid - vec_bool_3_max_one_less OK ++ basic_vector - invalid - vec_bool_3_max_one_more OK ++ basic_vector - invalid - vec_bool_3_max_rev_nibble OK ++ basic_vector - invalid - vec_bool_3_nil OK ++ basic_vector - invalid - vec_bool_3_zero_0x80 OK ++ basic_vector - invalid - vec_bool_3_zero_0xff OK ++ basic_vector - invalid - vec_bool_3_zero_2 OK ++ basic_vector - invalid - vec_bool_3_zero_one_byte_less OK ++ basic_vector - invalid - vec_bool_3_zero_one_byte_more OK ++ basic_vector - invalid - vec_bool_3_zero_one_less OK ++ basic_vector - invalid - vec_bool_3_zero_one_more OK ++ basic_vector - invalid - vec_bool_3_zero_rev_nibble OK ++ basic_vector - invalid - vec_bool_4_max_0x80 OK ++ basic_vector - invalid - vec_bool_4_max_0xff OK ++ basic_vector - invalid - vec_bool_4_max_2 OK ++ basic_vector - invalid - vec_bool_4_max_one_byte_less OK ++ basic_vector - invalid - vec_bool_4_max_one_byte_more OK ++ basic_vector - invalid - vec_bool_4_max_one_less OK ++ basic_vector - invalid - vec_bool_4_max_one_more OK ++ basic_vector - invalid - vec_bool_4_max_rev_nibble OK ++ basic_vector - invalid - vec_bool_4_nil OK ++ basic_vector - invalid - vec_bool_4_zero_0x80 OK ++ basic_vector - invalid - vec_bool_4_zero_0xff OK ++ basic_vector - invalid - vec_bool_4_zero_2 OK ++ basic_vector - invalid - vec_bool_4_zero_one_byte_less OK ++ basic_vector - invalid - vec_bool_4_zero_one_byte_more OK ++ basic_vector - invalid - vec_bool_4_zero_one_less OK ++ basic_vector - invalid - vec_bool_4_zero_one_more OK ++ basic_vector - invalid - vec_bool_4_zero_rev_nibble OK ++ 
basic_vector - invalid - vec_bool_512_max_0x80 OK ++ basic_vector - invalid - vec_bool_512_max_0xff OK ++ basic_vector - invalid - vec_bool_512_max_2 OK ++ basic_vector - invalid - vec_bool_512_max_one_byte_less OK ++ basic_vector - invalid - vec_bool_512_max_one_byte_more OK ++ basic_vector - invalid - vec_bool_512_max_one_less OK ++ basic_vector - invalid - vec_bool_512_max_one_more OK ++ basic_vector - invalid - vec_bool_512_max_rev_nibble OK ++ basic_vector - invalid - vec_bool_512_nil OK ++ basic_vector - invalid - vec_bool_512_zero_0x80 OK ++ basic_vector - invalid - vec_bool_512_zero_0xff OK ++ basic_vector - invalid - vec_bool_512_zero_2 OK ++ basic_vector - invalid - vec_bool_512_zero_one_byte_less OK ++ basic_vector - invalid - vec_bool_512_zero_one_byte_more OK ++ basic_vector - invalid - vec_bool_512_zero_one_less OK ++ basic_vector - invalid - vec_bool_512_zero_one_more OK ++ basic_vector - invalid - vec_bool_512_zero_rev_nibble OK ++ basic_vector - invalid - vec_bool_513_max_0x80 OK ++ basic_vector - invalid - vec_bool_513_max_0xff OK ++ basic_vector - invalid - vec_bool_513_max_2 OK ++ basic_vector - invalid - vec_bool_513_max_one_byte_less OK ++ basic_vector - invalid - vec_bool_513_max_one_byte_more OK ++ basic_vector - invalid - vec_bool_513_max_one_less OK ++ basic_vector - invalid - vec_bool_513_max_one_more OK ++ basic_vector - invalid - vec_bool_513_max_rev_nibble OK ++ basic_vector - invalid - vec_bool_513_nil OK ++ basic_vector - invalid - vec_bool_513_zero_0x80 OK ++ basic_vector - invalid - vec_bool_513_zero_0xff OK ++ basic_vector - invalid - vec_bool_513_zero_2 OK ++ basic_vector - invalid - vec_bool_513_zero_one_byte_less OK ++ basic_vector - invalid - vec_bool_513_zero_one_byte_more OK ++ basic_vector - invalid - vec_bool_513_zero_one_less OK ++ basic_vector - invalid - vec_bool_513_zero_one_more OK ++ basic_vector - invalid - vec_bool_513_zero_rev_nibble OK ++ basic_vector - invalid - vec_bool_5_max_0x80 OK ++ basic_vector - invalid - vec_bool_5_max_0xff OK ++ basic_vector - invalid - vec_bool_5_max_2 OK ++ basic_vector - invalid - vec_bool_5_max_one_byte_less OK ++ basic_vector - invalid - vec_bool_5_max_one_byte_more OK ++ basic_vector - invalid - vec_bool_5_max_one_less OK ++ basic_vector - invalid - vec_bool_5_max_one_more OK ++ basic_vector - invalid - vec_bool_5_max_rev_nibble OK ++ basic_vector - invalid - vec_bool_5_nil OK ++ basic_vector - invalid - vec_bool_5_zero_0x80 OK ++ basic_vector - invalid - vec_bool_5_zero_0xff OK ++ basic_vector - invalid - vec_bool_5_zero_2 OK ++ basic_vector - invalid - vec_bool_5_zero_one_byte_less OK ++ basic_vector - invalid - vec_bool_5_zero_one_byte_more OK ++ basic_vector - invalid - vec_bool_5_zero_one_less OK ++ basic_vector - invalid - vec_bool_5_zero_one_more OK ++ basic_vector - invalid - vec_bool_5_zero_rev_nibble OK ++ basic_vector - invalid - vec_bool_8_max_0x80 OK ++ basic_vector - invalid - vec_bool_8_max_0xff OK ++ basic_vector - invalid - vec_bool_8_max_2 OK ++ basic_vector - invalid - vec_bool_8_max_one_byte_less OK ++ basic_vector - invalid - vec_bool_8_max_one_byte_more OK ++ basic_vector - invalid - vec_bool_8_max_one_less OK ++ basic_vector - invalid - vec_bool_8_max_one_more OK ++ basic_vector - invalid - vec_bool_8_max_rev_nibble OK ++ basic_vector - invalid - vec_bool_8_nil OK ++ basic_vector - invalid - vec_bool_8_zero_0x80 OK ++ basic_vector - invalid - vec_bool_8_zero_0xff OK ++ basic_vector - invalid - vec_bool_8_zero_2 OK ++ basic_vector - invalid - vec_bool_8_zero_one_byte_less OK ++ 
basic_vector - invalid - vec_bool_8_zero_one_byte_more OK ++ basic_vector - invalid - vec_bool_8_zero_one_less OK ++ basic_vector - invalid - vec_bool_8_zero_one_more OK ++ basic_vector - invalid - vec_bool_8_zero_rev_nibble OK + basic_vector - invalid - vec_uint128_0 Skip ++ basic_vector - invalid - vec_uint128_16_max_one_byte_less OK ++ basic_vector - invalid - vec_uint128_16_max_one_byte_more OK ++ basic_vector - invalid - vec_uint128_16_max_one_less OK ++ basic_vector - invalid - vec_uint128_16_max_one_more OK ++ basic_vector - invalid - vec_uint128_16_nil OK ++ basic_vector - invalid - vec_uint128_16_random_one_byte_less OK ++ basic_vector - invalid - vec_uint128_16_random_one_byte_more OK ++ basic_vector - invalid - vec_uint128_16_random_one_less OK ++ basic_vector - invalid - vec_uint128_16_random_one_more OK ++ basic_vector - invalid - vec_uint128_16_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint128_16_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint128_16_zero_one_less OK ++ basic_vector - invalid - vec_uint128_16_zero_one_more OK ++ basic_vector - invalid - vec_uint128_1_max_one_byte_less OK ++ basic_vector - invalid - vec_uint128_1_max_one_byte_more OK ++ basic_vector - invalid - vec_uint128_1_max_one_less OK ++ basic_vector - invalid - vec_uint128_1_max_one_more OK ++ basic_vector - invalid - vec_uint128_1_nil OK ++ basic_vector - invalid - vec_uint128_1_random_one_byte_less OK ++ basic_vector - invalid - vec_uint128_1_random_one_byte_more OK ++ basic_vector - invalid - vec_uint128_1_random_one_less OK ++ basic_vector - invalid - vec_uint128_1_random_one_more OK ++ basic_vector - invalid - vec_uint128_1_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint128_1_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint128_1_zero_one_less OK ++ basic_vector - invalid - vec_uint128_1_zero_one_more OK ++ basic_vector - invalid - vec_uint128_2_max_one_byte_less OK ++ basic_vector - invalid - vec_uint128_2_max_one_byte_more OK ++ basic_vector - invalid - vec_uint128_2_max_one_less OK ++ basic_vector - invalid - vec_uint128_2_max_one_more OK ++ basic_vector - invalid - vec_uint128_2_nil OK ++ basic_vector - invalid - vec_uint128_2_random_one_byte_less OK ++ basic_vector - invalid - vec_uint128_2_random_one_byte_more OK ++ basic_vector - invalid - vec_uint128_2_random_one_less OK ++ basic_vector - invalid - vec_uint128_2_random_one_more OK ++ basic_vector - invalid - vec_uint128_2_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint128_2_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint128_2_zero_one_less OK ++ basic_vector - invalid - vec_uint128_2_zero_one_more OK ++ basic_vector - invalid - vec_uint128_31_max_one_byte_less OK ++ basic_vector - invalid - vec_uint128_31_max_one_byte_more OK ++ basic_vector - invalid - vec_uint128_31_max_one_less OK ++ basic_vector - invalid - vec_uint128_31_max_one_more OK ++ basic_vector - invalid - vec_uint128_31_nil OK ++ basic_vector - invalid - vec_uint128_31_random_one_byte_less OK ++ basic_vector - invalid - vec_uint128_31_random_one_byte_more OK ++ basic_vector - invalid - vec_uint128_31_random_one_less OK ++ basic_vector - invalid - vec_uint128_31_random_one_more OK ++ basic_vector - invalid - vec_uint128_31_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint128_31_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint128_31_zero_one_less OK ++ basic_vector - invalid - vec_uint128_31_zero_one_more OK ++ basic_vector - invalid - vec_uint128_3_max_one_byte_less OK ++ basic_vector - invalid - 
vec_uint128_3_max_one_byte_more OK ++ basic_vector - invalid - vec_uint128_3_max_one_less OK ++ basic_vector - invalid - vec_uint128_3_max_one_more OK ++ basic_vector - invalid - vec_uint128_3_nil OK ++ basic_vector - invalid - vec_uint128_3_random_one_byte_less OK ++ basic_vector - invalid - vec_uint128_3_random_one_byte_more OK ++ basic_vector - invalid - vec_uint128_3_random_one_less OK ++ basic_vector - invalid - vec_uint128_3_random_one_more OK ++ basic_vector - invalid - vec_uint128_3_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint128_3_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint128_3_zero_one_less OK ++ basic_vector - invalid - vec_uint128_3_zero_one_more OK ++ basic_vector - invalid - vec_uint128_4_max_one_byte_less OK ++ basic_vector - invalid - vec_uint128_4_max_one_byte_more OK ++ basic_vector - invalid - vec_uint128_4_max_one_less OK ++ basic_vector - invalid - vec_uint128_4_max_one_more OK ++ basic_vector - invalid - vec_uint128_4_nil OK ++ basic_vector - invalid - vec_uint128_4_random_one_byte_less OK ++ basic_vector - invalid - vec_uint128_4_random_one_byte_more OK ++ basic_vector - invalid - vec_uint128_4_random_one_less OK ++ basic_vector - invalid - vec_uint128_4_random_one_more OK ++ basic_vector - invalid - vec_uint128_4_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint128_4_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint128_4_zero_one_less OK ++ basic_vector - invalid - vec_uint128_4_zero_one_more OK ++ basic_vector - invalid - vec_uint128_512_max_one_byte_less OK ++ basic_vector - invalid - vec_uint128_512_max_one_byte_more OK ++ basic_vector - invalid - vec_uint128_512_max_one_less OK ++ basic_vector - invalid - vec_uint128_512_max_one_more OK ++ basic_vector - invalid - vec_uint128_512_nil OK ++ basic_vector - invalid - vec_uint128_512_random_one_byte_less OK ++ basic_vector - invalid - vec_uint128_512_random_one_byte_more OK ++ basic_vector - invalid - vec_uint128_512_random_one_less OK ++ basic_vector - invalid - vec_uint128_512_random_one_more OK ++ basic_vector - invalid - vec_uint128_512_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint128_512_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint128_512_zero_one_less OK ++ basic_vector - invalid - vec_uint128_512_zero_one_more OK ++ basic_vector - invalid - vec_uint128_513_max_one_byte_less OK ++ basic_vector - invalid - vec_uint128_513_max_one_byte_more OK ++ basic_vector - invalid - vec_uint128_513_max_one_less OK ++ basic_vector - invalid - vec_uint128_513_max_one_more OK ++ basic_vector - invalid - vec_uint128_513_nil OK ++ basic_vector - invalid - vec_uint128_513_random_one_byte_less OK ++ basic_vector - invalid - vec_uint128_513_random_one_byte_more OK ++ basic_vector - invalid - vec_uint128_513_random_one_less OK ++ basic_vector - invalid - vec_uint128_513_random_one_more OK ++ basic_vector - invalid - vec_uint128_513_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint128_513_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint128_513_zero_one_less OK ++ basic_vector - invalid - vec_uint128_513_zero_one_more OK ++ basic_vector - invalid - vec_uint128_5_max_one_byte_less OK ++ basic_vector - invalid - vec_uint128_5_max_one_byte_more OK ++ basic_vector - invalid - vec_uint128_5_max_one_less OK ++ basic_vector - invalid - vec_uint128_5_max_one_more OK ++ basic_vector - invalid - vec_uint128_5_nil OK ++ basic_vector - invalid - vec_uint128_5_random_one_byte_less OK ++ basic_vector - invalid - vec_uint128_5_random_one_byte_more OK ++ 
basic_vector - invalid - vec_uint128_5_random_one_less OK ++ basic_vector - invalid - vec_uint128_5_random_one_more OK ++ basic_vector - invalid - vec_uint128_5_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint128_5_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint128_5_zero_one_less OK ++ basic_vector - invalid - vec_uint128_5_zero_one_more OK ++ basic_vector - invalid - vec_uint128_8_max_one_byte_less OK ++ basic_vector - invalid - vec_uint128_8_max_one_byte_more OK ++ basic_vector - invalid - vec_uint128_8_max_one_less OK ++ basic_vector - invalid - vec_uint128_8_max_one_more OK ++ basic_vector - invalid - vec_uint128_8_nil OK ++ basic_vector - invalid - vec_uint128_8_random_one_byte_less OK ++ basic_vector - invalid - vec_uint128_8_random_one_byte_more OK ++ basic_vector - invalid - vec_uint128_8_random_one_less OK ++ basic_vector - invalid - vec_uint128_8_random_one_more OK ++ basic_vector - invalid - vec_uint128_8_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint128_8_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint128_8_zero_one_less OK ++ basic_vector - invalid - vec_uint128_8_zero_one_more OK + basic_vector - invalid - vec_uint16_0 Skip ++ basic_vector - invalid - vec_uint16_16_max_one_byte_less OK ++ basic_vector - invalid - vec_uint16_16_max_one_byte_more OK ++ basic_vector - invalid - vec_uint16_16_max_one_less OK ++ basic_vector - invalid - vec_uint16_16_max_one_more OK ++ basic_vector - invalid - vec_uint16_16_nil OK ++ basic_vector - invalid - vec_uint16_16_random_one_byte_less OK ++ basic_vector - invalid - vec_uint16_16_random_one_byte_more OK ++ basic_vector - invalid - vec_uint16_16_random_one_less OK ++ basic_vector - invalid - vec_uint16_16_random_one_more OK ++ basic_vector - invalid - vec_uint16_16_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint16_16_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint16_16_zero_one_less OK ++ basic_vector - invalid - vec_uint16_16_zero_one_more OK ++ basic_vector - invalid - vec_uint16_1_max_one_byte_less OK ++ basic_vector - invalid - vec_uint16_1_max_one_byte_more OK ++ basic_vector - invalid - vec_uint16_1_max_one_less OK ++ basic_vector - invalid - vec_uint16_1_max_one_more OK ++ basic_vector - invalid - vec_uint16_1_nil OK ++ basic_vector - invalid - vec_uint16_1_random_one_byte_less OK ++ basic_vector - invalid - vec_uint16_1_random_one_byte_more OK ++ basic_vector - invalid - vec_uint16_1_random_one_less OK ++ basic_vector - invalid - vec_uint16_1_random_one_more OK ++ basic_vector - invalid - vec_uint16_1_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint16_1_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint16_1_zero_one_less OK ++ basic_vector - invalid - vec_uint16_1_zero_one_more OK ++ basic_vector - invalid - vec_uint16_2_max_one_byte_less OK ++ basic_vector - invalid - vec_uint16_2_max_one_byte_more OK ++ basic_vector - invalid - vec_uint16_2_max_one_less OK ++ basic_vector - invalid - vec_uint16_2_max_one_more OK ++ basic_vector - invalid - vec_uint16_2_nil OK ++ basic_vector - invalid - vec_uint16_2_random_one_byte_less OK ++ basic_vector - invalid - vec_uint16_2_random_one_byte_more OK ++ basic_vector - invalid - vec_uint16_2_random_one_less OK ++ basic_vector - invalid - vec_uint16_2_random_one_more OK ++ basic_vector - invalid - vec_uint16_2_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint16_2_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint16_2_zero_one_less OK ++ basic_vector - invalid - vec_uint16_2_zero_one_more OK ++ 
basic_vector - invalid - vec_uint16_31_max_one_byte_less OK ++ basic_vector - invalid - vec_uint16_31_max_one_byte_more OK ++ basic_vector - invalid - vec_uint16_31_max_one_less OK ++ basic_vector - invalid - vec_uint16_31_max_one_more OK ++ basic_vector - invalid - vec_uint16_31_nil OK ++ basic_vector - invalid - vec_uint16_31_random_one_byte_less OK ++ basic_vector - invalid - vec_uint16_31_random_one_byte_more OK ++ basic_vector - invalid - vec_uint16_31_random_one_less OK ++ basic_vector - invalid - vec_uint16_31_random_one_more OK ++ basic_vector - invalid - vec_uint16_31_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint16_31_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint16_31_zero_one_less OK ++ basic_vector - invalid - vec_uint16_31_zero_one_more OK ++ basic_vector - invalid - vec_uint16_3_max_one_byte_less OK ++ basic_vector - invalid - vec_uint16_3_max_one_byte_more OK ++ basic_vector - invalid - vec_uint16_3_max_one_less OK ++ basic_vector - invalid - vec_uint16_3_max_one_more OK ++ basic_vector - invalid - vec_uint16_3_nil OK ++ basic_vector - invalid - vec_uint16_3_random_one_byte_less OK ++ basic_vector - invalid - vec_uint16_3_random_one_byte_more OK ++ basic_vector - invalid - vec_uint16_3_random_one_less OK ++ basic_vector - invalid - vec_uint16_3_random_one_more OK ++ basic_vector - invalid - vec_uint16_3_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint16_3_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint16_3_zero_one_less OK ++ basic_vector - invalid - vec_uint16_3_zero_one_more OK ++ basic_vector - invalid - vec_uint16_4_max_one_byte_less OK ++ basic_vector - invalid - vec_uint16_4_max_one_byte_more OK ++ basic_vector - invalid - vec_uint16_4_max_one_less OK ++ basic_vector - invalid - vec_uint16_4_max_one_more OK ++ basic_vector - invalid - vec_uint16_4_nil OK ++ basic_vector - invalid - vec_uint16_4_random_one_byte_less OK ++ basic_vector - invalid - vec_uint16_4_random_one_byte_more OK ++ basic_vector - invalid - vec_uint16_4_random_one_less OK ++ basic_vector - invalid - vec_uint16_4_random_one_more OK ++ basic_vector - invalid - vec_uint16_4_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint16_4_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint16_4_zero_one_less OK ++ basic_vector - invalid - vec_uint16_4_zero_one_more OK ++ basic_vector - invalid - vec_uint16_512_max_one_byte_less OK ++ basic_vector - invalid - vec_uint16_512_max_one_byte_more OK ++ basic_vector - invalid - vec_uint16_512_max_one_less OK ++ basic_vector - invalid - vec_uint16_512_max_one_more OK ++ basic_vector - invalid - vec_uint16_512_nil OK ++ basic_vector - invalid - vec_uint16_512_random_one_byte_less OK ++ basic_vector - invalid - vec_uint16_512_random_one_byte_more OK ++ basic_vector - invalid - vec_uint16_512_random_one_less OK ++ basic_vector - invalid - vec_uint16_512_random_one_more OK ++ basic_vector - invalid - vec_uint16_512_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint16_512_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint16_512_zero_one_less OK ++ basic_vector - invalid - vec_uint16_512_zero_one_more OK ++ basic_vector - invalid - vec_uint16_513_max_one_byte_less OK ++ basic_vector - invalid - vec_uint16_513_max_one_byte_more OK ++ basic_vector - invalid - vec_uint16_513_max_one_less OK ++ basic_vector - invalid - vec_uint16_513_max_one_more OK ++ basic_vector - invalid - vec_uint16_513_nil OK ++ basic_vector - invalid - vec_uint16_513_random_one_byte_less OK ++ basic_vector - invalid - 
vec_uint16_513_random_one_byte_more OK ++ basic_vector - invalid - vec_uint16_513_random_one_less OK ++ basic_vector - invalid - vec_uint16_513_random_one_more OK ++ basic_vector - invalid - vec_uint16_513_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint16_513_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint16_513_zero_one_less OK ++ basic_vector - invalid - vec_uint16_513_zero_one_more OK ++ basic_vector - invalid - vec_uint16_5_max_one_byte_less OK ++ basic_vector - invalid - vec_uint16_5_max_one_byte_more OK ++ basic_vector - invalid - vec_uint16_5_max_one_less OK ++ basic_vector - invalid - vec_uint16_5_max_one_more OK ++ basic_vector - invalid - vec_uint16_5_nil OK ++ basic_vector - invalid - vec_uint16_5_random_one_byte_less OK ++ basic_vector - invalid - vec_uint16_5_random_one_byte_more OK ++ basic_vector - invalid - vec_uint16_5_random_one_less OK ++ basic_vector - invalid - vec_uint16_5_random_one_more OK ++ basic_vector - invalid - vec_uint16_5_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint16_5_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint16_5_zero_one_less OK ++ basic_vector - invalid - vec_uint16_5_zero_one_more OK ++ basic_vector - invalid - vec_uint16_8_max_one_byte_less OK ++ basic_vector - invalid - vec_uint16_8_max_one_byte_more OK ++ basic_vector - invalid - vec_uint16_8_max_one_less OK ++ basic_vector - invalid - vec_uint16_8_max_one_more OK ++ basic_vector - invalid - vec_uint16_8_nil OK ++ basic_vector - invalid - vec_uint16_8_random_one_byte_less OK ++ basic_vector - invalid - vec_uint16_8_random_one_byte_more OK ++ basic_vector - invalid - vec_uint16_8_random_one_less OK ++ basic_vector - invalid - vec_uint16_8_random_one_more OK ++ basic_vector - invalid - vec_uint16_8_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint16_8_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint16_8_zero_one_less OK ++ basic_vector - invalid - vec_uint16_8_zero_one_more OK + basic_vector - invalid - vec_uint256_0 Skip ++ basic_vector - invalid - vec_uint256_16_max_one_byte_less OK ++ basic_vector - invalid - vec_uint256_16_max_one_byte_more OK ++ basic_vector - invalid - vec_uint256_16_max_one_less OK ++ basic_vector - invalid - vec_uint256_16_max_one_more OK ++ basic_vector - invalid - vec_uint256_16_nil OK ++ basic_vector - invalid - vec_uint256_16_random_one_byte_less OK ++ basic_vector - invalid - vec_uint256_16_random_one_byte_more OK ++ basic_vector - invalid - vec_uint256_16_random_one_less OK ++ basic_vector - invalid - vec_uint256_16_random_one_more OK ++ basic_vector - invalid - vec_uint256_16_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint256_16_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint256_16_zero_one_less OK ++ basic_vector - invalid - vec_uint256_16_zero_one_more OK ++ basic_vector - invalid - vec_uint256_1_max_one_byte_less OK ++ basic_vector - invalid - vec_uint256_1_max_one_byte_more OK ++ basic_vector - invalid - vec_uint256_1_max_one_less OK ++ basic_vector - invalid - vec_uint256_1_max_one_more OK ++ basic_vector - invalid - vec_uint256_1_nil OK ++ basic_vector - invalid - vec_uint256_1_random_one_byte_less OK ++ basic_vector - invalid - vec_uint256_1_random_one_byte_more OK ++ basic_vector - invalid - vec_uint256_1_random_one_less OK ++ basic_vector - invalid - vec_uint256_1_random_one_more OK ++ basic_vector - invalid - vec_uint256_1_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint256_1_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint256_1_zero_one_less OK ++ 
basic_vector - invalid - vec_uint256_1_zero_one_more OK ++ basic_vector - invalid - vec_uint256_2_max_one_byte_less OK ++ basic_vector - invalid - vec_uint256_2_max_one_byte_more OK ++ basic_vector - invalid - vec_uint256_2_max_one_less OK ++ basic_vector - invalid - vec_uint256_2_max_one_more OK ++ basic_vector - invalid - vec_uint256_2_nil OK ++ basic_vector - invalid - vec_uint256_2_random_one_byte_less OK ++ basic_vector - invalid - vec_uint256_2_random_one_byte_more OK ++ basic_vector - invalid - vec_uint256_2_random_one_less OK ++ basic_vector - invalid - vec_uint256_2_random_one_more OK ++ basic_vector - invalid - vec_uint256_2_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint256_2_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint256_2_zero_one_less OK ++ basic_vector - invalid - vec_uint256_2_zero_one_more OK ++ basic_vector - invalid - vec_uint256_31_max_one_byte_less OK ++ basic_vector - invalid - vec_uint256_31_max_one_byte_more OK ++ basic_vector - invalid - vec_uint256_31_max_one_less OK ++ basic_vector - invalid - vec_uint256_31_max_one_more OK ++ basic_vector - invalid - vec_uint256_31_nil OK ++ basic_vector - invalid - vec_uint256_31_random_one_byte_less OK ++ basic_vector - invalid - vec_uint256_31_random_one_byte_more OK ++ basic_vector - invalid - vec_uint256_31_random_one_less OK ++ basic_vector - invalid - vec_uint256_31_random_one_more OK ++ basic_vector - invalid - vec_uint256_31_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint256_31_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint256_31_zero_one_less OK ++ basic_vector - invalid - vec_uint256_31_zero_one_more OK ++ basic_vector - invalid - vec_uint256_3_max_one_byte_less OK ++ basic_vector - invalid - vec_uint256_3_max_one_byte_more OK ++ basic_vector - invalid - vec_uint256_3_max_one_less OK ++ basic_vector - invalid - vec_uint256_3_max_one_more OK ++ basic_vector - invalid - vec_uint256_3_nil OK ++ basic_vector - invalid - vec_uint256_3_random_one_byte_less OK ++ basic_vector - invalid - vec_uint256_3_random_one_byte_more OK ++ basic_vector - invalid - vec_uint256_3_random_one_less OK ++ basic_vector - invalid - vec_uint256_3_random_one_more OK ++ basic_vector - invalid - vec_uint256_3_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint256_3_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint256_3_zero_one_less OK ++ basic_vector - invalid - vec_uint256_3_zero_one_more OK ++ basic_vector - invalid - vec_uint256_4_max_one_byte_less OK ++ basic_vector - invalid - vec_uint256_4_max_one_byte_more OK ++ basic_vector - invalid - vec_uint256_4_max_one_less OK ++ basic_vector - invalid - vec_uint256_4_max_one_more OK ++ basic_vector - invalid - vec_uint256_4_nil OK ++ basic_vector - invalid - vec_uint256_4_random_one_byte_less OK ++ basic_vector - invalid - vec_uint256_4_random_one_byte_more OK ++ basic_vector - invalid - vec_uint256_4_random_one_less OK ++ basic_vector - invalid - vec_uint256_4_random_one_more OK ++ basic_vector - invalid - vec_uint256_4_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint256_4_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint256_4_zero_one_less OK ++ basic_vector - invalid - vec_uint256_4_zero_one_more OK ++ basic_vector - invalid - vec_uint256_512_max_one_byte_less OK ++ basic_vector - invalid - vec_uint256_512_max_one_byte_more OK ++ basic_vector - invalid - vec_uint256_512_max_one_less OK ++ basic_vector - invalid - vec_uint256_512_max_one_more OK ++ basic_vector - invalid - vec_uint256_512_nil OK ++ basic_vector - 
invalid - vec_uint256_512_random_one_byte_less OK ++ basic_vector - invalid - vec_uint256_512_random_one_byte_more OK ++ basic_vector - invalid - vec_uint256_512_random_one_less OK ++ basic_vector - invalid - vec_uint256_512_random_one_more OK ++ basic_vector - invalid - vec_uint256_512_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint256_512_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint256_512_zero_one_less OK ++ basic_vector - invalid - vec_uint256_512_zero_one_more OK ++ basic_vector - invalid - vec_uint256_513_max_one_byte_less OK ++ basic_vector - invalid - vec_uint256_513_max_one_byte_more OK ++ basic_vector - invalid - vec_uint256_513_max_one_less OK ++ basic_vector - invalid - vec_uint256_513_max_one_more OK ++ basic_vector - invalid - vec_uint256_513_nil OK ++ basic_vector - invalid - vec_uint256_513_random_one_byte_less OK ++ basic_vector - invalid - vec_uint256_513_random_one_byte_more OK ++ basic_vector - invalid - vec_uint256_513_random_one_less OK ++ basic_vector - invalid - vec_uint256_513_random_one_more OK ++ basic_vector - invalid - vec_uint256_513_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint256_513_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint256_513_zero_one_less OK ++ basic_vector - invalid - vec_uint256_513_zero_one_more OK ++ basic_vector - invalid - vec_uint256_5_max_one_byte_less OK ++ basic_vector - invalid - vec_uint256_5_max_one_byte_more OK ++ basic_vector - invalid - vec_uint256_5_max_one_less OK ++ basic_vector - invalid - vec_uint256_5_max_one_more OK ++ basic_vector - invalid - vec_uint256_5_nil OK ++ basic_vector - invalid - vec_uint256_5_random_one_byte_less OK ++ basic_vector - invalid - vec_uint256_5_random_one_byte_more OK ++ basic_vector - invalid - vec_uint256_5_random_one_less OK ++ basic_vector - invalid - vec_uint256_5_random_one_more OK ++ basic_vector - invalid - vec_uint256_5_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint256_5_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint256_5_zero_one_less OK ++ basic_vector - invalid - vec_uint256_5_zero_one_more OK ++ basic_vector - invalid - vec_uint256_8_max_one_byte_less OK ++ basic_vector - invalid - vec_uint256_8_max_one_byte_more OK ++ basic_vector - invalid - vec_uint256_8_max_one_less OK ++ basic_vector - invalid - vec_uint256_8_max_one_more OK ++ basic_vector - invalid - vec_uint256_8_nil OK ++ basic_vector - invalid - vec_uint256_8_random_one_byte_less OK ++ basic_vector - invalid - vec_uint256_8_random_one_byte_more OK ++ basic_vector - invalid - vec_uint256_8_random_one_less OK ++ basic_vector - invalid - vec_uint256_8_random_one_more OK ++ basic_vector - invalid - vec_uint256_8_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint256_8_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint256_8_zero_one_less OK ++ basic_vector - invalid - vec_uint256_8_zero_one_more OK + basic_vector - invalid - vec_uint32_0 Skip ++ basic_vector - invalid - vec_uint32_16_max_one_byte_less OK ++ basic_vector - invalid - vec_uint32_16_max_one_byte_more OK ++ basic_vector - invalid - vec_uint32_16_max_one_less OK ++ basic_vector - invalid - vec_uint32_16_max_one_more OK ++ basic_vector - invalid - vec_uint32_16_nil OK ++ basic_vector - invalid - vec_uint32_16_random_one_byte_less OK ++ basic_vector - invalid - vec_uint32_16_random_one_byte_more OK ++ basic_vector - invalid - vec_uint32_16_random_one_less OK ++ basic_vector - invalid - vec_uint32_16_random_one_more OK ++ basic_vector - invalid - vec_uint32_16_zero_one_byte_less OK ++ 
basic_vector - invalid - vec_uint32_16_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint32_16_zero_one_less OK ++ basic_vector - invalid - vec_uint32_16_zero_one_more OK ++ basic_vector - invalid - vec_uint32_1_max_one_byte_less OK ++ basic_vector - invalid - vec_uint32_1_max_one_byte_more OK ++ basic_vector - invalid - vec_uint32_1_max_one_less OK ++ basic_vector - invalid - vec_uint32_1_max_one_more OK ++ basic_vector - invalid - vec_uint32_1_nil OK ++ basic_vector - invalid - vec_uint32_1_random_one_byte_less OK ++ basic_vector - invalid - vec_uint32_1_random_one_byte_more OK ++ basic_vector - invalid - vec_uint32_1_random_one_less OK ++ basic_vector - invalid - vec_uint32_1_random_one_more OK ++ basic_vector - invalid - vec_uint32_1_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint32_1_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint32_1_zero_one_less OK ++ basic_vector - invalid - vec_uint32_1_zero_one_more OK ++ basic_vector - invalid - vec_uint32_2_max_one_byte_less OK ++ basic_vector - invalid - vec_uint32_2_max_one_byte_more OK ++ basic_vector - invalid - vec_uint32_2_max_one_less OK ++ basic_vector - invalid - vec_uint32_2_max_one_more OK ++ basic_vector - invalid - vec_uint32_2_nil OK ++ basic_vector - invalid - vec_uint32_2_random_one_byte_less OK ++ basic_vector - invalid - vec_uint32_2_random_one_byte_more OK ++ basic_vector - invalid - vec_uint32_2_random_one_less OK ++ basic_vector - invalid - vec_uint32_2_random_one_more OK ++ basic_vector - invalid - vec_uint32_2_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint32_2_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint32_2_zero_one_less OK ++ basic_vector - invalid - vec_uint32_2_zero_one_more OK ++ basic_vector - invalid - vec_uint32_31_max_one_byte_less OK ++ basic_vector - invalid - vec_uint32_31_max_one_byte_more OK ++ basic_vector - invalid - vec_uint32_31_max_one_less OK ++ basic_vector - invalid - vec_uint32_31_max_one_more OK ++ basic_vector - invalid - vec_uint32_31_nil OK ++ basic_vector - invalid - vec_uint32_31_random_one_byte_less OK ++ basic_vector - invalid - vec_uint32_31_random_one_byte_more OK ++ basic_vector - invalid - vec_uint32_31_random_one_less OK ++ basic_vector - invalid - vec_uint32_31_random_one_more OK ++ basic_vector - invalid - vec_uint32_31_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint32_31_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint32_31_zero_one_less OK ++ basic_vector - invalid - vec_uint32_31_zero_one_more OK ++ basic_vector - invalid - vec_uint32_3_max_one_byte_less OK ++ basic_vector - invalid - vec_uint32_3_max_one_byte_more OK ++ basic_vector - invalid - vec_uint32_3_max_one_less OK ++ basic_vector - invalid - vec_uint32_3_max_one_more OK ++ basic_vector - invalid - vec_uint32_3_nil OK ++ basic_vector - invalid - vec_uint32_3_random_one_byte_less OK ++ basic_vector - invalid - vec_uint32_3_random_one_byte_more OK ++ basic_vector - invalid - vec_uint32_3_random_one_less OK ++ basic_vector - invalid - vec_uint32_3_random_one_more OK ++ basic_vector - invalid - vec_uint32_3_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint32_3_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint32_3_zero_one_less OK ++ basic_vector - invalid - vec_uint32_3_zero_one_more OK ++ basic_vector - invalid - vec_uint32_4_max_one_byte_less OK ++ basic_vector - invalid - vec_uint32_4_max_one_byte_more OK ++ basic_vector - invalid - vec_uint32_4_max_one_less OK ++ basic_vector - invalid - vec_uint32_4_max_one_more OK ++ 
basic_vector - invalid - vec_uint32_4_nil OK ++ basic_vector - invalid - vec_uint32_4_random_one_byte_less OK ++ basic_vector - invalid - vec_uint32_4_random_one_byte_more OK ++ basic_vector - invalid - vec_uint32_4_random_one_less OK ++ basic_vector - invalid - vec_uint32_4_random_one_more OK ++ basic_vector - invalid - vec_uint32_4_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint32_4_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint32_4_zero_one_less OK ++ basic_vector - invalid - vec_uint32_4_zero_one_more OK ++ basic_vector - invalid - vec_uint32_512_max_one_byte_less OK ++ basic_vector - invalid - vec_uint32_512_max_one_byte_more OK ++ basic_vector - invalid - vec_uint32_512_max_one_less OK ++ basic_vector - invalid - vec_uint32_512_max_one_more OK ++ basic_vector - invalid - vec_uint32_512_nil OK ++ basic_vector - invalid - vec_uint32_512_random_one_byte_less OK ++ basic_vector - invalid - vec_uint32_512_random_one_byte_more OK ++ basic_vector - invalid - vec_uint32_512_random_one_less OK ++ basic_vector - invalid - vec_uint32_512_random_one_more OK ++ basic_vector - invalid - vec_uint32_512_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint32_512_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint32_512_zero_one_less OK ++ basic_vector - invalid - vec_uint32_512_zero_one_more OK ++ basic_vector - invalid - vec_uint32_513_max_one_byte_less OK ++ basic_vector - invalid - vec_uint32_513_max_one_byte_more OK ++ basic_vector - invalid - vec_uint32_513_max_one_less OK ++ basic_vector - invalid - vec_uint32_513_max_one_more OK ++ basic_vector - invalid - vec_uint32_513_nil OK ++ basic_vector - invalid - vec_uint32_513_random_one_byte_less OK ++ basic_vector - invalid - vec_uint32_513_random_one_byte_more OK ++ basic_vector - invalid - vec_uint32_513_random_one_less OK ++ basic_vector - invalid - vec_uint32_513_random_one_more OK ++ basic_vector - invalid - vec_uint32_513_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint32_513_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint32_513_zero_one_less OK ++ basic_vector - invalid - vec_uint32_513_zero_one_more OK ++ basic_vector - invalid - vec_uint32_5_max_one_byte_less OK ++ basic_vector - invalid - vec_uint32_5_max_one_byte_more OK ++ basic_vector - invalid - vec_uint32_5_max_one_less OK ++ basic_vector - invalid - vec_uint32_5_max_one_more OK ++ basic_vector - invalid - vec_uint32_5_nil OK ++ basic_vector - invalid - vec_uint32_5_random_one_byte_less OK ++ basic_vector - invalid - vec_uint32_5_random_one_byte_more OK ++ basic_vector - invalid - vec_uint32_5_random_one_less OK ++ basic_vector - invalid - vec_uint32_5_random_one_more OK ++ basic_vector - invalid - vec_uint32_5_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint32_5_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint32_5_zero_one_less OK ++ basic_vector - invalid - vec_uint32_5_zero_one_more OK ++ basic_vector - invalid - vec_uint32_8_max_one_byte_less OK ++ basic_vector - invalid - vec_uint32_8_max_one_byte_more OK ++ basic_vector - invalid - vec_uint32_8_max_one_less OK ++ basic_vector - invalid - vec_uint32_8_max_one_more OK ++ basic_vector - invalid - vec_uint32_8_nil OK ++ basic_vector - invalid - vec_uint32_8_random_one_byte_less OK ++ basic_vector - invalid - vec_uint32_8_random_one_byte_more OK ++ basic_vector - invalid - vec_uint32_8_random_one_less OK ++ basic_vector - invalid - vec_uint32_8_random_one_more OK ++ basic_vector - invalid - vec_uint32_8_zero_one_byte_less OK ++ basic_vector - invalid - 
vec_uint32_8_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint32_8_zero_one_less OK ++ basic_vector - invalid - vec_uint32_8_zero_one_more OK + basic_vector - invalid - vec_uint64_0 Skip ++ basic_vector - invalid - vec_uint64_16_max_one_byte_less OK ++ basic_vector - invalid - vec_uint64_16_max_one_byte_more OK ++ basic_vector - invalid - vec_uint64_16_max_one_less OK ++ basic_vector - invalid - vec_uint64_16_max_one_more OK ++ basic_vector - invalid - vec_uint64_16_nil OK ++ basic_vector - invalid - vec_uint64_16_random_one_byte_less OK ++ basic_vector - invalid - vec_uint64_16_random_one_byte_more OK ++ basic_vector - invalid - vec_uint64_16_random_one_less OK ++ basic_vector - invalid - vec_uint64_16_random_one_more OK ++ basic_vector - invalid - vec_uint64_16_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint64_16_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint64_16_zero_one_less OK ++ basic_vector - invalid - vec_uint64_16_zero_one_more OK ++ basic_vector - invalid - vec_uint64_1_max_one_byte_less OK ++ basic_vector - invalid - vec_uint64_1_max_one_byte_more OK ++ basic_vector - invalid - vec_uint64_1_max_one_less OK ++ basic_vector - invalid - vec_uint64_1_max_one_more OK ++ basic_vector - invalid - vec_uint64_1_nil OK ++ basic_vector - invalid - vec_uint64_1_random_one_byte_less OK ++ basic_vector - invalid - vec_uint64_1_random_one_byte_more OK ++ basic_vector - invalid - vec_uint64_1_random_one_less OK ++ basic_vector - invalid - vec_uint64_1_random_one_more OK ++ basic_vector - invalid - vec_uint64_1_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint64_1_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint64_1_zero_one_less OK ++ basic_vector - invalid - vec_uint64_1_zero_one_more OK ++ basic_vector - invalid - vec_uint64_2_max_one_byte_less OK ++ basic_vector - invalid - vec_uint64_2_max_one_byte_more OK ++ basic_vector - invalid - vec_uint64_2_max_one_less OK ++ basic_vector - invalid - vec_uint64_2_max_one_more OK ++ basic_vector - invalid - vec_uint64_2_nil OK ++ basic_vector - invalid - vec_uint64_2_random_one_byte_less OK ++ basic_vector - invalid - vec_uint64_2_random_one_byte_more OK ++ basic_vector - invalid - vec_uint64_2_random_one_less OK ++ basic_vector - invalid - vec_uint64_2_random_one_more OK ++ basic_vector - invalid - vec_uint64_2_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint64_2_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint64_2_zero_one_less OK ++ basic_vector - invalid - vec_uint64_2_zero_one_more OK ++ basic_vector - invalid - vec_uint64_31_max_one_byte_less OK ++ basic_vector - invalid - vec_uint64_31_max_one_byte_more OK ++ basic_vector - invalid - vec_uint64_31_max_one_less OK ++ basic_vector - invalid - vec_uint64_31_max_one_more OK ++ basic_vector - invalid - vec_uint64_31_nil OK ++ basic_vector - invalid - vec_uint64_31_random_one_byte_less OK ++ basic_vector - invalid - vec_uint64_31_random_one_byte_more OK ++ basic_vector - invalid - vec_uint64_31_random_one_less OK ++ basic_vector - invalid - vec_uint64_31_random_one_more OK ++ basic_vector - invalid - vec_uint64_31_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint64_31_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint64_31_zero_one_less OK ++ basic_vector - invalid - vec_uint64_31_zero_one_more OK ++ basic_vector - invalid - vec_uint64_3_max_one_byte_less OK ++ basic_vector - invalid - vec_uint64_3_max_one_byte_more OK ++ basic_vector - invalid - vec_uint64_3_max_one_less OK ++ basic_vector - invalid - 
vec_uint64_3_max_one_more OK ++ basic_vector - invalid - vec_uint64_3_nil OK ++ basic_vector - invalid - vec_uint64_3_random_one_byte_less OK ++ basic_vector - invalid - vec_uint64_3_random_one_byte_more OK ++ basic_vector - invalid - vec_uint64_3_random_one_less OK ++ basic_vector - invalid - vec_uint64_3_random_one_more OK ++ basic_vector - invalid - vec_uint64_3_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint64_3_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint64_3_zero_one_less OK ++ basic_vector - invalid - vec_uint64_3_zero_one_more OK ++ basic_vector - invalid - vec_uint64_4_max_one_byte_less OK ++ basic_vector - invalid - vec_uint64_4_max_one_byte_more OK ++ basic_vector - invalid - vec_uint64_4_max_one_less OK ++ basic_vector - invalid - vec_uint64_4_max_one_more OK ++ basic_vector - invalid - vec_uint64_4_nil OK ++ basic_vector - invalid - vec_uint64_4_random_one_byte_less OK ++ basic_vector - invalid - vec_uint64_4_random_one_byte_more OK ++ basic_vector - invalid - vec_uint64_4_random_one_less OK ++ basic_vector - invalid - vec_uint64_4_random_one_more OK ++ basic_vector - invalid - vec_uint64_4_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint64_4_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint64_4_zero_one_less OK ++ basic_vector - invalid - vec_uint64_4_zero_one_more OK ++ basic_vector - invalid - vec_uint64_512_max_one_byte_less OK ++ basic_vector - invalid - vec_uint64_512_max_one_byte_more OK ++ basic_vector - invalid - vec_uint64_512_max_one_less OK ++ basic_vector - invalid - vec_uint64_512_max_one_more OK ++ basic_vector - invalid - vec_uint64_512_nil OK ++ basic_vector - invalid - vec_uint64_512_random_one_byte_less OK ++ basic_vector - invalid - vec_uint64_512_random_one_byte_more OK ++ basic_vector - invalid - vec_uint64_512_random_one_less OK ++ basic_vector - invalid - vec_uint64_512_random_one_more OK ++ basic_vector - invalid - vec_uint64_512_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint64_512_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint64_512_zero_one_less OK ++ basic_vector - invalid - vec_uint64_512_zero_one_more OK ++ basic_vector - invalid - vec_uint64_513_max_one_byte_less OK ++ basic_vector - invalid - vec_uint64_513_max_one_byte_more OK ++ basic_vector - invalid - vec_uint64_513_max_one_less OK ++ basic_vector - invalid - vec_uint64_513_max_one_more OK ++ basic_vector - invalid - vec_uint64_513_nil OK ++ basic_vector - invalid - vec_uint64_513_random_one_byte_less OK ++ basic_vector - invalid - vec_uint64_513_random_one_byte_more OK ++ basic_vector - invalid - vec_uint64_513_random_one_less OK ++ basic_vector - invalid - vec_uint64_513_random_one_more OK ++ basic_vector - invalid - vec_uint64_513_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint64_513_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint64_513_zero_one_less OK ++ basic_vector - invalid - vec_uint64_513_zero_one_more OK ++ basic_vector - invalid - vec_uint64_5_max_one_byte_less OK ++ basic_vector - invalid - vec_uint64_5_max_one_byte_more OK ++ basic_vector - invalid - vec_uint64_5_max_one_less OK ++ basic_vector - invalid - vec_uint64_5_max_one_more OK ++ basic_vector - invalid - vec_uint64_5_nil OK ++ basic_vector - invalid - vec_uint64_5_random_one_byte_less OK ++ basic_vector - invalid - vec_uint64_5_random_one_byte_more OK ++ basic_vector - invalid - vec_uint64_5_random_one_less OK ++ basic_vector - invalid - vec_uint64_5_random_one_more OK ++ basic_vector - invalid - vec_uint64_5_zero_one_byte_less OK 
++ basic_vector - invalid - vec_uint64_5_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint64_5_zero_one_less OK ++ basic_vector - invalid - vec_uint64_5_zero_one_more OK ++ basic_vector - invalid - vec_uint64_8_max_one_byte_less OK ++ basic_vector - invalid - vec_uint64_8_max_one_byte_more OK ++ basic_vector - invalid - vec_uint64_8_max_one_less OK ++ basic_vector - invalid - vec_uint64_8_max_one_more OK ++ basic_vector - invalid - vec_uint64_8_nil OK ++ basic_vector - invalid - vec_uint64_8_random_one_byte_less OK ++ basic_vector - invalid - vec_uint64_8_random_one_byte_more OK ++ basic_vector - invalid - vec_uint64_8_random_one_less OK ++ basic_vector - invalid - vec_uint64_8_random_one_more OK ++ basic_vector - invalid - vec_uint64_8_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint64_8_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint64_8_zero_one_less OK ++ basic_vector - invalid - vec_uint64_8_zero_one_more OK + basic_vector - invalid - vec_uint8_0 Skip ++ basic_vector - invalid - vec_uint8_16_max_one_byte_less OK ++ basic_vector - invalid - vec_uint8_16_max_one_byte_more OK ++ basic_vector - invalid - vec_uint8_16_max_one_less OK ++ basic_vector - invalid - vec_uint8_16_max_one_more OK ++ basic_vector - invalid - vec_uint8_16_nil OK ++ basic_vector - invalid - vec_uint8_16_random_one_byte_less OK ++ basic_vector - invalid - vec_uint8_16_random_one_byte_more OK ++ basic_vector - invalid - vec_uint8_16_random_one_less OK ++ basic_vector - invalid - vec_uint8_16_random_one_more OK ++ basic_vector - invalid - vec_uint8_16_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint8_16_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint8_16_zero_one_less OK ++ basic_vector - invalid - vec_uint8_16_zero_one_more OK ++ basic_vector - invalid - vec_uint8_1_max_one_byte_less OK ++ basic_vector - invalid - vec_uint8_1_max_one_byte_more OK ++ basic_vector - invalid - vec_uint8_1_max_one_less OK ++ basic_vector - invalid - vec_uint8_1_max_one_more OK ++ basic_vector - invalid - vec_uint8_1_nil OK ++ basic_vector - invalid - vec_uint8_1_random_one_byte_less OK ++ basic_vector - invalid - vec_uint8_1_random_one_byte_more OK ++ basic_vector - invalid - vec_uint8_1_random_one_less OK ++ basic_vector - invalid - vec_uint8_1_random_one_more OK ++ basic_vector - invalid - vec_uint8_1_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint8_1_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint8_1_zero_one_less OK ++ basic_vector - invalid - vec_uint8_1_zero_one_more OK ++ basic_vector - invalid - vec_uint8_2_max_one_byte_less OK ++ basic_vector - invalid - vec_uint8_2_max_one_byte_more OK ++ basic_vector - invalid - vec_uint8_2_max_one_less OK ++ basic_vector - invalid - vec_uint8_2_max_one_more OK ++ basic_vector - invalid - vec_uint8_2_nil OK ++ basic_vector - invalid - vec_uint8_2_random_one_byte_less OK ++ basic_vector - invalid - vec_uint8_2_random_one_byte_more OK ++ basic_vector - invalid - vec_uint8_2_random_one_less OK ++ basic_vector - invalid - vec_uint8_2_random_one_more OK ++ basic_vector - invalid - vec_uint8_2_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint8_2_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint8_2_zero_one_less OK ++ basic_vector - invalid - vec_uint8_2_zero_one_more OK ++ basic_vector - invalid - vec_uint8_31_max_one_byte_less OK ++ basic_vector - invalid - vec_uint8_31_max_one_byte_more OK ++ basic_vector - invalid - vec_uint8_31_max_one_less OK ++ basic_vector - invalid - vec_uint8_31_max_one_more OK ++ 
basic_vector - invalid - vec_uint8_31_nil OK ++ basic_vector - invalid - vec_uint8_31_random_one_byte_less OK ++ basic_vector - invalid - vec_uint8_31_random_one_byte_more OK ++ basic_vector - invalid - vec_uint8_31_random_one_less OK ++ basic_vector - invalid - vec_uint8_31_random_one_more OK ++ basic_vector - invalid - vec_uint8_31_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint8_31_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint8_31_zero_one_less OK ++ basic_vector - invalid - vec_uint8_31_zero_one_more OK ++ basic_vector - invalid - vec_uint8_3_max_one_byte_less OK ++ basic_vector - invalid - vec_uint8_3_max_one_byte_more OK ++ basic_vector - invalid - vec_uint8_3_max_one_less OK ++ basic_vector - invalid - vec_uint8_3_max_one_more OK ++ basic_vector - invalid - vec_uint8_3_nil OK ++ basic_vector - invalid - vec_uint8_3_random_one_byte_less OK ++ basic_vector - invalid - vec_uint8_3_random_one_byte_more OK ++ basic_vector - invalid - vec_uint8_3_random_one_less OK ++ basic_vector - invalid - vec_uint8_3_random_one_more OK ++ basic_vector - invalid - vec_uint8_3_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint8_3_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint8_3_zero_one_less OK ++ basic_vector - invalid - vec_uint8_3_zero_one_more OK ++ basic_vector - invalid - vec_uint8_4_max_one_byte_less OK ++ basic_vector - invalid - vec_uint8_4_max_one_byte_more OK ++ basic_vector - invalid - vec_uint8_4_max_one_less OK ++ basic_vector - invalid - vec_uint8_4_max_one_more OK ++ basic_vector - invalid - vec_uint8_4_nil OK ++ basic_vector - invalid - vec_uint8_4_random_one_byte_less OK ++ basic_vector - invalid - vec_uint8_4_random_one_byte_more OK ++ basic_vector - invalid - vec_uint8_4_random_one_less OK ++ basic_vector - invalid - vec_uint8_4_random_one_more OK ++ basic_vector - invalid - vec_uint8_4_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint8_4_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint8_4_zero_one_less OK ++ basic_vector - invalid - vec_uint8_4_zero_one_more OK ++ basic_vector - invalid - vec_uint8_512_max_one_byte_less OK ++ basic_vector - invalid - vec_uint8_512_max_one_byte_more OK ++ basic_vector - invalid - vec_uint8_512_max_one_less OK ++ basic_vector - invalid - vec_uint8_512_max_one_more OK ++ basic_vector - invalid - vec_uint8_512_nil OK ++ basic_vector - invalid - vec_uint8_512_random_one_byte_less OK ++ basic_vector - invalid - vec_uint8_512_random_one_byte_more OK ++ basic_vector - invalid - vec_uint8_512_random_one_less OK ++ basic_vector - invalid - vec_uint8_512_random_one_more OK ++ basic_vector - invalid - vec_uint8_512_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint8_512_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint8_512_zero_one_less OK ++ basic_vector - invalid - vec_uint8_512_zero_one_more OK ++ basic_vector - invalid - vec_uint8_513_max_one_byte_less OK ++ basic_vector - invalid - vec_uint8_513_max_one_byte_more OK ++ basic_vector - invalid - vec_uint8_513_max_one_less OK ++ basic_vector - invalid - vec_uint8_513_max_one_more OK ++ basic_vector - invalid - vec_uint8_513_nil OK ++ basic_vector - invalid - vec_uint8_513_random_one_byte_less OK ++ basic_vector - invalid - vec_uint8_513_random_one_byte_more OK ++ basic_vector - invalid - vec_uint8_513_random_one_less OK ++ basic_vector - invalid - vec_uint8_513_random_one_more OK ++ basic_vector - invalid - vec_uint8_513_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint8_513_zero_one_byte_more OK ++ basic_vector - 
invalid - vec_uint8_513_zero_one_less OK ++ basic_vector - invalid - vec_uint8_513_zero_one_more OK ++ basic_vector - invalid - vec_uint8_5_max_one_byte_less OK ++ basic_vector - invalid - vec_uint8_5_max_one_byte_more OK ++ basic_vector - invalid - vec_uint8_5_max_one_less OK ++ basic_vector - invalid - vec_uint8_5_max_one_more OK ++ basic_vector - invalid - vec_uint8_5_nil OK ++ basic_vector - invalid - vec_uint8_5_random_one_byte_less OK ++ basic_vector - invalid - vec_uint8_5_random_one_byte_more OK ++ basic_vector - invalid - vec_uint8_5_random_one_less OK ++ basic_vector - invalid - vec_uint8_5_random_one_more OK ++ basic_vector - invalid - vec_uint8_5_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint8_5_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint8_5_zero_one_less OK ++ basic_vector - invalid - vec_uint8_5_zero_one_more OK ++ basic_vector - invalid - vec_uint8_8_max_one_byte_less OK ++ basic_vector - invalid - vec_uint8_8_max_one_byte_more OK ++ basic_vector - invalid - vec_uint8_8_max_one_less OK ++ basic_vector - invalid - vec_uint8_8_max_one_more OK ++ basic_vector - invalid - vec_uint8_8_nil OK ++ basic_vector - invalid - vec_uint8_8_random_one_byte_less OK ++ basic_vector - invalid - vec_uint8_8_random_one_byte_more OK ++ basic_vector - invalid - vec_uint8_8_random_one_less OK ++ basic_vector - invalid - vec_uint8_8_random_one_more OK ++ basic_vector - invalid - vec_uint8_8_zero_one_byte_less OK ++ basic_vector - invalid - vec_uint8_8_zero_one_byte_more OK ++ basic_vector - invalid - vec_uint8_8_zero_one_less OK ++ basic_vector - invalid - vec_uint8_8_zero_one_more OK ++ basic_vector - valid - vec_bool_16_max OK ++ basic_vector - valid - vec_bool_16_zero OK ++ basic_vector - valid - vec_bool_1_max OK ++ basic_vector - valid - vec_bool_1_zero OK ++ basic_vector - valid - vec_bool_2_max OK ++ basic_vector - valid - vec_bool_2_zero OK ++ basic_vector - valid - vec_bool_31_max OK ++ basic_vector - valid - vec_bool_31_zero OK ++ basic_vector - valid - vec_bool_3_max OK ++ basic_vector - valid - vec_bool_3_zero OK ++ basic_vector - valid - vec_bool_4_max OK ++ basic_vector - valid - vec_bool_4_zero OK ++ basic_vector - valid - vec_bool_512_max OK ++ basic_vector - valid - vec_bool_512_zero OK ++ basic_vector - valid - vec_bool_513_max OK ++ basic_vector - valid - vec_bool_513_zero OK ++ basic_vector - valid - vec_bool_5_max OK ++ basic_vector - valid - vec_bool_5_zero OK ++ basic_vector - valid - vec_bool_8_max OK ++ basic_vector - valid - vec_bool_8_zero OK ++ basic_vector - valid - vec_uint128_16_max OK ++ basic_vector - valid - vec_uint128_16_random OK ++ basic_vector - valid - vec_uint128_16_zero OK ++ basic_vector - valid - vec_uint128_1_max OK ++ basic_vector - valid - vec_uint128_1_random OK ++ basic_vector - valid - vec_uint128_1_zero OK ++ basic_vector - valid - vec_uint128_2_max OK ++ basic_vector - valid - vec_uint128_2_random OK ++ basic_vector - valid - vec_uint128_2_zero OK ++ basic_vector - valid - vec_uint128_31_max OK ++ basic_vector - valid - vec_uint128_31_random OK ++ basic_vector - valid - vec_uint128_31_zero OK ++ basic_vector - valid - vec_uint128_3_max OK ++ basic_vector - valid - vec_uint128_3_random OK ++ basic_vector - valid - vec_uint128_3_zero OK ++ basic_vector - valid - vec_uint128_4_max OK ++ basic_vector - valid - vec_uint128_4_random OK ++ basic_vector - valid - vec_uint128_4_zero OK ++ basic_vector - valid - vec_uint128_512_max OK ++ basic_vector - valid - vec_uint128_512_random OK ++ basic_vector - valid - 
vec_uint128_512_zero OK ++ basic_vector - valid - vec_uint128_513_max OK ++ basic_vector - valid - vec_uint128_513_random OK ++ basic_vector - valid - vec_uint128_513_zero OK ++ basic_vector - valid - vec_uint128_5_max OK ++ basic_vector - valid - vec_uint128_5_random OK ++ basic_vector - valid - vec_uint128_5_zero OK ++ basic_vector - valid - vec_uint128_8_max OK ++ basic_vector - valid - vec_uint128_8_random OK ++ basic_vector - valid - vec_uint128_8_zero OK ++ basic_vector - valid - vec_uint16_16_max OK ++ basic_vector - valid - vec_uint16_16_random OK ++ basic_vector - valid - vec_uint16_16_zero OK ++ basic_vector - valid - vec_uint16_1_max OK ++ basic_vector - valid - vec_uint16_1_random OK ++ basic_vector - valid - vec_uint16_1_zero OK ++ basic_vector - valid - vec_uint16_2_max OK ++ basic_vector - valid - vec_uint16_2_random OK ++ basic_vector - valid - vec_uint16_2_zero OK ++ basic_vector - valid - vec_uint16_31_max OK ++ basic_vector - valid - vec_uint16_31_random OK ++ basic_vector - valid - vec_uint16_31_zero OK ++ basic_vector - valid - vec_uint16_3_max OK ++ basic_vector - valid - vec_uint16_3_random OK ++ basic_vector - valid - vec_uint16_3_zero OK ++ basic_vector - valid - vec_uint16_4_max OK ++ basic_vector - valid - vec_uint16_4_random OK ++ basic_vector - valid - vec_uint16_4_zero OK ++ basic_vector - valid - vec_uint16_512_max OK ++ basic_vector - valid - vec_uint16_512_random OK ++ basic_vector - valid - vec_uint16_512_zero OK ++ basic_vector - valid - vec_uint16_513_max OK ++ basic_vector - valid - vec_uint16_513_random OK ++ basic_vector - valid - vec_uint16_513_zero OK ++ basic_vector - valid - vec_uint16_5_max OK ++ basic_vector - valid - vec_uint16_5_random OK ++ basic_vector - valid - vec_uint16_5_zero OK ++ basic_vector - valid - vec_uint16_8_max OK ++ basic_vector - valid - vec_uint16_8_random OK ++ basic_vector - valid - vec_uint16_8_zero OK ++ basic_vector - valid - vec_uint256_16_max OK ++ basic_vector - valid - vec_uint256_16_random OK ++ basic_vector - valid - vec_uint256_16_zero OK ++ basic_vector - valid - vec_uint256_1_max OK ++ basic_vector - valid - vec_uint256_1_random OK ++ basic_vector - valid - vec_uint256_1_zero OK ++ basic_vector - valid - vec_uint256_2_max OK ++ basic_vector - valid - vec_uint256_2_random OK ++ basic_vector - valid - vec_uint256_2_zero OK ++ basic_vector - valid - vec_uint256_31_max OK ++ basic_vector - valid - vec_uint256_31_random OK ++ basic_vector - valid - vec_uint256_31_zero OK ++ basic_vector - valid - vec_uint256_3_max OK ++ basic_vector - valid - vec_uint256_3_random OK ++ basic_vector - valid - vec_uint256_3_zero OK ++ basic_vector - valid - vec_uint256_4_max OK ++ basic_vector - valid - vec_uint256_4_random OK ++ basic_vector - valid - vec_uint256_4_zero OK ++ basic_vector - valid - vec_uint256_512_max OK ++ basic_vector - valid - vec_uint256_512_random OK ++ basic_vector - valid - vec_uint256_512_zero OK ++ basic_vector - valid - vec_uint256_513_max OK ++ basic_vector - valid - vec_uint256_513_random OK ++ basic_vector - valid - vec_uint256_513_zero OK ++ basic_vector - valid - vec_uint256_5_max OK ++ basic_vector - valid - vec_uint256_5_random OK ++ basic_vector - valid - vec_uint256_5_zero OK ++ basic_vector - valid - vec_uint256_8_max OK ++ basic_vector - valid - vec_uint256_8_random OK ++ basic_vector - valid - vec_uint256_8_zero OK ++ basic_vector - valid - vec_uint32_16_max OK ++ basic_vector - valid - vec_uint32_16_random OK ++ basic_vector - valid - vec_uint32_16_zero OK ++ basic_vector - valid - 
vec_uint32_1_max OK ++ basic_vector - valid - vec_uint32_1_random OK ++ basic_vector - valid - vec_uint32_1_zero OK ++ basic_vector - valid - vec_uint32_2_max OK ++ basic_vector - valid - vec_uint32_2_random OK ++ basic_vector - valid - vec_uint32_2_zero OK ++ basic_vector - valid - vec_uint32_31_max OK ++ basic_vector - valid - vec_uint32_31_random OK ++ basic_vector - valid - vec_uint32_31_zero OK ++ basic_vector - valid - vec_uint32_3_max OK ++ basic_vector - valid - vec_uint32_3_random OK ++ basic_vector - valid - vec_uint32_3_zero OK ++ basic_vector - valid - vec_uint32_4_max OK ++ basic_vector - valid - vec_uint32_4_random OK ++ basic_vector - valid - vec_uint32_4_zero OK ++ basic_vector - valid - vec_uint32_512_max OK ++ basic_vector - valid - vec_uint32_512_random OK ++ basic_vector - valid - vec_uint32_512_zero OK ++ basic_vector - valid - vec_uint32_513_max OK ++ basic_vector - valid - vec_uint32_513_random OK ++ basic_vector - valid - vec_uint32_513_zero OK ++ basic_vector - valid - vec_uint32_5_max OK ++ basic_vector - valid - vec_uint32_5_random OK ++ basic_vector - valid - vec_uint32_5_zero OK ++ basic_vector - valid - vec_uint32_8_max OK ++ basic_vector - valid - vec_uint32_8_random OK ++ basic_vector - valid - vec_uint32_8_zero OK ++ basic_vector - valid - vec_uint64_16_max OK ++ basic_vector - valid - vec_uint64_16_random OK ++ basic_vector - valid - vec_uint64_16_zero OK ++ basic_vector - valid - vec_uint64_1_max OK ++ basic_vector - valid - vec_uint64_1_random OK ++ basic_vector - valid - vec_uint64_1_zero OK ++ basic_vector - valid - vec_uint64_2_max OK ++ basic_vector - valid - vec_uint64_2_random OK ++ basic_vector - valid - vec_uint64_2_zero OK ++ basic_vector - valid - vec_uint64_31_max OK ++ basic_vector - valid - vec_uint64_31_random OK ++ basic_vector - valid - vec_uint64_31_zero OK ++ basic_vector - valid - vec_uint64_3_max OK ++ basic_vector - valid - vec_uint64_3_random OK ++ basic_vector - valid - vec_uint64_3_zero OK ++ basic_vector - valid - vec_uint64_4_max OK ++ basic_vector - valid - vec_uint64_4_random OK ++ basic_vector - valid - vec_uint64_4_zero OK ++ basic_vector - valid - vec_uint64_512_max OK ++ basic_vector - valid - vec_uint64_512_random OK ++ basic_vector - valid - vec_uint64_512_zero OK ++ basic_vector - valid - vec_uint64_513_max OK ++ basic_vector - valid - vec_uint64_513_random OK ++ basic_vector - valid - vec_uint64_513_zero OK ++ basic_vector - valid - vec_uint64_5_max OK ++ basic_vector - valid - vec_uint64_5_random OK ++ basic_vector - valid - vec_uint64_5_zero OK ++ basic_vector - valid - vec_uint64_8_max OK ++ basic_vector - valid - vec_uint64_8_random OK ++ basic_vector - valid - vec_uint64_8_zero OK ++ basic_vector - valid - vec_uint8_16_max OK ++ basic_vector - valid - vec_uint8_16_random OK ++ basic_vector - valid - vec_uint8_16_zero OK ++ basic_vector - valid - vec_uint8_1_max OK ++ basic_vector - valid - vec_uint8_1_random OK ++ basic_vector - valid - vec_uint8_1_zero OK ++ basic_vector - valid - vec_uint8_2_max OK ++ basic_vector - valid - vec_uint8_2_random OK ++ basic_vector - valid - vec_uint8_2_zero OK ++ basic_vector - valid - vec_uint8_31_max OK ++ basic_vector - valid - vec_uint8_31_random OK ++ basic_vector - valid - vec_uint8_31_zero OK ++ basic_vector - valid - vec_uint8_3_max OK ++ basic_vector - valid - vec_uint8_3_random OK ++ basic_vector - valid - vec_uint8_3_zero OK ++ basic_vector - valid - vec_uint8_4_max OK ++ basic_vector - valid - vec_uint8_4_random OK ++ basic_vector - valid - vec_uint8_4_zero OK ++ 
basic_vector - valid - vec_uint8_512_max OK ++ basic_vector - valid - vec_uint8_512_random OK ++ basic_vector - valid - vec_uint8_512_zero OK ++ basic_vector - valid - vec_uint8_513_max OK ++ basic_vector - valid - vec_uint8_513_random OK ++ basic_vector - valid - vec_uint8_513_zero OK ++ basic_vector - valid - vec_uint8_5_max OK ++ basic_vector - valid - vec_uint8_5_random OK ++ basic_vector - valid - vec_uint8_5_zero OK ++ basic_vector - valid - vec_uint8_8_max OK ++ basic_vector - valid - vec_uint8_8_random OK ++ basic_vector - valid - vec_uint8_8_zero OK ++ bitlist - invalid - bitlist_1_but_2 OK ++ bitlist - invalid - bitlist_1_but_7 OK ++ bitlist - invalid - bitlist_1_but_8 OK ++ bitlist - invalid - bitlist_1_but_9 OK ++ bitlist - invalid - bitlist_2_but_3 OK ++ bitlist - invalid - bitlist_32_but_33 OK ++ bitlist - invalid - bitlist_32_but_64 OK ++ bitlist - invalid - bitlist_3_but_4 OK ++ bitlist - invalid - bitlist_4_but_5 OK ++ bitlist - invalid - bitlist_512_but_513 OK ++ bitlist - invalid - bitlist_5_but_6 OK ++ bitlist - invalid - bitlist_6_but_7 OK ++ bitlist - invalid - bitlist_7_but_8 OK ++ bitlist - invalid - bitlist_8_but_9 OK ++ bitlist - invalid - bitlist_no_delimiter_empty OK ++ bitlist - invalid - bitlist_no_delimiter_zero_byte OK ++ bitlist - invalid - bitlist_no_delimiter_zeroes OK ++ bitlist - valid - bitlist_15_lengthy_0 OK ++ bitlist - valid - bitlist_15_lengthy_1 OK ++ bitlist - valid - bitlist_15_lengthy_2 OK ++ bitlist - valid - bitlist_15_lengthy_3 OK ++ bitlist - valid - bitlist_15_lengthy_4 OK ++ bitlist - valid - bitlist_15_max_0 OK ++ bitlist - valid - bitlist_15_max_1 OK ++ bitlist - valid - bitlist_15_max_2 OK ++ bitlist - valid - bitlist_15_max_3 OK ++ bitlist - valid - bitlist_15_max_4 OK ++ bitlist - valid - bitlist_15_nil_0 OK ++ bitlist - valid - bitlist_15_nil_1 OK ++ bitlist - valid - bitlist_15_nil_2 OK ++ bitlist - valid - bitlist_15_nil_3 OK ++ bitlist - valid - bitlist_15_nil_4 OK ++ bitlist - valid - bitlist_15_random_0 OK ++ bitlist - valid - bitlist_15_random_1 OK ++ bitlist - valid - bitlist_15_random_2 OK ++ bitlist - valid - bitlist_15_random_3 OK ++ bitlist - valid - bitlist_15_random_4 OK ++ bitlist - valid - bitlist_15_zero_0 OK ++ bitlist - valid - bitlist_15_zero_1 OK ++ bitlist - valid - bitlist_15_zero_2 OK ++ bitlist - valid - bitlist_15_zero_3 OK ++ bitlist - valid - bitlist_15_zero_4 OK ++ bitlist - valid - bitlist_16_lengthy_0 OK ++ bitlist - valid - bitlist_16_lengthy_1 OK ++ bitlist - valid - bitlist_16_lengthy_2 OK ++ bitlist - valid - bitlist_16_lengthy_3 OK ++ bitlist - valid - bitlist_16_lengthy_4 OK ++ bitlist - valid - bitlist_16_max_0 OK ++ bitlist - valid - bitlist_16_max_1 OK ++ bitlist - valid - bitlist_16_max_2 OK ++ bitlist - valid - bitlist_16_max_3 OK ++ bitlist - valid - bitlist_16_max_4 OK ++ bitlist - valid - bitlist_16_nil_0 OK ++ bitlist - valid - bitlist_16_nil_1 OK ++ bitlist - valid - bitlist_16_nil_2 OK ++ bitlist - valid - bitlist_16_nil_3 OK ++ bitlist - valid - bitlist_16_nil_4 OK ++ bitlist - valid - bitlist_16_random_0 OK ++ bitlist - valid - bitlist_16_random_1 OK ++ bitlist - valid - bitlist_16_random_2 OK ++ bitlist - valid - bitlist_16_random_3 OK ++ bitlist - valid - bitlist_16_random_4 OK ++ bitlist - valid - bitlist_16_zero_0 OK ++ bitlist - valid - bitlist_16_zero_1 OK ++ bitlist - valid - bitlist_16_zero_2 OK ++ bitlist - valid - bitlist_16_zero_3 OK ++ bitlist - valid - bitlist_16_zero_4 OK ++ bitlist - valid - bitlist_17_lengthy_0 OK ++ bitlist - valid - bitlist_17_lengthy_1 OK ++ 
bitlist - valid - bitlist_17_lengthy_2 OK ++ bitlist - valid - bitlist_17_lengthy_3 OK ++ bitlist - valid - bitlist_17_lengthy_4 OK ++ bitlist - valid - bitlist_17_max_0 OK ++ bitlist - valid - bitlist_17_max_1 OK ++ bitlist - valid - bitlist_17_max_2 OK ++ bitlist - valid - bitlist_17_max_3 OK ++ bitlist - valid - bitlist_17_max_4 OK ++ bitlist - valid - bitlist_17_nil_0 OK ++ bitlist - valid - bitlist_17_nil_1 OK ++ bitlist - valid - bitlist_17_nil_2 OK ++ bitlist - valid - bitlist_17_nil_3 OK ++ bitlist - valid - bitlist_17_nil_4 OK ++ bitlist - valid - bitlist_17_random_0 OK ++ bitlist - valid - bitlist_17_random_1 OK ++ bitlist - valid - bitlist_17_random_2 OK ++ bitlist - valid - bitlist_17_random_3 OK ++ bitlist - valid - bitlist_17_random_4 OK ++ bitlist - valid - bitlist_17_zero_0 OK ++ bitlist - valid - bitlist_17_zero_1 OK ++ bitlist - valid - bitlist_17_zero_2 OK ++ bitlist - valid - bitlist_17_zero_3 OK ++ bitlist - valid - bitlist_17_zero_4 OK ++ bitlist - valid - bitlist_1_lengthy_0 OK ++ bitlist - valid - bitlist_1_lengthy_1 OK ++ bitlist - valid - bitlist_1_lengthy_2 OK ++ bitlist - valid - bitlist_1_lengthy_3 OK ++ bitlist - valid - bitlist_1_lengthy_4 OK ++ bitlist - valid - bitlist_1_max_0 OK ++ bitlist - valid - bitlist_1_max_1 OK ++ bitlist - valid - bitlist_1_max_2 OK ++ bitlist - valid - bitlist_1_max_3 OK ++ bitlist - valid - bitlist_1_max_4 OK ++ bitlist - valid - bitlist_1_nil_0 OK ++ bitlist - valid - bitlist_1_nil_1 OK ++ bitlist - valid - bitlist_1_nil_2 OK ++ bitlist - valid - bitlist_1_nil_3 OK ++ bitlist - valid - bitlist_1_nil_4 OK ++ bitlist - valid - bitlist_1_random_0 OK ++ bitlist - valid - bitlist_1_random_1 OK ++ bitlist - valid - bitlist_1_random_2 OK ++ bitlist - valid - bitlist_1_random_3 OK ++ bitlist - valid - bitlist_1_random_4 OK ++ bitlist - valid - bitlist_1_zero_0 OK ++ bitlist - valid - bitlist_1_zero_1 OK ++ bitlist - valid - bitlist_1_zero_2 OK ++ bitlist - valid - bitlist_1_zero_3 OK ++ bitlist - valid - bitlist_1_zero_4 OK ++ bitlist - valid - bitlist_2_lengthy_0 OK ++ bitlist - valid - bitlist_2_lengthy_1 OK ++ bitlist - valid - bitlist_2_lengthy_2 OK ++ bitlist - valid - bitlist_2_lengthy_3 OK ++ bitlist - valid - bitlist_2_lengthy_4 OK ++ bitlist - valid - bitlist_2_max_0 OK ++ bitlist - valid - bitlist_2_max_1 OK ++ bitlist - valid - bitlist_2_max_2 OK ++ bitlist - valid - bitlist_2_max_3 OK ++ bitlist - valid - bitlist_2_max_4 OK ++ bitlist - valid - bitlist_2_nil_0 OK ++ bitlist - valid - bitlist_2_nil_1 OK ++ bitlist - valid - bitlist_2_nil_2 OK ++ bitlist - valid - bitlist_2_nil_3 OK ++ bitlist - valid - bitlist_2_nil_4 OK ++ bitlist - valid - bitlist_2_random_0 OK ++ bitlist - valid - bitlist_2_random_1 OK ++ bitlist - valid - bitlist_2_random_2 OK ++ bitlist - valid - bitlist_2_random_3 OK ++ bitlist - valid - bitlist_2_random_4 OK ++ bitlist - valid - bitlist_2_zero_0 OK ++ bitlist - valid - bitlist_2_zero_1 OK ++ bitlist - valid - bitlist_2_zero_2 OK ++ bitlist - valid - bitlist_2_zero_3 OK ++ bitlist - valid - bitlist_2_zero_4 OK ++ bitlist - valid - bitlist_31_lengthy_0 OK ++ bitlist - valid - bitlist_31_lengthy_1 OK ++ bitlist - valid - bitlist_31_lengthy_2 OK ++ bitlist - valid - bitlist_31_lengthy_3 OK ++ bitlist - valid - bitlist_31_lengthy_4 OK ++ bitlist - valid - bitlist_31_max_0 OK ++ bitlist - valid - bitlist_31_max_1 OK ++ bitlist - valid - bitlist_31_max_2 OK ++ bitlist - valid - bitlist_31_max_3 OK ++ bitlist - valid - bitlist_31_max_4 OK ++ bitlist - valid - bitlist_31_nil_0 OK ++ bitlist - valid - 
bitlist_31_nil_1 OK ++ bitlist - valid - bitlist_31_nil_2 OK ++ bitlist - valid - bitlist_31_nil_3 OK ++ bitlist - valid - bitlist_31_nil_4 OK ++ bitlist - valid - bitlist_31_random_0 OK ++ bitlist - valid - bitlist_31_random_1 OK ++ bitlist - valid - bitlist_31_random_2 OK ++ bitlist - valid - bitlist_31_random_3 OK ++ bitlist - valid - bitlist_31_random_4 OK ++ bitlist - valid - bitlist_31_zero_0 OK ++ bitlist - valid - bitlist_31_zero_1 OK ++ bitlist - valid - bitlist_31_zero_2 OK ++ bitlist - valid - bitlist_31_zero_3 OK ++ bitlist - valid - bitlist_31_zero_4 OK ++ bitlist - valid - bitlist_32_lengthy_0 OK ++ bitlist - valid - bitlist_32_lengthy_1 OK ++ bitlist - valid - bitlist_32_lengthy_2 OK ++ bitlist - valid - bitlist_32_lengthy_3 OK ++ bitlist - valid - bitlist_32_lengthy_4 OK ++ bitlist - valid - bitlist_32_max_0 OK ++ bitlist - valid - bitlist_32_max_1 OK ++ bitlist - valid - bitlist_32_max_2 OK ++ bitlist - valid - bitlist_32_max_3 OK ++ bitlist - valid - bitlist_32_max_4 OK ++ bitlist - valid - bitlist_32_nil_0 OK ++ bitlist - valid - bitlist_32_nil_1 OK ++ bitlist - valid - bitlist_32_nil_2 OK ++ bitlist - valid - bitlist_32_nil_3 OK ++ bitlist - valid - bitlist_32_nil_4 OK ++ bitlist - valid - bitlist_32_random_0 OK ++ bitlist - valid - bitlist_32_random_1 OK ++ bitlist - valid - bitlist_32_random_2 OK ++ bitlist - valid - bitlist_32_random_3 OK ++ bitlist - valid - bitlist_32_random_4 OK ++ bitlist - valid - bitlist_32_zero_0 OK ++ bitlist - valid - bitlist_32_zero_1 OK ++ bitlist - valid - bitlist_32_zero_2 OK ++ bitlist - valid - bitlist_32_zero_3 OK ++ bitlist - valid - bitlist_32_zero_4 OK ++ bitlist - valid - bitlist_33_lengthy_0 OK ++ bitlist - valid - bitlist_33_lengthy_1 OK ++ bitlist - valid - bitlist_33_lengthy_2 OK ++ bitlist - valid - bitlist_33_lengthy_3 OK ++ bitlist - valid - bitlist_33_lengthy_4 OK ++ bitlist - valid - bitlist_33_max_0 OK ++ bitlist - valid - bitlist_33_max_1 OK ++ bitlist - valid - bitlist_33_max_2 OK ++ bitlist - valid - bitlist_33_max_3 OK ++ bitlist - valid - bitlist_33_max_4 OK ++ bitlist - valid - bitlist_33_nil_0 OK ++ bitlist - valid - bitlist_33_nil_1 OK ++ bitlist - valid - bitlist_33_nil_2 OK ++ bitlist - valid - bitlist_33_nil_3 OK ++ bitlist - valid - bitlist_33_nil_4 OK ++ bitlist - valid - bitlist_33_random_0 OK ++ bitlist - valid - bitlist_33_random_1 OK ++ bitlist - valid - bitlist_33_random_2 OK ++ bitlist - valid - bitlist_33_random_3 OK ++ bitlist - valid - bitlist_33_random_4 OK ++ bitlist - valid - bitlist_33_zero_0 OK ++ bitlist - valid - bitlist_33_zero_1 OK ++ bitlist - valid - bitlist_33_zero_2 OK ++ bitlist - valid - bitlist_33_zero_3 OK ++ bitlist - valid - bitlist_33_zero_4 OK ++ bitlist - valid - bitlist_3_lengthy_0 OK ++ bitlist - valid - bitlist_3_lengthy_1 OK ++ bitlist - valid - bitlist_3_lengthy_2 OK ++ bitlist - valid - bitlist_3_lengthy_3 OK ++ bitlist - valid - bitlist_3_lengthy_4 OK ++ bitlist - valid - bitlist_3_max_0 OK ++ bitlist - valid - bitlist_3_max_1 OK ++ bitlist - valid - bitlist_3_max_2 OK ++ bitlist - valid - bitlist_3_max_3 OK ++ bitlist - valid - bitlist_3_max_4 OK ++ bitlist - valid - bitlist_3_nil_0 OK ++ bitlist - valid - bitlist_3_nil_1 OK ++ bitlist - valid - bitlist_3_nil_2 OK ++ bitlist - valid - bitlist_3_nil_3 OK ++ bitlist - valid - bitlist_3_nil_4 OK ++ bitlist - valid - bitlist_3_random_0 OK ++ bitlist - valid - bitlist_3_random_1 OK ++ bitlist - valid - bitlist_3_random_2 OK ++ bitlist - valid - bitlist_3_random_3 OK ++ bitlist - valid - bitlist_3_random_4 OK ++ bitlist - 
valid - bitlist_3_zero_0 OK ++ bitlist - valid - bitlist_3_zero_1 OK ++ bitlist - valid - bitlist_3_zero_2 OK ++ bitlist - valid - bitlist_3_zero_3 OK ++ bitlist - valid - bitlist_3_zero_4 OK ++ bitlist - valid - bitlist_4_lengthy_0 OK ++ bitlist - valid - bitlist_4_lengthy_1 OK ++ bitlist - valid - bitlist_4_lengthy_2 OK ++ bitlist - valid - bitlist_4_lengthy_3 OK ++ bitlist - valid - bitlist_4_lengthy_4 OK ++ bitlist - valid - bitlist_4_max_0 OK ++ bitlist - valid - bitlist_4_max_1 OK ++ bitlist - valid - bitlist_4_max_2 OK ++ bitlist - valid - bitlist_4_max_3 OK ++ bitlist - valid - bitlist_4_max_4 OK ++ bitlist - valid - bitlist_4_nil_0 OK ++ bitlist - valid - bitlist_4_nil_1 OK ++ bitlist - valid - bitlist_4_nil_2 OK ++ bitlist - valid - bitlist_4_nil_3 OK ++ bitlist - valid - bitlist_4_nil_4 OK ++ bitlist - valid - bitlist_4_random_0 OK ++ bitlist - valid - bitlist_4_random_1 OK ++ bitlist - valid - bitlist_4_random_2 OK ++ bitlist - valid - bitlist_4_random_3 OK ++ bitlist - valid - bitlist_4_random_4 OK ++ bitlist - valid - bitlist_4_zero_0 OK ++ bitlist - valid - bitlist_4_zero_1 OK ++ bitlist - valid - bitlist_4_zero_2 OK ++ bitlist - valid - bitlist_4_zero_3 OK ++ bitlist - valid - bitlist_4_zero_4 OK ++ bitlist - valid - bitlist_511_lengthy_0 OK ++ bitlist - valid - bitlist_511_lengthy_1 OK ++ bitlist - valid - bitlist_511_lengthy_2 OK ++ bitlist - valid - bitlist_511_lengthy_3 OK ++ bitlist - valid - bitlist_511_lengthy_4 OK ++ bitlist - valid - bitlist_511_max_0 OK ++ bitlist - valid - bitlist_511_max_1 OK ++ bitlist - valid - bitlist_511_max_2 OK ++ bitlist - valid - bitlist_511_max_3 OK ++ bitlist - valid - bitlist_511_max_4 OK ++ bitlist - valid - bitlist_511_nil_0 OK ++ bitlist - valid - bitlist_511_nil_1 OK ++ bitlist - valid - bitlist_511_nil_2 OK ++ bitlist - valid - bitlist_511_nil_3 OK ++ bitlist - valid - bitlist_511_nil_4 OK ++ bitlist - valid - bitlist_511_random_0 OK ++ bitlist - valid - bitlist_511_random_1 OK ++ bitlist - valid - bitlist_511_random_2 OK ++ bitlist - valid - bitlist_511_random_3 OK ++ bitlist - valid - bitlist_511_random_4 OK ++ bitlist - valid - bitlist_511_zero_0 OK ++ bitlist - valid - bitlist_511_zero_1 OK ++ bitlist - valid - bitlist_511_zero_2 OK ++ bitlist - valid - bitlist_511_zero_3 OK ++ bitlist - valid - bitlist_511_zero_4 OK ++ bitlist - valid - bitlist_512_lengthy_0 OK ++ bitlist - valid - bitlist_512_lengthy_1 OK ++ bitlist - valid - bitlist_512_lengthy_2 OK ++ bitlist - valid - bitlist_512_lengthy_3 OK ++ bitlist - valid - bitlist_512_lengthy_4 OK ++ bitlist - valid - bitlist_512_max_0 OK ++ bitlist - valid - bitlist_512_max_1 OK ++ bitlist - valid - bitlist_512_max_2 OK ++ bitlist - valid - bitlist_512_max_3 OK ++ bitlist - valid - bitlist_512_max_4 OK ++ bitlist - valid - bitlist_512_nil_0 OK ++ bitlist - valid - bitlist_512_nil_1 OK ++ bitlist - valid - bitlist_512_nil_2 OK ++ bitlist - valid - bitlist_512_nil_3 OK ++ bitlist - valid - bitlist_512_nil_4 OK ++ bitlist - valid - bitlist_512_random_0 OK ++ bitlist - valid - bitlist_512_random_1 OK ++ bitlist - valid - bitlist_512_random_2 OK ++ bitlist - valid - bitlist_512_random_3 OK ++ bitlist - valid - bitlist_512_random_4 OK ++ bitlist - valid - bitlist_512_zero_0 OK ++ bitlist - valid - bitlist_512_zero_1 OK ++ bitlist - valid - bitlist_512_zero_2 OK ++ bitlist - valid - bitlist_512_zero_3 OK ++ bitlist - valid - bitlist_512_zero_4 OK ++ bitlist - valid - bitlist_513_lengthy_0 OK ++ bitlist - valid - bitlist_513_lengthy_1 OK ++ bitlist - valid - bitlist_513_lengthy_2 OK ++ 
bitlist - valid - bitlist_513_lengthy_3 OK ++ bitlist - valid - bitlist_513_lengthy_4 OK ++ bitlist - valid - bitlist_513_max_0 OK ++ bitlist - valid - bitlist_513_max_1 OK ++ bitlist - valid - bitlist_513_max_2 OK ++ bitlist - valid - bitlist_513_max_3 OK ++ bitlist - valid - bitlist_513_max_4 OK ++ bitlist - valid - bitlist_513_nil_0 OK ++ bitlist - valid - bitlist_513_nil_1 OK ++ bitlist - valid - bitlist_513_nil_2 OK ++ bitlist - valid - bitlist_513_nil_3 OK ++ bitlist - valid - bitlist_513_nil_4 OK ++ bitlist - valid - bitlist_513_random_0 OK ++ bitlist - valid - bitlist_513_random_1 OK ++ bitlist - valid - bitlist_513_random_2 OK ++ bitlist - valid - bitlist_513_random_3 OK ++ bitlist - valid - bitlist_513_random_4 OK ++ bitlist - valid - bitlist_513_zero_0 OK ++ bitlist - valid - bitlist_513_zero_1 OK ++ bitlist - valid - bitlist_513_zero_2 OK ++ bitlist - valid - bitlist_513_zero_3 OK ++ bitlist - valid - bitlist_513_zero_4 OK ++ bitlist - valid - bitlist_5_lengthy_0 OK ++ bitlist - valid - bitlist_5_lengthy_1 OK ++ bitlist - valid - bitlist_5_lengthy_2 OK ++ bitlist - valid - bitlist_5_lengthy_3 OK ++ bitlist - valid - bitlist_5_lengthy_4 OK ++ bitlist - valid - bitlist_5_max_0 OK ++ bitlist - valid - bitlist_5_max_1 OK ++ bitlist - valid - bitlist_5_max_2 OK ++ bitlist - valid - bitlist_5_max_3 OK ++ bitlist - valid - bitlist_5_max_4 OK ++ bitlist - valid - bitlist_5_nil_0 OK ++ bitlist - valid - bitlist_5_nil_1 OK ++ bitlist - valid - bitlist_5_nil_2 OK ++ bitlist - valid - bitlist_5_nil_3 OK ++ bitlist - valid - bitlist_5_nil_4 OK ++ bitlist - valid - bitlist_5_random_0 OK ++ bitlist - valid - bitlist_5_random_1 OK ++ bitlist - valid - bitlist_5_random_2 OK ++ bitlist - valid - bitlist_5_random_3 OK ++ bitlist - valid - bitlist_5_random_4 OK ++ bitlist - valid - bitlist_5_zero_0 OK ++ bitlist - valid - bitlist_5_zero_1 OK ++ bitlist - valid - bitlist_5_zero_2 OK ++ bitlist - valid - bitlist_5_zero_3 OK ++ bitlist - valid - bitlist_5_zero_4 OK ++ bitlist - valid - bitlist_6_lengthy_0 OK ++ bitlist - valid - bitlist_6_lengthy_1 OK ++ bitlist - valid - bitlist_6_lengthy_2 OK ++ bitlist - valid - bitlist_6_lengthy_3 OK ++ bitlist - valid - bitlist_6_lengthy_4 OK ++ bitlist - valid - bitlist_6_max_0 OK ++ bitlist - valid - bitlist_6_max_1 OK ++ bitlist - valid - bitlist_6_max_2 OK ++ bitlist - valid - bitlist_6_max_3 OK ++ bitlist - valid - bitlist_6_max_4 OK ++ bitlist - valid - bitlist_6_nil_0 OK ++ bitlist - valid - bitlist_6_nil_1 OK ++ bitlist - valid - bitlist_6_nil_2 OK ++ bitlist - valid - bitlist_6_nil_3 OK ++ bitlist - valid - bitlist_6_nil_4 OK ++ bitlist - valid - bitlist_6_random_0 OK ++ bitlist - valid - bitlist_6_random_1 OK ++ bitlist - valid - bitlist_6_random_2 OK ++ bitlist - valid - bitlist_6_random_3 OK ++ bitlist - valid - bitlist_6_random_4 OK ++ bitlist - valid - bitlist_6_zero_0 OK ++ bitlist - valid - bitlist_6_zero_1 OK ++ bitlist - valid - bitlist_6_zero_2 OK ++ bitlist - valid - bitlist_6_zero_3 OK ++ bitlist - valid - bitlist_6_zero_4 OK ++ bitlist - valid - bitlist_7_lengthy_0 OK ++ bitlist - valid - bitlist_7_lengthy_1 OK ++ bitlist - valid - bitlist_7_lengthy_2 OK ++ bitlist - valid - bitlist_7_lengthy_3 OK ++ bitlist - valid - bitlist_7_lengthy_4 OK ++ bitlist - valid - bitlist_7_max_0 OK ++ bitlist - valid - bitlist_7_max_1 OK ++ bitlist - valid - bitlist_7_max_2 OK ++ bitlist - valid - bitlist_7_max_3 OK ++ bitlist - valid - bitlist_7_max_4 OK ++ bitlist - valid - bitlist_7_nil_0 OK ++ bitlist - valid - bitlist_7_nil_1 OK ++ bitlist - valid - 
bitlist_7_nil_2 OK ++ bitlist - valid - bitlist_7_nil_3 OK ++ bitlist - valid - bitlist_7_nil_4 OK ++ bitlist - valid - bitlist_7_random_0 OK ++ bitlist - valid - bitlist_7_random_1 OK ++ bitlist - valid - bitlist_7_random_2 OK ++ bitlist - valid - bitlist_7_random_3 OK ++ bitlist - valid - bitlist_7_random_4 OK ++ bitlist - valid - bitlist_7_zero_0 OK ++ bitlist - valid - bitlist_7_zero_1 OK ++ bitlist - valid - bitlist_7_zero_2 OK ++ bitlist - valid - bitlist_7_zero_3 OK ++ bitlist - valid - bitlist_7_zero_4 OK ++ bitlist - valid - bitlist_8_lengthy_0 OK ++ bitlist - valid - bitlist_8_lengthy_1 OK ++ bitlist - valid - bitlist_8_lengthy_2 OK ++ bitlist - valid - bitlist_8_lengthy_3 OK ++ bitlist - valid - bitlist_8_lengthy_4 OK ++ bitlist - valid - bitlist_8_max_0 OK ++ bitlist - valid - bitlist_8_max_1 OK ++ bitlist - valid - bitlist_8_max_2 OK ++ bitlist - valid - bitlist_8_max_3 OK ++ bitlist - valid - bitlist_8_max_4 OK ++ bitlist - valid - bitlist_8_nil_0 OK ++ bitlist - valid - bitlist_8_nil_1 OK ++ bitlist - valid - bitlist_8_nil_2 OK ++ bitlist - valid - bitlist_8_nil_3 OK ++ bitlist - valid - bitlist_8_nil_4 OK ++ bitlist - valid - bitlist_8_random_0 OK ++ bitlist - valid - bitlist_8_random_1 OK ++ bitlist - valid - bitlist_8_random_2 OK ++ bitlist - valid - bitlist_8_random_3 OK ++ bitlist - valid - bitlist_8_random_4 OK ++ bitlist - valid - bitlist_8_zero_0 OK ++ bitlist - valid - bitlist_8_zero_1 OK ++ bitlist - valid - bitlist_8_zero_2 OK ++ bitlist - valid - bitlist_8_zero_3 OK ++ bitlist - valid - bitlist_8_zero_4 OK ++ bitlist - valid - bitlist_9_lengthy_0 OK ++ bitlist - valid - bitlist_9_lengthy_1 OK ++ bitlist - valid - bitlist_9_lengthy_2 OK ++ bitlist - valid - bitlist_9_lengthy_3 OK ++ bitlist - valid - bitlist_9_lengthy_4 OK ++ bitlist - valid - bitlist_9_max_0 OK ++ bitlist - valid - bitlist_9_max_1 OK ++ bitlist - valid - bitlist_9_max_2 OK ++ bitlist - valid - bitlist_9_max_3 OK ++ bitlist - valid - bitlist_9_max_4 OK ++ bitlist - valid - bitlist_9_nil_0 OK ++ bitlist - valid - bitlist_9_nil_1 OK ++ bitlist - valid - bitlist_9_nil_2 OK ++ bitlist - valid - bitlist_9_nil_3 OK ++ bitlist - valid - bitlist_9_nil_4 OK ++ bitlist - valid - bitlist_9_random_0 OK ++ bitlist - valid - bitlist_9_random_1 OK ++ bitlist - valid - bitlist_9_random_2 OK ++ bitlist - valid - bitlist_9_random_3 OK ++ bitlist - valid - bitlist_9_random_4 OK ++ bitlist - valid - bitlist_9_zero_0 OK ++ bitlist - valid - bitlist_9_zero_1 OK ++ bitlist - valid - bitlist_9_zero_2 OK ++ bitlist - valid - bitlist_9_zero_3 OK ++ bitlist - valid - bitlist_9_zero_4 OK + bitvector - invalid - bitvec_0 Skip ++ bitvector - invalid - bitvec_16_max_8 OK ++ bitvector - invalid - bitvec_16_random_8 OK ++ bitvector - invalid - bitvec_16_zero_8 OK ++ bitvector - invalid - bitvec_1_max_2 OK ++ bitvector - invalid - bitvec_1_random_2 OK ++ bitvector - invalid - bitvec_1_zero_2 OK ++ bitvector - invalid - bitvec_2_max_3 OK ++ bitvector - invalid - bitvec_2_random_3 OK ++ bitvector - invalid - bitvec_2_zero_3 OK ++ bitvector - invalid - bitvec_32_max_33 OK ++ bitvector - invalid - bitvec_32_random_33 OK ++ bitvector - invalid - bitvec_32_zero_33 OK ++ bitvector - invalid - bitvec_3_max_4 OK ++ bitvector - invalid - bitvec_3_random_4 OK ++ bitvector - invalid - bitvec_3_zero_4 OK ++ bitvector - invalid - bitvec_4_max_5 OK ++ bitvector - invalid - bitvec_4_random_5 OK ++ bitvector - invalid - bitvec_4_zero_5 OK ++ bitvector - invalid - bitvec_512_max_513 OK ++ bitvector - invalid - bitvec_512_random_513 OK ++ bitvector 
- invalid - bitvec_512_zero_513 OK ++ bitvector - invalid - bitvec_5_max_6 OK ++ bitvector - invalid - bitvec_5_random_6 OK ++ bitvector - invalid - bitvec_5_zero_6 OK ++ bitvector - invalid - bitvec_8_max_9 OK ++ bitvector - invalid - bitvec_8_random_9 OK ++ bitvector - invalid - bitvec_8_zero_9 OK ++ bitvector - invalid - bitvec_9_max_8 OK ++ bitvector - invalid - bitvec_9_random_8 OK ++ bitvector - invalid - bitvec_9_zero_8 OK ++ bitvector - valid - bitvec_15_max OK ++ bitvector - valid - bitvec_15_random OK ++ bitvector - valid - bitvec_15_zero OK ++ bitvector - valid - bitvec_16_max OK ++ bitvector - valid - bitvec_16_random OK ++ bitvector - valid - bitvec_16_zero OK ++ bitvector - valid - bitvec_17_max OK ++ bitvector - valid - bitvec_17_random OK ++ bitvector - valid - bitvec_17_zero OK ++ bitvector - valid - bitvec_1_max OK ++ bitvector - valid - bitvec_1_random OK ++ bitvector - valid - bitvec_1_zero OK ++ bitvector - valid - bitvec_2_max OK ++ bitvector - valid - bitvec_2_random OK ++ bitvector - valid - bitvec_2_zero OK ++ bitvector - valid - bitvec_31_max OK ++ bitvector - valid - bitvec_31_random OK ++ bitvector - valid - bitvec_31_zero OK ++ bitvector - valid - bitvec_32_max OK ++ bitvector - valid - bitvec_32_random OK ++ bitvector - valid - bitvec_32_zero OK ++ bitvector - valid - bitvec_33_max OK ++ bitvector - valid - bitvec_33_random OK ++ bitvector - valid - bitvec_33_zero OK ++ bitvector - valid - bitvec_3_max OK ++ bitvector - valid - bitvec_3_random OK ++ bitvector - valid - bitvec_3_zero OK ++ bitvector - valid - bitvec_4_max OK ++ bitvector - valid - bitvec_4_random OK ++ bitvector - valid - bitvec_4_zero OK ++ bitvector - valid - bitvec_511_max OK ++ bitvector - valid - bitvec_511_random OK ++ bitvector - valid - bitvec_511_zero OK ++ bitvector - valid - bitvec_512_max OK ++ bitvector - valid - bitvec_512_random OK ++ bitvector - valid - bitvec_512_zero OK ++ bitvector - valid - bitvec_513_max OK ++ bitvector - valid - bitvec_513_random OK ++ bitvector - valid - bitvec_513_zero OK ++ bitvector - valid - bitvec_5_max OK ++ bitvector - valid - bitvec_5_random OK ++ bitvector - valid - bitvec_5_zero OK ++ bitvector - valid - bitvec_6_max OK ++ bitvector - valid - bitvec_6_random OK ++ bitvector - valid - bitvec_6_zero OK ++ bitvector - valid - bitvec_7_max OK ++ bitvector - valid - bitvec_7_random OK ++ bitvector - valid - bitvec_7_zero OK ++ bitvector - valid - bitvec_8_max OK ++ bitvector - valid - bitvec_8_random OK ++ bitvector - valid - bitvec_8_zero OK ++ bitvector - valid - bitvec_9_max OK ++ bitvector - valid - bitvec_9_random OK ++ bitvector - valid - bitvec_9_zero OK ++ boolean - invalid - byte_0x80 OK ++ boolean - invalid - byte_0xff OK ++ boolean - invalid - byte_2 OK ++ boolean - invalid - byte_rev_nibble OK ++ boolean - valid - false OK ++ boolean - valid - true OK ++ containers - invalid - BitsStruct_extra_byte OK ++ containers - invalid - BitsStruct_lengthy_last_offset_0_overflow OK ++ containers - invalid - BitsStruct_lengthy_last_offset_10_overflow OK ++ containers - invalid - BitsStruct_lengthy_last_offset_6_overflow OK ++ containers - invalid - BitsStruct_lengthy_offset_0_minus_one OK ++ containers - invalid - BitsStruct_lengthy_offset_0_plus_one OK ++ containers - invalid - BitsStruct_lengthy_offset_0_zeroed OK ++ containers - invalid - BitsStruct_lengthy_offset_10_minus_one OK ++ containers - invalid - BitsStruct_lengthy_offset_10_plus_one OK ++ containers - invalid - BitsStruct_lengthy_offset_10_zeroed OK ++ containers - invalid - 
BitsStruct_lengthy_offset_6_minus_one OK ++ containers - invalid - BitsStruct_lengthy_offset_6_plus_one OK ++ containers - invalid - BitsStruct_lengthy_offset_6_zeroed OK ++ containers - invalid - BitsStruct_nil_offset_0_minus_one OK ++ containers - invalid - BitsStruct_nil_offset_0_plus_one OK ++ containers - invalid - BitsStruct_nil_offset_0_zeroed OK ++ containers - invalid - BitsStruct_nil_offset_10_minus_one OK ++ containers - invalid - BitsStruct_nil_offset_10_plus_one OK ++ containers - invalid - BitsStruct_nil_offset_10_zeroed OK ++ containers - invalid - BitsStruct_nil_offset_6_minus_one OK ++ containers - invalid - BitsStruct_nil_offset_6_plus_one OK ++ containers - invalid - BitsStruct_nil_offset_6_zeroed OK ++ containers - invalid - BitsStruct_one_last_offset_0_wrong_byte_length OK ++ containers - invalid - BitsStruct_one_last_offset_10_wrong_byte_length OK ++ containers - invalid - BitsStruct_one_last_offset_6_wrong_byte_length OK ++ containers - invalid - BitsStruct_one_offset_0_minus_one OK ++ containers - invalid - BitsStruct_one_offset_0_plus_one OK ++ containers - invalid - BitsStruct_one_offset_0_zeroed OK ++ containers - invalid - BitsStruct_one_offset_10_minus_one OK ++ containers - invalid - BitsStruct_one_offset_10_plus_one OK ++ containers - invalid - BitsStruct_one_offset_10_zeroed OK ++ containers - invalid - BitsStruct_one_offset_6_minus_one OK ++ containers - invalid - BitsStruct_one_offset_6_plus_one OK ++ containers - invalid - BitsStruct_one_offset_6_zeroed OK ++ containers - invalid - BitsStruct_random_offset_0_minus_one OK ++ containers - invalid - BitsStruct_random_offset_0_plus_one OK ++ containers - invalid - BitsStruct_random_offset_0_zeroed OK ++ containers - invalid - BitsStruct_random_offset_10_minus_one OK ++ containers - invalid - BitsStruct_random_offset_10_plus_one OK ++ containers - invalid - BitsStruct_random_offset_10_zeroed OK ++ containers - invalid - BitsStruct_random_offset_6_minus_one OK ++ containers - invalid - BitsStruct_random_offset_6_plus_one OK ++ containers - invalid - BitsStruct_random_offset_6_zeroed OK ++ containers - invalid - ComplexTestStruct_extra_byte OK ++ containers - invalid - ComplexTestStruct_lengthy_last_offset_11_overflow OK ++ containers - invalid - ComplexTestStruct_lengthy_last_offset_2_overflow OK ++ containers - invalid - ComplexTestStruct_lengthy_last_offset_7_overflow OK ++ containers - invalid - ComplexTestStruct_lengthy_offset_11_minus_one OK ++ containers - invalid - ComplexTestStruct_lengthy_offset_11_plus_one OK ++ containers - invalid - ComplexTestStruct_lengthy_offset_11_zeroed OK ++ containers - invalid - ComplexTestStruct_lengthy_offset_2_minus_one OK ++ containers - invalid - ComplexTestStruct_lengthy_offset_2_plus_one OK ++ containers - invalid - ComplexTestStruct_lengthy_offset_2_zeroed OK ++ containers - invalid - ComplexTestStruct_lengthy_offset_7_minus_one OK ++ containers - invalid - ComplexTestStruct_lengthy_offset_7_plus_one OK ++ containers - invalid - ComplexTestStruct_lengthy_offset_7_zeroed OK ++ containers - invalid - ComplexTestStruct_nil_offset_11_minus_one OK ++ containers - invalid - ComplexTestStruct_nil_offset_11_plus_one OK ++ containers - invalid - ComplexTestStruct_nil_offset_11_zeroed OK ++ containers - invalid - ComplexTestStruct_nil_offset_2_minus_one OK ++ containers - invalid - ComplexTestStruct_nil_offset_2_plus_one OK ++ containers - invalid - ComplexTestStruct_nil_offset_2_zeroed OK ++ containers - invalid - ComplexTestStruct_nil_offset_7_minus_one OK ++ containers - 
invalid - ComplexTestStruct_nil_offset_7_plus_one OK ++ containers - invalid - ComplexTestStruct_nil_offset_7_zeroed OK ++ containers - invalid - ComplexTestStruct_one_last_offset_11_wrong_byte_length OK ++ containers - invalid - ComplexTestStruct_one_last_offset_2_wrong_byte_length OK ++ containers - invalid - ComplexTestStruct_one_last_offset_7_wrong_byte_length OK ++ containers - invalid - ComplexTestStruct_one_offset_11_minus_one OK ++ containers - invalid - ComplexTestStruct_one_offset_11_plus_one OK ++ containers - invalid - ComplexTestStruct_one_offset_11_zeroed OK ++ containers - invalid - ComplexTestStruct_one_offset_2_minus_one OK ++ containers - invalid - ComplexTestStruct_one_offset_2_plus_one OK ++ containers - invalid - ComplexTestStruct_one_offset_2_zeroed OK ++ containers - invalid - ComplexTestStruct_one_offset_7_minus_one OK ++ containers - invalid - ComplexTestStruct_one_offset_7_plus_one OK ++ containers - invalid - ComplexTestStruct_one_offset_7_zeroed OK ++ containers - invalid - ComplexTestStruct_random_offset_11_minus_one OK ++ containers - invalid - ComplexTestStruct_random_offset_11_plus_one OK ++ containers - invalid - ComplexTestStruct_random_offset_11_zeroed OK ++ containers - invalid - ComplexTestStruct_random_offset_2_minus_one OK ++ containers - invalid - ComplexTestStruct_random_offset_2_plus_one OK ++ containers - invalid - ComplexTestStruct_random_offset_2_zeroed OK ++ containers - invalid - ComplexTestStruct_random_offset_7_minus_one OK ++ containers - invalid - ComplexTestStruct_random_offset_7_plus_one OK ++ containers - invalid - ComplexTestStruct_random_offset_7_zeroed OK ++ containers - invalid - FixedTestStruct_extra_byte OK ++ containers - invalid - ProgressiveBitsStruct_extra_byte OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_241_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_241_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_245_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_245_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_32_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_32_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_32_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_36_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_36_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_410_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_410_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_414_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_414_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_73_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_73_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_77_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_lengthy_offset_77_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_241_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_241_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_241_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_245_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_245_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_245_zeroed OK ++ containers - invalid - 
ProgressiveBitsStruct_nil_offset_32_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_32_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_32_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_36_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_36_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_36_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_410_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_410_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_410_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_414_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_414_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_414_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_73_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_73_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_73_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_77_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_77_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_nil_offset_77_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_241_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_241_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_241_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_245_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_245_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_245_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_32_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_32_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_32_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_36_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_36_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_36_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_410_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_410_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_410_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_414_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_414_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_414_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_73_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_73_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_73_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_77_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_77_plus_one OK ++ containers - invalid - ProgressiveBitsStruct_one_offset_77_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_random_offset_241_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_random_offset_245_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_random_offset_32_minus_one OK ++ containers - invalid - ProgressiveBitsStruct_random_offset_32_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_random_offset_36_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_random_offset_410_zeroed OK ++ 
containers - invalid - ProgressiveBitsStruct_random_offset_414_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_random_offset_73_zeroed OK ++ containers - invalid - ProgressiveBitsStruct_random_offset_77_zeroed OK ++ containers - invalid - ProgressiveTestStruct_extra_byte OK ++ containers - invalid - ProgressiveTestStruct_lengthy_last_offset_0_overflow OK ++ containers - invalid - ProgressiveTestStruct_lengthy_last_offset_12_overflow OK ++ containers - invalid - ProgressiveTestStruct_lengthy_last_offset_4_overflow OK ++ containers - invalid - ProgressiveTestStruct_lengthy_last_offset_8_overflow OK ++ containers - invalid - ProgressiveTestStruct_lengthy_offset_0_minus_one OK ++ containers - invalid - ProgressiveTestStruct_lengthy_offset_0_plus_one OK ++ containers - invalid - ProgressiveTestStruct_lengthy_offset_0_zeroed OK ++ containers - invalid - ProgressiveTestStruct_lengthy_offset_12_minus_one OK ++ containers - invalid - ProgressiveTestStruct_lengthy_offset_12_plus_one OK ++ containers - invalid - ProgressiveTestStruct_lengthy_offset_12_zeroed OK ++ containers - invalid - ProgressiveTestStruct_lengthy_offset_4_minus_one OK ++ containers - invalid - ProgressiveTestStruct_lengthy_offset_4_plus_one OK ++ containers - invalid - ProgressiveTestStruct_lengthy_offset_4_zeroed OK ++ containers - invalid - ProgressiveTestStruct_lengthy_offset_8_minus_one OK ++ containers - invalid - ProgressiveTestStruct_lengthy_offset_8_plus_one OK ++ containers - invalid - ProgressiveTestStruct_lengthy_offset_8_zeroed OK ++ containers - invalid - ProgressiveTestStruct_nil_offset_0_minus_one OK ++ containers - invalid - ProgressiveTestStruct_nil_offset_0_plus_one OK ++ containers - invalid - ProgressiveTestStruct_nil_offset_0_zeroed OK ++ containers - invalid - ProgressiveTestStruct_nil_offset_12_minus_one OK ++ containers - invalid - ProgressiveTestStruct_nil_offset_12_plus_one OK ++ containers - invalid - ProgressiveTestStruct_nil_offset_12_zeroed OK ++ containers - invalid - ProgressiveTestStruct_nil_offset_4_minus_one OK ++ containers - invalid - ProgressiveTestStruct_nil_offset_4_plus_one OK ++ containers - invalid - ProgressiveTestStruct_nil_offset_4_zeroed OK ++ containers - invalid - ProgressiveTestStruct_nil_offset_8_minus_one OK ++ containers - invalid - ProgressiveTestStruct_nil_offset_8_plus_one OK ++ containers - invalid - ProgressiveTestStruct_nil_offset_8_zeroed OK ++ containers - invalid - ProgressiveTestStruct_one_last_offset_0_wrong_byte_length OK ++ containers - invalid - ProgressiveTestStruct_one_last_offset_12_wrong_byte_length OK ++ containers - invalid - ProgressiveTestStruct_one_last_offset_4_wrong_byte_length OK ++ containers - invalid - ProgressiveTestStruct_one_last_offset_8_wrong_byte_length OK ++ containers - invalid - ProgressiveTestStruct_one_offset_0_minus_one OK ++ containers - invalid - ProgressiveTestStruct_one_offset_0_plus_one OK ++ containers - invalid - ProgressiveTestStruct_one_offset_0_zeroed OK ++ containers - invalid - ProgressiveTestStruct_one_offset_12_minus_one OK ++ containers - invalid - ProgressiveTestStruct_one_offset_12_plus_one OK ++ containers - invalid - ProgressiveTestStruct_one_offset_12_zeroed OK ++ containers - invalid - ProgressiveTestStruct_one_offset_4_minus_one OK ++ containers - invalid - ProgressiveTestStruct_one_offset_4_plus_one OK ++ containers - invalid - ProgressiveTestStruct_one_offset_4_zeroed OK ++ containers - invalid - ProgressiveTestStruct_one_offset_8_minus_one OK ++ containers - invalid - 
ProgressiveTestStruct_one_offset_8_plus_one OK ++ containers - invalid - ProgressiveTestStruct_one_offset_8_zeroed OK ++ containers - invalid - ProgressiveTestStruct_random_offset_0_minus_one OK ++ containers - invalid - ProgressiveTestStruct_random_offset_0_plus_one OK ++ containers - invalid - ProgressiveTestStruct_random_offset_0_zeroed OK ++ containers - invalid - ProgressiveTestStruct_random_offset_12_minus_one OK ++ containers - invalid - ProgressiveTestStruct_random_offset_12_plus_one OK ++ containers - invalid - ProgressiveTestStruct_random_offset_12_zeroed OK ++ containers - invalid - ProgressiveTestStruct_random_offset_4_minus_one OK ++ containers - invalid - ProgressiveTestStruct_random_offset_4_plus_one OK ++ containers - invalid - ProgressiveTestStruct_random_offset_4_zeroed OK ++ containers - invalid - ProgressiveTestStruct_random_offset_8_minus_one OK ++ containers - invalid - ProgressiveTestStruct_random_offset_8_plus_one OK ++ containers - invalid - ProgressiveTestStruct_random_offset_8_zeroed OK ++ containers - invalid - SingleFieldTestStruct_extra_byte OK ++ containers - invalid - SmallTestStruct_extra_byte OK ++ containers - invalid - VarTestStruct_extra_byte OK ++ containers - invalid - VarTestStruct_lengthy_last_offset_2_overflow OK ++ containers - invalid - VarTestStruct_lengthy_offset_2_minus_one OK ++ containers - invalid - VarTestStruct_lengthy_offset_2_plus_one OK ++ containers - invalid - VarTestStruct_lengthy_offset_2_zeroed OK ++ containers - invalid - VarTestStruct_nil_offset_2_minus_one OK ++ containers - invalid - VarTestStruct_nil_offset_2_plus_one OK ++ containers - invalid - VarTestStruct_nil_offset_2_zeroed OK ++ containers - invalid - VarTestStruct_one_last_offset_2_wrong_byte_length OK ++ containers - invalid - VarTestStruct_one_offset_2_minus_one OK ++ containers - invalid - VarTestStruct_one_offset_2_plus_one OK ++ containers - invalid - VarTestStruct_one_offset_2_zeroed OK ++ containers - invalid - VarTestStruct_random_offset_2_minus_one OK ++ containers - invalid - VarTestStruct_random_offset_2_plus_one OK ++ containers - invalid - VarTestStruct_random_offset_2_zeroed OK ++ containers - valid - BitsStruct_lengthy_0 OK ++ containers - valid - BitsStruct_lengthy_1 OK ++ containers - valid - BitsStruct_lengthy_2 OK ++ containers - valid - BitsStruct_lengthy_3 OK ++ containers - valid - BitsStruct_lengthy_4 OK ++ containers - valid - BitsStruct_lengthy_5 OK ++ containers - valid - BitsStruct_lengthy_6 OK ++ containers - valid - BitsStruct_lengthy_7 OK ++ containers - valid - BitsStruct_lengthy_8 OK ++ containers - valid - BitsStruct_lengthy_9 OK ++ containers - valid - BitsStruct_lengthy_chaos_0 OK ++ containers - valid - BitsStruct_lengthy_chaos_1 OK ++ containers - valid - BitsStruct_lengthy_chaos_2 OK ++ containers - valid - BitsStruct_max OK ++ containers - valid - BitsStruct_max_0 OK ++ containers - valid - BitsStruct_max_1 OK ++ containers - valid - BitsStruct_max_2 OK ++ containers - valid - BitsStruct_max_3 OK ++ containers - valid - BitsStruct_max_4 OK ++ containers - valid - BitsStruct_max_5 OK ++ containers - valid - BitsStruct_max_6 OK ++ containers - valid - BitsStruct_max_7 OK ++ containers - valid - BitsStruct_max_8 OK ++ containers - valid - BitsStruct_max_9 OK ++ containers - valid - BitsStruct_max_chaos_0 OK ++ containers - valid - BitsStruct_max_chaos_1 OK ++ containers - valid - BitsStruct_max_chaos_2 OK ++ containers - valid - BitsStruct_nil_0 OK ++ containers - valid - BitsStruct_nil_1 OK ++ containers - valid - BitsStruct_nil_2 
OK ++ containers - valid - BitsStruct_nil_3 OK ++ containers - valid - BitsStruct_nil_4 OK ++ containers - valid - BitsStruct_nil_5 OK ++ containers - valid - BitsStruct_nil_6 OK ++ containers - valid - BitsStruct_nil_7 OK ++ containers - valid - BitsStruct_nil_8 OK ++ containers - valid - BitsStruct_nil_9 OK ++ containers - valid - BitsStruct_nil_chaos_0 OK ++ containers - valid - BitsStruct_nil_chaos_1 OK ++ containers - valid - BitsStruct_nil_chaos_2 OK ++ containers - valid - BitsStruct_one_0 OK ++ containers - valid - BitsStruct_one_1 OK ++ containers - valid - BitsStruct_one_2 OK ++ containers - valid - BitsStruct_one_3 OK ++ containers - valid - BitsStruct_one_4 OK ++ containers - valid - BitsStruct_one_5 OK ++ containers - valid - BitsStruct_one_6 OK ++ containers - valid - BitsStruct_one_7 OK ++ containers - valid - BitsStruct_one_8 OK ++ containers - valid - BitsStruct_one_9 OK ++ containers - valid - BitsStruct_one_chaos_0 OK ++ containers - valid - BitsStruct_one_chaos_1 OK ++ containers - valid - BitsStruct_one_chaos_2 OK ++ containers - valid - BitsStruct_random_0 OK ++ containers - valid - BitsStruct_random_1 OK ++ containers - valid - BitsStruct_random_2 OK ++ containers - valid - BitsStruct_random_3 OK ++ containers - valid - BitsStruct_random_4 OK ++ containers - valid - BitsStruct_random_5 OK ++ containers - valid - BitsStruct_random_6 OK ++ containers - valid - BitsStruct_random_7 OK ++ containers - valid - BitsStruct_random_8 OK ++ containers - valid - BitsStruct_random_9 OK ++ containers - valid - BitsStruct_random_chaos_0 OK ++ containers - valid - BitsStruct_random_chaos_1 OK ++ containers - valid - BitsStruct_random_chaos_2 OK ++ containers - valid - BitsStruct_zero OK ++ containers - valid - BitsStruct_zero_0 OK ++ containers - valid - BitsStruct_zero_1 OK ++ containers - valid - BitsStruct_zero_2 OK ++ containers - valid - BitsStruct_zero_3 OK ++ containers - valid - BitsStruct_zero_4 OK ++ containers - valid - BitsStruct_zero_5 OK ++ containers - valid - BitsStruct_zero_6 OK ++ containers - valid - BitsStruct_zero_7 OK ++ containers - valid - BitsStruct_zero_8 OK ++ containers - valid - BitsStruct_zero_9 OK ++ containers - valid - BitsStruct_zero_chaos_0 OK ++ containers - valid - BitsStruct_zero_chaos_1 OK ++ containers - valid - BitsStruct_zero_chaos_2 OK ++ containers - valid - ComplexTestStruct_lengthy_0 OK ++ containers - valid - ComplexTestStruct_lengthy_1 OK ++ containers - valid - ComplexTestStruct_lengthy_2 OK ++ containers - valid - ComplexTestStruct_lengthy_3 OK ++ containers - valid - ComplexTestStruct_lengthy_4 OK ++ containers - valid - ComplexTestStruct_lengthy_5 OK ++ containers - valid - ComplexTestStruct_lengthy_6 OK ++ containers - valid - ComplexTestStruct_lengthy_7 OK ++ containers - valid - ComplexTestStruct_lengthy_8 OK ++ containers - valid - ComplexTestStruct_lengthy_9 OK ++ containers - valid - ComplexTestStruct_lengthy_chaos_0 OK ++ containers - valid - ComplexTestStruct_lengthy_chaos_1 OK ++ containers - valid - ComplexTestStruct_lengthy_chaos_2 OK ++ containers - valid - ComplexTestStruct_max OK ++ containers - valid - ComplexTestStruct_max_0 OK ++ containers - valid - ComplexTestStruct_max_1 OK ++ containers - valid - ComplexTestStruct_max_2 OK ++ containers - valid - ComplexTestStruct_max_3 OK ++ containers - valid - ComplexTestStruct_max_4 OK ++ containers - valid - ComplexTestStruct_max_5 OK ++ containers - valid - ComplexTestStruct_max_6 OK ++ containers - valid - ComplexTestStruct_max_7 OK ++ containers - valid - 
ComplexTestStruct_max_8 OK ++ containers - valid - ComplexTestStruct_max_9 OK ++ containers - valid - ComplexTestStruct_max_chaos_0 OK ++ containers - valid - ComplexTestStruct_max_chaos_1 OK ++ containers - valid - ComplexTestStruct_max_chaos_2 OK ++ containers - valid - ComplexTestStruct_nil_0 OK ++ containers - valid - ComplexTestStruct_nil_1 OK ++ containers - valid - ComplexTestStruct_nil_2 OK ++ containers - valid - ComplexTestStruct_nil_3 OK ++ containers - valid - ComplexTestStruct_nil_4 OK ++ containers - valid - ComplexTestStruct_nil_5 OK ++ containers - valid - ComplexTestStruct_nil_6 OK ++ containers - valid - ComplexTestStruct_nil_7 OK ++ containers - valid - ComplexTestStruct_nil_8 OK ++ containers - valid - ComplexTestStruct_nil_9 OK ++ containers - valid - ComplexTestStruct_nil_chaos_0 OK ++ containers - valid - ComplexTestStruct_nil_chaos_1 OK ++ containers - valid - ComplexTestStruct_nil_chaos_2 OK ++ containers - valid - ComplexTestStruct_one_0 OK ++ containers - valid - ComplexTestStruct_one_1 OK ++ containers - valid - ComplexTestStruct_one_2 OK ++ containers - valid - ComplexTestStruct_one_3 OK ++ containers - valid - ComplexTestStruct_one_4 OK ++ containers - valid - ComplexTestStruct_one_5 OK ++ containers - valid - ComplexTestStruct_one_6 OK ++ containers - valid - ComplexTestStruct_one_7 OK ++ containers - valid - ComplexTestStruct_one_8 OK ++ containers - valid - ComplexTestStruct_one_9 OK ++ containers - valid - ComplexTestStruct_one_chaos_0 OK ++ containers - valid - ComplexTestStruct_one_chaos_1 OK ++ containers - valid - ComplexTestStruct_one_chaos_2 OK ++ containers - valid - ComplexTestStruct_random_0 OK ++ containers - valid - ComplexTestStruct_random_1 OK ++ containers - valid - ComplexTestStruct_random_2 OK ++ containers - valid - ComplexTestStruct_random_3 OK ++ containers - valid - ComplexTestStruct_random_4 OK ++ containers - valid - ComplexTestStruct_random_5 OK ++ containers - valid - ComplexTestStruct_random_6 OK ++ containers - valid - ComplexTestStruct_random_7 OK ++ containers - valid - ComplexTestStruct_random_8 OK ++ containers - valid - ComplexTestStruct_random_9 OK ++ containers - valid - ComplexTestStruct_random_chaos_0 OK ++ containers - valid - ComplexTestStruct_random_chaos_1 OK ++ containers - valid - ComplexTestStruct_random_chaos_2 OK ++ containers - valid - ComplexTestStruct_zero OK ++ containers - valid - ComplexTestStruct_zero_0 OK ++ containers - valid - ComplexTestStruct_zero_1 OK ++ containers - valid - ComplexTestStruct_zero_2 OK ++ containers - valid - ComplexTestStruct_zero_3 OK ++ containers - valid - ComplexTestStruct_zero_4 OK ++ containers - valid - ComplexTestStruct_zero_5 OK ++ containers - valid - ComplexTestStruct_zero_6 OK ++ containers - valid - ComplexTestStruct_zero_7 OK ++ containers - valid - ComplexTestStruct_zero_8 OK ++ containers - valid - ComplexTestStruct_zero_9 OK ++ containers - valid - ComplexTestStruct_zero_chaos_0 OK ++ containers - valid - ComplexTestStruct_zero_chaos_1 OK ++ containers - valid - ComplexTestStruct_zero_chaos_2 OK ++ containers - valid - FixedTestStruct_max OK ++ containers - valid - FixedTestStruct_max_chaos_0 OK ++ containers - valid - FixedTestStruct_max_chaos_1 OK ++ containers - valid - FixedTestStruct_max_chaos_2 OK ++ containers - valid - FixedTestStruct_random_0 OK ++ containers - valid - FixedTestStruct_random_1 OK ++ containers - valid - FixedTestStruct_random_2 OK ++ containers - valid - FixedTestStruct_random_3 OK ++ containers - valid - FixedTestStruct_random_4 OK ++ 
containers - valid - FixedTestStruct_random_5 OK ++ containers - valid - FixedTestStruct_random_6 OK ++ containers - valid - FixedTestStruct_random_7 OK ++ containers - valid - FixedTestStruct_random_8 OK ++ containers - valid - FixedTestStruct_random_9 OK ++ containers - valid - FixedTestStruct_random_chaos_0 OK ++ containers - valid - FixedTestStruct_random_chaos_1 OK ++ containers - valid - FixedTestStruct_random_chaos_2 OK ++ containers - valid - FixedTestStruct_zero OK ++ containers - valid - FixedTestStruct_zero_chaos_0 OK ++ containers - valid - FixedTestStruct_zero_chaos_1 OK ++ containers - valid - FixedTestStruct_zero_chaos_2 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_0 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_1 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_2 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_3 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_4 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_5 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_6 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_7 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_8 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_9 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_chaos_0 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_chaos_1 OK ++ containers - valid - ProgressiveBitsStruct_lengthy_chaos_2 OK ++ containers - valid - ProgressiveBitsStruct_max OK ++ containers - valid - ProgressiveBitsStruct_max_0 OK ++ containers - valid - ProgressiveBitsStruct_max_1 OK ++ containers - valid - ProgressiveBitsStruct_max_2 OK ++ containers - valid - ProgressiveBitsStruct_max_3 OK ++ containers - valid - ProgressiveBitsStruct_max_4 OK ++ containers - valid - ProgressiveBitsStruct_max_5 OK ++ containers - valid - ProgressiveBitsStruct_max_6 OK ++ containers - valid - ProgressiveBitsStruct_max_7 OK ++ containers - valid - ProgressiveBitsStruct_max_8 OK ++ containers - valid - ProgressiveBitsStruct_max_9 OK ++ containers - valid - ProgressiveBitsStruct_max_chaos_0 OK ++ containers - valid - ProgressiveBitsStruct_max_chaos_1 OK ++ containers - valid - ProgressiveBitsStruct_max_chaos_2 OK ++ containers - valid - ProgressiveBitsStruct_nil_0 OK ++ containers - valid - ProgressiveBitsStruct_nil_1 OK ++ containers - valid - ProgressiveBitsStruct_nil_2 OK ++ containers - valid - ProgressiveBitsStruct_nil_3 OK ++ containers - valid - ProgressiveBitsStruct_nil_4 OK ++ containers - valid - ProgressiveBitsStruct_nil_5 OK ++ containers - valid - ProgressiveBitsStruct_nil_6 OK ++ containers - valid - ProgressiveBitsStruct_nil_7 OK ++ containers - valid - ProgressiveBitsStruct_nil_8 OK ++ containers - valid - ProgressiveBitsStruct_nil_9 OK ++ containers - valid - ProgressiveBitsStruct_nil_chaos_0 OK ++ containers - valid - ProgressiveBitsStruct_nil_chaos_1 OK ++ containers - valid - ProgressiveBitsStruct_nil_chaos_2 OK ++ containers - valid - ProgressiveBitsStruct_one_0 OK ++ containers - valid - ProgressiveBitsStruct_one_1 OK ++ containers - valid - ProgressiveBitsStruct_one_2 OK ++ containers - valid - ProgressiveBitsStruct_one_3 OK ++ containers - valid - ProgressiveBitsStruct_one_4 OK ++ containers - valid - ProgressiveBitsStruct_one_5 OK ++ containers - valid - ProgressiveBitsStruct_one_6 OK ++ containers - valid - ProgressiveBitsStruct_one_7 OK ++ containers - valid - ProgressiveBitsStruct_one_8 OK ++ containers - valid - ProgressiveBitsStruct_one_9 OK ++ containers - valid - 
ProgressiveBitsStruct_one_chaos_0 OK ++ containers - valid - ProgressiveBitsStruct_one_chaos_1 OK ++ containers - valid - ProgressiveBitsStruct_one_chaos_2 OK ++ containers - valid - ProgressiveBitsStruct_random_0 OK ++ containers - valid - ProgressiveBitsStruct_random_1 OK ++ containers - valid - ProgressiveBitsStruct_random_2 OK ++ containers - valid - ProgressiveBitsStruct_random_3 OK ++ containers - valid - ProgressiveBitsStruct_random_4 OK ++ containers - valid - ProgressiveBitsStruct_random_5 OK ++ containers - valid - ProgressiveBitsStruct_random_6 OK ++ containers - valid - ProgressiveBitsStruct_random_7 OK ++ containers - valid - ProgressiveBitsStruct_random_8 OK ++ containers - valid - ProgressiveBitsStruct_random_9 OK ++ containers - valid - ProgressiveBitsStruct_random_chaos_0 OK ++ containers - valid - ProgressiveBitsStruct_random_chaos_1 OK ++ containers - valid - ProgressiveBitsStruct_random_chaos_2 OK ++ containers - valid - ProgressiveBitsStruct_zero OK ++ containers - valid - ProgressiveBitsStruct_zero_0 OK ++ containers - valid - ProgressiveBitsStruct_zero_1 OK ++ containers - valid - ProgressiveBitsStruct_zero_2 OK ++ containers - valid - ProgressiveBitsStruct_zero_3 OK ++ containers - valid - ProgressiveBitsStruct_zero_4 OK ++ containers - valid - ProgressiveBitsStruct_zero_5 OK ++ containers - valid - ProgressiveBitsStruct_zero_6 OK ++ containers - valid - ProgressiveBitsStruct_zero_7 OK ++ containers - valid - ProgressiveBitsStruct_zero_8 OK ++ containers - valid - ProgressiveBitsStruct_zero_9 OK ++ containers - valid - ProgressiveBitsStruct_zero_chaos_0 OK ++ containers - valid - ProgressiveBitsStruct_zero_chaos_1 OK ++ containers - valid - ProgressiveBitsStruct_zero_chaos_2 OK ++ containers - valid - ProgressiveTestStruct_lengthy_0 OK ++ containers - valid - ProgressiveTestStruct_lengthy_1 OK ++ containers - valid - ProgressiveTestStruct_lengthy_2 OK ++ containers - valid - ProgressiveTestStruct_lengthy_3 OK ++ containers - valid - ProgressiveTestStruct_lengthy_4 OK ++ containers - valid - ProgressiveTestStruct_lengthy_5 OK ++ containers - valid - ProgressiveTestStruct_lengthy_6 OK ++ containers - valid - ProgressiveTestStruct_lengthy_7 OK ++ containers - valid - ProgressiveTestStruct_lengthy_8 OK ++ containers - valid - ProgressiveTestStruct_lengthy_9 OK ++ containers - valid - ProgressiveTestStruct_lengthy_chaos_0 OK ++ containers - valid - ProgressiveTestStruct_lengthy_chaos_1 OK ++ containers - valid - ProgressiveTestStruct_lengthy_chaos_2 OK ++ containers - valid - ProgressiveTestStruct_max OK ++ containers - valid - ProgressiveTestStruct_max_0 OK ++ containers - valid - ProgressiveTestStruct_max_1 OK ++ containers - valid - ProgressiveTestStruct_max_2 OK ++ containers - valid - ProgressiveTestStruct_max_3 OK ++ containers - valid - ProgressiveTestStruct_max_4 OK ++ containers - valid - ProgressiveTestStruct_max_5 OK ++ containers - valid - ProgressiveTestStruct_max_6 OK ++ containers - valid - ProgressiveTestStruct_max_7 OK ++ containers - valid - ProgressiveTestStruct_max_8 OK ++ containers - valid - ProgressiveTestStruct_max_9 OK ++ containers - valid - ProgressiveTestStruct_max_chaos_0 OK ++ containers - valid - ProgressiveTestStruct_max_chaos_1 OK ++ containers - valid - ProgressiveTestStruct_max_chaos_2 OK ++ containers - valid - ProgressiveTestStruct_nil_0 OK ++ containers - valid - ProgressiveTestStruct_nil_1 OK ++ containers - valid - ProgressiveTestStruct_nil_2 OK ++ containers - valid - ProgressiveTestStruct_nil_3 OK ++ containers - valid - 
ProgressiveTestStruct_nil_4 OK ++ containers - valid - ProgressiveTestStruct_nil_5 OK ++ containers - valid - ProgressiveTestStruct_nil_6 OK ++ containers - valid - ProgressiveTestStruct_nil_7 OK ++ containers - valid - ProgressiveTestStruct_nil_8 OK ++ containers - valid - ProgressiveTestStruct_nil_9 OK ++ containers - valid - ProgressiveTestStruct_nil_chaos_0 OK ++ containers - valid - ProgressiveTestStruct_nil_chaos_1 OK ++ containers - valid - ProgressiveTestStruct_nil_chaos_2 OK ++ containers - valid - ProgressiveTestStruct_one_0 OK ++ containers - valid - ProgressiveTestStruct_one_1 OK ++ containers - valid - ProgressiveTestStruct_one_2 OK ++ containers - valid - ProgressiveTestStruct_one_3 OK ++ containers - valid - ProgressiveTestStruct_one_4 OK ++ containers - valid - ProgressiveTestStruct_one_5 OK ++ containers - valid - ProgressiveTestStruct_one_6 OK ++ containers - valid - ProgressiveTestStruct_one_7 OK ++ containers - valid - ProgressiveTestStruct_one_8 OK ++ containers - valid - ProgressiveTestStruct_one_9 OK ++ containers - valid - ProgressiveTestStruct_one_chaos_0 OK ++ containers - valid - ProgressiveTestStruct_one_chaos_1 OK ++ containers - valid - ProgressiveTestStruct_one_chaos_2 OK ++ containers - valid - ProgressiveTestStruct_random_0 OK ++ containers - valid - ProgressiveTestStruct_random_1 OK ++ containers - valid - ProgressiveTestStruct_random_2 OK ++ containers - valid - ProgressiveTestStruct_random_3 OK ++ containers - valid - ProgressiveTestStruct_random_4 OK ++ containers - valid - ProgressiveTestStruct_random_5 OK ++ containers - valid - ProgressiveTestStruct_random_6 OK ++ containers - valid - ProgressiveTestStruct_random_7 OK ++ containers - valid - ProgressiveTestStruct_random_8 OK ++ containers - valid - ProgressiveTestStruct_random_9 OK ++ containers - valid - ProgressiveTestStruct_random_chaos_0 OK ++ containers - valid - ProgressiveTestStruct_random_chaos_1 OK ++ containers - valid - ProgressiveTestStruct_random_chaos_2 OK ++ containers - valid - ProgressiveTestStruct_zero OK ++ containers - valid - ProgressiveTestStruct_zero_0 OK ++ containers - valid - ProgressiveTestStruct_zero_1 OK ++ containers - valid - ProgressiveTestStruct_zero_2 OK ++ containers - valid - ProgressiveTestStruct_zero_3 OK ++ containers - valid - ProgressiveTestStruct_zero_4 OK ++ containers - valid - ProgressiveTestStruct_zero_5 OK ++ containers - valid - ProgressiveTestStruct_zero_6 OK ++ containers - valid - ProgressiveTestStruct_zero_7 OK ++ containers - valid - ProgressiveTestStruct_zero_8 OK ++ containers - valid - ProgressiveTestStruct_zero_9 OK ++ containers - valid - ProgressiveTestStruct_zero_chaos_0 OK ++ containers - valid - ProgressiveTestStruct_zero_chaos_1 OK ++ containers - valid - ProgressiveTestStruct_zero_chaos_2 OK ++ containers - valid - SingleFieldTestStruct_max OK ++ containers - valid - SingleFieldTestStruct_max_chaos_0 OK ++ containers - valid - SingleFieldTestStruct_max_chaos_1 OK ++ containers - valid - SingleFieldTestStruct_max_chaos_2 OK ++ containers - valid - SingleFieldTestStruct_random_0 OK ++ containers - valid - SingleFieldTestStruct_random_1 OK ++ containers - valid - SingleFieldTestStruct_random_2 OK ++ containers - valid - SingleFieldTestStruct_random_3 OK ++ containers - valid - SingleFieldTestStruct_random_4 OK ++ containers - valid - SingleFieldTestStruct_random_5 OK ++ containers - valid - SingleFieldTestStruct_random_6 OK ++ containers - valid - SingleFieldTestStruct_random_7 OK ++ containers - valid - SingleFieldTestStruct_random_8 OK ++ 
containers - valid - SingleFieldTestStruct_random_9 OK ++ containers - valid - SingleFieldTestStruct_random_chaos_0 OK ++ containers - valid - SingleFieldTestStruct_random_chaos_1 OK ++ containers - valid - SingleFieldTestStruct_random_chaos_2 OK ++ containers - valid - SingleFieldTestStruct_zero OK ++ containers - valid - SingleFieldTestStruct_zero_chaos_0 OK ++ containers - valid - SingleFieldTestStruct_zero_chaos_1 OK ++ containers - valid - SingleFieldTestStruct_zero_chaos_2 OK ++ containers - valid - SmallTestStruct_max OK ++ containers - valid - SmallTestStruct_max_chaos_0 OK ++ containers - valid - SmallTestStruct_max_chaos_1 OK ++ containers - valid - SmallTestStruct_max_chaos_2 OK ++ containers - valid - SmallTestStruct_random_0 OK ++ containers - valid - SmallTestStruct_random_1 OK ++ containers - valid - SmallTestStruct_random_2 OK ++ containers - valid - SmallTestStruct_random_3 OK ++ containers - valid - SmallTestStruct_random_4 OK ++ containers - valid - SmallTestStruct_random_5 OK ++ containers - valid - SmallTestStruct_random_6 OK ++ containers - valid - SmallTestStruct_random_7 OK ++ containers - valid - SmallTestStruct_random_8 OK ++ containers - valid - SmallTestStruct_random_9 OK ++ containers - valid - SmallTestStruct_random_chaos_0 OK ++ containers - valid - SmallTestStruct_random_chaos_1 OK ++ containers - valid - SmallTestStruct_random_chaos_2 OK ++ containers - valid - SmallTestStruct_zero OK ++ containers - valid - SmallTestStruct_zero_chaos_0 OK ++ containers - valid - SmallTestStruct_zero_chaos_1 OK ++ containers - valid - SmallTestStruct_zero_chaos_2 OK ++ containers - valid - VarTestStruct_lengthy_0 OK ++ containers - valid - VarTestStruct_lengthy_1 OK ++ containers - valid - VarTestStruct_lengthy_2 OK ++ containers - valid - VarTestStruct_lengthy_3 OK ++ containers - valid - VarTestStruct_lengthy_4 OK ++ containers - valid - VarTestStruct_lengthy_5 OK ++ containers - valid - VarTestStruct_lengthy_6 OK ++ containers - valid - VarTestStruct_lengthy_7 OK ++ containers - valid - VarTestStruct_lengthy_8 OK ++ containers - valid - VarTestStruct_lengthy_9 OK ++ containers - valid - VarTestStruct_lengthy_chaos_0 OK ++ containers - valid - VarTestStruct_lengthy_chaos_1 OK ++ containers - valid - VarTestStruct_lengthy_chaos_2 OK ++ containers - valid - VarTestStruct_max OK ++ containers - valid - VarTestStruct_max_0 OK ++ containers - valid - VarTestStruct_max_1 OK ++ containers - valid - VarTestStruct_max_2 OK ++ containers - valid - VarTestStruct_max_3 OK ++ containers - valid - VarTestStruct_max_4 OK ++ containers - valid - VarTestStruct_max_5 OK ++ containers - valid - VarTestStruct_max_6 OK ++ containers - valid - VarTestStruct_max_7 OK ++ containers - valid - VarTestStruct_max_8 OK ++ containers - valid - VarTestStruct_max_9 OK ++ containers - valid - VarTestStruct_max_chaos_0 OK ++ containers - valid - VarTestStruct_max_chaos_1 OK ++ containers - valid - VarTestStruct_max_chaos_2 OK ++ containers - valid - VarTestStruct_nil_0 OK ++ containers - valid - VarTestStruct_nil_1 OK ++ containers - valid - VarTestStruct_nil_2 OK ++ containers - valid - VarTestStruct_nil_3 OK ++ containers - valid - VarTestStruct_nil_4 OK ++ containers - valid - VarTestStruct_nil_5 OK ++ containers - valid - VarTestStruct_nil_6 OK ++ containers - valid - VarTestStruct_nil_7 OK ++ containers - valid - VarTestStruct_nil_8 OK ++ containers - valid - VarTestStruct_nil_9 OK ++ containers - valid - VarTestStruct_nil_chaos_0 OK ++ containers - valid - VarTestStruct_nil_chaos_1 OK ++ containers 
- valid - VarTestStruct_nil_chaos_2 OK ++ containers - valid - VarTestStruct_one_0 OK ++ containers - valid - VarTestStruct_one_1 OK ++ containers - valid - VarTestStruct_one_2 OK ++ containers - valid - VarTestStruct_one_3 OK ++ containers - valid - VarTestStruct_one_4 OK ++ containers - valid - VarTestStruct_one_5 OK ++ containers - valid - VarTestStruct_one_6 OK ++ containers - valid - VarTestStruct_one_7 OK ++ containers - valid - VarTestStruct_one_8 OK ++ containers - valid - VarTestStruct_one_9 OK ++ containers - valid - VarTestStruct_one_chaos_0 OK ++ containers - valid - VarTestStruct_one_chaos_1 OK ++ containers - valid - VarTestStruct_one_chaos_2 OK ++ containers - valid - VarTestStruct_random_0 OK ++ containers - valid - VarTestStruct_random_1 OK ++ containers - valid - VarTestStruct_random_2 OK ++ containers - valid - VarTestStruct_random_3 OK ++ containers - valid - VarTestStruct_random_4 OK ++ containers - valid - VarTestStruct_random_5 OK ++ containers - valid - VarTestStruct_random_6 OK ++ containers - valid - VarTestStruct_random_7 OK ++ containers - valid - VarTestStruct_random_8 OK ++ containers - valid - VarTestStruct_random_9 OK ++ containers - valid - VarTestStruct_random_chaos_0 OK ++ containers - valid - VarTestStruct_random_chaos_1 OK ++ containers - valid - VarTestStruct_random_chaos_2 OK ++ containers - valid - VarTestStruct_zero OK ++ containers - valid - VarTestStruct_zero_0 OK ++ containers - valid - VarTestStruct_zero_1 OK ++ containers - valid - VarTestStruct_zero_2 OK ++ containers - valid - VarTestStruct_zero_3 OK ++ containers - valid - VarTestStruct_zero_4 OK ++ containers - valid - VarTestStruct_zero_5 OK ++ containers - valid - VarTestStruct_zero_6 OK ++ containers - valid - VarTestStruct_zero_7 OK ++ containers - valid - VarTestStruct_zero_8 OK ++ containers - valid - VarTestStruct_zero_9 OK ++ containers - valid - VarTestStruct_zero_chaos_0 OK ++ containers - valid - VarTestStruct_zero_chaos_1 OK ++ containers - valid - VarTestStruct_zero_chaos_2 OK ++ progressive_bitlist - invalid - progbitlist_no_delimiter_empty OK ++ progressive_bitlist - invalid - progbitlist_no_delimiter_zero_byte OK ++ progressive_bitlist - invalid - progbitlist_no_delimiter_zeroes OK ++ progressive_bitlist - valid - progbitlist_lengthy_0_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_0_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_0_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_0_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_0_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1023_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1023_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1023_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1023_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1023_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1024_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1024_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1024_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1024_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1024_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1025_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1025_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1025_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1025_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1025_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_15_0 OK ++ 
progressive_bitlist - valid - progbitlist_lengthy_15_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_15_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_15_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_15_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_16_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_16_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_16_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_16_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_16_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_17_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_17_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_17_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_17_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_17_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_1_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_255_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_255_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_255_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_255_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_255_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_256_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_256_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_256_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_256_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_256_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_257_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_257_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_257_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_257_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_257_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_2_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_2_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_2_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_2_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_2_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_31_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_31_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_31_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_31_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_31_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_32_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_32_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_32_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_32_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_32_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_33_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_33_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_33_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_33_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_33_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_3_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_3_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_3_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_3_3 OK ++ 
progressive_bitlist - valid - progbitlist_lengthy_3_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_4_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_4_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_4_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_4_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_4_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_511_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_511_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_511_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_511_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_511_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_512_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_512_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_512_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_512_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_512_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_513_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_513_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_513_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_513_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_513_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_5_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_5_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_5_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_5_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_5_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_63_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_63_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_63_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_63_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_63_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_64_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_64_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_64_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_64_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_64_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_65_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_65_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_65_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_65_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_65_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_6_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_6_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_6_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_6_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_6_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_7_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_7_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_7_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_7_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_7_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_8_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_8_1 OK ++ progressive_bitlist - valid - progbitlist_lengthy_8_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_8_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_8_4 OK ++ progressive_bitlist - valid - progbitlist_lengthy_9_0 OK ++ progressive_bitlist - valid - progbitlist_lengthy_9_1 OK ++ progressive_bitlist - valid - 
progbitlist_lengthy_9_2 OK ++ progressive_bitlist - valid - progbitlist_lengthy_9_3 OK ++ progressive_bitlist - valid - progbitlist_lengthy_9_4 OK ++ progressive_bitlist - valid - progbitlist_max_0_0 OK ++ progressive_bitlist - valid - progbitlist_max_0_1 OK ++ progressive_bitlist - valid - progbitlist_max_0_2 OK ++ progressive_bitlist - valid - progbitlist_max_0_3 OK ++ progressive_bitlist - valid - progbitlist_max_0_4 OK ++ progressive_bitlist - valid - progbitlist_max_1023_0 OK ++ progressive_bitlist - valid - progbitlist_max_1023_1 OK ++ progressive_bitlist - valid - progbitlist_max_1023_2 OK ++ progressive_bitlist - valid - progbitlist_max_1023_3 OK ++ progressive_bitlist - valid - progbitlist_max_1023_4 OK ++ progressive_bitlist - valid - progbitlist_max_1024_0 OK ++ progressive_bitlist - valid - progbitlist_max_1024_1 OK ++ progressive_bitlist - valid - progbitlist_max_1024_2 OK ++ progressive_bitlist - valid - progbitlist_max_1024_3 OK ++ progressive_bitlist - valid - progbitlist_max_1024_4 OK ++ progressive_bitlist - valid - progbitlist_max_1025_0 OK ++ progressive_bitlist - valid - progbitlist_max_1025_1 OK ++ progressive_bitlist - valid - progbitlist_max_1025_2 OK ++ progressive_bitlist - valid - progbitlist_max_1025_3 OK ++ progressive_bitlist - valid - progbitlist_max_1025_4 OK ++ progressive_bitlist - valid - progbitlist_max_15_0 OK ++ progressive_bitlist - valid - progbitlist_max_15_1 OK ++ progressive_bitlist - valid - progbitlist_max_15_2 OK ++ progressive_bitlist - valid - progbitlist_max_15_3 OK ++ progressive_bitlist - valid - progbitlist_max_15_4 OK ++ progressive_bitlist - valid - progbitlist_max_16_0 OK ++ progressive_bitlist - valid - progbitlist_max_16_1 OK ++ progressive_bitlist - valid - progbitlist_max_16_2 OK ++ progressive_bitlist - valid - progbitlist_max_16_3 OK ++ progressive_bitlist - valid - progbitlist_max_16_4 OK ++ progressive_bitlist - valid - progbitlist_max_17_0 OK ++ progressive_bitlist - valid - progbitlist_max_17_1 OK ++ progressive_bitlist - valid - progbitlist_max_17_2 OK ++ progressive_bitlist - valid - progbitlist_max_17_3 OK ++ progressive_bitlist - valid - progbitlist_max_17_4 OK ++ progressive_bitlist - valid - progbitlist_max_1_0 OK ++ progressive_bitlist - valid - progbitlist_max_1_1 OK ++ progressive_bitlist - valid - progbitlist_max_1_2 OK ++ progressive_bitlist - valid - progbitlist_max_1_3 OK ++ progressive_bitlist - valid - progbitlist_max_1_4 OK ++ progressive_bitlist - valid - progbitlist_max_255_0 OK ++ progressive_bitlist - valid - progbitlist_max_255_1 OK ++ progressive_bitlist - valid - progbitlist_max_255_2 OK ++ progressive_bitlist - valid - progbitlist_max_255_3 OK ++ progressive_bitlist - valid - progbitlist_max_255_4 OK ++ progressive_bitlist - valid - progbitlist_max_256_0 OK ++ progressive_bitlist - valid - progbitlist_max_256_1 OK ++ progressive_bitlist - valid - progbitlist_max_256_2 OK ++ progressive_bitlist - valid - progbitlist_max_256_3 OK ++ progressive_bitlist - valid - progbitlist_max_256_4 OK ++ progressive_bitlist - valid - progbitlist_max_257_0 OK ++ progressive_bitlist - valid - progbitlist_max_257_1 OK ++ progressive_bitlist - valid - progbitlist_max_257_2 OK ++ progressive_bitlist - valid - progbitlist_max_257_3 OK ++ progressive_bitlist - valid - progbitlist_max_257_4 OK ++ progressive_bitlist - valid - progbitlist_max_2_0 OK ++ progressive_bitlist - valid - progbitlist_max_2_1 OK ++ progressive_bitlist - valid - progbitlist_max_2_2 OK ++ progressive_bitlist - valid - progbitlist_max_2_3 OK ++ 
progressive_bitlist - valid - progbitlist_max_2_4 OK ++ progressive_bitlist - valid - progbitlist_max_31_0 OK ++ progressive_bitlist - valid - progbitlist_max_31_1 OK ++ progressive_bitlist - valid - progbitlist_max_31_2 OK ++ progressive_bitlist - valid - progbitlist_max_31_3 OK ++ progressive_bitlist - valid - progbitlist_max_31_4 OK ++ progressive_bitlist - valid - progbitlist_max_32_0 OK ++ progressive_bitlist - valid - progbitlist_max_32_1 OK ++ progressive_bitlist - valid - progbitlist_max_32_2 OK ++ progressive_bitlist - valid - progbitlist_max_32_3 OK ++ progressive_bitlist - valid - progbitlist_max_32_4 OK ++ progressive_bitlist - valid - progbitlist_max_33_0 OK ++ progressive_bitlist - valid - progbitlist_max_33_1 OK ++ progressive_bitlist - valid - progbitlist_max_33_2 OK ++ progressive_bitlist - valid - progbitlist_max_33_3 OK ++ progressive_bitlist - valid - progbitlist_max_33_4 OK ++ progressive_bitlist - valid - progbitlist_max_3_0 OK ++ progressive_bitlist - valid - progbitlist_max_3_1 OK ++ progressive_bitlist - valid - progbitlist_max_3_2 OK ++ progressive_bitlist - valid - progbitlist_max_3_3 OK ++ progressive_bitlist - valid - progbitlist_max_3_4 OK ++ progressive_bitlist - valid - progbitlist_max_4_0 OK ++ progressive_bitlist - valid - progbitlist_max_4_1 OK ++ progressive_bitlist - valid - progbitlist_max_4_2 OK ++ progressive_bitlist - valid - progbitlist_max_4_3 OK ++ progressive_bitlist - valid - progbitlist_max_4_4 OK ++ progressive_bitlist - valid - progbitlist_max_511_0 OK ++ progressive_bitlist - valid - progbitlist_max_511_1 OK ++ progressive_bitlist - valid - progbitlist_max_511_2 OK ++ progressive_bitlist - valid - progbitlist_max_511_3 OK ++ progressive_bitlist - valid - progbitlist_max_511_4 OK ++ progressive_bitlist - valid - progbitlist_max_512_0 OK ++ progressive_bitlist - valid - progbitlist_max_512_1 OK ++ progressive_bitlist - valid - progbitlist_max_512_2 OK ++ progressive_bitlist - valid - progbitlist_max_512_3 OK ++ progressive_bitlist - valid - progbitlist_max_512_4 OK ++ progressive_bitlist - valid - progbitlist_max_513_0 OK ++ progressive_bitlist - valid - progbitlist_max_513_1 OK ++ progressive_bitlist - valid - progbitlist_max_513_2 OK ++ progressive_bitlist - valid - progbitlist_max_513_3 OK ++ progressive_bitlist - valid - progbitlist_max_513_4 OK ++ progressive_bitlist - valid - progbitlist_max_5_0 OK ++ progressive_bitlist - valid - progbitlist_max_5_1 OK ++ progressive_bitlist - valid - progbitlist_max_5_2 OK ++ progressive_bitlist - valid - progbitlist_max_5_3 OK ++ progressive_bitlist - valid - progbitlist_max_5_4 OK ++ progressive_bitlist - valid - progbitlist_max_63_0 OK ++ progressive_bitlist - valid - progbitlist_max_63_1 OK ++ progressive_bitlist - valid - progbitlist_max_63_2 OK ++ progressive_bitlist - valid - progbitlist_max_63_3 OK ++ progressive_bitlist - valid - progbitlist_max_63_4 OK ++ progressive_bitlist - valid - progbitlist_max_64_0 OK ++ progressive_bitlist - valid - progbitlist_max_64_1 OK ++ progressive_bitlist - valid - progbitlist_max_64_2 OK ++ progressive_bitlist - valid - progbitlist_max_64_3 OK ++ progressive_bitlist - valid - progbitlist_max_64_4 OK ++ progressive_bitlist - valid - progbitlist_max_65_0 OK ++ progressive_bitlist - valid - progbitlist_max_65_1 OK ++ progressive_bitlist - valid - progbitlist_max_65_2 OK ++ progressive_bitlist - valid - progbitlist_max_65_3 OK ++ progressive_bitlist - valid - progbitlist_max_65_4 OK ++ progressive_bitlist - valid - progbitlist_max_6_0 OK ++ progressive_bitlist - 
valid - progbitlist_max_6_1 OK ++ progressive_bitlist - valid - progbitlist_max_6_2 OK ++ progressive_bitlist - valid - progbitlist_max_6_3 OK ++ progressive_bitlist - valid - progbitlist_max_6_4 OK ++ progressive_bitlist - valid - progbitlist_max_7_0 OK ++ progressive_bitlist - valid - progbitlist_max_7_1 OK ++ progressive_bitlist - valid - progbitlist_max_7_2 OK ++ progressive_bitlist - valid - progbitlist_max_7_3 OK ++ progressive_bitlist - valid - progbitlist_max_7_4 OK ++ progressive_bitlist - valid - progbitlist_max_8_0 OK ++ progressive_bitlist - valid - progbitlist_max_8_1 OK ++ progressive_bitlist - valid - progbitlist_max_8_2 OK ++ progressive_bitlist - valid - progbitlist_max_8_3 OK ++ progressive_bitlist - valid - progbitlist_max_8_4 OK ++ progressive_bitlist - valid - progbitlist_max_9_0 OK ++ progressive_bitlist - valid - progbitlist_max_9_1 OK ++ progressive_bitlist - valid - progbitlist_max_9_2 OK ++ progressive_bitlist - valid - progbitlist_max_9_3 OK ++ progressive_bitlist - valid - progbitlist_max_9_4 OK ++ progressive_bitlist - valid - progbitlist_nil_0_0 OK ++ progressive_bitlist - valid - progbitlist_nil_0_1 OK ++ progressive_bitlist - valid - progbitlist_nil_0_2 OK ++ progressive_bitlist - valid - progbitlist_nil_0_3 OK ++ progressive_bitlist - valid - progbitlist_nil_0_4 OK ++ progressive_bitlist - valid - progbitlist_nil_1023_0 OK ++ progressive_bitlist - valid - progbitlist_nil_1023_1 OK ++ progressive_bitlist - valid - progbitlist_nil_1023_2 OK ++ progressive_bitlist - valid - progbitlist_nil_1023_3 OK ++ progressive_bitlist - valid - progbitlist_nil_1023_4 OK ++ progressive_bitlist - valid - progbitlist_nil_1024_0 OK ++ progressive_bitlist - valid - progbitlist_nil_1024_1 OK ++ progressive_bitlist - valid - progbitlist_nil_1024_2 OK ++ progressive_bitlist - valid - progbitlist_nil_1024_3 OK ++ progressive_bitlist - valid - progbitlist_nil_1024_4 OK ++ progressive_bitlist - valid - progbitlist_nil_1025_0 OK ++ progressive_bitlist - valid - progbitlist_nil_1025_1 OK ++ progressive_bitlist - valid - progbitlist_nil_1025_2 OK ++ progressive_bitlist - valid - progbitlist_nil_1025_3 OK ++ progressive_bitlist - valid - progbitlist_nil_1025_4 OK ++ progressive_bitlist - valid - progbitlist_nil_15_0 OK ++ progressive_bitlist - valid - progbitlist_nil_15_1 OK ++ progressive_bitlist - valid - progbitlist_nil_15_2 OK ++ progressive_bitlist - valid - progbitlist_nil_15_3 OK ++ progressive_bitlist - valid - progbitlist_nil_15_4 OK ++ progressive_bitlist - valid - progbitlist_nil_16_0 OK ++ progressive_bitlist - valid - progbitlist_nil_16_1 OK ++ progressive_bitlist - valid - progbitlist_nil_16_2 OK ++ progressive_bitlist - valid - progbitlist_nil_16_3 OK ++ progressive_bitlist - valid - progbitlist_nil_16_4 OK ++ progressive_bitlist - valid - progbitlist_nil_17_0 OK ++ progressive_bitlist - valid - progbitlist_nil_17_1 OK ++ progressive_bitlist - valid - progbitlist_nil_17_2 OK ++ progressive_bitlist - valid - progbitlist_nil_17_3 OK ++ progressive_bitlist - valid - progbitlist_nil_17_4 OK ++ progressive_bitlist - valid - progbitlist_nil_1_0 OK ++ progressive_bitlist - valid - progbitlist_nil_1_1 OK ++ progressive_bitlist - valid - progbitlist_nil_1_2 OK ++ progressive_bitlist - valid - progbitlist_nil_1_3 OK ++ progressive_bitlist - valid - progbitlist_nil_1_4 OK ++ progressive_bitlist - valid - progbitlist_nil_255_0 OK ++ progressive_bitlist - valid - progbitlist_nil_255_1 OK ++ progressive_bitlist - valid - progbitlist_nil_255_2 OK ++ progressive_bitlist - valid - 
progbitlist_nil_255_3 OK ++ progressive_bitlist - valid - progbitlist_nil_255_4 OK ++ progressive_bitlist - valid - progbitlist_nil_256_0 OK ++ progressive_bitlist - valid - progbitlist_nil_256_1 OK ++ progressive_bitlist - valid - progbitlist_nil_256_2 OK ++ progressive_bitlist - valid - progbitlist_nil_256_3 OK ++ progressive_bitlist - valid - progbitlist_nil_256_4 OK ++ progressive_bitlist - valid - progbitlist_nil_257_0 OK ++ progressive_bitlist - valid - progbitlist_nil_257_1 OK ++ progressive_bitlist - valid - progbitlist_nil_257_2 OK ++ progressive_bitlist - valid - progbitlist_nil_257_3 OK ++ progressive_bitlist - valid - progbitlist_nil_257_4 OK ++ progressive_bitlist - valid - progbitlist_nil_2_0 OK ++ progressive_bitlist - valid - progbitlist_nil_2_1 OK ++ progressive_bitlist - valid - progbitlist_nil_2_2 OK ++ progressive_bitlist - valid - progbitlist_nil_2_3 OK ++ progressive_bitlist - valid - progbitlist_nil_2_4 OK ++ progressive_bitlist - valid - progbitlist_nil_31_0 OK ++ progressive_bitlist - valid - progbitlist_nil_31_1 OK ++ progressive_bitlist - valid - progbitlist_nil_31_2 OK ++ progressive_bitlist - valid - progbitlist_nil_31_3 OK ++ progressive_bitlist - valid - progbitlist_nil_31_4 OK ++ progressive_bitlist - valid - progbitlist_nil_32_0 OK ++ progressive_bitlist - valid - progbitlist_nil_32_1 OK ++ progressive_bitlist - valid - progbitlist_nil_32_2 OK ++ progressive_bitlist - valid - progbitlist_nil_32_3 OK ++ progressive_bitlist - valid - progbitlist_nil_32_4 OK ++ progressive_bitlist - valid - progbitlist_nil_33_0 OK ++ progressive_bitlist - valid - progbitlist_nil_33_1 OK ++ progressive_bitlist - valid - progbitlist_nil_33_2 OK ++ progressive_bitlist - valid - progbitlist_nil_33_3 OK ++ progressive_bitlist - valid - progbitlist_nil_33_4 OK ++ progressive_bitlist - valid - progbitlist_nil_3_0 OK ++ progressive_bitlist - valid - progbitlist_nil_3_1 OK ++ progressive_bitlist - valid - progbitlist_nil_3_2 OK ++ progressive_bitlist - valid - progbitlist_nil_3_3 OK ++ progressive_bitlist - valid - progbitlist_nil_3_4 OK ++ progressive_bitlist - valid - progbitlist_nil_4_0 OK ++ progressive_bitlist - valid - progbitlist_nil_4_1 OK ++ progressive_bitlist - valid - progbitlist_nil_4_2 OK ++ progressive_bitlist - valid - progbitlist_nil_4_3 OK ++ progressive_bitlist - valid - progbitlist_nil_4_4 OK ++ progressive_bitlist - valid - progbitlist_nil_511_0 OK ++ progressive_bitlist - valid - progbitlist_nil_511_1 OK ++ progressive_bitlist - valid - progbitlist_nil_511_2 OK ++ progressive_bitlist - valid - progbitlist_nil_511_3 OK ++ progressive_bitlist - valid - progbitlist_nil_511_4 OK ++ progressive_bitlist - valid - progbitlist_nil_512_0 OK ++ progressive_bitlist - valid - progbitlist_nil_512_1 OK ++ progressive_bitlist - valid - progbitlist_nil_512_2 OK ++ progressive_bitlist - valid - progbitlist_nil_512_3 OK ++ progressive_bitlist - valid - progbitlist_nil_512_4 OK ++ progressive_bitlist - valid - progbitlist_nil_513_0 OK ++ progressive_bitlist - valid - progbitlist_nil_513_1 OK ++ progressive_bitlist - valid - progbitlist_nil_513_2 OK ++ progressive_bitlist - valid - progbitlist_nil_513_3 OK ++ progressive_bitlist - valid - progbitlist_nil_513_4 OK ++ progressive_bitlist - valid - progbitlist_nil_5_0 OK ++ progressive_bitlist - valid - progbitlist_nil_5_1 OK ++ progressive_bitlist - valid - progbitlist_nil_5_2 OK ++ progressive_bitlist - valid - progbitlist_nil_5_3 OK ++ progressive_bitlist - valid - progbitlist_nil_5_4 OK ++ progressive_bitlist - valid - 
progbitlist_nil_63_0 OK ++ progressive_bitlist - valid - progbitlist_nil_63_1 OK ++ progressive_bitlist - valid - progbitlist_nil_63_2 OK ++ progressive_bitlist - valid - progbitlist_nil_63_3 OK ++ progressive_bitlist - valid - progbitlist_nil_63_4 OK ++ progressive_bitlist - valid - progbitlist_nil_64_0 OK ++ progressive_bitlist - valid - progbitlist_nil_64_1 OK ++ progressive_bitlist - valid - progbitlist_nil_64_2 OK ++ progressive_bitlist - valid - progbitlist_nil_64_3 OK ++ progressive_bitlist - valid - progbitlist_nil_64_4 OK ++ progressive_bitlist - valid - progbitlist_nil_65_0 OK ++ progressive_bitlist - valid - progbitlist_nil_65_1 OK ++ progressive_bitlist - valid - progbitlist_nil_65_2 OK ++ progressive_bitlist - valid - progbitlist_nil_65_3 OK ++ progressive_bitlist - valid - progbitlist_nil_65_4 OK ++ progressive_bitlist - valid - progbitlist_nil_6_0 OK ++ progressive_bitlist - valid - progbitlist_nil_6_1 OK ++ progressive_bitlist - valid - progbitlist_nil_6_2 OK ++ progressive_bitlist - valid - progbitlist_nil_6_3 OK ++ progressive_bitlist - valid - progbitlist_nil_6_4 OK ++ progressive_bitlist - valid - progbitlist_nil_7_0 OK ++ progressive_bitlist - valid - progbitlist_nil_7_1 OK ++ progressive_bitlist - valid - progbitlist_nil_7_2 OK ++ progressive_bitlist - valid - progbitlist_nil_7_3 OK ++ progressive_bitlist - valid - progbitlist_nil_7_4 OK ++ progressive_bitlist - valid - progbitlist_nil_8_0 OK ++ progressive_bitlist - valid - progbitlist_nil_8_1 OK ++ progressive_bitlist - valid - progbitlist_nil_8_2 OK ++ progressive_bitlist - valid - progbitlist_nil_8_3 OK ++ progressive_bitlist - valid - progbitlist_nil_8_4 OK ++ progressive_bitlist - valid - progbitlist_nil_9_0 OK ++ progressive_bitlist - valid - progbitlist_nil_9_1 OK ++ progressive_bitlist - valid - progbitlist_nil_9_2 OK ++ progressive_bitlist - valid - progbitlist_nil_9_3 OK ++ progressive_bitlist - valid - progbitlist_nil_9_4 OK ++ progressive_bitlist - valid - progbitlist_random_0_0 OK ++ progressive_bitlist - valid - progbitlist_random_0_1 OK ++ progressive_bitlist - valid - progbitlist_random_0_2 OK ++ progressive_bitlist - valid - progbitlist_random_0_3 OK ++ progressive_bitlist - valid - progbitlist_random_0_4 OK ++ progressive_bitlist - valid - progbitlist_random_1023_0 OK ++ progressive_bitlist - valid - progbitlist_random_1023_1 OK ++ progressive_bitlist - valid - progbitlist_random_1023_2 OK ++ progressive_bitlist - valid - progbitlist_random_1023_3 OK ++ progressive_bitlist - valid - progbitlist_random_1023_4 OK ++ progressive_bitlist - valid - progbitlist_random_1024_0 OK ++ progressive_bitlist - valid - progbitlist_random_1024_1 OK ++ progressive_bitlist - valid - progbitlist_random_1024_2 OK ++ progressive_bitlist - valid - progbitlist_random_1024_3 OK ++ progressive_bitlist - valid - progbitlist_random_1024_4 OK ++ progressive_bitlist - valid - progbitlist_random_1025_0 OK ++ progressive_bitlist - valid - progbitlist_random_1025_1 OK ++ progressive_bitlist - valid - progbitlist_random_1025_2 OK ++ progressive_bitlist - valid - progbitlist_random_1025_3 OK ++ progressive_bitlist - valid - progbitlist_random_1025_4 OK ++ progressive_bitlist - valid - progbitlist_random_15_0 OK ++ progressive_bitlist - valid - progbitlist_random_15_1 OK ++ progressive_bitlist - valid - progbitlist_random_15_2 OK ++ progressive_bitlist - valid - progbitlist_random_15_3 OK ++ progressive_bitlist - valid - progbitlist_random_15_4 OK ++ progressive_bitlist - valid - progbitlist_random_16_0 OK ++ progressive_bitlist - 
valid - progbitlist_random_16_1 OK ++ progressive_bitlist - valid - progbitlist_random_16_2 OK ++ progressive_bitlist - valid - progbitlist_random_16_3 OK ++ progressive_bitlist - valid - progbitlist_random_16_4 OK ++ progressive_bitlist - valid - progbitlist_random_17_0 OK ++ progressive_bitlist - valid - progbitlist_random_17_1 OK ++ progressive_bitlist - valid - progbitlist_random_17_2 OK ++ progressive_bitlist - valid - progbitlist_random_17_3 OK ++ progressive_bitlist - valid - progbitlist_random_17_4 OK ++ progressive_bitlist - valid - progbitlist_random_1_0 OK ++ progressive_bitlist - valid - progbitlist_random_1_1 OK ++ progressive_bitlist - valid - progbitlist_random_1_2 OK ++ progressive_bitlist - valid - progbitlist_random_1_3 OK ++ progressive_bitlist - valid - progbitlist_random_1_4 OK ++ progressive_bitlist - valid - progbitlist_random_255_0 OK ++ progressive_bitlist - valid - progbitlist_random_255_1 OK ++ progressive_bitlist - valid - progbitlist_random_255_2 OK ++ progressive_bitlist - valid - progbitlist_random_255_3 OK ++ progressive_bitlist - valid - progbitlist_random_255_4 OK ++ progressive_bitlist - valid - progbitlist_random_256_0 OK ++ progressive_bitlist - valid - progbitlist_random_256_1 OK ++ progressive_bitlist - valid - progbitlist_random_256_2 OK ++ progressive_bitlist - valid - progbitlist_random_256_3 OK ++ progressive_bitlist - valid - progbitlist_random_256_4 OK ++ progressive_bitlist - valid - progbitlist_random_257_0 OK ++ progressive_bitlist - valid - progbitlist_random_257_1 OK ++ progressive_bitlist - valid - progbitlist_random_257_2 OK ++ progressive_bitlist - valid - progbitlist_random_257_3 OK ++ progressive_bitlist - valid - progbitlist_random_257_4 OK ++ progressive_bitlist - valid - progbitlist_random_2_0 OK ++ progressive_bitlist - valid - progbitlist_random_2_1 OK ++ progressive_bitlist - valid - progbitlist_random_2_2 OK ++ progressive_bitlist - valid - progbitlist_random_2_3 OK ++ progressive_bitlist - valid - progbitlist_random_2_4 OK ++ progressive_bitlist - valid - progbitlist_random_31_0 OK ++ progressive_bitlist - valid - progbitlist_random_31_1 OK ++ progressive_bitlist - valid - progbitlist_random_31_2 OK ++ progressive_bitlist - valid - progbitlist_random_31_3 OK ++ progressive_bitlist - valid - progbitlist_random_31_4 OK ++ progressive_bitlist - valid - progbitlist_random_32_0 OK ++ progressive_bitlist - valid - progbitlist_random_32_1 OK ++ progressive_bitlist - valid - progbitlist_random_32_2 OK ++ progressive_bitlist - valid - progbitlist_random_32_3 OK ++ progressive_bitlist - valid - progbitlist_random_32_4 OK ++ progressive_bitlist - valid - progbitlist_random_33_0 OK ++ progressive_bitlist - valid - progbitlist_random_33_1 OK ++ progressive_bitlist - valid - progbitlist_random_33_2 OK ++ progressive_bitlist - valid - progbitlist_random_33_3 OK ++ progressive_bitlist - valid - progbitlist_random_33_4 OK ++ progressive_bitlist - valid - progbitlist_random_3_0 OK ++ progressive_bitlist - valid - progbitlist_random_3_1 OK ++ progressive_bitlist - valid - progbitlist_random_3_2 OK ++ progressive_bitlist - valid - progbitlist_random_3_3 OK ++ progressive_bitlist - valid - progbitlist_random_3_4 OK ++ progressive_bitlist - valid - progbitlist_random_4_0 OK ++ progressive_bitlist - valid - progbitlist_random_4_1 OK ++ progressive_bitlist - valid - progbitlist_random_4_2 OK ++ progressive_bitlist - valid - progbitlist_random_4_3 OK ++ progressive_bitlist - valid - progbitlist_random_4_4 OK ++ progressive_bitlist - valid - 
progbitlist_random_511_0 OK ++ progressive_bitlist - valid - progbitlist_random_511_1 OK ++ progressive_bitlist - valid - progbitlist_random_511_2 OK ++ progressive_bitlist - valid - progbitlist_random_511_3 OK ++ progressive_bitlist - valid - progbitlist_random_511_4 OK ++ progressive_bitlist - valid - progbitlist_random_512_0 OK ++ progressive_bitlist - valid - progbitlist_random_512_1 OK ++ progressive_bitlist - valid - progbitlist_random_512_2 OK ++ progressive_bitlist - valid - progbitlist_random_512_3 OK ++ progressive_bitlist - valid - progbitlist_random_512_4 OK ++ progressive_bitlist - valid - progbitlist_random_513_0 OK ++ progressive_bitlist - valid - progbitlist_random_513_1 OK ++ progressive_bitlist - valid - progbitlist_random_513_2 OK ++ progressive_bitlist - valid - progbitlist_random_513_3 OK ++ progressive_bitlist - valid - progbitlist_random_513_4 OK ++ progressive_bitlist - valid - progbitlist_random_5_0 OK ++ progressive_bitlist - valid - progbitlist_random_5_1 OK ++ progressive_bitlist - valid - progbitlist_random_5_2 OK ++ progressive_bitlist - valid - progbitlist_random_5_3 OK ++ progressive_bitlist - valid - progbitlist_random_5_4 OK ++ progressive_bitlist - valid - progbitlist_random_63_0 OK ++ progressive_bitlist - valid - progbitlist_random_63_1 OK ++ progressive_bitlist - valid - progbitlist_random_63_2 OK ++ progressive_bitlist - valid - progbitlist_random_63_3 OK ++ progressive_bitlist - valid - progbitlist_random_63_4 OK ++ progressive_bitlist - valid - progbitlist_random_64_0 OK ++ progressive_bitlist - valid - progbitlist_random_64_1 OK ++ progressive_bitlist - valid - progbitlist_random_64_2 OK ++ progressive_bitlist - valid - progbitlist_random_64_3 OK ++ progressive_bitlist - valid - progbitlist_random_64_4 OK ++ progressive_bitlist - valid - progbitlist_random_65_0 OK ++ progressive_bitlist - valid - progbitlist_random_65_1 OK ++ progressive_bitlist - valid - progbitlist_random_65_2 OK ++ progressive_bitlist - valid - progbitlist_random_65_3 OK ++ progressive_bitlist - valid - progbitlist_random_65_4 OK ++ progressive_bitlist - valid - progbitlist_random_6_0 OK ++ progressive_bitlist - valid - progbitlist_random_6_1 OK ++ progressive_bitlist - valid - progbitlist_random_6_2 OK ++ progressive_bitlist - valid - progbitlist_random_6_3 OK ++ progressive_bitlist - valid - progbitlist_random_6_4 OK ++ progressive_bitlist - valid - progbitlist_random_7_0 OK ++ progressive_bitlist - valid - progbitlist_random_7_1 OK ++ progressive_bitlist - valid - progbitlist_random_7_2 OK ++ progressive_bitlist - valid - progbitlist_random_7_3 OK ++ progressive_bitlist - valid - progbitlist_random_7_4 OK ++ progressive_bitlist - valid - progbitlist_random_8_0 OK ++ progressive_bitlist - valid - progbitlist_random_8_1 OK ++ progressive_bitlist - valid - progbitlist_random_8_2 OK ++ progressive_bitlist - valid - progbitlist_random_8_3 OK ++ progressive_bitlist - valid - progbitlist_random_8_4 OK ++ progressive_bitlist - valid - progbitlist_random_9_0 OK ++ progressive_bitlist - valid - progbitlist_random_9_1 OK ++ progressive_bitlist - valid - progbitlist_random_9_2 OK ++ progressive_bitlist - valid - progbitlist_random_9_3 OK ++ progressive_bitlist - valid - progbitlist_random_9_4 OK ++ progressive_bitlist - valid - progbitlist_zero_0_0 OK ++ progressive_bitlist - valid - progbitlist_zero_0_1 OK ++ progressive_bitlist - valid - progbitlist_zero_0_2 OK ++ progressive_bitlist - valid - progbitlist_zero_0_3 OK ++ progressive_bitlist - valid - progbitlist_zero_0_4 OK ++ 
progressive_bitlist - valid - progbitlist_zero_1023_0 OK ++ progressive_bitlist - valid - progbitlist_zero_1023_1 OK ++ progressive_bitlist - valid - progbitlist_zero_1023_2 OK ++ progressive_bitlist - valid - progbitlist_zero_1023_3 OK ++ progressive_bitlist - valid - progbitlist_zero_1023_4 OK ++ progressive_bitlist - valid - progbitlist_zero_1024_0 OK ++ progressive_bitlist - valid - progbitlist_zero_1024_1 OK ++ progressive_bitlist - valid - progbitlist_zero_1024_2 OK ++ progressive_bitlist - valid - progbitlist_zero_1024_3 OK ++ progressive_bitlist - valid - progbitlist_zero_1024_4 OK ++ progressive_bitlist - valid - progbitlist_zero_1025_0 OK ++ progressive_bitlist - valid - progbitlist_zero_1025_1 OK ++ progressive_bitlist - valid - progbitlist_zero_1025_2 OK ++ progressive_bitlist - valid - progbitlist_zero_1025_3 OK ++ progressive_bitlist - valid - progbitlist_zero_1025_4 OK ++ progressive_bitlist - valid - progbitlist_zero_15_0 OK ++ progressive_bitlist - valid - progbitlist_zero_15_1 OK ++ progressive_bitlist - valid - progbitlist_zero_15_2 OK ++ progressive_bitlist - valid - progbitlist_zero_15_3 OK ++ progressive_bitlist - valid - progbitlist_zero_15_4 OK ++ progressive_bitlist - valid - progbitlist_zero_16_0 OK ++ progressive_bitlist - valid - progbitlist_zero_16_1 OK ++ progressive_bitlist - valid - progbitlist_zero_16_2 OK ++ progressive_bitlist - valid - progbitlist_zero_16_3 OK ++ progressive_bitlist - valid - progbitlist_zero_16_4 OK ++ progressive_bitlist - valid - progbitlist_zero_17_0 OK ++ progressive_bitlist - valid - progbitlist_zero_17_1 OK ++ progressive_bitlist - valid - progbitlist_zero_17_2 OK ++ progressive_bitlist - valid - progbitlist_zero_17_3 OK ++ progressive_bitlist - valid - progbitlist_zero_17_4 OK ++ progressive_bitlist - valid - progbitlist_zero_1_0 OK ++ progressive_bitlist - valid - progbitlist_zero_1_1 OK ++ progressive_bitlist - valid - progbitlist_zero_1_2 OK ++ progressive_bitlist - valid - progbitlist_zero_1_3 OK ++ progressive_bitlist - valid - progbitlist_zero_1_4 OK ++ progressive_bitlist - valid - progbitlist_zero_255_0 OK ++ progressive_bitlist - valid - progbitlist_zero_255_1 OK ++ progressive_bitlist - valid - progbitlist_zero_255_2 OK ++ progressive_bitlist - valid - progbitlist_zero_255_3 OK ++ progressive_bitlist - valid - progbitlist_zero_255_4 OK ++ progressive_bitlist - valid - progbitlist_zero_256_0 OK ++ progressive_bitlist - valid - progbitlist_zero_256_1 OK ++ progressive_bitlist - valid - progbitlist_zero_256_2 OK ++ progressive_bitlist - valid - progbitlist_zero_256_3 OK ++ progressive_bitlist - valid - progbitlist_zero_256_4 OK ++ progressive_bitlist - valid - progbitlist_zero_257_0 OK ++ progressive_bitlist - valid - progbitlist_zero_257_1 OK ++ progressive_bitlist - valid - progbitlist_zero_257_2 OK ++ progressive_bitlist - valid - progbitlist_zero_257_3 OK ++ progressive_bitlist - valid - progbitlist_zero_257_4 OK ++ progressive_bitlist - valid - progbitlist_zero_2_0 OK ++ progressive_bitlist - valid - progbitlist_zero_2_1 OK ++ progressive_bitlist - valid - progbitlist_zero_2_2 OK ++ progressive_bitlist - valid - progbitlist_zero_2_3 OK ++ progressive_bitlist - valid - progbitlist_zero_2_4 OK ++ progressive_bitlist - valid - progbitlist_zero_31_0 OK ++ progressive_bitlist - valid - progbitlist_zero_31_1 OK ++ progressive_bitlist - valid - progbitlist_zero_31_2 OK ++ progressive_bitlist - valid - progbitlist_zero_31_3 OK ++ progressive_bitlist - valid - progbitlist_zero_31_4 OK ++ progressive_bitlist - valid - 
progbitlist_zero_32_0 OK ++ progressive_bitlist - valid - progbitlist_zero_32_1 OK ++ progressive_bitlist - valid - progbitlist_zero_32_2 OK ++ progressive_bitlist - valid - progbitlist_zero_32_3 OK ++ progressive_bitlist - valid - progbitlist_zero_32_4 OK ++ progressive_bitlist - valid - progbitlist_zero_33_0 OK ++ progressive_bitlist - valid - progbitlist_zero_33_1 OK ++ progressive_bitlist - valid - progbitlist_zero_33_2 OK ++ progressive_bitlist - valid - progbitlist_zero_33_3 OK ++ progressive_bitlist - valid - progbitlist_zero_33_4 OK ++ progressive_bitlist - valid - progbitlist_zero_3_0 OK ++ progressive_bitlist - valid - progbitlist_zero_3_1 OK ++ progressive_bitlist - valid - progbitlist_zero_3_2 OK ++ progressive_bitlist - valid - progbitlist_zero_3_3 OK ++ progressive_bitlist - valid - progbitlist_zero_3_4 OK ++ progressive_bitlist - valid - progbitlist_zero_4_0 OK ++ progressive_bitlist - valid - progbitlist_zero_4_1 OK ++ progressive_bitlist - valid - progbitlist_zero_4_2 OK ++ progressive_bitlist - valid - progbitlist_zero_4_3 OK ++ progressive_bitlist - valid - progbitlist_zero_4_4 OK ++ progressive_bitlist - valid - progbitlist_zero_511_0 OK ++ progressive_bitlist - valid - progbitlist_zero_511_1 OK ++ progressive_bitlist - valid - progbitlist_zero_511_2 OK ++ progressive_bitlist - valid - progbitlist_zero_511_3 OK ++ progressive_bitlist - valid - progbitlist_zero_511_4 OK ++ progressive_bitlist - valid - progbitlist_zero_512_0 OK ++ progressive_bitlist - valid - progbitlist_zero_512_1 OK ++ progressive_bitlist - valid - progbitlist_zero_512_2 OK ++ progressive_bitlist - valid - progbitlist_zero_512_3 OK ++ progressive_bitlist - valid - progbitlist_zero_512_4 OK ++ progressive_bitlist - valid - progbitlist_zero_513_0 OK ++ progressive_bitlist - valid - progbitlist_zero_513_1 OK ++ progressive_bitlist - valid - progbitlist_zero_513_2 OK ++ progressive_bitlist - valid - progbitlist_zero_513_3 OK ++ progressive_bitlist - valid - progbitlist_zero_513_4 OK ++ progressive_bitlist - valid - progbitlist_zero_5_0 OK ++ progressive_bitlist - valid - progbitlist_zero_5_1 OK ++ progressive_bitlist - valid - progbitlist_zero_5_2 OK ++ progressive_bitlist - valid - progbitlist_zero_5_3 OK ++ progressive_bitlist - valid - progbitlist_zero_5_4 OK ++ progressive_bitlist - valid - progbitlist_zero_63_0 OK ++ progressive_bitlist - valid - progbitlist_zero_63_1 OK ++ progressive_bitlist - valid - progbitlist_zero_63_2 OK ++ progressive_bitlist - valid - progbitlist_zero_63_3 OK ++ progressive_bitlist - valid - progbitlist_zero_63_4 OK ++ progressive_bitlist - valid - progbitlist_zero_64_0 OK ++ progressive_bitlist - valid - progbitlist_zero_64_1 OK ++ progressive_bitlist - valid - progbitlist_zero_64_2 OK ++ progressive_bitlist - valid - progbitlist_zero_64_3 OK ++ progressive_bitlist - valid - progbitlist_zero_64_4 OK ++ progressive_bitlist - valid - progbitlist_zero_65_0 OK ++ progressive_bitlist - valid - progbitlist_zero_65_1 OK ++ progressive_bitlist - valid - progbitlist_zero_65_2 OK ++ progressive_bitlist - valid - progbitlist_zero_65_3 OK ++ progressive_bitlist - valid - progbitlist_zero_65_4 OK ++ progressive_bitlist - valid - progbitlist_zero_6_0 OK ++ progressive_bitlist - valid - progbitlist_zero_6_1 OK ++ progressive_bitlist - valid - progbitlist_zero_6_2 OK ++ progressive_bitlist - valid - progbitlist_zero_6_3 OK ++ progressive_bitlist - valid - progbitlist_zero_6_4 OK ++ progressive_bitlist - valid - progbitlist_zero_7_0 OK ++ progressive_bitlist - valid - progbitlist_zero_7_1 
OK ++ progressive_bitlist - valid - progbitlist_zero_7_2 OK ++ progressive_bitlist - valid - progbitlist_zero_7_3 OK ++ progressive_bitlist - valid - progbitlist_zero_7_4 OK ++ progressive_bitlist - valid - progbitlist_zero_8_0 OK ++ progressive_bitlist - valid - progbitlist_zero_8_1 OK ++ progressive_bitlist - valid - progbitlist_zero_8_2 OK ++ progressive_bitlist - valid - progbitlist_zero_8_3 OK ++ progressive_bitlist - valid - progbitlist_zero_8_4 OK ++ progressive_bitlist - valid - progbitlist_zero_9_0 OK ++ progressive_bitlist - valid - progbitlist_zero_9_1 OK ++ progressive_bitlist - valid - progbitlist_zero_9_2 OK ++ progressive_bitlist - valid - progbitlist_zero_9_3 OK ++ progressive_bitlist - valid - progbitlist_zero_9_4 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_extra_byte OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_last_offset_13_ove OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_last_offset_17_ove OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_last_offset_1_over OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_last_offset_21_ove OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_last_offset_25_ove OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_last_offset_5_over OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_last_offset_9_over OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_modded_0 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_modded_1 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_modded_2 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_modded_3 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_modded_4 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_modded_5 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_modded_6 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_modded_7 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_modded_8 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_13_minus_on OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_13_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_13_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_17_minus_on OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_17_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_17_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_1_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_1_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_1_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_21_minus_on OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_21_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_21_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_25_minus_on OK ++ 
progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_25_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_25_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_5_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_5_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_5_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_9_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_9_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_lengthy_offset_9_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_modded_0 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_modded_1 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_modded_2 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_modded_3 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_modded_4 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_modded_5 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_modded_6 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_modded_7 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_modded_8 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_13_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_13_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_13_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_17_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_17_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_17_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_1_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_1_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_1_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_21_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_21_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_21_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_25_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_25_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_25_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_5_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_5_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_5_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_9_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_9_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_nil_offset_9_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_modded_0 OK ++ 
progressive_containers - invalid - ProgressiveComplexTestStruct_one_modded_1 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_modded_2 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_modded_3 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_modded_4 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_modded_5 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_modded_6 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_modded_7 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_modded_8 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_13_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_13_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_13_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_17_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_17_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_17_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_1_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_1_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_1_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_21_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_21_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_21_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_25_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_25_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_25_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_5_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_5_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_5_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_9_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_9_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_one_offset_9_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_modded_0 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_modded_1 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_modded_2 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_modded_3 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_modded_4 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_modded_5 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_modded_6 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_modded_7 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_modded_8 OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_13_minus_one OK ++ progressive_containers - invalid - 
ProgressiveComplexTestStruct_random_offset_13_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_13_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_17_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_17_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_17_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_1_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_1_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_1_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_21_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_21_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_21_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_25_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_25_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_25_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_5_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_5_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_5_zeroed OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_9_minus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_9_plus_one OK ++ progressive_containers - invalid - ProgressiveComplexTestStruct_random_offset_9_zeroed OK ++ progressive_containers - invalid - ProgressiveSingleFieldContainerTestStruct_extra_byte OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_extra_byte OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_lengthy_last_o OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_lengthy_modded OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_lengthy_modded OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_lengthy_modded OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_lengthy_modded OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_lengthy_modded OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_lengthy_modded OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_lengthy_modded OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_lengthy_offset OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_lengthy_offset OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_nil_modded_0 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_nil_modded_1 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_nil_modded_2 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_nil_modded_3 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_nil_modded_4 OK ++ progressive_containers - invalid - 
ProgressiveSingleListContainerTestStruct_nil_modded_5 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_nil_modded_6 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_nil_modded_8 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_nil_offset_0_m OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_nil_offset_0_p OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_nil_offset_0_z OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_one_modded_0 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_one_modded_1 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_one_modded_2 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_one_modded_3 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_one_modded_4 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_one_modded_5 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_one_modded_6 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_one_modded_7 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_one_modded_8 OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_one_offset_0_m OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_one_offset_0_p OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_one_offset_0_z OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_random_modded_ OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_random_modded_ OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_random_modded_ OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_random_modded_ OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_random_modded_ OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_random_modded_ OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_random_modded_ OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_random_modded_ OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_random_offset_ OK ++ progressive_containers - invalid - ProgressiveSingleListContainerTestStruct_random_offset_ OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_extra_byte OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_last_offset_1_overflow OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_last_offset_5_overflow OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_modded_0 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_modded_3 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_modded_4 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_modded_5 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_modded_6 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_modded_7 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_modded_8 OK ++ progressive_containers - invalid - 
ProgressiveVarTestStruct_lengthy_offset_1_minus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_offset_1_plus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_offset_1_zeroed OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_offset_5_minus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_offset_5_plus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_lengthy_offset_5_zeroed OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_modded_0 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_modded_1 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_modded_3 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_modded_4 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_modded_5 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_modded_6 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_modded_7 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_modded_8 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_offset_1_minus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_offset_1_plus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_offset_1_zeroed OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_offset_5_minus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_offset_5_plus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_nil_offset_5_zeroed OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_modded_0 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_modded_3 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_modded_4 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_modded_5 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_modded_6 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_modded_7 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_modded_8 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_offset_1_minus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_offset_1_plus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_offset_1_zeroed OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_offset_5_minus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_offset_5_plus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_one_offset_5_zeroed OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_random_modded_0 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_random_modded_3 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_random_modded_4 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_random_modded_5 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_random_modded_6 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_random_modded_7 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_random_modded_8 OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_random_offset_1_minus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_random_offset_1_plus_one OK ++ progressive_containers - invalid - 
ProgressiveVarTestStruct_random_offset_1_zeroed OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_random_offset_5_minus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_random_offset_5_plus_one OK ++ progressive_containers - invalid - ProgressiveVarTestStruct_random_offset_5_zeroed OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_0 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_1 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_2 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_3 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_4 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_5 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_6 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_7 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_8 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_9 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_chaos_0 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_chaos_1 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_lengthy_chaos_2 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_0 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_1 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_2 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_3 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_4 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_5 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_6 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_7 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_8 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_9 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_chaos_0 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_chaos_1 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_max_chaos_2 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_0 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_1 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_2 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_3 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_4 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_5 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_6 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_7 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_8 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_9 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_chaos_0 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_chaos_1 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_nil_chaos_2 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_one_0 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_one_1 OK ++ 
progressive_containers - valid - ProgressiveComplexTestStruct_one_2 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_one_3 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_one_4 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_one_5 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_one_6 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_one_7 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_one_8 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_one_9 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_one_chaos_0 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_one_chaos_1 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_one_chaos_2 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_0 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_1 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_2 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_3 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_4 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_5 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_6 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_7 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_8 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_9 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_chaos_0 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_chaos_1 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_random_chaos_2 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_0 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_1 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_2 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_3 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_4 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_5 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_6 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_7 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_8 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_9 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_chaos_0 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_chaos_1 OK ++ progressive_containers - valid - ProgressiveComplexTestStruct_zero_chaos_2 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_max OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_max_chaos_0 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_max_chaos_1 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_max_chaos_2 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_0 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_1 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_2 OK ++ 
progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_3 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_4 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_5 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_6 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_7 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_8 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_9 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_chaos_0 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_chaos_1 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_random_chaos_2 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_zero OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_zero_chaos_0 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_zero_chaos_1 OK ++ progressive_containers - valid - ProgressiveSingleFieldContainerTestStruct_zero_chaos_2 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_0 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_1 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_2 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_3 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_4 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_5 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_6 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_7 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_8 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_9 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_chaos_0 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_chaos_1 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_lengthy_chaos_2 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max_0 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max_1 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max_2 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max_3 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max_4 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max_5 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max_6 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max_7 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max_8 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max_9 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max_chaos_0 OK ++ progressive_containers - valid - 
ProgressiveSingleListContainerTestStruct_max_chaos_1 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_max_chaos_2 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_0 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_1 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_2 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_3 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_4 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_5 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_6 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_7 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_8 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_9 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_chaos_0 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_chaos_1 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_nil_chaos_2 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_0 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_1 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_2 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_3 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_4 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_5 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_6 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_7 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_8 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_9 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_chaos_0 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_chaos_1 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_one_chaos_2 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_random_0 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_random_1 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_random_2 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_random_3 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_random_4 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_random_5 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_random_6 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_random_7 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_random_8 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_random_9 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_random_chaos_0 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_random_chaos_1 OK ++ progressive_containers - valid - 
ProgressiveSingleListContainerTestStruct_random_chaos_2 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_0 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_1 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_2 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_3 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_4 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_5 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_6 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_7 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_8 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_9 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_chaos_0 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_chaos_1 OK ++ progressive_containers - valid - ProgressiveSingleListContainerTestStruct_zero_chaos_2 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_0 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_1 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_2 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_3 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_4 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_5 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_6 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_7 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_8 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_9 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_chaos_0 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_chaos_1 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_lengthy_chaos_2 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_0 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_1 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_2 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_3 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_4 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_5 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_6 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_7 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_8 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_9 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_chaos_0 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_chaos_1 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_max_chaos_2 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_nil_0 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_nil_1 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_nil_2 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_nil_3 OK ++ 
progressive_containers - valid - ProgressiveVarTestStruct_nil_4 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_nil_5 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_nil_6 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_nil_7 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_nil_8 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_nil_9 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_nil_chaos_0 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_nil_chaos_1 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_nil_chaos_2 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_0 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_1 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_2 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_3 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_4 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_5 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_6 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_7 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_8 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_9 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_chaos_0 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_chaos_1 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_one_chaos_2 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_0 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_1 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_2 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_3 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_4 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_5 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_6 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_7 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_8 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_9 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_chaos_0 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_chaos_1 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_random_chaos_2 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_0 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_1 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_2 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_3 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_4 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_5 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_6 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_7 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_8 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_9 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_chaos_0 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_chaos_1 OK ++ progressive_containers - valid - ProgressiveVarTestStruct_zero_chaos_2 OK ++ 
uints - invalid - uint_128_one_byte_longer OK
++ uints - invalid - uint_128_one_byte_shorter OK
++ uints - invalid - uint_128_one_too_high OK
++ uints - invalid - uint_16_one_byte_longer OK
++ uints - invalid - uint_16_one_byte_shorter OK
++ uints - invalid - uint_16_one_too_high OK
++ uints - invalid - uint_256_one_byte_longer OK
++ uints - invalid - uint_256_one_byte_shorter OK
++ uints - invalid - uint_256_one_too_high OK
++ uints - invalid - uint_32_one_byte_longer OK
++ uints - invalid - uint_32_one_byte_shorter OK
++ uints - invalid - uint_32_one_too_high OK
++ uints - invalid - uint_64_one_byte_longer OK
++ uints - invalid - uint_64_one_byte_shorter OK
++ uints - invalid - uint_64_one_too_high OK
++ uints - invalid - uint_8_one_byte_longer OK
++ uints - invalid - uint_8_one_byte_shorter OK
++ uints - invalid - uint_8_one_too_high OK
++ uints - valid - uint_128_last_byte_empty OK
++ uints - valid - uint_128_max OK
++ uints - valid - uint_128_random_0 OK
++ uints - valid - uint_128_random_1 OK
++ uints - valid - uint_128_random_2 OK
++ uints - valid - uint_128_random_3 OK
++ uints - valid - uint_128_random_4 OK
++ uints - valid - uint_128_zero OK
++ uints - valid - uint_16_last_byte_empty OK
++ uints - valid - uint_16_max OK
++ uints - valid - uint_16_random_0 OK
++ uints - valid - uint_16_random_1 OK
++ uints - valid - uint_16_random_2 OK
++ uints - valid - uint_16_random_3 OK
++ uints - valid - uint_16_random_4 OK
++ uints - valid - uint_16_zero OK
++ uints - valid - uint_256_last_byte_empty OK
++ uints - valid - uint_256_max OK
++ uints - valid - uint_256_random_0 OK
++ uints - valid - uint_256_random_1 OK
++ uints - valid - uint_256_random_2 OK
++ uints - valid - uint_256_random_3 OK
++ uints - valid - uint_256_random_4 OK
++ uints - valid - uint_256_zero OK
++ uints - valid - uint_32_last_byte_empty OK
++ uints - valid - uint_32_max OK
++ uints - valid - uint_32_random_0 OK
++ uints - valid - uint_32_random_1 OK
++ uints - valid - uint_32_random_2 OK
++ uints - valid - uint_32_random_3 OK
++ uints - valid - uint_32_random_4 OK
++ uints - valid - uint_32_zero OK
++ uints - valid - uint_64_last_byte_empty OK
++ uints - valid - uint_64_max OK
++ uints - valid - uint_64_random_0 OK
++ uints - valid - uint_64_random_1 OK
++ uints - valid - uint_64_random_2 OK
++ uints - valid - uint_64_random_3 OK
++ uints - valid - uint_64_random_4 OK
++ uints - valid - uint_64_zero OK
++ uints - valid - uint_8_last_byte_empty OK
++ uints - valid - uint_8_max OK
++ uints - valid - uint_8_random_0 OK
++ uints - valid - uint_8_random_1 OK
++ uints - valid - uint_8_random_2 OK
++ uints - valid - uint_8_random_3 OK
++ uints - valid - uint_8_random_4 OK
++ uints - valid - uint_8_zero OK
```
## weak-subjectivity-checkpoint
```diff
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7d6e5c35c6..865cbabf67 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,199 @@
+2025-09-26 v25.9.2
+==================
+
+Nimbus `v25.9.2` is a `medium-urgency` stability-oriented release for mainnet and a `high-urgency` release for the Hoodi, Sepolia, and Holesky testnets, due to the impending Fusaka forks on each.
+
+### Fixes
+
+- Fix a crash when a block without blobs is unqueued from quarantine:
+  https://github.com/status-im/nimbus-eth2/pull/7543
+
+2025-09-25 v25.9.1
+==================
+
+Nimbus `v25.9.1` is a `low-urgency` release for mainnet and a `high-urgency` release for the Hoodi, Sepolia, and Holesky testnets, due to the impending Fusaka forks on each.
+
+### Improvements
+
+- Provide execution clients more time in certain circumstances to prepare payloads:
+  https://github.com/status-im/nimbus-eth2/pull/7457
+
+- Include BLS to execution changes in blocks in more situations:
+  https://github.com/status-im/nimbus-eth2/pull/7506
+
+- Update the validator client to use the non-deprecated publishBlockV2 endpoint pre-Electra:
+  https://github.com/status-im/nimbus-eth2/pull/7438
+
+### Fixes
+
+- Fix sync sometimes getting stuck:
+  https://github.com/status-im/nimbus-eth2/pull/7491
+
+- Provide proper CORS headers for the beacon API:
+  https://github.com/status-im/nimbus-eth2/pull/7473
+
+- Enable building on non-macOS ARM platforms:
+  https://github.com/status-im/nimbus-eth2/pull/7465
+
+2025-09-02 v25.9.0
+==================
+
+Nimbus `v25.9.0` is a `low-urgency` release which increases the resiliency and efficiency of block production. Furthermore, macOS amd64 releases have been deprecated and will be discontinued by October 2025.
+
+### Improvements
+
+- Allow fallback to the builder API if the EL provides invalid requests:
+  https://github.com/status-im/nimbus-eth2/pull/7396
+
+- Propose blocks more efficiently when the builder API is enabled:
+  https://github.com/status-im/nimbus-eth2/pull/7353
+
+- Log execution addresses more readably:
+  https://github.com/status-im/nimbus-eth2/pull/7325
+
+### Fixes
+
+- Avoid crashing on startup when the data directory can't be created:
+  https://github.com/status-im/nimbus-eth2/pull/7326
+
+- Require matching Electra fork configurations between beacon node and REST clients:
+  https://github.com/status-im/nimbus-eth2/pull/7321
+  https://github.com/status-im/nimbus-eth2/pull/7322
+
+- Fix dropped connections being interpreted as empty lists during syncing:
+  https://github.com/status-im/nimbus-eth2/pull/7318
+
+2025-07-31 v25.7.1
+==================
+
+Nimbus `v25.7.1` is a `medium-urgency` release, fixing a potential syncing-related crash.
+
+### Improvements
+
+- Use the Nimbus agent string for builder API calls:
+  https://github.com/status-im/nimbus-eth2/pull/7300
+
+### Fixes
+
+- Fix an assertion failure during syncing:
+  https://github.com/status-im/nimbus-eth2/pull/7315
+
+2025-07-10 v25.7.0
+==================
+
+Nimbus `v25.7.0` is a `low-urgency` release, except when using the validator client with non-Nimbus beacon nodes, in which case it is a `medium-urgency` release.
+
+### Improvements
+
+- Increase the default builder API gas limit to 45M:
+  https://github.com/status-im/nimbus-eth2/pull/7234
+
+- Ensure that the validator client attests in a timely way even with partially unresponsive beacon nodes:
+  https://github.com/status-im/nimbus-eth2/pull/7276
+
+- Implement the postStateValidatorIdentities beacon API endpoint:
+  https://github.com/status-im/nimbus-eth2/pull/7223
+
+- Implement the getDebugDataColumnSidecars beacon API endpoint:
+  https://github.com/status-im/nimbus-eth2/pull/7237
+
+### Fixes
+
+- Fix a sync-related crash regression introduced in v25.6.0:
+  https://github.com/status-im/nimbus-eth2/pull/7275
+
+- Restore validator client compatibility with beacon nodes providing BPO schedules:
+  https://github.com/status-im/nimbus-eth2/pull/7219
+
+- Add the missing `finalized` field to the getStateV2 beacon API endpoint:
+  https://github.com/status-im/nimbus-eth2/pull/7248
+
+2025-06-16 v25.6.0
+==================
+
+Nimbus `v25.6.0` is a `low-urgency` release.
+
+### Improvements
+
+- Exit on the first slashing of a validator for which the node is responsible:
+  https://github.com/status-im/nimbus-eth2/pull/7091
+
+- Reduce the extent to which backfilling can hinder forward sync or chain progress:
+  https://github.com/status-im/nimbus-eth2/pull/7191
+
+- Prevent empty block responses from peers from causing forward sync to overshoot:
+  https://github.com/status-im/nimbus-eth2/pull/7197
+
+- Provide blocks and blobs over req/resp regardless of payload validation status:
+  https://github.com/status-im/nimbus-eth2/pull/7198
+
+- Remove deprecated Eth1Data polling for deposits in favor of EIP6110:
+  https://github.com/status-im/nimbus-eth2/pull/7114
+
+### Fixes
+
+- Implement getPendingConsolidations and add the `version` field to the getPendingDeposits and getPendingPartialWithdrawals beacon API endpoints:
+  https://github.com/status-im/nimbus-eth2/pull/7170
+
+- Add the missing `finalized` field to the getStateRandao beacon API endpoint:
+  https://github.com/status-im/nimbus-eth2/pull/7171
+
+2025-05-08 v25.5.0
+==================
+
+Nimbus `v25.5.0` does not alter the beacon node. It is a `low-urgency` release for setups which don't use the Nimbus validator client with a non-Nimbus beacon node, and a `high-urgency` release for setups which do.
+
+### Fixes
+
+- Fix the missing Eth-Consensus-Version header in validator client publishBlindedBlockV2 beacon API requests:
+  https://github.com/status-im/nimbus-eth2/pull/7140
+
+2025-04-26 v25.4.1
+==================
+
+Nimbus `v25.4.1` is a `high-urgency` release for Ethereum and Gnosis mainnets due to their Pectra hardforks.
+
+### Fixes
+
+- Fix potentially missed MEV blocks with the builder API starting with Electra:
+  https://github.com/status-im/nimbus-eth2/pull/7103
+
+- Fix the `single_attestation` SSE beacon API stream subscription:
+  https://github.com/status-im/nimbus-eth2/pull/7107
+
+2025-04-21 v25.4.0
+==================
+
+Nimbus `v25.4.0` is a `high-urgency` release for Ethereum and Gnosis mainnets due to their Pectra hardforks.
+ +### Improvements + +- Add Ethereum Foundation mainnet Pectra support: + https://github.com/status-im/nimbus-eth2/pull/7063 + +- Add Gnosis mainnet Pectra support: + https://github.com/status-im/nimbus-eth2/pull/7062 + +- Improve Electra attestation packing quality: + https://github.com/status-im/nimbus-eth2/pull/7053 + +- Reduce unnecessary state replays on low-participation networks: + https://github.com/status-im/nimbus-eth2/pull/7072 + +- Implement `block_gossip` beacon API SSE stream support: + https://github.com/status-im/nimbus-eth2/pull/7054 + +- Reduce unnecessary Electra attestation logging verbosity: + https://github.com/status-im/nimbus-eth2/pull/7058 + +### Fixes + +- Fix compatibility with certain builder API relays: + https://github.com/status-im/nimbus-eth2/pull/7045 + +- Fix getAggregatedAttestationV2 beacon API endpoint for non-zero committee indices: + https://github.com/status-im/nimbus-eth2/pull/7037 + 2025-03-21 v25.3.1 ================== diff --git a/ConsensusSpecPreset-mainnet.md b/ConsensusSpecPreset-mainnet.md index 7e5edfc6ce..0ecd4847ae 100644 --- a/ConsensusSpecPreset-mainnet.md +++ b/ConsensusSpecPreset-mainnet.md @@ -91,8 +91,8 @@ ConsensusSpecPreset-mainnet + Rewards and penalties - duplicate_attestation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK + Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK @@ -111,16 +111,11 @@ ConsensusSpecPreset-mainnet ```diff + Slashings reset - flush_slashings [Preset: mainnet] OK ``` -## EF - Altair - Finality [Preset: mainnet] -```diff -+ [Valid] EF - Altair - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK -+ [Valid] EF - Altair - Finality - finality_rule_1 [Preset: mainnet] OK -+ [Valid] EF - Altair - Finality - finality_rule_2 [Preset: mainnet] OK -+ [Valid] EF - Altair - Finality - finality_rule_3 [Preset: mainnet] OK -+ [Valid] EF - Altair - Finality - finality_rule_4 [Preset: mainnet] OK -``` ## EF - Altair - Fork [Preset: mainnet] ```diff ++ EF - Altair - Fork - after_fork_deactivate_validators_from_phase0_to_altair [Preset: mainn OK ++ EF - Altair - Fork - after_fork_deactivate_validators_wo_block_from_phase0_to_altair [Pres OK ++ EF - Altair - Fork - after_fork_new_validator_active_from_phase0_to_altair [Preset: mainne OK + EF - Altair - Fork - altair_fork_random_0 [Preset: mainnet] OK + EF - Altair - Fork - altair_fork_random_1 [Preset: mainnet] OK + EF - Altair - Fork - altair_fork_random_2 [Preset: mainnet] OK @@ -303,25 +298,6 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Altair - Operations - Voluntary Exit - default_exit_epoch_subsequent_exit OK + [Valid] EF - Altair - Operations - Voluntary Exit - success_exit_queue__min_churn OK ``` -## EF - Altair - Random [Preset: mainnet] -```diff -+ [Valid] EF - Altair - Random - randomized_0 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_1 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_10 [Preset: mainnet] OK -+ 
[Valid] EF - Altair - Random - randomized_11 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_12 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_13 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_14 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_15 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_2 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_3 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_4 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_5 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_6 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_7 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_8 [Preset: mainnet] OK -+ [Valid] EF - Altair - Random - randomized_9 [Preset: mainnet] OK -``` ## EF - Altair - Rewards [Preset: mainnet] ```diff + EF - Altair - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK @@ -401,56 +377,6 @@ ConsensusSpecPreset-mainnet + Testing Validator OK + Testing VoluntaryExit OK ``` -## EF - Altair - Sanity - Blocks [Preset: mainnet] -```diff -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block [ OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: ma OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pre OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expect OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propos OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mai OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: main OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_same_slot_block_transition [Preset: main OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [P OK -+ [Invalid] EF - Altair - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - attestation [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - attester_slashing [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - balance_driven_status_transitions [Preset: mainn OK -+ [Valid] EF - Altair - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainne OK -+ [Valid] EF - Altair - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - full_random_operations_2 
[Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - historical_batch [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pr OK -+ [Valid] EF - Altair - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: OK -+ [Valid] EF - Altair - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pre OK -+ [Valid] EF - Altair - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK -+ [Valid] EF - Altair - Sanity - Blocks - multiple_different_validator_exits_same_block [P OK -+ [Valid] EF - Altair - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - skipped_slots [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee__empty [Preset: mainnet OK -+ [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet] OK -+ [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: OK -+ [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: OK -+ [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK -+ [Valid] EF - Altair - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK -``` ## EF - Altair - Sanity - Slots [Preset: mainnet] ```diff + EF - Altair - Slots - balance_change_affects_proposer [Preset: mainnet] OK @@ -584,8 +510,8 @@ ConsensusSpecPreset-mainnet + Rewards and penalties - duplicate_attestation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK + Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK @@ -604,16 +530,11 @@ ConsensusSpecPreset-mainnet ```diff + Slashings reset - flush_slashings [Preset: mainnet] OK ``` -## EF - Bellatrix - Finality [Preset: mainnet] -```diff -+ [Valid] EF - Bellatrix - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Finality - finality_rule_1 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Finality - finality_rule_2 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Finality - finality_rule_3 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Finality - finality_rule_4 [Preset: mainnet] OK -``` ## EF - Bellatrix - Fork [Preset: mainnet] ```diff ++ EF - Bellatrix - Fork - 
after_fork_deactivate_validators_from_altair_to_bellatrix [Preset: OK ++ EF - Bellatrix - Fork - after_fork_deactivate_validators_wo_block_from_altair_to_bellatrix OK ++ EF - Bellatrix - Fork - after_fork_new_validator_active_from_altair_to_bellatrix [Preset: OK + EF - Bellatrix - Fork - bellatrix_fork_random_0 [Preset: mainnet] OK + EF - Bellatrix - Fork - bellatrix_fork_random_1 [Preset: mainnet] OK + EF - Bellatrix - Fork - bellatrix_fork_random_2 [Preset: mainnet] OK @@ -830,25 +751,6 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Bellatrix - Operations - Voluntary Exit - voluntary_exit_with_current_fork_ OK + [Valid] EF - Bellatrix - Operations - Voluntary Exit - voluntary_exit_with_previous_fork OK ``` -## EF - Bellatrix - Random [Preset: mainnet] -```diff -+ [Valid] EF - Bellatrix - Random - randomized_0 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_1 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_10 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_11 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_12 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_13 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_14 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_15 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_2 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_3 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_4 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_5 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_6 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_7 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_8 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Random - randomized_9 [Preset: mainnet] OK -``` ## EF - Bellatrix - Rewards [Preset: mainnet] ```diff + EF - Bellatrix - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK @@ -931,59 +833,6 @@ ConsensusSpecPreset-mainnet + Testing Validator OK + Testing VoluntaryExit OK ``` -## EF - Bellatrix - Sanity - Blocks [Preset: mainnet] -```diff -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_duplicate_attester_slashing_same_bloc OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_blo OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [ OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_exp OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_pro OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainne OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: m OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_same_slot_block_transition [Preset: m OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - 
invalid_similar_proposer_slashings_same_block OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - attestation [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - attester_slashing [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - balance_driven_status_transitions [Preset: ma OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - block_transition_randomized_payload [Preset: OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - duplicate_attestation_same_block [Preset: mai OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - empty_block_transition_no_tx [Preset: mainnet OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - historical_batch [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - inactivity_scores_full_participation_leaking OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - is_execution_enabled_false [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - multiple_attester_slashings_no_overlap [Prese OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - multiple_attester_slashings_partial_overlap [ OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - multiple_different_proposer_slashings_same_bl OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - multiple_different_validator_exits_same_block OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - proposer_after_inactive_index [Preset: mainne OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - skipped_slots [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee__empty [Preset: main OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee__full [Preset: mainn OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee__half [Preset: mainn OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee_genesis__empty [Pres OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee_genesis__full [Prese OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee_genesis__half [Prese OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK -``` ## EF - Bellatrix - Sanity - Slots [Preset: mainnet] ```diff + EF - Bellatrix - Slots - balance_change_affects_proposer [Preset: mainnet] OK @@ -1110,8 +959,8 @@ ConsensusSpecPreset-mainnet + Rewards and penalties - 
duplicate_attestation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK + Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK @@ -1130,16 +979,11 @@ ConsensusSpecPreset-mainnet ```diff + Slashings reset - flush_slashings [Preset: mainnet] OK ``` -## EF - Capella - Finality [Preset: mainnet] -```diff -+ [Valid] EF - Capella - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK -+ [Valid] EF - Capella - Finality - finality_rule_1 [Preset: mainnet] OK -+ [Valid] EF - Capella - Finality - finality_rule_2 [Preset: mainnet] OK -+ [Valid] EF - Capella - Finality - finality_rule_3 [Preset: mainnet] OK -+ [Valid] EF - Capella - Finality - finality_rule_4 [Preset: mainnet] OK -``` ## EF - Capella - Fork [Preset: mainnet] ```diff ++ EF - Capella - Fork - after_fork_deactivate_validators_from_bellatrix_to_capella [Preset: OK ++ EF - Capella - Fork - after_fork_deactivate_validators_wo_block_from_bellatrix_to_capella OK ++ EF - Capella - Fork - after_fork_new_validator_active_from_bellatrix_to_capella [Preset: m OK + EF - Capella - Fork - capella_fork_random_0 [Preset: mainnet] OK + EF - Capella - Fork - capella_fork_random_1 [Preset: mainnet] OK + EF - Capella - Fork - capella_fork_random_2 [Preset: mainnet] OK @@ -1431,25 +1275,6 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Capella - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK + [Valid] EF - Capella - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK ``` -## EF - Capella - Random [Preset: mainnet] -```diff -+ [Valid] EF - Capella - Random - randomized_0 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_1 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_10 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_11 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_12 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_13 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_14 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_15 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_2 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_3 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_4 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_5 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_6 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_7 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_8 [Preset: mainnet] OK -+ [Valid] EF - Capella - Random - randomized_9 [Preset: mainnet] OK -``` ## EF - Capella - Rewards [Preset: mainnet] ```diff + EF - Capella - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK @@ -1536,71 +1361,6 @@ ConsensusSpecPreset-mainnet + Testing VoluntaryExit OK + Testing Withdrawal OK ``` -## EF - Capella - Sanity - Blocks [Preset: mainnet] -```diff -+ 
[Invalid] EF - Capella - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Prese OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: m OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pr OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expec OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propo OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: mai OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: ma OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mai OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mai OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [ OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_ OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_is OK -+ [Invalid] EF - Capella - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - attestation [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - attester_slashing [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - balance_driven_status_transitions [Preset: main OK -+ [Valid] EF - Capella - Sanity - Blocks - block_transition_randomized_payload [Preset: ma OK -+ [Valid] EF - Capella - Sanity - Blocks - bls_change [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainn OK -+ [Valid] EF - Capella - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - empty_block_transition_no_tx [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - exit_and_bls_change [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: ma OK -+ [Valid] EF - Capella - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - historical_batch 
[Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - inactivity_scores_full_participation_leaking [P OK -+ [Valid] EF - Capella - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [P OK -+ [Valid] EF - Capella - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: OK -+ [Valid] EF - Capella - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pr OK -+ [Valid] EF - Capella - Sanity - Blocks - multiple_different_proposer_slashings_same_bloc OK -+ [Valid] EF - Capella - Sanity - Blocks - multiple_different_validator_exits_same_block [ OK -+ [Valid] EF - Capella - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: OK -+ [Valid] EF - Capella - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - skipped_slots [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - sync_committee_committee__empty [Preset: mainne OK -+ [Valid] EF - Capella - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet OK -+ [Valid] EF - Capella - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet OK -+ [Valid] EF - Capella - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset OK -+ [Valid] EF - Capella - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: OK -+ [Valid] EF - Capella - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK -+ [Valid] EF - Capella - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Pres OK -+ [Valid] EF - Capella - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: ma OK -+ [Valid] EF - Capella - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK -+ [Valid] EF - Capella - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK -``` ## EF - Capella - Sanity - Slots [Preset: mainnet] ```diff + EF - Capella - Slots - balance_change_affects_proposer [Preset: mainnet] OK @@ -1734,8 +1494,8 @@ ConsensusSpecPreset-mainnet + Rewards and penalties - duplicate_attestation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK + Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK @@ -1754,16 +1514,11 @@ ConsensusSpecPreset-mainnet ```diff + Slashings reset - flush_slashings [Preset: mainnet] OK ``` -## EF - Deneb - Finality [Preset: mainnet] -```diff -+ [Valid] EF - Deneb - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK -+ [Valid] EF - Deneb - Finality - finality_rule_1 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Finality - finality_rule_2 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Finality - finality_rule_3 
[Preset: mainnet] OK -+ [Valid] EF - Deneb - Finality - finality_rule_4 [Preset: mainnet] OK -``` ## EF - Deneb - Fork [Preset: mainnet] ```diff ++ EF - Deneb - Fork - after_fork_deactivate_validators_from_capella_to_deneb [Preset: mainne OK ++ EF - Deneb - Fork - after_fork_deactivate_validators_wo_block_from_capella_to_deneb [Prese OK ++ EF - Deneb - Fork - after_fork_new_validator_active_from_capella_to_deneb [Preset: mainnet OK + EF - Deneb - Fork - deneb_fork_random_0 [Preset: mainnet] OK + EF - Deneb - Fork - deneb_fork_random_1 [Preset: mainnet] OK + EF - Deneb - Fork - deneb_fork_random_2 [Preset: mainnet] OK @@ -2068,25 +1823,6 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Deneb - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balan OK + [Valid] EF - Deneb - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balan OK ``` -## EF - Deneb - Random [Preset: mainnet] -```diff -+ [Valid] EF - Deneb - Random - randomized_0 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_1 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_10 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_11 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_12 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_13 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_14 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_15 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_2 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_3 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_4 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_5 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_6 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_7 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_8 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Random - randomized_9 [Preset: mainnet] OK -``` ## EF - Deneb - Rewards [Preset: mainnet] ```diff + EF - Deneb - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK @@ -2175,80 +1911,6 @@ ConsensusSpecPreset-mainnet + Testing VoluntaryExit OK + Testing Withdrawal OK ``` -## EF - Deneb - Sanity - Blocks [Preset: mainnet] -```diff -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block [P OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: mai OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [ OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pres OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_exceed_max_blobs_per_block [Preset: mainn OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expecte OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propose OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: mainn OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_max_blobs_per_block_two_txs [Preset: main OK -+ [Invalid] 
EF - Deneb - Sanity - Blocks - invalid_one_blob_max_plus_one_txs [Preset: mainne OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: main OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mainn OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mainn OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pr OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sa OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt OK -+ [Invalid] EF - Deneb - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - attestation [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - attester_slashing [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - balance_driven_status_transitions [Preset: mainne OK -+ [Valid] EF - Deneb - Sanity - Blocks - block_transition_randomized_payload [Preset: main OK -+ [Valid] EF - Deneb - Sanity - Blocks - bls_change [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainnet OK -+ [Valid] EF - Deneb - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - empty_block_transition_no_tx [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - exit_and_bls_change [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: main OK -+ [Valid] EF - Deneb - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - historical_batch [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pre OK -+ [Valid] EF - Deneb - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pre OK -+ [Valid] EF - Deneb - Sanity - Blocks - max_blobs_per_block [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: m OK -+ [Valid] EF - Deneb - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pres OK -+ [Valid] EF - Deneb - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK -+ [Valid] EF - Deneb - Sanity - Blocks - multiple_different_validator_exits_same_block [Pr OK -+ [Valid] EF - Deneb - Sanity - Blocks - one_blob [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - one_blob_max_txs [Preset: mainnet] OK -+ 
[Valid] EF - Deneb - Sanity - Blocks - one_blob_two_txs [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: m OK -+ [Valid] EF - Deneb - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - skipped_slots [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - sync_committee_committee__empty [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: OK -+ [Valid] EF - Deneb - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: m OK -+ [Valid] EF - Deneb - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: m OK -+ [Valid] EF - Deneb - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset OK -+ [Valid] EF - Deneb - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: main OK -+ [Valid] EF - Deneb - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK -+ [Valid] EF - Deneb - Sanity - Blocks - zero_blob [Preset: mainnet] OK -``` ## EF - Deneb - Sanity - Slots [Preset: mainnet] ```diff + EF - Deneb - Slots - balance_change_affects_proposer [Preset: mainnet] OK @@ -2399,7 +2061,6 @@ ConsensusSpecPreset-mainnet + Pending deposits - apply_pending_deposit_top_up__max_effective_balance_compounding [Preset OK + Pending deposits - apply_pending_deposit_top_up__min_activation_balance [Preset: mainnet] OK + Pending deposits - apply_pending_deposit_top_up__min_activation_balance_compounding [Prese OK -+ Pending deposits - apply_pending_deposit_top_up__zero_balance [Preset: mainnet] OK + Pending deposits - apply_pending_deposit_under_min_activation [Preset: mainnet] OK + Pending deposits - apply_pending_deposit_with_previous_fork_version [Preset: mainnet] OK + Pending deposits - ineffective_deposit_with_current_fork_version [Preset: mainnet] OK @@ -2454,8 +2115,8 @@ ConsensusSpecPreset-mainnet + Rewards and penalties - duplicate_attestation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK + Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK @@ -2474,16 +2135,11 @@ ConsensusSpecPreset-mainnet ```diff + Slashings reset - flush_slashings [Preset: mainnet] OK ``` -## EF - Electra - Finality [Preset: mainnet] -```diff -+ [Valid] EF - Electra - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK -+ [Valid] EF - Electra - Finality - finality_rule_1 
[Preset: mainnet] OK -+ [Valid] EF - Electra - Finality - finality_rule_2 [Preset: mainnet] OK -+ [Valid] EF - Electra - Finality - finality_rule_3 [Preset: mainnet] OK -+ [Valid] EF - Electra - Finality - finality_rule_4 [Preset: mainnet] OK -``` ## EF - Electra - Fork [Preset: mainnet] ```diff ++ EF - Electra - Fork - after_fork_deactivate_validators_from_deneb_to_electra [Preset: main OK ++ EF - Electra - Fork - after_fork_deactivate_validators_wo_block_from_deneb_to_electra [Pre OK ++ EF - Electra - Fork - after_fork_new_validator_active_from_deneb_to_electra [Preset: mainn OK + EF - Electra - Fork - electra_fork_random_0 [Preset: mainnet] OK + EF - Electra - Fork - electra_fork_random_1 [Preset: mainnet] OK + EF - Electra - Fork - electra_fork_random_2 [Preset: mainnet] OK @@ -2650,6 +2306,8 @@ ConsensusSpecPreset-mainnet ``` ## EF - Electra - Operations - Deposit Request [Preset: mainnet] ```diff ++ [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_extra_gwei OK ++ [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_greater_th OK + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_invalid_si OK + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_max_effect OK + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_min_activa OK @@ -2658,6 +2316,7 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_top_up_inv OK + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_top_up_max OK + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_top_up_min OK ++ [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_top_up_sti OK ``` ## EF - Electra - Operations - Execution Payload [Preset: mainnet] ```diff @@ -2775,6 +2434,7 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Electra - Operations - Voluntary Exit - min_balance_exits_above_churn OK + [Valid] EF - Electra - Operations - Voluntary Exit - min_balance_exits_up_to_churn OK + [Valid] EF - Electra - Operations - Voluntary Exit - success_exit_queue__min_churn OK ++ [Valid] EF - Electra - Operations - Voluntary Exit - voluntary_exit_with_pending_deposit OK ``` ## EF - Electra - Operations - Withdrawal Request [Preset: mainnet] ```diff @@ -2841,6 +2501,8 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_next_epoch OK + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_no_excess_balance OK + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_one_skipped_one_ef OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_two_partial_withdr OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_two_partial_withdr OK + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_effective_swe OK + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_ineffective_s OK + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_ineffective_s OK @@ -2879,25 +2541,6 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Electra - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK + [Valid] EF - Electra - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK ``` -## EF - Electra - Random [Preset: mainnet] -```diff -+ [Valid] EF - Electra - Random - randomized_0 [Preset: mainnet] OK -+ [Valid] 
EF - Electra - Random - randomized_1 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_10 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_11 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_12 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_13 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_14 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_15 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_2 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_3 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_4 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_5 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_6 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_7 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_8 [Preset: mainnet] OK -+ [Valid] EF - Electra - Random - randomized_9 [Preset: mainnet] OK -``` ## EF - Electra - Rewards [Preset: mainnet] ```diff + EF - Electra - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK @@ -2994,100 +2637,11 @@ ConsensusSpecPreset-mainnet + Testing Withdrawal OK + Testing WithdrawalRequest OK ``` -## EF - Electra - Sanity - Blocks [Preset: mainnet] -```diff -+ [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_eth1_deposits_overl OK -+ [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_not_enough_eth1_dep OK -+ [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_too_many_eth1_depos OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Prese OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: m OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pr OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_exceed_max_blobs_per_block [Preset: mai OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expec OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propo OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: mai OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_max_blobs_per_block_two_txs [Preset: ma OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_one_blob_max_plus_one_txs [Preset: main OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: ma OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mai OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mai OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [ OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_ OK -+ [Invalid] EF - Electra - Sanity - Blocks - 
invalid_withdrawal_fail_second_block_payload_is OK -+ [Invalid] EF - Electra - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - attestation [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - attester_slashing [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - balance_driven_status_transitions [Preset: main OK -+ [Valid] EF - Electra - Sanity - Blocks - basic_btec_and_el_withdrawal_request_in_same_bl OK -+ [Valid] EF - Electra - Sanity - Blocks - basic_btec_before_el_withdrawal_request [Preset OK -+ [Valid] EF - Electra - Sanity - Blocks - basic_el_withdrawal_request [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - block_transition_randomized_payload [Preset: ma OK -+ [Valid] EF - Electra - Sanity - Blocks - bls_change [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - cl_exit_and_el_withdrawal_request_in_same_block OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_request_with_same_pubkey_different_with OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__deposit_and_top_up_same_blo OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__deposit_with_same_pubkey_di OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_eth1_deposits [Pres OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_eth1_deposits_up_to OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_max_eth1_deposits [ OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__start_index_is_set [Preset: OK -+ [Valid] EF - Electra - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainn OK -+ [Valid] EF - Electra - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - empty_block_transition_no_tx [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - exit_and_bls_change [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: ma OK -+ [Valid] EF - Electra - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - historical_batch [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - inactivity_scores_full_participation_leaking [P OK -+ [Valid] EF - Electra - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [P OK -+ [Valid] EF - Electra - Sanity - Blocks - max_blobs_per_block [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - multiple_different_proposer_slashings_same_bloc OK -+ [Valid] EF - Electra - Sanity - Blocks - 
multiple_different_validator_exits_same_block [ OK -+ [Valid] EF - Electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_differe OK -+ [Valid] EF - Electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_same_va OK -+ [Valid] EF - Electra - Sanity - Blocks - one_blob [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - one_blob_max_txs [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - one_blob_two_txs [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: OK -+ [Valid] EF - Electra - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - skipped_slots [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - switch_to_compounding_requests_when_too_little_ OK -+ [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee__empty [Preset: mainne OK -+ [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet OK -+ [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet OK -+ [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset OK -+ [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: OK -+ [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK -+ [Valid] EF - Electra - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Pres OK -+ [Valid] EF - Electra - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: ma OK -+ [Valid] EF - Electra - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - withdrawal_and_switch_to_compounding_request_sa OK -+ [Valid] EF - Electra - Sanity - Blocks - withdrawal_and_withdrawal_request_same_validato OK -+ [Valid] EF - Electra - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK -+ [Valid] EF - Electra - Sanity - Blocks - zero_blob [Preset: mainnet] OK -``` ## EF - Electra - Sanity - Slots [Preset: mainnet] ```diff + EF - Electra - Slots - balance_change_affects_proposer [Preset: mainnet] OK + EF - Electra - Slots - double_empty_epoch [Preset: mainnet] OK ++ EF - Electra - Slots - effective_decrease_balance_updates_lookahead [Preset: mainnet] OK + EF - Electra - Slots - empty_epoch [Preset: mainnet] OK + EF - Electra - Slots - historical_accumulator [Preset: mainnet] OK + EF - Electra - Slots - multiple_pending_deposits_same_pubkey [Preset: mainnet] OK @@ -3097,6 +2651,7 @@ ConsensusSpecPreset-mainnet + EF - Electra - Slots - multiple_pending_deposits_same_pubkey_different_signature [Preset: OK + EF - Electra - Slots - over_epoch_boundary [Preset: mainnet] OK + EF - Electra - Slots - pending_consolidation [Preset: mainnet] OK ++ EF - Electra - Slots - pending_deposit_extra_gwei [Preset: mainnet] OK + EF - Electra - Slots - slots_1 [Preset: mainnet] OK + EF - Electra - Slots - slots_2 [Preset: mainnet] OK ``` @@ -3242,7 +2797,6 @@ ConsensusSpecPreset-mainnet + Pending deposits - apply_pending_deposit_top_up__max_effective_balance_compounding [Preset OK + Pending deposits - apply_pending_deposit_top_up__min_activation_balance [Preset: mainnet] OK + Pending deposits - 
apply_pending_deposit_top_up__min_activation_balance_compounding [Prese OK -+ Pending deposits - apply_pending_deposit_top_up__zero_balance [Preset: mainnet] OK + Pending deposits - apply_pending_deposit_under_min_activation [Preset: mainnet] OK + Pending deposits - apply_pending_deposit_with_previous_fork_version [Preset: mainnet] OK + Pending deposits - ineffective_deposit_with_current_fork_version [Preset: mainnet] OK @@ -3264,6 +2818,11 @@ ConsensusSpecPreset-mainnet + Pending deposits - process_pending_deposits_withdrawable_validator [Preset: mainnet] OK + Pending deposits - process_pending_deposits_withdrawable_validator_not_churned [Preset: ma OK ``` +## EF - Fulu - Epoch Processing - Proposer lookahead [Preset: mainnet] +```diff ++ Proposer lookahead - proposer_lookahead_does_not_contain_exited_validators [Preset: mainne OK ++ Proposer lookahead - proposer_lookahead_in_state_matches_computed_lookahead [Preset: mainn OK +``` ## EF - Fulu - Epoch Processing - RANDAO mixes reset [Preset: mainnet] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK @@ -3297,8 +2856,8 @@ ConsensusSpecPreset-mainnet + Rewards and penalties - duplicate_attestation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK + Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK @@ -3317,16 +2876,11 @@ ConsensusSpecPreset-mainnet ```diff + Slashings reset - flush_slashings [Preset: mainnet] OK ``` -## EF - Fulu - Finality [Preset: mainnet] -```diff -+ [Valid] EF - Fulu - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK -+ [Valid] EF - Fulu - Finality - finality_rule_1 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Finality - finality_rule_2 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Finality - finality_rule_3 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Finality - finality_rule_4 [Preset: mainnet] OK -``` ## EF - Fulu - Fork [Preset: mainnet] ```diff ++ EF - Fulu - Fork - after_fork_deactivate_validators_from_electra_to_fulu [Preset: mainnet] OK ++ EF - Fulu - Fork - after_fork_deactivate_validators_wo_block_from_electra_to_fulu [Preset: OK ++ EF - Fulu - Fork - after_fork_new_validator_active_from_electra_to_fulu [Preset: mainnet] OK + EF - Fulu - Fork - fork_base_state [Preset: mainnet] OK + EF - Fulu - Fork - fork_many_next_epoch [Preset: mainnet] OK + EF - Fulu - Fork - fork_next_epoch [Preset: mainnet] OK @@ -3339,6 +2893,9 @@ ConsensusSpecPreset-mainnet + EF - Fulu - Fork - fulu_fork_random_3 [Preset: mainnet] OK + EF - Fulu - Fork - fulu_fork_random_low_balances [Preset: mainnet] OK + EF - Fulu - Fork - fulu_fork_random_misc_balances [Preset: mainnet] OK ++ EF - Fulu - Fork - lookahead_consistency_at_fork [Preset: mainnet] OK ++ EF - Fulu - Fork - lookahead_consistency_with_effective_balance_change_at_fork [Preset: ma OK ++ EF - Fulu - Fork - proposer_lookahead_init_at_fork_only_contains_active_validators [Preset OK ``` ## EF - Fulu - Operations - Attestation [Preset: mainnet] ```diff 
@@ -3479,12 +3036,15 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Fulu - Operations - Deposit - new_deposit_non_versioned_withdrawal_credenti OK + [Valid] EF - Fulu - Operations - Deposit - new_deposit_over_max OK + [Valid] EF - Fulu - Operations - Deposit - new_deposit_under_max OK ++ [Valid] EF - Fulu - Operations - Deposit - success_top_up_to_withdrawn_validator OK + [Valid] EF - Fulu - Operations - Deposit - top_up__less_effective_balance OK + [Valid] EF - Fulu - Operations - Deposit - top_up__max_effective_balance OK + [Valid] EF - Fulu - Operations - Deposit - top_up__zero_balance OK ``` ## EF - Fulu - Operations - Deposit Request [Preset: mainnet] ```diff ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_extra_gwei OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_greater_than_ OK + [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_invalid_sig OK + [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_max_effective OK + [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_min_activatio OK @@ -3493,11 +3053,14 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_invali OK + [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_max_ef OK + [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_min_ac OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_still_ OK ``` ## EF - Fulu - Operations - Execution Payload [Preset: mainnet] ```diff + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_everything_first_payloa OK + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_everything_regular_payl OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_execution_first_payload OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_execution_regular_paylo OK + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_parent_hash_first_paylo OK + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_parent_hash_regular_pay OK + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_pre_randao_regular_payl OK @@ -3508,6 +3071,8 @@ ConsensusSpecPreset-mainnet + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_future_timestamp_regular_pa OK + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_past_timestamp_first_payloa OK + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_past_timestamp_regular_payl OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_randomized_non_validated_ex OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_randomized_non_validated_ex OK + [Valid] EF - Fulu - Operations - Execution Payload - incorrect_blob_tx_type OK + [Valid] EF - Fulu - Operations - Execution Payload - incorrect_block_hash OK + [Valid] EF - Fulu - Operations - Execution Payload - incorrect_commitment OK @@ -3519,6 +3084,18 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_no_blobs_but_ OK + [Valid] EF - Fulu - Operations - Execution Payload - no_commitments_for_transactions OK + [Valid] EF - Fulu - Operations - Execution Payload - no_transactions_with_commitments OK ++ [Valid] EF - Fulu - Operations - Execution Payload - non_empty_extra_data_first_payload OK ++ [Valid] EF - Fulu - Operations - Execution 
Payload - non_empty_extra_data_regular_payloa OK ++ [Valid] EF - Fulu - Operations - Execution Payload - non_empty_transactions_first_payloa OK ++ [Valid] EF - Fulu - Operations - Execution Payload - non_empty_transactions_regular_payl OK ++ [Valid] EF - Fulu - Operations - Execution Payload - randomized_non_validated_execution_ OK ++ [Valid] EF - Fulu - Operations - Execution Payload - randomized_non_validated_execution_ OK ++ [Valid] EF - Fulu - Operations - Execution Payload - success_first_payload OK ++ [Valid] EF - Fulu - Operations - Execution Payload - success_first_payload_with_gap_slot OK ++ [Valid] EF - Fulu - Operations - Execution Payload - success_regular_payload OK ++ [Valid] EF - Fulu - Operations - Execution Payload - success_regular_payload_with_gap_sl OK ++ [Valid] EF - Fulu - Operations - Execution Payload - zero_length_transaction_first_paylo OK ++ [Valid] EF - Fulu - Operations - Execution Payload - zero_length_transaction_regular_pay OK + [Valid] EF - Fulu - Operations - Execution Payload - zeroed_commitment OK ``` ## EF - Fulu - Operations - Proposer Slashing [Preset: mainnet] @@ -3594,6 +3171,7 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Fulu - Operations - Voluntary Exit - min_balance_exits_above_churn OK + [Valid] EF - Fulu - Operations - Voluntary Exit - min_balance_exits_up_to_churn OK + [Valid] EF - Fulu - Operations - Voluntary Exit - success_exit_queue__min_churn OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - voluntary_exit_with_pending_deposit OK ``` ## EF - Fulu - Operations - Withdrawal Request [Preset: mainnet] ```diff @@ -3619,9 +3197,30 @@ ConsensusSpecPreset-mainnet ``` ## EF - Fulu - Operations - Withdrawals [Preset: mainnet] ```diff ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_few_ OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_a_lot_mixed_withdrawable_in_queue OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_a_lot_partially_withdrawable_too_ OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_address_full OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_address_partial OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_amount_full OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_amount_partial OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_withdrawal_index OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_many_incorrectly_full OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_many_incorrectly_partial OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_max_per_slot_full_withdrawals_and OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_max_per_slot_partial_withdrawals_ OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_non_withdrawable_non_empty_withdr OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_expected_full_withdrawal_and_ OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_expected_full_withdrawal_and_ OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_expected_partial_withdrawal_a OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_of_many_incorrectly_full OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_of_many_incorrectly_partial OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_two_expected_partial_withdrawal_a OK ++ [Valid] EF - Fulu - Operations - Withdrawals - all_withdrawal OK + 
[Valid] EF - Fulu - Operations - Withdrawals - full_pending_withdrawals_but_first_skippe OK + [Valid] EF - Fulu - Operations - Withdrawals - full_pending_withdrawals_but_first_skippe OK + [Valid] EF - Fulu - Operations - Withdrawals - full_pending_withdrawals_but_first_skippe OK ++ [Valid] EF - Fulu - Operations - Withdrawals - no_withdrawals_but_some_next_epoch OK + [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK + [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK + [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK @@ -3639,49 +3238,45 @@ ConsensusSpecPreset-mainnet + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_next_epoch OK + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_no_excess_balance OK + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_one_skipped_one_effec OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_two_partial_withdrawa OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_two_partial_withdrawa OK + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_effective_sweep_ OK + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_ineffective_swee OK + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_ineffective_swee OK + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_sweep_different_ OK + [Valid] EF - Fulu - Operations - Withdrawals - random_0 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_full_withdrawals_0 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_full_withdrawals_1 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_full_withdrawals_2 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_full_withdrawals_3 OK + [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_1 OK + [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_2 OK + [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_3 OK + [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_4 OK + [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_5 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_all_fully_withdrawable_in_one_swe OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_all_partially_withdrawable_in_one OK + [Valid] EF - Fulu - Operations - Withdrawals - success_excess_balance_but_no_max_effecti OK + [Valid] EF - Fulu - Operations - Withdrawals - success_excess_balance_but_no_max_effecti OK + [Valid] EF - Fulu - Operations - Withdrawals - success_max_partial_withdrawable OK + [Valid] EF - Fulu - Operations - Withdrawals - success_mixed_fully_and_partial_withdrawa OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_mixed_fully_and_partial_withdrawa OK + [Valid] EF - Fulu - Operations - Withdrawals - success_no_excess_balance OK + [Valid] EF - Fulu - Operations - Withdrawals - success_no_excess_balance_compounding OK + [Valid] EF - Fulu - Operations - Withdrawals - success_no_max_effective_balance OK + [Valid] EF - Fulu - Operations - Withdrawals - success_no_max_effective_balance_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_full_withdrawal OK + [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_active_a OK + [Valid] EF - Fulu - Operations - Withdrawals - 
success_one_partial_withdrawable_exited OK + [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_exited_a OK + [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_in_exit_ OK + [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_not_yet_ OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawal OK + [Valid] EF - Fulu - Operations - Withdrawals - success_two_partial_withdrawable OK -``` -## EF - Fulu - Random [Preset: mainnet] -```diff -+ [Valid] EF - Fulu - Random - randomized_0 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_1 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_10 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_11 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_12 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_13 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_14 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_15 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_2 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_3 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_4 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_5 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_6 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_7 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_8 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Random - randomized_9 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_zero_expected_withdrawals OK ++ [Valid] EF - Fulu - Operations - Withdrawals - withdrawable_epoch_but_0_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balanc OK ++ [Valid] EF - Fulu - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balanc OK ``` ## EF - Fulu - Rewards [Preset: mainnet] ```diff @@ -3736,8 +3331,8 @@ ConsensusSpecPreset-mainnet + Testing Checkpoint OK + Testing ConsolidationRequest OK + Testing ContributionAndProof OK -+ Testing DataColumnIdentifier OK + Testing DataColumnSidecar OK ++ Testing DataColumnsByRootIdentifier OK + Testing Deposit OK + Testing DepositData OK + Testing DepositMessage OK @@ -3782,68 +3377,11 @@ ConsensusSpecPreset-mainnet + Testing Withdrawal OK + Testing WithdrawalRequest OK ``` -## EF - Fulu - Sanity - Blocks [Preset: mainnet] -```diff -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: main OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [P OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Prese OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expected OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_proposer OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mainn OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_parent_from_same_slot [Preset: 
mainnet] OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mainne OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mainne OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pre OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sam OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt_ OK -+ [Invalid] EF - Fulu - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - attestation [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - attester_slashing [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - balance_driven_status_transitions [Preset: mainnet OK -+ [Valid] EF - Fulu - Sanity - Blocks - bls_change [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - exit_and_bls_change [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - historical_batch [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pres OK -+ [Valid] EF - Fulu - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pres OK -+ [Valid] EF - Fulu - Sanity - Blocks - multiple_different_proposer_slashings_same_block [ OK -+ [Valid] EF - Fulu - Sanity - Blocks - multiple_different_validator_exits_same_block [Pre OK -+ [Valid] EF - Fulu - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: ma OK -+ [Valid] EF - Fulu - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - skipped_slots [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__empty [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: m OK -+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: ma OK -+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: ma OK 
-+ [Valid] EF - Fulu - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset: OK -+ [Valid] EF - Fulu - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: mainn OK -+ [Valid] EF - Fulu - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK -+ [Valid] EF - Fulu - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK -``` ## EF - Fulu - Sanity - Slots [Preset: mainnet] ```diff + EF - Fulu - Slots - balance_change_affects_proposer [Preset: mainnet] OK + EF - Fulu - Slots - double_empty_epoch [Preset: mainnet] OK ++ EF - Fulu - Slots - effective_decrease_balance_updates_lookahead [Preset: mainnet] OK + EF - Fulu - Slots - empty_epoch [Preset: mainnet] OK + EF - Fulu - Slots - historical_accumulator [Preset: mainnet] OK + EF - Fulu - Slots - multiple_pending_deposits_same_pubkey [Preset: mainnet] OK @@ -3853,9 +3391,770 @@ ConsensusSpecPreset-mainnet + EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_different_signature [Preset: mai OK + EF - Fulu - Slots - over_epoch_boundary [Preset: mainnet] OK + EF - Fulu - Slots - pending_consolidation [Preset: mainnet] OK ++ EF - Fulu - Slots - pending_deposit_extra_gwei [Preset: mainnet] OK + EF - Fulu - Slots - slots_1 [Preset: mainnet] OK + EF - Fulu - Slots - slots_2 [Preset: mainnet] OK ``` +## EF - Fulu - Transition [Preset: mainnet] +```diff ++ EF - Fulu - Transition - non_empty_historical_roots [Preset: mainnet] OK ++ EF - Fulu - Transition - normal_transition [Preset: mainnet] OK ++ EF - Fulu - Transition - simple_transition [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_attestation_from_previous_fork_with_new_range [Preset: OK ++ EF - Fulu - Transition - transition_missing_first_post_block [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_missing_last_pre_fork_block [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_only_blocks_post_fork [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_randomized_state [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_with_activation_at_fork_epoch [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_with_attester_slashing_right_after_fork [Preset: mainn OK ++ EF - Fulu - Transition - transition_with_attester_slashing_right_before_fork [Preset: main OK ++ EF - Fulu - Transition - transition_with_btec_right_after_fork [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_with_btec_right_before_fork [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_with_consolidation_request_right_after_fork [Preset: m OK ++ EF - Fulu - Transition - transition_with_deposit_request_right_after_fork [Preset: mainnet OK ++ EF - Fulu - Transition - transition_with_deposit_right_after_fork [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_with_deposit_right_before_fork [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_with_finality [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_with_leaking_at_fork [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_with_leaking_pre_fork [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_with_no_attestations_until_after_fork [Preset: mainnet OK ++ EF - Fulu - Transition - transition_with_non_empty_activation_queue [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_with_one_fourth_exiting_validators_exit_at_fork [Prese OK ++ EF - Fulu - Transition - transition_with_proposer_slashing_right_after_fork [Preset: mainn OK ++ EF - Fulu - Transition - transition_with_proposer_slashing_right_before_fork [Preset: main OK ++ EF - 
Fulu - Transition - transition_with_random_half_participation [Preset: mainnet] OK ++ EF - Fulu - Transition - transition_with_random_three_quarters_participation [Preset: main OK +``` +## EF - Gloas - Epoch Processing - Builder pending payments [Preset: mainnet] +```diff ++ Builder pending payments - process_builder_pending_payments_above_quorum [Preset: mainnet] OK ++ Builder pending payments - process_builder_pending_payments_below_quorum [Preset: mainnet] OK ++ Builder pending payments - process_builder_pending_payments_empty_queue [Preset: mainnet] OK ++ Builder pending payments - process_builder_pending_payments_equal_quorum [Preset: mainnet] OK ++ Builder pending payments - process_builder_pending_payments_large_amount_churn_impact [Pre OK ++ Builder pending payments - process_builder_pending_payments_mixed_weights [Preset: mainnet OK ++ Builder pending payments - process_builder_pending_payments_multiple_above_quorum [Preset: OK ++ Builder pending payments - process_builder_pending_payments_queue_rotation [Preset: mainne OK +``` +## EF - Gloas - Epoch Processing - Effective balance updates [Preset: mainnet] +```diff ++ Effective balance updates - effective_balance_hysteresis [Preset: mainnet] OK ++ Effective balance updates - effective_balance_hysteresis_with_compounding_credentials [Pre OK +``` +## EF - Gloas - Epoch Processing - Eth1 data reset [Preset: mainnet] +```diff ++ Eth1 data reset - eth1_vote_no_reset [Preset: mainnet] OK ++ Eth1 data reset - eth1_vote_reset [Preset: mainnet] OK +``` +## EF - Gloas - Epoch Processing - Historical summaries update [Preset: mainnet] +```diff ++ Historical summaries update - historical_summaries_accumulator [Preset: mainnet] OK +``` +## EF - Gloas - Epoch Processing - Inactivity [Preset: mainnet] +```diff ++ Inactivity - all_zero_inactivity_scores_empty_participation [Preset: mainnet] OK ++ Inactivity - all_zero_inactivity_scores_empty_participation_leaking [Preset: mainnet] OK ++ Inactivity - all_zero_inactivity_scores_full_participation [Preset: mainnet] OK ++ Inactivity - all_zero_inactivity_scores_full_participation_leaking [Preset: mainnet] OK ++ Inactivity - all_zero_inactivity_scores_random_participation [Preset: mainnet] OK ++ Inactivity - all_zero_inactivity_scores_random_participation_leaking [Preset: mainnet] OK ++ Inactivity - genesis [Preset: mainnet] OK ++ Inactivity - genesis_random_scores [Preset: mainnet] OK ++ Inactivity - random_inactivity_scores_empty_participation [Preset: mainnet] OK ++ Inactivity - random_inactivity_scores_empty_participation_leaking [Preset: mainnet] OK ++ Inactivity - random_inactivity_scores_full_participation [Preset: mainnet] OK ++ Inactivity - random_inactivity_scores_full_participation_leaking [Preset: mainnet] OK ++ Inactivity - random_inactivity_scores_random_participation [Preset: mainnet] OK ++ Inactivity - random_inactivity_scores_random_participation_leaking [Preset: mainnet] OK ++ Inactivity - randomized_state [Preset: mainnet] OK ++ Inactivity - randomized_state_leaking [Preset: mainnet] OK ++ Inactivity - some_exited_full_random_leaking [Preset: mainnet] OK ++ Inactivity - some_slashed_full_random [Preset: mainnet] OK ++ Inactivity - some_slashed_full_random_leaking [Preset: mainnet] OK ++ Inactivity - some_slashed_zero_scores_full_participation [Preset: mainnet] OK ++ Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: mainnet] OK +``` +## EF - Gloas - Epoch Processing - Justification & Finalization [Preset: mainnet] +```diff ++ Justification & Finalization - 
123_ok_support [Preset: mainnet] OK ++ Justification & Finalization - 123_poor_support [Preset: mainnet] OK ++ Justification & Finalization - 12_ok_support [Preset: mainnet] OK ++ Justification & Finalization - 12_ok_support_messed_target [Preset: mainnet] OK ++ Justification & Finalization - 12_poor_support [Preset: mainnet] OK ++ Justification & Finalization - 234_ok_support [Preset: mainnet] OK ++ Justification & Finalization - 234_poor_support [Preset: mainnet] OK ++ Justification & Finalization - 23_ok_support [Preset: mainnet] OK ++ Justification & Finalization - 23_poor_support [Preset: mainnet] OK ++ Justification & Finalization - balance_threshold_with_exited_validators [Preset: mainnet] OK +``` +## EF - Gloas - Epoch Processing - Participation flag updates [Preset: mainnet] +```diff ++ Participation flag updates - all_zeroed [Preset: mainnet] OK ++ Participation flag updates - current_epoch_zeroed [Preset: mainnet] OK ++ Participation flag updates - current_filled [Preset: mainnet] OK ++ Participation flag updates - filled [Preset: mainnet] OK ++ Participation flag updates - previous_epoch_zeroed [Preset: mainnet] OK ++ Participation flag updates - previous_filled [Preset: mainnet] OK ++ Participation flag updates - random_0 [Preset: mainnet] OK ++ Participation flag updates - random_1 [Preset: mainnet] OK ++ Participation flag updates - random_2 [Preset: mainnet] OK ++ Participation flag updates - random_genesis [Preset: mainnet] OK +``` +## EF - Gloas - Epoch Processing - Pending consolidations [Preset: mainnet] +```diff ++ Pending consolidations - all_consolidation_cases_together [Preset: mainnet] OK ++ Pending consolidations - basic_pending_consolidation [Preset: mainnet] OK ++ Pending consolidations - consolidation_not_yet_withdrawable_validator [Preset: mainnet] OK ++ Pending consolidations - pending_consolidation_balance_computation_compounding [Preset: ma OK ++ Pending consolidations - pending_consolidation_balance_computation_eth1 [Preset: mainnet] OK ++ Pending consolidations - pending_consolidation_compounding_creds [Preset: mainnet] OK ++ Pending consolidations - pending_consolidation_future_epoch [Preset: mainnet] OK ++ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective [ OK ++ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective_c OK ++ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective [Pre OK ++ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective_comp OK ++ Pending consolidations - pending_consolidation_with_pending_deposit [Preset: mainnet] OK ++ Pending consolidations - skip_consolidation_when_source_slashed [Preset: mainnet] OK +``` +## EF - Gloas - Epoch Processing - Pending deposits [Preset: mainnet] +```diff ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_max [Preset: m OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max [Pres OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max_next_ OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_under_max [Pre OK ++ Pending deposits - apply_pending_deposit_correct_sig_but_forked_state [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_effective_deposit_with_genesis_fork_version [Pres OK ++ Pending deposits - apply_pending_deposit_eth1_withdrawal_credentials [Preset: mainnet] OK ++ Pending deposits - 
apply_pending_deposit_incorrect_sig_new_deposit [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_incorrect_sig_top_up [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_incorrect_withdrawal_credentials_top_up [Preset: OK ++ Pending deposits - apply_pending_deposit_ineffective_deposit_with_bad_fork_version [Preset OK ++ Pending deposits - apply_pending_deposit_key_validate_invalid_decompression [Preset: mainn OK ++ Pending deposits - apply_pending_deposit_key_validate_invalid_subgroup [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_min_activation [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials [Preset: mai OK ++ Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials_over_min_act OK ++ Pending deposits - apply_pending_deposit_over_min_activation [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_over_min_activation_next_increment [Preset: mainn OK ++ Pending deposits - apply_pending_deposit_success_top_up_to_withdrawn_validator [Preset: ma OK ++ Pending deposits - apply_pending_deposit_top_up__less_effective_balance [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_top_up__max_effective_balance_compounding [Preset OK ++ Pending deposits - apply_pending_deposit_top_up__min_activation_balance [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_top_up__min_activation_balance_compounding [Prese OK ++ Pending deposits - apply_pending_deposit_under_min_activation [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_with_previous_fork_version [Preset: mainnet] OK ++ Pending deposits - ineffective_deposit_with_current_fork_version [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_balance_above_churn [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_balance_equal_churn [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_eth1_bridge_transition_complete [Preset: mainn OK ++ Pending deposits - process_pending_deposits_eth1_bridge_transition_not_applied [Preset: ma OK ++ Pending deposits - process_pending_deposits_eth1_bridge_transition_pending [Preset: mainne OK ++ Pending deposits - process_pending_deposits_limit_is_reached [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_mixture_of_skipped_and_above_churn [Preset: ma OK ++ Pending deposits - process_pending_deposits_multiple_for_new_validator [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_multiple_pending_deposits_above_churn [Preset: OK ++ Pending deposits - process_pending_deposits_multiple_pending_deposits_below_churn [Preset: OK ++ Pending deposits - process_pending_deposits_multiple_pending_one_skipped [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_multiple_skipped_deposits_exiting_validators [ OK ++ Pending deposits - process_pending_deposits_not_finalized [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_preexisting_churn [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_skipped_deposit_exiting_validator [Preset: mai OK ++ Pending deposits - process_pending_deposits_withdrawable_validator [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_withdrawable_validator_not_churned [Preset: ma OK +``` +## EF - Gloas - Epoch Processing - Proposer lookahead [Preset: mainnet] +```diff ++ Proposer lookahead - proposer_lookahead_does_not_contain_exited_validators [Preset: mainne OK ++ Proposer lookahead - 
proposer_lookahead_in_state_matches_computed_lookahead [Preset: mainn OK +``` +## EF - Gloas - Epoch Processing - RANDAO mixes reset [Preset: mainnet] +```diff ++ RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK +``` +## EF - Gloas - Epoch Processing - Registry updates [Preset: mainnet] +```diff ++ Registry updates - activation_queue_activation_and_ejection__1 [Preset: mainnet] OK ++ Registry updates - activation_queue_activation_and_ejection__churn_limit [Preset: mainnet] OK ++ Registry updates - activation_queue_activation_and_ejection__exceed_churn_limit [Preset: m OK ++ Registry updates - activation_queue_efficiency_min [Preset: mainnet] OK ++ Registry updates - activation_queue_eligibility__greater_than_min_activation_balance [Pres OK ++ Registry updates - activation_queue_eligibility__less_than_min_activation_balance [Preset: OK ++ Registry updates - activation_queue_eligibility__min_activation_balance [Preset: mainnet] OK ++ Registry updates - activation_queue_eligibility__min_activation_balance_compounding_creds OK ++ Registry updates - activation_queue_eligibility__min_activation_balance_eth1_creds [Preset OK ++ Registry updates - activation_queue_no_activation_no_finality [Preset: mainnet] OK ++ Registry updates - activation_queue_sorting [Preset: mainnet] OK ++ Registry updates - activation_queue_to_activated_if_finalized [Preset: mainnet] OK ++ Registry updates - add_to_activation_queue [Preset: mainnet] OK ++ Registry updates - ejection [Preset: mainnet] OK ++ Registry updates - ejection_past_churn_limit_min [Preset: mainnet] OK ++ Registry updates - invalid_large_withdrawable_epoch [Preset: mainnet] OK +``` +## EF - Gloas - Epoch Processing - Rewards and penalties [Preset: mainnet] +```diff ++ Rewards and penalties - almost_empty_attestations [Preset: mainnet] OK ++ Rewards and penalties - almost_empty_attestations_with_leak [Preset: mainnet] OK ++ Rewards and penalties - almost_full_attestations [Preset: mainnet] OK ++ Rewards and penalties - almost_full_attestations_with_leak [Preset: mainnet] OK ++ Rewards and penalties - attestations_some_slashed [Preset: mainnet] OK ++ Rewards and penalties - duplicate_attestation [Preset: mainnet] OK ++ Rewards and penalties - full_attestation_participation [Preset: mainnet] OK ++ Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK ++ Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK ++ Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK ++ Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK ++ Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK ++ Rewards and penalties - random_fill_attestations [Preset: mainnet] OK ++ Rewards and penalties - random_fill_attestations_with_leak [Preset: mainnet] OK +``` +## EF - Gloas - Epoch Processing - Slashings [Preset: mainnet] +```diff ++ Slashings - low_penalty [Preset: mainnet] OK ++ Slashings - max_penalties [Preset: mainnet] OK ++ Slashings - minimal_penalty [Preset: mainnet] OK ++ Slashings - scaled_penalties [Preset: mainnet] OK ++ Slashings - slashings_with_random_state [Preset: mainnet] OK +``` +## EF - Gloas - Epoch Processing - Slashings reset [Preset: mainnet] +```diff ++ Slashings reset - flush_slashings [Preset: mainnet] OK +``` +## EF - Gloas - Fork [Preset: mainnet] +```diff ++ EF - Gloas - Fork - 
after_fork_deactivate_validators_from_fulu_to_gloas [Preset: mainnet] OK ++ EF - Gloas - Fork - after_fork_deactivate_validators_wo_block_from_fulu_to_gloas [Preset: OK ++ EF - Gloas - Fork - after_fork_new_validator_active_from_fulu_to_gloas [Preset: mainnet] OK +``` +## EF - Gloas - Operations - Attestation [Preset: mainnet] +```diff ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_after_max_inclusion_slot OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_attestation_data_index_not_zero OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_attestation_data_index_too_high OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_attestation_signature OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_bad_source_root OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_before_inclusion_delay OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_committee_index OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_correct_attestation_included_aft OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_current_source_root OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_empty_participants_seemingly_val OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_empty_participants_zeroes_sig OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_future_target_epoch OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_incorrect_head_and_target_includ OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_incorrect_head_included_after_ma OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_incorrect_target_included_after_ OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_index OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_mismatched_target_and_slot OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_new_source_epoch OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_nonset_committee_bits OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_old_source_epoch OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_old_target_epoch OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_previous_source_root OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_same_slot_attestation_index_one OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_source_root_is_target_root OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_too_few_aggregation_bits OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_too_many_aggregation_bits OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_too_many_committee_bits OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_wrong_index_for_committee_signat OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_wrong_index_for_slot_0 OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_wrong_index_for_slot_1 OK ++ [Valid] EF - Gloas - Operations - Attestation - at_max_inclusion_slot OK ++ [Valid] EF - Gloas - Operations - Attestation - builder_payment_weight_no_double_countin OK ++ [Valid] EF - Gloas - Operations - Attestation - builder_payment_weight_tracking OK ++ [Valid] EF - Gloas - Operations - Attestation - correct_attestation_included_at_max_incl OK ++ [Valid] EF - Gloas - Operations - Attestation - correct_attestation_included_at_min_incl OK ++ [Valid] EF - Gloas - Operations - Attestation - correct_attestation_included_at_one_epoc OK ++ [Valid] EF 
- Gloas - Operations - Attestation - correct_attestation_included_at_sqrt_epo OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_head_and_target_included_at_ep OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_head_and_target_included_at_sq OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_head_and_target_min_inclusion_ OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_head_included_at_max_inclusion OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_head_included_at_min_inclusion OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_de OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_target_included_at_epoch_delay OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_target_included_at_min_inclusi OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_target_included_at_sqrt_epoch_ OK ++ [Valid] EF - Gloas - Operations - Attestation - matching_payload_false_historical_slot OK ++ [Valid] EF - Gloas - Operations - Attestation - matching_payload_gets_head_flag OK ++ [Valid] EF - Gloas - Operations - Attestation - matching_payload_true_historical_slot OK ++ [Valid] EF - Gloas - Operations - Attestation - matching_payload_true_same_slot OK ++ [Valid] EF - Gloas - Operations - Attestation - mismatched_payload_no_head_flag OK ++ [Valid] EF - Gloas - Operations - Attestation - multi_proposer_index_iterations OK ++ [Valid] EF - Gloas - Operations - Attestation - one_basic_attestation OK ++ [Valid] EF - Gloas - Operations - Attestation - previous_epoch OK ++ [Valid] EF - Gloas - Operations - Attestation - valid_attestation_data_index_one_previou OK ++ [Valid] EF - Gloas - Operations - Attestation - valid_attestation_data_index_one_previou OK ++ [Valid] EF - Gloas - Operations - Attestation - valid_attestation_data_index_zero_previo OK ++ [Valid] EF - Gloas - Operations - Attestation - valid_same_slot_attestation_index_zero OK +``` +## EF - Gloas - Operations - Attester Slashing [Preset: mainnet] +```diff ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_all_empty_indices OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att1_bad_extra_index OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att1_bad_replaced_index OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att1_duplicate_index_doubl OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att1_duplicate_index_norma OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att1_empty_indices OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att1_high_index OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att2_bad_extra_index OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att2_bad_replaced_index OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att2_duplicate_index_doubl OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att2_duplicate_index_norma OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att2_empty_indices OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att2_high_index OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_incorrect_sig_1 OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_incorrect_sig_1_and_2 OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_incorrect_sig_2 OK ++ [Invalid] EF - Gloas - 
Operations - Attester Slashing - invalid_no_double_or_surround OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_participants_already_slash OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_same_data OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_unsorted_att_1 OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_unsorted_att_2 OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - already_exited_long_ago OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - already_exited_recent OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - attestation_from_future OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - basic_double OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - basic_surround OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - low_balances OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - misc_balances OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - proposer_index_slashed OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - with_effective_balance_disparity OK +``` +## EF - Gloas - Operations - BLS to execution change [Preset: mainnet] +```diff ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_already_0x01 OK ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_bad_signature OK ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_current_fork_version OK ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_genesis_validators_r OK ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_incorrect_from_bls_p OK ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_previous_fork_versio OK ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_val_index_out_of_ran OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - genesis_fork_version OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - success OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - success_exited OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - success_in_activation_queue OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - success_in_exit_queue OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - success_not_activated OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - success_withdrawable OK +``` +## EF - Gloas - Operations - Block Header [Preset: mainnet] +```diff ++ [Invalid] EF - Gloas - Operations - Block Header - invalid_multiple_blocks_single_slot OK ++ [Invalid] EF - Gloas - Operations - Block Header - invalid_parent_root OK ++ [Invalid] EF - Gloas - Operations - Block Header - invalid_proposer_index OK ++ [Invalid] EF - Gloas - Operations - Block Header - invalid_proposer_slashed OK ++ [Invalid] EF - Gloas - Operations - Block Header - invalid_slot_block_header OK ++ [Valid] EF - Gloas - Operations - Block Header - basic_block_header OK +``` +## EF - Gloas - Operations - Consolidation Request [Preset: mainnet] +```diff ++ [Valid] EF - Gloas - Operations - Consolidation Request - basic_switch_to_compounding OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_not_enough_consolida OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_exited_s OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_inactive OK ++ [Valid] EF - Gloas - Operations - 
Consolidation Request - switch_to_compounding_not_auth OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_source_b OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_source_c OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_unknown_ OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_with_exc OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_with_pen OK +``` +## EF - Gloas - Operations - Deposit [Preset: mainnet] +```diff ++ [Invalid] EF - Gloas - Operations - Deposit - invalid_bad_merkle_proof OK ++ [Invalid] EF - Gloas - Operations - Deposit - invalid_wrong_deposit_for_deposit_count OK ++ [Valid] EF - Gloas - Operations - Deposit - correct_sig_but_forked_state OK ++ [Valid] EF - Gloas - Operations - Deposit - effective_deposit_with_genesis_fork_version OK ++ [Valid] EF - Gloas - Operations - Deposit - incorrect_sig_new_deposit OK ++ [Valid] EF - Gloas - Operations - Deposit - incorrect_sig_top_up OK ++ [Valid] EF - Gloas - Operations - Deposit - incorrect_withdrawal_credentials_top_up OK ++ [Valid] EF - Gloas - Operations - Deposit - ineffective_deposit_with_bad_fork_version OK ++ [Valid] EF - Gloas - Operations - Deposit - ineffective_deposit_with_current_fork_versio OK ++ [Valid] EF - Gloas - Operations - Deposit - ineffective_deposit_with_previous_fork_versi OK ++ [Valid] EF - Gloas - Operations - Deposit - key_validate_invalid_decompression OK ++ [Valid] EF - Gloas - Operations - Deposit - key_validate_invalid_subgroup OK ++ [Valid] EF - Gloas - Operations - Deposit - new_deposit_eth1_withdrawal_credentials OK ++ [Valid] EF - Gloas - Operations - Deposit - new_deposit_max OK ++ [Valid] EF - Gloas - Operations - Deposit - new_deposit_non_versioned_withdrawal_credent OK ++ [Valid] EF - Gloas - Operations - Deposit - new_deposit_over_max OK ++ [Valid] EF - Gloas - Operations - Deposit - new_deposit_under_max OK ++ [Valid] EF - Gloas - Operations - Deposit - top_up__less_effective_balance OK ++ [Valid] EF - Gloas - Operations - Deposit - top_up__max_effective_balance OK ++ [Valid] EF - Gloas - Operations - Deposit - top_up__zero_balance OK +``` +## EF - Gloas - Operations - Deposit Request [Preset: mainnet] +```diff ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_extra_gwei OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_greater_than OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_invalid_sig OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_max_effectiv OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_min_activati OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_set_start_in OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_set_start_in OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_top_up_inval OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_top_up_max_e OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_top_up_min_a OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_top_up_still OK +``` +## EF - Gloas - Operations - Execution Payload [Preset: mainnet] +```diff ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_executio OK ++ 
[Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_invalid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_be OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_bl OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_bl OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_bu OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_ga OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_pa OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_pr OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_sl OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_ti OK ++ [Valid] EF - Gloas - Operations - Execution Payload - process_execution_payload_large_pa OK ++ [Valid] EF - Gloas - Operations - Execution Payload - process_execution_payload_max_blob OK ++ [Valid] EF - Gloas - Operations - Execution Payload - process_execution_payload_self_bui OK ++ [Valid] EF - Gloas - Operations - Execution Payload - process_execution_payload_valid OK ++ [Valid] EF - Gloas - Operations - Execution Payload - process_execution_payload_with_blo OK ++ [Valid] EF - Gloas - Operations - Execution Payload - process_execution_payload_with_exe OK +``` +## EF - Gloas - Operations - Execution Payload Bid [Preset: mainnet] +```diff ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Valid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Valid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Valid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Valid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Valid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Valid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK +``` +## EF - Gloas - Operations - Payload Attestation [Preset: mainnet] +```diff ++ [Invalid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_futu OK ++ [Invalid] EF - Gloas 
- Operations - Payload Attestation - process_payload_attestation_inva OK ++ [Invalid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_inva OK ++ [Invalid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_no_a OK ++ [Invalid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_too_ OK ++ [Valid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_part OK ++ [Valid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_payl OK ++ [Valid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_payl OK +``` +## EF - Gloas - Operations - Proposer Slashing [Preset: mainnet] +```diff ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_different_proposer_indices OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_headers_are_same_sigs_are_ OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_headers_are_same_sigs_are_ OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_incorrect_proposer_index OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_incorrect_sig_1 OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_incorrect_sig_1_and_2 OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_incorrect_sig_1_and_2_swap OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_incorrect_sig_2 OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_proposer_is_not_activated OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_proposer_is_slashed OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_proposer_is_withdrawn OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_slots_of_different_epochs OK ++ [Valid] EF - Gloas - Operations - Proposer Slashing - basic OK ++ [Valid] EF - Gloas - Operations - Proposer Slashing - block_header_from_future OK ++ [Valid] EF - Gloas - Operations - Proposer Slashing - builder_payment_deletion_current_e OK ++ [Valid] EF - Gloas - Operations - Proposer Slashing - builder_payment_deletion_previous_ OK ++ [Valid] EF - Gloas - Operations - Proposer Slashing - builder_payment_deletion_too_late OK ++ [Valid] EF - Gloas - Operations - Proposer Slashing - slashed_and_proposer_index_the_sam OK +``` +## EF - Gloas - Operations - Sync Aggregate [Preset: mainnet] +```diff ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_bad_domain OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_extra_participant OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_infinite_signature_ OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_infinite_signature_ OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_missing_participant OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_no_participants OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_past_block OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - random_all_but_one_participating_with OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - random_high_participation_with_duplic OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - random_low_participation_with_duplica OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - random_misc_balances_and_half_partici OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - 
random_only_one_participant_with_dupl OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - random_with_exits_with_duplicates OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_rewards_duplicate_comm OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_rewards_duplicate_comm OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_rewards_duplicate_comm OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_rewards_duplicate_comm OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_rewards_duplicate_comm OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_rewards_duplicate_comm OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_rewards_duplicate_comm OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_rewards_empty_particip OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_rewards_not_full_parti OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_with_nonparticipating_ OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_with_nonparticipating_ OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_with_participating_exi OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_with_participating_wit OK +``` +## EF - Gloas - Operations - Voluntary Exit [Preset: mainnet] +```diff ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_incorrect_signature OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_validator_already_exited OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_validator_exit_in_future OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_validator_has_pending_withdra OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_validator_incorrect_validator OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_validator_not_active OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_validator_not_active_long_eno OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_f OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_f OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_f OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_f OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_ver OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_ver OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - basic OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - default_exit_epoch_subsequent_exit OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - exit_existing_churn_and_balance_multi OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - exit_existing_churn_and_churn_limit_b OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - exit_with_balance_equal_to_churn_limi OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - exit_with_balance_multiple_of_churn_l OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - max_balance_exit OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - min_balance_exit OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - min_balance_exits_above_churn OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - min_balance_exits_up_to_churn OK ++ [Valid] EF - Gloas 
- Operations - Voluntary Exit - success_exit_queue__min_churn OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - voluntary_exit_with_pending_deposit OK +``` +## EF - Gloas - Operations - Withdrawal Request [Preset: mainnet] +```diff ++ [Valid] EF - Gloas - Operations - Withdrawal Request - activation_epoch_less_than_shard_ OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - basic_withdrawal_request OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - basic_withdrawal_request_with_com OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - basic_withdrawal_request_with_fir OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - full_exit_request_has_partial_wit OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - incorrect_inactive_validator OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - incorrect_source_address OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - incorrect_withdrawal_credential_p OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - insufficient_balance OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - insufficient_effective_balance OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - no_compounding_credentials OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - no_excess_balance OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - on_withdrawal_request_initiated_e OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_activation_epo OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_incorrect_sour OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_incorrect_with OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_on_exit_initia OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - pending_withdrawals_consume_all_e OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - unknown_pubkey OK +``` +## EF - Gloas - Operations - Withdrawals [Preset: mainnet] +```diff ++ [Valid] EF - Gloas - Operations - Withdrawals - full_pending_withdrawals_but_first_skipp OK ++ [Valid] EF - Gloas - Operations - Withdrawals - full_pending_withdrawals_but_first_skipp OK ++ [Valid] EF - Gloas - Operations - Withdrawals - full_pending_withdrawals_but_first_skipp OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_at_max OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_at_max_mixed_with_sw OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_exiting_validator OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_low_effective_balanc OK ++ [Valid] EF 
- Gloas - Operations - Withdrawals - pending_withdrawals_mixed_with_sweep_and OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_next_epoch OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_no_excess_balance OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_one_skipped_one_effe OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_two_partial_withdraw OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_two_partial_withdraw OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_with_effective_sweep OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_with_ineffective_swe OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_with_ineffective_swe OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_with_sweep_different OK ++ [Valid] EF - Gloas - Operations - Withdrawals - random_0 OK ++ [Valid] EF - Gloas - Operations - Withdrawals - random_partial_withdrawals_1 OK ++ [Valid] EF - Gloas - Operations - Withdrawals - random_partial_withdrawals_2 OK ++ [Valid] EF - Gloas - Operations - Withdrawals - random_partial_withdrawals_3 OK ++ [Valid] EF - Gloas - Operations - Withdrawals - random_partial_withdrawals_4 OK ++ [Valid] EF - Gloas - Operations - Withdrawals - random_partial_withdrawals_5 OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_excess_balance_but_no_max_effect OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_excess_balance_but_no_max_effect OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_max_partial_withdrawable OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_mixed_fully_and_partial_withdraw OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_no_excess_balance OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_no_excess_balance_compounding OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_no_max_effective_balance OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_no_max_effective_balance_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_one_partial_withdrawable_active_ OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_one_partial_withdrawable_exited OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_one_partial_withdrawable_exited_ OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_one_partial_withdrawable_in_exit OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_one_partial_withdrawable_not_yet OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_two_partial_withdrawable OK +``` +## EF - Gloas - Rewards [Preset: mainnet] +```diff ++ EF - Gloas - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK ++ EF - Gloas - Rewards - empty [Preset: mainnet] OK ++ EF - Gloas - Rewards - empty_leak [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_all_correct [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_but_partial_participation [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_but_partial_participation_leak [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_leak [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_random_0 [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_random_1 [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_random_2 [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_random_3 [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_random_4 [Preset: mainnet] OK ++ EF - Gloas - Rewards - 
full_random_leak [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_random_low_balances_0 [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_random_low_balances_1 [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_random_misc_balances [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_random_seven_epoch_leak [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_random_ten_epoch_leak [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_random_without_leak_0 [Preset: mainnet] OK ++ EF - Gloas - Rewards - full_random_without_leak_and_current_exit_0 [Preset: mainnet] OK ++ EF - Gloas - Rewards - half_full [Preset: mainnet] OK ++ EF - Gloas - Rewards - half_full_leak [Preset: mainnet] OK ++ EF - Gloas - Rewards - quarter_full [Preset: mainnet] OK ++ EF - Gloas - Rewards - quarter_full_leak [Preset: mainnet] OK ++ EF - Gloas - Rewards - some_very_low_effective_balances_that_attested [Preset: mainnet] OK ++ EF - Gloas - Rewards - some_very_low_effective_balances_that_attested_leak [Preset: mainne OK ++ EF - Gloas - Rewards - some_very_low_effective_balances_that_did_not_attest [Preset: mainn OK ++ EF - Gloas - Rewards - some_very_low_effective_balances_that_did_not_attest_leak [Preset: OK ++ EF - Gloas - Rewards - with_exited_validators [Preset: mainnet] OK ++ EF - Gloas - Rewards - with_exited_validators_leak [Preset: mainnet] OK ++ EF - Gloas - Rewards - with_not_yet_activated_validators [Preset: mainnet] OK ++ EF - Gloas - Rewards - with_not_yet_activated_validators_leak [Preset: mainnet] OK ++ EF - Gloas - Rewards - with_slashed_validators [Preset: mainnet] OK ++ EF - Gloas - Rewards - with_slashed_validators_leak [Preset: mainnet] OK +``` +## EF - Gloas - SSZ consensus objects [Preset: mainnet] +```diff ++ Testing AggregateAndProof OK ++ Testing Attestation OK ++ Testing AttestationData OK ++ Testing AttesterSlashing OK ++ Testing BLSToExecutionChange OK ++ Testing BeaconBlock OK ++ Testing BeaconBlockBody OK ++ Testing BeaconBlockHeader OK ++ Testing BeaconState OK ++ Testing BlobIdentifier OK ++ Testing BlobSidecar OK ++ Testing BuilderPendingPayment OK ++ Testing BuilderPendingWithdrawal OK ++ Testing Checkpoint OK ++ Testing ConsolidationRequest OK ++ Testing ContributionAndProof OK ++ Testing DataColumnSidecar OK ++ Testing DataColumnsByRootIdentifier OK ++ Testing Deposit OK ++ Testing DepositData OK ++ Testing DepositMessage OK ++ Testing DepositRequest OK ++ Testing Eth1Block OK ++ Testing Eth1Data OK ++ Testing ExecutionPayload OK ++ Testing ExecutionPayloadBid OK ++ Testing ExecutionPayloadEnvelope OK ++ Testing ExecutionPayloadHeader OK ++ Testing ExecutionRequests OK ++ Testing Fork OK ++ Testing ForkChoiceNode OK ++ Testing ForkData OK ++ Testing HistoricalBatch OK ++ Testing HistoricalSummary OK ++ Testing IndexedAttestation OK ++ Testing IndexedPayloadAttestation OK ++ Testing LightClientBootstrap OK ++ Testing LightClientFinalityUpdate OK ++ Testing LightClientHeader OK ++ Testing LightClientOptimisticUpdate OK ++ Testing LightClientUpdate OK ++ Testing MatrixEntry OK ++ Testing PayloadAttestation OK ++ Testing PayloadAttestationData OK ++ Testing PayloadAttestationMessage OK ++ Testing PendingAttestation OK ++ Testing PendingConsolidation OK ++ Testing PendingDeposit OK ++ Testing PendingPartialWithdrawal OK ++ Testing PowBlock OK ++ Testing ProposerSlashing OK ++ Testing SignedAggregateAndProof OK ++ Testing SignedBLSToExecutionChange OK ++ Testing SignedBeaconBlock OK ++ Testing SignedBeaconBlockHeader OK ++ Testing SignedContributionAndProof OK ++ Testing 
SignedExecutionPayloadBid OK ++ Testing SignedExecutionPayloadEnvelope OK ++ Testing SignedVoluntaryExit OK ++ Testing SigningData OK ++ Testing SingleAttestation OK ++ Testing SyncAggregate OK ++ Testing SyncAggregatorSelectionData OK ++ Testing SyncCommittee OK ++ Testing SyncCommitteeContribution OK ++ Testing SyncCommitteeMessage OK ++ Testing Validator OK ++ Testing VoluntaryExit OK ++ Testing Withdrawal OK ++ Testing WithdrawalRequest OK +``` +## EF - Gloas - Sanity - Slots [Preset: mainnet] +```diff ++ EF - Gloas - Slots - balance_change_affects_proposer [Preset: mainnet] OK ++ EF - Gloas - Slots - double_empty_epoch [Preset: mainnet] OK ++ EF - Gloas - Slots - effective_decrease_balance_updates_lookahead [Preset: mainnet] OK ++ EF - Gloas - Slots - empty_epoch [Preset: mainnet] OK ++ EF - Gloas - Slots - execution_payload_availability_reset_from_set [Preset: mainnet] OK ++ EF - Gloas - Slots - execution_payload_availability_reset_from_unset [Preset: mainnet] OK ++ EF - Gloas - Slots - historical_accumulator [Preset: mainnet] OK ++ EF - Gloas - Slots - multiple_pending_deposits_same_pubkey [Preset: mainnet] OK ++ EF - Gloas - Slots - multiple_pending_deposits_same_pubkey_above_upward_threshold [Preset: OK ++ EF - Gloas - Slots - multiple_pending_deposits_same_pubkey_below_upward_threshold [Preset: OK ++ EF - Gloas - Slots - multiple_pending_deposits_same_pubkey_compounding [Preset: mainnet] OK ++ EF - Gloas - Slots - multiple_pending_deposits_same_pubkey_different_signature [Preset: ma OK ++ EF - Gloas - Slots - over_epoch_boundary [Preset: mainnet] OK ++ EF - Gloas - Slots - pending_consolidation [Preset: mainnet] OK ++ EF - Gloas - Slots - pending_deposit_extra_gwei [Preset: mainnet] OK ++ EF - Gloas - Slots - slots_1 [Preset: mainnet] OK ++ EF - Gloas - Slots - slots_2 [Preset: mainnet] OK +``` +## EF - Gloas - Transition [Preset: mainnet] +```diff ++ EF - Gloas - Transition - non_empty_historical_roots [Preset: mainnet] OK ++ EF - Gloas - Transition - normal_transition [Preset: mainnet] OK ++ EF - Gloas - Transition - simple_transition [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_attestation_from_previous_fork_with_new_range [Preset OK ++ EF - Gloas - Transition - transition_missing_first_post_block [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_missing_last_pre_fork_block [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_only_blocks_post_fork [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_randomized_state [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_with_activation_at_fork_epoch [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_with_attester_slashing_right_after_fork [Preset: main OK ++ EF - Gloas - Transition - transition_with_attester_slashing_right_before_fork [Preset: mai OK ++ EF - Gloas - Transition - transition_with_btec_right_after_fork [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_with_btec_right_before_fork [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_with_deposit_right_after_fork [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_with_deposit_right_before_fork [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_with_finality [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_with_leaking_at_fork [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_with_leaking_pre_fork [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_with_no_attestations_until_after_fork [Preset: mainne OK ++ EF - Gloas - Transition - 
transition_with_non_empty_activation_queue [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_with_one_fourth_exiting_validators_exit_at_fork [Pres OK ++ EF - Gloas - Transition - transition_with_proposer_slashing_right_after_fork [Preset: main OK ++ EF - Gloas - Transition - transition_with_proposer_slashing_right_before_fork [Preset: mai OK ++ EF - Gloas - Transition - transition_with_random_half_participation [Preset: mainnet] OK ++ EF - Gloas - Transition - transition_with_random_three_quarters_participation [Preset: mai OK +``` ## EF - Light client - Single merkle proof [Preset: mainnet] ```diff + Light client - Single merkle proof - mainnet/altair/light_client/single_merkle_proof/Beaco OK @@ -3876,9 +4175,18 @@ ConsensusSpecPreset-mainnet + Light client - Single merkle proof - mainnet/electra/light_client/single_merkle_proof/Beac OK + Light client - Single merkle proof - mainnet/electra/light_client/single_merkle_proof/Beac OK + Light client - Single merkle proof - mainnet/electra/light_client/single_merkle_proof/Beac OK ++ Light client - Single merkle proof - mainnet/fulu/light_client/single_merkle_proof/BeaconB OK ++ Light client - Single merkle proof - mainnet/fulu/light_client/single_merkle_proof/BeaconS OK ++ Light client - Single merkle proof - mainnet/fulu/light_client/single_merkle_proof/BeaconS OK ++ Light client - Single merkle proof - mainnet/fulu/light_client/single_merkle_proof/BeaconS OK ``` ## EF - Merkle proof [Preset: mainnet] ```diff + Merkle proof - Single merkle proof - blob_kzg_commitments_merkle_proof__basic Skip + Merkle proof - Single merkle proof - blob_kzg_commitments_merkle_proof__max_blobs Skip + Merkle proof - Single merkle proof - blob_kzg_commitments_merkle_proof__multiple_blobs Skip + Merkle proof - Single merkle proof - blob_kzg_commitments_merkle_proof__random_block_1 Skip + Merkle proof - Single merkle proof - eip7805 Skip + Merkle proof - Single merkle proof - mainnet/deneb/merkle_proof/single_merkle_proof/Beacon OK + Merkle proof - Single merkle proof - mainnet/deneb/merkle_proof/single_merkle_proof/Beacon OK + Merkle proof - Single merkle proof - mainnet/deneb/merkle_proof/single_merkle_proof/Beacon OK @@ -3891,10 +4199,6 @@ ConsensusSpecPreset-mainnet + Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK + Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK + Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK -+ Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK -+ Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK -+ Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK -+ Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK ``` ## EF - Phase 0 - Epoch Processing - Effective balance updates [Preset: mainnet] ```diff @@ -3957,8 +4261,8 @@ ConsensusSpecPreset-mainnet + Rewards and penalties - duplicate_participants_different_attestation_3 [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - 
full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - full_attestations_random_incorrect_fields [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK @@ -4206,75 +4510,720 @@ ConsensusSpecPreset-mainnet + EF - Phase 0 - Slots - slots_1 [Preset: mainnet] OK + EF - Phase 0 - Slots - slots_2 [Preset: mainnet] OK ``` -## EF - Phase0 - Finality [Preset: mainnet] -```diff -+ [Valid] EF - Phase0 - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Finality - finality_rule_1 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Finality - finality_rule_2 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Finality - finality_rule_3 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Finality - finality_rule_4 [Preset: mainnet] OK -``` -## EF - Phase0 - Random [Preset: mainnet] -```diff -+ [Valid] EF - Phase0 - Random - randomized_0 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_1 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_10 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_11 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_12 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_13 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_14 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_15 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_2 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_3 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_4 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_5 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_6 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_7 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_8 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Random - randomized_9 [Preset: mainnet] OK -``` -## EF - Phase0 - Sanity - Blocks [Preset: mainnet] -```diff -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block [ OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: ma OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pre OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expect OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propos OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mai OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: main OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_proposal_for_genesis_slot [Preset: mainn OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_same_slot_block_transition [Preset: main OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - 
invalid_similar_proposer_slashings_same_block [P OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - attestation [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - attester_slashing [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - balance_driven_status_transitions [Preset: mainn OK -+ [Valid] EF - Phase0 - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainne OK -+ [Valid] EF - Phase0 - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - historical_batch [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: OK -+ [Valid] EF - Phase0 - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pre OK -+ [Valid] EF - Phase0 - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK -+ [Valid] EF - Phase0 - Sanity - Blocks - multiple_different_validator_exits_same_block [P OK -+ [Valid] EF - Phase0 - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - skipped_slots [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK +## EF - altair - Finality [Preset: mainnet] +```diff ++ [Valid] EF - altair - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK ++ [Valid] EF - altair - Finality - finality_rule_1 [Preset: mainnet] OK ++ [Valid] EF - altair - Finality - finality_rule_2 [Preset: mainnet] OK ++ [Valid] EF - altair - Finality - finality_rule_3 [Preset: mainnet] OK ++ [Valid] EF - altair - Finality - finality_rule_4 [Preset: mainnet] OK +``` +## EF - altair - Random [Preset: mainnet] +```diff ++ [Valid] EF - altair - Random - randomized_0 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_1 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_10 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_11 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_12 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_13 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_14 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_15 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_2 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_3 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_4 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_5 [Preset: mainnet] OK ++ 
[Valid] EF - altair - Random - randomized_6 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_7 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_8 [Preset: mainnet] OK ++ [Valid] EF - altair - Random - randomized_9 [Preset: mainnet] OK +``` +## EF - altair - Sanity - Blocks [Preset: mainnet] +```diff ++ [Invalid] EF - altair - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block [ OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: ma OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pre OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expect OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propos OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mai OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: main OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_same_slot_block_transition [Preset: main OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [P OK ++ [Invalid] EF - altair - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - attestation [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - attester_slashing [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - balance_driven_status_transitions [Preset: mainn OK ++ [Valid] EF - altair - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainne OK ++ [Valid] EF - altair - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - historical_batch [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pr OK ++ [Valid] EF - altair - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: OK ++ [Valid] EF - altair - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pre OK ++ [Valid] EF - altair - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK ++ [Valid] EF - altair - Sanity - Blocks - multiple_different_validator_exits_same_block [P OK ++ [Valid] EF - 
altair - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - skipped_slots [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - sync_committee_committee__empty [Preset: mainnet OK ++ [Valid] EF - altair - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet] OK ++ [Valid] EF - altair - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: OK ++ [Valid] EF - altair - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: OK ++ [Valid] EF - altair - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK ++ [Valid] EF - altair - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK +``` +## EF - bellatrix - Finality [Preset: mainnet] +```diff ++ [Valid] EF - bellatrix - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Finality - finality_rule_1 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Finality - finality_rule_2 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Finality - finality_rule_3 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Finality - finality_rule_4 [Preset: mainnet] OK +``` +## EF - bellatrix - Random [Preset: mainnet] +```diff ++ [Valid] EF - bellatrix - Random - randomized_0 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_1 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_10 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_11 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_12 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_13 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_14 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_15 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_2 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_3 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_4 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_5 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_6 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_7 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_8 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Random - randomized_9 [Preset: mainnet] OK +``` +## EF - bellatrix - Sanity - Blocks [Preset: mainnet] +```diff ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_duplicate_attester_slashing_same_bloc OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_blo OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [ OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_exp OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - 
invalid_incorrect_proposer_index_sig_from_pro OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainne OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: m OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_same_slot_block_transition [Preset: m OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_similar_proposer_slashings_same_block OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - attestation [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - attester_slashing [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - balance_driven_status_transitions [Preset: ma OK ++ [Valid] EF - bellatrix - Sanity - Blocks - block_transition_randomized_payload [Preset: OK ++ [Valid] EF - bellatrix - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - duplicate_attestation_same_block [Preset: mai OK ++ [Valid] EF - bellatrix - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - empty_block_transition_no_tx [Preset: mainnet OK ++ [Valid] EF - bellatrix - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - historical_batch [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - inactivity_scores_full_participation_leaking OK ++ [Valid] EF - bellatrix - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - is_execution_enabled_false [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - multiple_attester_slashings_no_overlap [Prese OK ++ [Valid] EF - bellatrix - Sanity - Blocks - multiple_attester_slashings_partial_overlap [ OK ++ [Valid] EF - bellatrix - Sanity - Blocks - multiple_different_proposer_slashings_same_bl OK ++ [Valid] EF - bellatrix - Sanity - Blocks - multiple_different_validator_exits_same_block OK ++ [Valid] EF - bellatrix - Sanity - Blocks - proposer_after_inactive_index [Preset: mainne OK ++ [Valid] EF - bellatrix - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - skipped_slots [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - sync_committee_committee__empty [Preset: main OK ++ [Valid] EF - bellatrix - Sanity - Blocks - sync_committee_committee__full [Preset: mainn OK ++ [Valid] EF - bellatrix - Sanity - Blocks - sync_committee_committee__half [Preset: mainn OK ++ [Valid] 
EF - bellatrix - Sanity - Blocks - sync_committee_committee_genesis__empty [Pres OK ++ [Valid] EF - bellatrix - Sanity - Blocks - sync_committee_committee_genesis__full [Prese OK ++ [Valid] EF - bellatrix - Sanity - Blocks - sync_committee_committee_genesis__half [Prese OK ++ [Valid] EF - bellatrix - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK +``` +## EF - capella - Finality [Preset: mainnet] +```diff ++ [Valid] EF - capella - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK ++ [Valid] EF - capella - Finality - finality_rule_1 [Preset: mainnet] OK ++ [Valid] EF - capella - Finality - finality_rule_2 [Preset: mainnet] OK ++ [Valid] EF - capella - Finality - finality_rule_3 [Preset: mainnet] OK ++ [Valid] EF - capella - Finality - finality_rule_4 [Preset: mainnet] OK +``` +## EF - capella - Random [Preset: mainnet] +```diff ++ [Valid] EF - capella - Random - randomized_0 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_1 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_10 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_11 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_12 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_13 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_14 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_15 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_2 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_3 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_4 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_5 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_6 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_7 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_8 [Preset: mainnet] OK ++ [Valid] EF - capella - Random - randomized_9 [Preset: mainnet] OK +``` +## EF - capella - Sanity - Blocks [Preset: mainnet] +```diff ++ [Invalid] EF - capella - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Prese OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: m OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pr OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expec OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propo OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: mai OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: ma OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mai OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mai OK ++ [Invalid] EF - capella - Sanity - Blocks - 
invalid_similar_proposer_slashings_same_block [ OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_ OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_is OK ++ [Invalid] EF - capella - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - attestation [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - attester_slashing [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - balance_driven_status_transitions [Preset: main OK ++ [Valid] EF - capella - Sanity - Blocks - block_transition_randomized_payload [Preset: ma OK ++ [Valid] EF - capella - Sanity - Blocks - bls_change [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainn OK ++ [Valid] EF - capella - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - empty_block_transition_no_tx [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - exit_and_bls_change [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: ma OK ++ [Valid] EF - capella - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - historical_batch [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - inactivity_scores_full_participation_leaking [P OK ++ [Valid] EF - capella - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [P OK ++ [Valid] EF - capella - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: OK ++ [Valid] EF - capella - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pr OK ++ [Valid] EF - capella - Sanity - Blocks - multiple_different_proposer_slashings_same_bloc OK ++ [Valid] EF - capella - Sanity - Blocks - multiple_different_validator_exits_same_block [ OK ++ [Valid] EF - capella - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: OK ++ [Valid] EF - capella - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - skipped_slots [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - sync_committee_committee__empty [Preset: mainne OK ++ [Valid] EF - capella - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet OK ++ [Valid] EF - capella - Sanity - Blocks - sync_committee_committee__half [Preset: 
mainnet OK ++ [Valid] EF - capella - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset OK ++ [Valid] EF - capella - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: OK ++ [Valid] EF - capella - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK ++ [Valid] EF - capella - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Pres OK ++ [Valid] EF - capella - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: ma OK ++ [Valid] EF - capella - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK ++ [Valid] EF - capella - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK +``` +## EF - deneb - Finality [Preset: mainnet] +```diff ++ [Valid] EF - deneb - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK ++ [Valid] EF - deneb - Finality - finality_rule_1 [Preset: mainnet] OK ++ [Valid] EF - deneb - Finality - finality_rule_2 [Preset: mainnet] OK ++ [Valid] EF - deneb - Finality - finality_rule_3 [Preset: mainnet] OK ++ [Valid] EF - deneb - Finality - finality_rule_4 [Preset: mainnet] OK +``` +## EF - deneb - Random [Preset: mainnet] +```diff ++ [Valid] EF - deneb - Random - randomized_0 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_1 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_10 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_11 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_12 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_13 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_14 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_15 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_2 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_3 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_4 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_5 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_6 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_7 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_8 [Preset: mainnet] OK ++ [Valid] EF - deneb - Random - randomized_9 [Preset: mainnet] OK +``` +## EF - deneb - Sanity - Blocks [Preset: mainnet] +```diff ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block [P OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: mai OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [ OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pres OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_exceed_max_blobs_per_block [Preset: mainn OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expecte OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propose OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: mainn OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_max_blobs_per_block_two_txs [Preset: main OK ++ [Invalid] EF 
- deneb - Sanity - Blocks - invalid_one_blob_max_plus_one_txs [Preset: mainne OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: main OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mainn OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mainn OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pr OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sa OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt OK ++ [Invalid] EF - deneb - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - attestation [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - attester_slashing [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - balance_driven_status_transitions [Preset: mainne OK ++ [Valid] EF - deneb - Sanity - Blocks - block_transition_randomized_payload [Preset: main OK ++ [Valid] EF - deneb - Sanity - Blocks - bls_change [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainnet OK ++ [Valid] EF - deneb - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - empty_block_transition_no_tx [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - exit_and_bls_change [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: main OK ++ [Valid] EF - deneb - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - historical_batch [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pre OK ++ [Valid] EF - deneb - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pre OK ++ [Valid] EF - deneb - Sanity - Blocks - max_blobs_per_block [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: m OK ++ [Valid] EF - deneb - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pres OK ++ [Valid] EF - deneb - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK ++ [Valid] EF - deneb - Sanity - Blocks - multiple_different_validator_exits_same_block [Pr OK ++ [Valid] EF - deneb - Sanity - Blocks - one_blob [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - one_blob_max_txs [Preset: mainnet] OK ++ 
[Valid] EF - deneb - Sanity - Blocks - one_blob_two_txs [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: m OK ++ [Valid] EF - deneb - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - skipped_slots [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - sync_committee_committee__empty [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: OK ++ [Valid] EF - deneb - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: m OK ++ [Valid] EF - deneb - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: m OK ++ [Valid] EF - deneb - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset OK ++ [Valid] EF - deneb - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: main OK ++ [Valid] EF - deneb - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK ++ [Valid] EF - deneb - Sanity - Blocks - zero_blob [Preset: mainnet] OK +``` +## EF - electra - Finality [Preset: mainnet] +```diff ++ [Valid] EF - electra - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK ++ [Valid] EF - electra - Finality - finality_rule_1 [Preset: mainnet] OK ++ [Valid] EF - electra - Finality - finality_rule_2 [Preset: mainnet] OK ++ [Valid] EF - electra - Finality - finality_rule_3 [Preset: mainnet] OK ++ [Valid] EF - electra - Finality - finality_rule_4 [Preset: mainnet] OK +``` +## EF - electra - Random [Preset: mainnet] +```diff ++ [Valid] EF - electra - Random - randomized_0 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_1 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_10 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_11 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_12 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_13 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_14 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_15 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_2 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_3 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_4 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_5 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_6 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_7 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_8 [Preset: mainnet] OK ++ [Valid] EF - electra - Random - randomized_9 [Preset: mainnet] OK +``` +## EF - electra - Sanity - Blocks [Preset: mainnet] +```diff ++ [Invalid] EF - electra - Sanity - Blocks - deposit_transition__invalid_eth1_deposits_overl OK ++ [Invalid] EF - electra - Sanity - Blocks - deposit_transition__invalid_not_enough_eth1_dep OK ++ [Invalid] EF - electra - Sanity - Blocks - 
deposit_transition__invalid_too_many_eth1_depos OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Prese OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: m OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pr OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_exceed_max_blobs_per_block [Preset: mai OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expec OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propo OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: mai OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_max_blobs_per_block_two_txs [Preset: ma OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_one_blob_max_plus_one_txs [Preset: main OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: ma OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mai OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mai OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [ OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_ OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_is OK ++ [Invalid] EF - electra - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - attestation [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - attester_slashing [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - balance_driven_status_transitions [Preset: main OK ++ [Valid] EF - electra - Sanity - Blocks - basic_btec_and_el_withdrawal_request_in_same_bl OK ++ [Valid] EF - electra - Sanity - Blocks - basic_btec_before_el_withdrawal_request [Preset OK ++ [Valid] EF - electra - Sanity - Blocks - basic_el_withdrawal_request [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - block_transition_randomized_payload [Preset: ma OK ++ [Valid] EF - electra - Sanity - Blocks - bls_change [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - cl_exit_and_el_withdrawal_request_in_same_block OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_request_max_per_payload [Preset: mainne OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_request_with_same_pubkey_different_with OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_transition__deposit_and_top_up_same_blo OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_transition__deposit_with_same_pubkey_di OK ++ [Valid] EF - electra - Sanity - Blocks - 
deposit_transition__process_eth1_deposits [Pres OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_transition__process_eth1_deposits_up_to OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_transition__process_max_eth1_deposits [ OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_transition__start_index_is_set [Preset: OK ++ [Valid] EF - electra - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainn OK ++ [Valid] EF - electra - Sanity - Blocks - effective_balance_increase_changes_lookahead [P OK ++ [Valid] EF - electra - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - empty_block_transition_no_tx [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - exit_and_bls_change [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: ma OK ++ [Valid] EF - electra - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - historical_batch [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - inactivity_scores_full_participation_leaking [P OK ++ [Valid] EF - electra - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [P OK ++ [Valid] EF - electra - Sanity - Blocks - max_blobs_per_block [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - multiple_different_proposer_slashings_same_bloc OK ++ [Valid] EF - electra - Sanity - Blocks - multiple_different_validator_exits_same_block [ OK ++ [Valid] EF - electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_differe OK ++ [Valid] EF - electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_same_va OK ++ [Valid] EF - electra - Sanity - Blocks - one_blob [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - one_blob_max_txs [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - one_blob_two_txs [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: OK ++ [Valid] EF - electra - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - skipped_slots [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - switch_to_compounding_requests_when_too_little_ OK ++ [Valid] EF - electra - Sanity - Blocks - sync_committee_committee__empty [Preset: mainne OK ++ [Valid] EF - electra - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet OK ++ [Valid] EF - electra - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet OK ++ [Valid] EF - electra - Sanity - Blocks - 
sync_committee_committee_genesis__empty [Preset OK ++ [Valid] EF - electra - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: OK ++ [Valid] EF - electra - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK ++ [Valid] EF - electra - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Pres OK ++ [Valid] EF - electra - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: ma OK ++ [Valid] EF - electra - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - withdrawal_and_switch_to_compounding_request_sa OK ++ [Valid] EF - electra - Sanity - Blocks - withdrawal_and_withdrawal_request_same_validato OK ++ [Valid] EF - electra - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK ++ [Valid] EF - electra - Sanity - Blocks - zero_blob [Preset: mainnet] OK +``` +## EF - fulu - Finality [Preset: mainnet] +```diff ++ [Valid] EF - fulu - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK ++ [Valid] EF - fulu - Finality - finality_rule_1 [Preset: mainnet] OK ++ [Valid] EF - fulu - Finality - finality_rule_2 [Preset: mainnet] OK ++ [Valid] EF - fulu - Finality - finality_rule_3 [Preset: mainnet] OK ++ [Valid] EF - fulu - Finality - finality_rule_4 [Preset: mainnet] OK +``` +## EF - fulu - Random [Preset: mainnet] +```diff ++ [Valid] EF - fulu - Random - randomized_0 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_1 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_10 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_11 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_12 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_13 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_14 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_15 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_2 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_3 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_4 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_5 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_6 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_7 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_8 [Preset: mainnet] OK ++ [Valid] EF - fulu - Random - randomized_9 [Preset: mainnet] OK +``` +## EF - fulu - Sanity - Blocks [Preset: mainnet] +```diff ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: main OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [P OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Prese OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_exceed_max_blobs_per_block [Preset: mainne OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expected OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_proposer OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: mainne OK ++ [Invalid] EF - fulu - 
Sanity - Blocks - invalid_max_blobs_per_block_two_txs [Preset: mainn OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_one_blob_max_plus_one_txs [Preset: mainnet OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mainn OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mainne OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mainne OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pre OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sam OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt_ OK ++ [Invalid] EF - fulu - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - attestation [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - attester_slashing [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - balance_driven_status_transitions [Preset: mainnet OK ++ [Valid] EF - fulu - Sanity - Blocks - basic_btec_and_el_withdrawal_request_in_same_block OK ++ [Valid] EF - fulu - Sanity - Blocks - basic_btec_before_el_withdrawal_request [Preset: m OK ++ [Valid] EF - fulu - Sanity - Blocks - basic_el_withdrawal_request [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - block_transition_randomized_payload [Preset: mainn OK ++ [Valid] EF - fulu - Sanity - Blocks - bls_change [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - cl_exit_and_el_withdrawal_request_in_same_block [P OK ++ [Valid] EF - fulu - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - deposit_request_max_per_payload [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - deposit_request_with_same_pubkey_different_withdra OK ++ [Valid] EF - fulu - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - effective_balance_increase_changes_lookahead [Pres OK ++ [Valid] EF - fulu - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - empty_block_transition_no_tx [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - exit_and_bls_change [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: mainn OK ++ [Valid] EF - fulu - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - historical_batch [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pres OK ++ [Valid] EF - fulu - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - 
many_partial_withdrawals_in_epoch_transition [Pres OK ++ [Valid] EF - fulu - Sanity - Blocks - max_blobs_per_block [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - multiple_different_proposer_slashings_same_block [ OK ++ [Valid] EF - fulu - Sanity - Blocks - multiple_different_validator_exits_same_block [Pre OK ++ [Valid] EF - fulu - Sanity - Blocks - multiple_el_partial_withdrawal_requests_different_ OK ++ [Valid] EF - fulu - Sanity - Blocks - multiple_el_partial_withdrawal_requests_same_valid OK ++ [Valid] EF - fulu - Sanity - Blocks - one_blob [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - one_blob_max_txs [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - one_blob_two_txs [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: ma OK ++ [Valid] EF - fulu - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - skipped_slots [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - switch_to_compounding_requests_when_too_little_con OK ++ [Valid] EF - fulu - Sanity - Blocks - sync_committee_committee__empty [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: m OK ++ [Valid] EF - fulu - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: ma OK ++ [Valid] EF - fulu - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: ma OK ++ [Valid] EF - fulu - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset: OK ++ [Valid] EF - fulu - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: mainn OK ++ [Valid] EF - fulu - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - withdrawal_and_switch_to_compounding_request_same_ OK ++ [Valid] EF - fulu - Sanity - Blocks - withdrawal_and_withdrawal_request_same_validator [ OK ++ [Valid] EF - fulu - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK ++ [Valid] EF - fulu - Sanity - Blocks - zero_blob [Preset: mainnet] OK +``` +## EF - gloas - Finality [Preset: mainnet] +```diff ++ [Valid] EF - gloas - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK ++ [Valid] EF - gloas - Finality - finality_rule_1 [Preset: mainnet] OK ++ [Valid] EF - gloas - Finality - finality_rule_2 [Preset: mainnet] OK ++ [Valid] EF - gloas - Finality - finality_rule_3 [Preset: mainnet] OK ++ [Valid] EF - gloas - Finality - finality_rule_4 [Preset: mainnet] OK +``` +## EF - gloas - Sanity - Blocks [Preset: mainnet] +```diff ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: mai OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [ OK ++ [Invalid] EF - gloas - Sanity - Blocks - 
invalid_duplicate_validator_exit_same_block [Pres OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expecte OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propose OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: main OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mainn OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mainn OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pr OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sa OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt OK ++ [Invalid] EF - gloas - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - attestation [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - attester_slashing [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - balance_driven_status_transitions [Preset: mainne OK ++ [Valid] EF - gloas - Sanity - Blocks - bls_change [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainnet OK ++ [Valid] EF - gloas - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - exit_and_bls_change [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - historical_batch [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pre OK ++ [Valid] EF - gloas - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pre OK ++ [Valid] EF - gloas - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK ++ [Valid] EF - gloas - Sanity - Blocks - multiple_different_validator_exits_same_block [Pr OK ++ [Valid] EF - gloas - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: m OK ++ [Valid] EF - gloas - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - skipped_slots [Preset: mainnet] OK ++ [Valid] EF - 
gloas - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - sync_committee_committee__empty [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: OK ++ [Valid] EF - gloas - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: m OK ++ [Valid] EF - gloas - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: m OK ++ [Valid] EF - gloas - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset OK ++ [Valid] EF - gloas - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: main OK ++ [Valid] EF - gloas - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK ++ [Valid] EF - gloas - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK +``` +## EF - phase0 - Finality [Preset: mainnet] +```diff ++ [Valid] EF - phase0 - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK ++ [Valid] EF - phase0 - Finality - finality_rule_1 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Finality - finality_rule_2 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Finality - finality_rule_3 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Finality - finality_rule_4 [Preset: mainnet] OK +``` +## EF - phase0 - Random [Preset: mainnet] +```diff ++ [Valid] EF - phase0 - Random - randomized_0 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_1 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_10 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_11 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_12 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_13 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_14 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_15 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_2 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_3 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_4 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_5 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_6 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_7 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_8 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Random - randomized_9 [Preset: mainnet] OK +``` +## EF - phase0 - Sanity - Blocks [Preset: mainnet] +```diff ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block [ OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: ma OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pre OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expect OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propos OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK ++ [Invalid] EF - phase0 - Sanity - 
Blocks - invalid_only_increase_deposit_count [Preset: mai OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: main OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_proposal_for_genesis_slot [Preset: mainn OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_same_slot_block_transition [Preset: main OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [P OK ++ [Invalid] EF - phase0 - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - attestation [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - attester_slashing [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - balance_driven_status_transitions [Preset: mainn OK ++ [Valid] EF - phase0 - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainne OK ++ [Valid] EF - phase0 - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - historical_batch [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: OK ++ [Valid] EF - phase0 - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pre OK ++ [Valid] EF - phase0 - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK ++ [Valid] EF - phase0 - Sanity - Blocks - multiple_different_validator_exits_same_block [P OK ++ [Valid] EF - phase0 - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - skipped_slots [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK ++ [Valid] EF - phase0 - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK ``` ## ForkChoice ```diff @@ -4399,14 +5348,35 @@ ConsensusSpecPreset-mainnet + ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/chain_no_attestations OK + ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/discard_equivocations_on_attes OK + ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/genesis OK ++ ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/proposer_boost_correct_head OK + ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/shorter_chain_but_heavier_weig OK + ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attestati OK + ForkChoice - mainnet/fulu/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_root Skip + ForkChoice - mainnet/fulu/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_root Skip + ForkChoice 
- mainnet/fulu/fork_choice/on_block/pyspec_tests/basic OK + ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_bad_parent_root OK ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_future_block Skip ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_inde OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_inde OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_mism OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_mism OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_mism OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_mism OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_mism OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_mism OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_wron OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_wron OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_wron OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_wron OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_wron OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_wron OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_zero OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__not_availabl OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__ok OK + ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/proposer_boost OK + ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/proposer_boost_is_first_block OK + ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/proposer_boost_root_same_slot_ OK + ForkChoice - mainnet/fulu/fork_choice/should_override_forkchoice_update/pyspec_tests/shoul Skip ``` ## Sync ```diff @@ -4414,4 +5384,5 @@ ConsensusSpecPreset-mainnet + Sync - mainnet/capella/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK + Sync - mainnet/deneb/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK + Sync - mainnet/electra/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK ++ Sync - mainnet/fulu/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK ``` diff --git a/ConsensusSpecPreset-minimal.md b/ConsensusSpecPreset-minimal.md index b8d8ffbe75..c09344b3eb 100644 --- a/ConsensusSpecPreset-minimal.md +++ b/ConsensusSpecPreset-minimal.md @@ -97,8 +97,8 @@ ConsensusSpecPreset-minimal + Rewards and penalties - duplicate_attestation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK + 
Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK @@ -125,16 +125,11 @@ ConsensusSpecPreset-minimal + Sync committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK + Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK ``` -## EF - Altair - Finality [Preset: minimal] -```diff -+ [Valid] EF - Altair - Finality - finality_no_updates_at_genesis [Preset: minimal] OK -+ [Valid] EF - Altair - Finality - finality_rule_1 [Preset: minimal] OK -+ [Valid] EF - Altair - Finality - finality_rule_2 [Preset: minimal] OK -+ [Valid] EF - Altair - Finality - finality_rule_3 [Preset: minimal] OK -+ [Valid] EF - Altair - Finality - finality_rule_4 [Preset: minimal] OK -``` ## EF - Altair - Fork [Preset: minimal] ```diff ++ EF - Altair - Fork - after_fork_deactivate_validators_from_phase0_to_altair [Preset: minim OK ++ EF - Altair - Fork - after_fork_deactivate_validators_wo_block_from_phase0_to_altair [Pres OK ++ EF - Altair - Fork - after_fork_new_validator_active_from_phase0_to_altair [Preset: minima OK + EF - Altair - Fork - altair_fork_random_0 [Preset: minimal] OK + EF - Altair - Fork - altair_fork_random_1 [Preset: minimal] OK + EF - Altair - Fork - altair_fork_random_2 [Preset: minimal] OK @@ -318,25 +313,6 @@ ConsensusSpecPreset-minimal + [Valid] EF - Altair - Operations - Voluntary Exit - success_exit_queue__min_churn OK + [Valid] EF - Altair - Operations - Voluntary Exit - success_exit_queue__scaled_churn OK ``` -## EF - Altair - Random [Preset: minimal] -```diff -+ [Valid] EF - Altair - Random - randomized_0 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_1 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_10 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_11 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_12 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_13 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_14 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_15 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_2 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_3 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_4 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_5 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_6 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_7 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_8 [Preset: minimal] OK -+ [Valid] EF - Altair - Random - randomized_9 [Preset: minimal] OK -``` ## EF - Altair - Rewards [Preset: minimal] ```diff + EF - Altair - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK @@ -416,61 +392,6 @@ ConsensusSpecPreset-minimal + Testing Validator OK + Testing VoluntaryExit OK ``` -## EF - Altair - Sanity - Blocks [Preset: minimal] -```diff -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block [ OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: mi OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pre OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK -+ [Invalid] EF - Altair - 
Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expect OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propos OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: min OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mini OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mini OK -+ [Invalid] EF - Altair - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [P OK -+ [Invalid] EF - Altair - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - attestation [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - attester_slashing [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - balance_driven_status_transitions [Preset: minim OK -+ [Valid] EF - Altair - Sanity - Blocks - deposit_in_block [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - deposit_top_up [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - duplicate_attestation_same_block [Preset: minima OK -+ [Valid] EF - Altair - Sanity - Blocks - empty_block_transition [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - empty_block_transition_large_validator_set [Pres OK -+ [Valid] EF - Altair - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - empty_epoch_transition_large_validator_set [Pres OK -+ [Valid] EF - Altair - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: m OK -+ [Valid] EF - Altair - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - high_proposer_index [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - historical_batch [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pr OK -+ [Valid] EF - Altair - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: OK -+ [Valid] EF - Altair - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pre OK -+ [Valid] EF - Altair - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK -+ [Valid] EF - Altair - Sanity - Blocks - multiple_different_validator_exits_same_block [P OK -+ [Valid] EF - Altair - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - proposer_slashing [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - skipped_slots [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - 
sync_committee_committee__empty [Preset: minimal OK -+ [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee__full [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee__half [Preset: minimal] OK -+ [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: OK -+ [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: OK -+ [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK -+ [Valid] EF - Altair - Sanity - Blocks - voluntary_exit [Preset: minimal] OK -``` ## EF - Altair - Sanity - Slots [Preset: minimal] ```diff + EF - Altair - Slots - balance_change_affects_proposer [Preset: minimal] OK @@ -614,8 +535,8 @@ ConsensusSpecPreset-minimal + Rewards and penalties - duplicate_attestation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK + Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK @@ -642,16 +563,11 @@ ConsensusSpecPreset-minimal + Sync committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK + Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK ``` -## EF - Bellatrix - Finality [Preset: minimal] -```diff -+ [Valid] EF - Bellatrix - Finality - finality_no_updates_at_genesis [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Finality - finality_rule_1 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Finality - finality_rule_2 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Finality - finality_rule_3 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Finality - finality_rule_4 [Preset: minimal] OK -``` ## EF - Bellatrix - Fork [Preset: minimal] ```diff ++ EF - Bellatrix - Fork - after_fork_deactivate_validators_from_altair_to_bellatrix [Preset: OK ++ EF - Bellatrix - Fork - after_fork_deactivate_validators_wo_block_from_altair_to_bellatrix OK ++ EF - Bellatrix - Fork - after_fork_new_validator_active_from_altair_to_bellatrix [Preset: OK + EF - Bellatrix - Fork - bellatrix_fork_random_0 [Preset: minimal] OK + EF - Bellatrix - Fork - bellatrix_fork_random_1 [Preset: minimal] OK + EF - Bellatrix - Fork - bellatrix_fork_random_2 [Preset: minimal] OK @@ -869,25 +785,6 @@ ConsensusSpecPreset-minimal + [Valid] EF - Bellatrix - Operations - Voluntary Exit - voluntary_exit_with_current_fork_ OK + [Valid] EF - Bellatrix - Operations - Voluntary Exit - voluntary_exit_with_previous_fork OK ``` -## EF - Bellatrix - Random [Preset: minimal] -```diff -+ [Valid] EF - Bellatrix - Random - randomized_0 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_1 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_10 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_11 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_12 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_13 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - 
Random - randomized_14 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_15 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_2 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_3 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_4 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_5 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_6 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_7 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_8 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Random - randomized_9 [Preset: minimal] OK -``` ## EF - Bellatrix - Rewards [Preset: minimal] ```diff + EF - Bellatrix - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK @@ -970,64 +867,6 @@ ConsensusSpecPreset-minimal + Testing Validator OK + Testing VoluntaryExit OK ``` -## EF - Bellatrix - Sanity - Blocks [Preset: minimal] -```diff -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_duplicate_attester_slashing_same_bloc OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_blo OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [ OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_exp OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_pro OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minima OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: m OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_same_slot_block_transition [Preset: m OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_similar_proposer_slashings_same_block OK -+ [Invalid] EF - Bellatrix - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - attestation [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - attester_slashing [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - balance_driven_status_transitions [Preset: mi OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - block_transition_randomized_payload [Preset: OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - deposit_in_block [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - deposit_top_up [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - duplicate_attestation_same_block [Preset: min OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - empty_block_transition [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - empty_block_transition_large_validator_set [P OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - empty_block_transition_no_tx [Preset: minimal OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - empty_epoch_transition_large_validator_set [P OK -+ [Valid] EF - Bellatrix - Sanity - 
Blocks - empty_epoch_transition_not_finalizing [Preset OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - high_proposer_index [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - historical_batch [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - inactivity_scores_full_participation_leaking OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - is_execution_enabled_false [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - multiple_attester_slashings_no_overlap [Prese OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - multiple_attester_slashings_partial_overlap [ OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - multiple_different_proposer_slashings_same_bl OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - multiple_different_validator_exits_same_block OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - proposer_after_inactive_index [Preset: minima OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - proposer_slashing [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - skipped_slots [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee__empty [Preset: mini OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee__full [Preset: minim OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee__half [Preset: minim OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee_genesis__empty [Pres OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee_genesis__full [Prese OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee_genesis__half [Prese OK -+ [Valid] EF - Bellatrix - Sanity - Blocks - voluntary_exit [Preset: minimal] OK -``` ## EF - Bellatrix - Sanity - Slots [Preset: minimal] ```diff + EF - Bellatrix - Slots - balance_change_affects_proposer [Preset: minimal] OK @@ -1164,8 +1003,8 @@ ConsensusSpecPreset-minimal + Rewards and penalties - duplicate_attestation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK + Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK @@ -1192,16 +1031,11 @@ ConsensusSpecPreset-minimal + Sync 
committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK + Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK ``` -## EF - Capella - Finality [Preset: minimal] -```diff -+ [Valid] EF - Capella - Finality - finality_no_updates_at_genesis [Preset: minimal] OK -+ [Valid] EF - Capella - Finality - finality_rule_1 [Preset: minimal] OK -+ [Valid] EF - Capella - Finality - finality_rule_2 [Preset: minimal] OK -+ [Valid] EF - Capella - Finality - finality_rule_3 [Preset: minimal] OK -+ [Valid] EF - Capella - Finality - finality_rule_4 [Preset: minimal] OK -``` ## EF - Capella - Fork [Preset: minimal] ```diff ++ EF - Capella - Fork - after_fork_deactivate_validators_from_bellatrix_to_capella [Preset: OK ++ EF - Capella - Fork - after_fork_deactivate_validators_wo_block_from_bellatrix_to_capella OK ++ EF - Capella - Fork - after_fork_new_validator_active_from_bellatrix_to_capella [Preset: m OK + EF - Capella - Fork - capella_fork_random_0 [Preset: minimal] OK + EF - Capella - Fork - capella_fork_random_1 [Preset: minimal] OK + EF - Capella - Fork - capella_fork_random_2 [Preset: minimal] OK @@ -1494,25 +1328,6 @@ ConsensusSpecPreset-minimal + [Valid] EF - Capella - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK + [Valid] EF - Capella - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK ``` -## EF - Capella - Random [Preset: minimal] -```diff -+ [Valid] EF - Capella - Random - randomized_0 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_1 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_10 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_11 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_12 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_13 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_14 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_15 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_2 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_3 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_4 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_5 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_6 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_7 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_8 [Preset: minimal] OK -+ [Valid] EF - Capella - Random - randomized_9 [Preset: minimal] OK -``` ## EF - Capella - Rewards [Preset: minimal] ```diff + EF - Capella - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK @@ -1599,78 +1414,6 @@ ConsensusSpecPreset-minimal + Testing VoluntaryExit OK + Testing Withdrawal OK ``` -## EF - Capella - Sanity - Blocks [Preset: minimal] -```diff -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Prese OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: m OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pr OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK -+ [Invalid] 
EF - Capella - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expec OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propo OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: min OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mi OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: min OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_same_slot_block_transition [Preset: min OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [ OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_ OK -+ [Invalid] EF - Capella - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_is OK -+ [Invalid] EF - Capella - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - activate_and_partial_withdrawal_max_effective_b OK -+ [Valid] EF - Capella - Sanity - Blocks - activate_and_partial_withdrawal_overdeposit [Pr OK -+ [Valid] EF - Capella - Sanity - Blocks - attestation [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - attester_slashing [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - balance_driven_status_transitions [Preset: mini OK -+ [Valid] EF - Capella - Sanity - Blocks - block_transition_randomized_payload [Preset: mi OK -+ [Valid] EF - Capella - Sanity - Blocks - bls_change [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - deposit_and_bls_change [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - deposit_in_block [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - deposit_top_up [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - duplicate_attestation_same_block [Preset: minim OK -+ [Valid] EF - Capella - Sanity - Blocks - empty_block_transition [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - empty_block_transition_large_validator_set [Pre OK -+ [Valid] EF - Capella - Sanity - Blocks - empty_block_transition_no_tx [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - empty_epoch_transition_large_validator_set [Pre OK -+ [Valid] EF - Capella - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: OK -+ [Valid] EF - Capella - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - exit_and_bls_change [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: mi OK -+ [Valid] EF - Capella - Sanity - Blocks - high_proposer_index [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - historical_batch [Preset: minimal] OK 
-+ [Valid] EF - Capella - Sanity - Blocks - inactivity_scores_full_participation_leaking [P OK -+ [Valid] EF - Capella - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [P OK -+ [Valid] EF - Capella - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: OK -+ [Valid] EF - Capella - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pr OK -+ [Valid] EF - Capella - Sanity - Blocks - multiple_different_proposer_slashings_same_bloc OK -+ [Valid] EF - Capella - Sanity - Blocks - multiple_different_validator_exits_same_block [ OK -+ [Valid] EF - Capella - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: OK -+ [Valid] EF - Capella - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - proposer_slashing [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - skipped_slots [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - sync_committee_committee__empty [Preset: minima OK -+ [Valid] EF - Capella - Sanity - Blocks - sync_committee_committee__full [Preset: minimal OK -+ [Valid] EF - Capella - Sanity - Blocks - sync_committee_committee__half [Preset: minimal OK -+ [Valid] EF - Capella - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset OK -+ [Valid] EF - Capella - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: OK -+ [Valid] EF - Capella - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK -+ [Valid] EF - Capella - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Pres OK -+ [Valid] EF - Capella - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: mi OK -+ [Valid] EF - Capella - Sanity - Blocks - voluntary_exit [Preset: minimal] OK -+ [Valid] EF - Capella - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK -``` ## EF - Capella - Sanity - Slots [Preset: minimal] ```diff + EF - Capella - Slots - balance_change_affects_proposer [Preset: minimal] OK @@ -1817,8 +1560,8 @@ ConsensusSpecPreset-minimal + Rewards and penalties - duplicate_attestation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK + Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK @@ -1845,16 +1588,11 @@ ConsensusSpecPreset-minimal + Sync committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK + Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK ``` -## EF - Deneb - Finality [Preset: minimal] -```diff -+ [Valid] EF - Deneb - Finality - finality_no_updates_at_genesis [Preset: minimal] OK -+ [Valid] EF - Deneb - Finality - finality_rule_1 [Preset: minimal] OK -+ [Valid] EF - Deneb - 
Finality - finality_rule_2 [Preset: minimal] OK -+ [Valid] EF - Deneb - Finality - finality_rule_3 [Preset: minimal] OK -+ [Valid] EF - Deneb - Finality - finality_rule_4 [Preset: minimal] OK -``` ## EF - Deneb - Fork [Preset: minimal] ```diff ++ EF - Deneb - Fork - after_fork_deactivate_validators_from_capella_to_deneb [Preset: minima OK ++ EF - Deneb - Fork - after_fork_deactivate_validators_wo_block_from_capella_to_deneb [Prese OK ++ EF - Deneb - Fork - after_fork_new_validator_active_from_capella_to_deneb [Preset: minimal OK + EF - Deneb - Fork - deneb_fork_random_0 [Preset: minimal] OK + EF - Deneb - Fork - deneb_fork_random_1 [Preset: minimal] OK + EF - Deneb - Fork - deneb_fork_random_2 [Preset: minimal] OK @@ -2161,25 +1899,6 @@ ConsensusSpecPreset-minimal + [Valid] EF - Deneb - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balan OK + [Valid] EF - Deneb - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balan OK ``` -## EF - Deneb - Random [Preset: minimal] -```diff -+ [Valid] EF - Deneb - Random - randomized_0 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_1 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_10 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_11 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_12 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_13 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_14 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_15 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_2 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_3 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_4 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_5 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_6 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_7 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_8 [Preset: minimal] OK -+ [Valid] EF - Deneb - Random - randomized_9 [Preset: minimal] OK -``` ## EF - Deneb - Rewards [Preset: minimal] ```diff + EF - Deneb - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK @@ -2268,87 +1987,6 @@ ConsensusSpecPreset-minimal + Testing VoluntaryExit OK + Testing Withdrawal OK ``` -## EF - Deneb - Sanity - Blocks [Preset: minimal] -```diff -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block [P OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: min OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [ OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pres OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_exceed_max_blobs_per_block [Preset: minim OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expecte OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propose OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: minim OK -+ 
[Invalid] EF - Deneb - Sanity - Blocks - invalid_max_blobs_per_block_two_txs [Preset: mini OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_one_blob_max_plus_one_txs [Preset: minima OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mini OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: minim OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_same_slot_block_transition [Preset: minim OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pr OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sa OK -+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt OK -+ [Invalid] EF - Deneb - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - activate_and_partial_withdrawal_max_effective_bal OK -+ [Valid] EF - Deneb - Sanity - Blocks - activate_and_partial_withdrawal_overdeposit [Pres OK -+ [Valid] EF - Deneb - Sanity - Blocks - attestation [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - attester_slashing [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - balance_driven_status_transitions [Preset: minima OK -+ [Valid] EF - Deneb - Sanity - Blocks - block_transition_randomized_payload [Preset: mini OK -+ [Valid] EF - Deneb - Sanity - Blocks - bls_change [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - deposit_and_bls_change [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - deposit_in_block [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - deposit_top_up [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - duplicate_attestation_same_block [Preset: minimal OK -+ [Valid] EF - Deneb - Sanity - Blocks - empty_block_transition [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - empty_block_transition_large_validator_set [Prese OK -+ [Valid] EF - Deneb - Sanity - Blocks - empty_block_transition_no_tx [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - empty_epoch_transition_large_validator_set [Prese OK -+ [Valid] EF - Deneb - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: mi OK -+ [Valid] EF - Deneb - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - exit_and_bls_change [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: mini OK -+ [Valid] EF - Deneb - Sanity - Blocks - high_proposer_index [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - historical_batch [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pre OK -+ [Valid] EF - Deneb - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks 
- many_partial_withdrawals_in_epoch_transition [Pre OK -+ [Valid] EF - Deneb - Sanity - Blocks - max_blobs_per_block [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: m OK -+ [Valid] EF - Deneb - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pres OK -+ [Valid] EF - Deneb - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK -+ [Valid] EF - Deneb - Sanity - Blocks - multiple_different_validator_exits_same_block [Pr OK -+ [Valid] EF - Deneb - Sanity - Blocks - one_blob [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - one_blob_max_txs [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - one_blob_two_txs [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: m OK -+ [Valid] EF - Deneb - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - proposer_slashing [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - skipped_slots [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - sync_committee_committee__empty [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - sync_committee_committee__full [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - sync_committee_committee__half [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: OK -+ [Valid] EF - Deneb - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: m OK -+ [Valid] EF - Deneb - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: m OK -+ [Valid] EF - Deneb - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset OK -+ [Valid] EF - Deneb - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: mini OK -+ [Valid] EF - Deneb - Sanity - Blocks - voluntary_exit [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK -+ [Valid] EF - Deneb - Sanity - Blocks - zero_blob [Preset: minimal] OK -``` ## EF - Deneb - Sanity - Slots [Preset: minimal] ```diff + EF - Deneb - Slots - balance_change_affects_proposer [Preset: minimal] OK @@ -2506,7 +2144,6 @@ ConsensusSpecPreset-minimal + Pending deposits - apply_pending_deposit_top_up__max_effective_balance_compounding [Preset OK + Pending deposits - apply_pending_deposit_top_up__min_activation_balance [Preset: minimal] OK + Pending deposits - apply_pending_deposit_top_up__min_activation_balance_compounding [Prese OK -+ Pending deposits - apply_pending_deposit_top_up__zero_balance [Preset: minimal] OK + Pending deposits - apply_pending_deposit_under_min_activation [Preset: minimal] OK + Pending deposits - apply_pending_deposit_with_previous_fork_version [Preset: minimal] OK + Pending deposits - ineffective_deposit_with_current_fork_version [Preset: minimal] OK @@ -2569,8 +2206,8 @@ ConsensusSpecPreset-minimal + Rewards and penalties - duplicate_attestation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK ++ Rewards and penalties - 
full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK + Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK @@ -2597,16 +2234,11 @@ ConsensusSpecPreset-minimal + Sync committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK + Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK ``` -## EF - Electra - Finality [Preset: minimal] -```diff -+ [Valid] EF - Electra - Finality - finality_no_updates_at_genesis [Preset: minimal] OK -+ [Valid] EF - Electra - Finality - finality_rule_1 [Preset: minimal] OK -+ [Valid] EF - Electra - Finality - finality_rule_2 [Preset: minimal] OK -+ [Valid] EF - Electra - Finality - finality_rule_3 [Preset: minimal] OK -+ [Valid] EF - Electra - Finality - finality_rule_4 [Preset: minimal] OK -``` ## EF - Electra - Fork [Preset: minimal] ```diff ++ EF - Electra - Fork - after_fork_deactivate_validators_from_deneb_to_electra [Preset: mini OK ++ EF - Electra - Fork - after_fork_deactivate_validators_wo_block_from_deneb_to_electra [Pre OK ++ EF - Electra - Fork - after_fork_new_validator_active_from_deneb_to_electra [Preset: minim OK + EF - Electra - Fork - electra_fork_random_0 [Preset: minimal] OK + EF - Electra - Fork - electra_fork_random_1 [Preset: minimal] OK + EF - Electra - Fork - electra_fork_random_2 [Preset: minimal] OK @@ -2745,6 +2377,7 @@ ConsensusSpecPreset-minimal + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_in_curre OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_in_new_c OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_source_h OK ++ [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_target_h OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_com OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_exc OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_ins OK @@ -2764,6 +2397,7 @@ ConsensusSpecPreset-minimal + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_source_address OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_source_has_pending OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_source_not_active_ OK ++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_source_pubkey_is_t OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_source_with_bls_cr OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_target_with_bls_cr OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_target_with_eth1_c OK @@ -2804,6 +2438,8 @@ ConsensusSpecPreset-minimal ``` ## EF - Electra - Operations - Deposit Request [Preset: minimal] ```diff ++ [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_extra_gwei OK ++ [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_greater_th OK + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_invalid_si OK + [Valid] 
EF - Electra - Operations - Deposit Request - process_deposit_request_max_effect OK + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_min_activa OK @@ -2812,6 +2448,7 @@ ConsensusSpecPreset-minimal + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_top_up_inv OK + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_top_up_max OK + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_top_up_min OK ++ [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_top_up_sti OK ``` ## EF - Electra - Operations - Execution Payload [Preset: minimal] ```diff @@ -2923,6 +2560,7 @@ ConsensusSpecPreset-minimal + [Valid] EF - Electra - Operations - Voluntary Exit - min_balance_exits_up_to_churn OK + [Valid] EF - Electra - Operations - Voluntary Exit - success_exit_queue__min_churn OK + [Valid] EF - Electra - Operations - Voluntary Exit - success_exit_queue__scaled_churn OK ++ [Valid] EF - Electra - Operations - Voluntary Exit - voluntary_exit_with_pending_deposit OK ``` ## EF - Electra - Operations - Withdrawal Request [Preset: minimal] ```diff @@ -2999,6 +2637,8 @@ ConsensusSpecPreset-minimal + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_next_epoch OK + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_no_excess_balance OK + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_one_skipped_one_ef OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_two_partial_withdr OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_two_partial_withdr OK + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_effective_swe OK + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_ineffective_s OK + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_ineffective_s OK @@ -3038,25 +2678,6 @@ ConsensusSpecPreset-minimal + [Valid] EF - Electra - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK + [Valid] EF - Electra - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK ``` -## EF - Electra - Random [Preset: minimal] -```diff -+ [Valid] EF - Electra - Random - randomized_0 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_1 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_10 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_11 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_12 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_13 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_14 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_15 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_2 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_3 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_4 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_5 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_6 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_7 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_8 [Preset: minimal] OK -+ [Valid] EF - Electra - Random - randomized_9 [Preset: minimal] OK -``` ## EF - Electra - Rewards [Preset: minimal] ```diff + EF - Electra - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK @@ -3153,111 +2774,11 @@ 
ConsensusSpecPreset-minimal + Testing Withdrawal OK + Testing WithdrawalRequest OK ``` -## EF - Electra - Sanity - Blocks [Preset: minimal] -```diff -+ [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_eth1_deposits_overl OK -+ [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_not_enough_eth1_dep OK -+ [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_too_many_eth1_depos OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Prese OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: m OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pr OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_exceed_max_blobs_per_block [Preset: min OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expec OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propo OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: min OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_max_blobs_per_block_two_txs [Preset: mi OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_one_blob_max_plus_one_txs [Preset: mini OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mi OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: min OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_same_slot_block_transition [Preset: min OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [ OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_ OK -+ [Invalid] EF - Electra - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_is OK -+ [Invalid] EF - Electra - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - activate_and_partial_withdrawal_max_effective_b OK -+ [Valid] EF - Electra - Sanity - Blocks - activate_and_partial_withdrawal_overdeposit [Pr OK -+ [Valid] EF - Electra - Sanity - Blocks - attestation [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - attester_slashing [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - balance_driven_status_transitions [Preset: mini OK -+ [Valid] EF - Electra - Sanity - Blocks - basic_btec_and_el_withdrawal_request_in_same_bl OK -+ [Valid] EF - Electra - Sanity - Blocks - basic_btec_before_el_withdrawal_request [Preset OK -+ [Valid] EF - Electra - Sanity - Blocks - basic_el_withdrawal_request [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - block_transition_randomized_payload [Preset: mi OK -+ [Valid] EF - Electra - Sanity - Blocks - bls_change [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - cl_exit_and_el_withdrawal_request_in_same_block OK -+ [Valid] EF - Electra - Sanity - Blocks - consolidation_requests_when_pending_consolidati OK -+ 
[Valid] EF - Electra - Sanity - Blocks - deposit_and_bls_change [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_in_block [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_request_with_same_pubkey_different_with OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_top_up [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__deposit_and_top_up_same_blo OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__deposit_with_same_pubkey_di OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_eth1_deposits [Pres OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_eth1_deposits_up_to OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_max_eth1_deposits [ OK -+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__start_index_is_set [Preset: OK -+ [Valid] EF - Electra - Sanity - Blocks - duplicate_attestation_same_block [Preset: minim OK -+ [Valid] EF - Electra - Sanity - Blocks - empty_block_transition [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - empty_block_transition_large_validator_set [Pre OK -+ [Valid] EF - Electra - Sanity - Blocks - empty_block_transition_no_tx [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - empty_epoch_transition_large_validator_set [Pre OK -+ [Valid] EF - Electra - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: OK -+ [Valid] EF - Electra - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - exit_and_bls_change [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: mi OK -+ [Valid] EF - Electra - Sanity - Blocks - high_proposer_index [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - historical_batch [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - inactivity_scores_full_participation_leaking [P OK -+ [Valid] EF - Electra - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [P OK -+ [Valid] EF - Electra - Sanity - Blocks - max_blobs_per_block [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - multiple_different_proposer_slashings_same_bloc OK -+ [Valid] EF - Electra - Sanity - Blocks - multiple_different_validator_exits_same_block [ OK -+ [Valid] EF - Electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_differe OK -+ [Valid] EF - Electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_same_va OK -+ [Valid] EF - Electra - Sanity - Blocks - one_blob [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - one_blob_max_txs [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - one_blob_two_txs [Preset: minimal] OK -+ [Valid] EF - Electra 
- Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: OK -+ [Valid] EF - Electra - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - proposer_slashing [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - skipped_slots [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - switch_to_compounding_requests_when_pending_con OK -+ [Valid] EF - Electra - Sanity - Blocks - switch_to_compounding_requests_when_too_little_ OK -+ [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee__empty [Preset: minima OK -+ [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee__full [Preset: minimal OK -+ [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee__half [Preset: minimal OK -+ [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset OK -+ [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: OK -+ [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK -+ [Valid] EF - Electra - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Pres OK -+ [Valid] EF - Electra - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: mi OK -+ [Valid] EF - Electra - Sanity - Blocks - voluntary_exit [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - withdrawal_and_consolidation_effective_balance_ OK -+ [Valid] EF - Electra - Sanity - Blocks - withdrawal_and_switch_to_compounding_request_sa OK -+ [Valid] EF - Electra - Sanity - Blocks - withdrawal_and_withdrawal_request_same_validato OK -+ [Valid] EF - Electra - Sanity - Blocks - withdrawal_requests_when_pending_withdrawal_que OK -+ [Valid] EF - Electra - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK -+ [Valid] EF - Electra - Sanity - Blocks - zero_blob [Preset: minimal] OK -``` ## EF - Electra - Sanity - Slots [Preset: minimal] ```diff + EF - Electra - Slots - balance_change_affects_proposer [Preset: minimal] OK + EF - Electra - Slots - double_empty_epoch [Preset: minimal] OK ++ EF - Electra - Slots - effective_decrease_balance_updates_lookahead [Preset: minimal] OK + EF - Electra - Slots - empty_epoch [Preset: minimal] OK + EF - Electra - Slots - historical_accumulator [Preset: minimal] OK + EF - Electra - Slots - multiple_pending_deposits_same_pubkey [Preset: minimal] OK @@ -3267,6 +2788,7 @@ ConsensusSpecPreset-minimal + EF - Electra - Slots - multiple_pending_deposits_same_pubkey_different_signature [Preset: OK + EF - Electra - Slots - over_epoch_boundary [Preset: minimal] OK + EF - Electra - Slots - pending_consolidation [Preset: minimal] OK ++ EF - Electra - Slots - pending_deposit_extra_gwei [Preset: minimal] OK + EF - Electra - Slots - slots_1 [Preset: minimal] OK + EF - Electra - Slots - slots_2 [Preset: minimal] OK ``` @@ -3420,7 +2942,6 @@ ConsensusSpecPreset-minimal + Pending deposits - apply_pending_deposit_top_up__max_effective_balance_compounding [Preset OK + Pending deposits - apply_pending_deposit_top_up__min_activation_balance [Preset: minimal] OK + Pending deposits - apply_pending_deposit_top_up__min_activation_balance_compounding [Prese OK -+ Pending deposits - apply_pending_deposit_top_up__zero_balance [Preset: minimal] OK + Pending deposits - apply_pending_deposit_under_min_activation 
[Preset: minimal] OK + Pending deposits - apply_pending_deposit_with_previous_fork_version [Preset: minimal] OK + Pending deposits - ineffective_deposit_with_current_fork_version [Preset: minimal] OK @@ -3443,6 +2964,11 @@ ConsensusSpecPreset-minimal + Pending deposits - process_pending_deposits_withdrawable_validator [Preset: minimal] OK + Pending deposits - process_pending_deposits_withdrawable_validator_not_churned [Preset: mi OK ``` +## EF - Fulu - Epoch Processing - Proposer lookahead [Preset: minimal] +```diff ++ Proposer lookahead - proposer_lookahead_does_not_contain_exited_validators [Preset: minima OK ++ Proposer lookahead - proposer_lookahead_in_state_matches_computed_lookahead [Preset: minim OK +``` ## EF - Fulu - Epoch Processing - RANDAO mixes reset [Preset: minimal] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: minimal] OK @@ -3483,8 +3009,8 @@ ConsensusSpecPreset-minimal + Rewards and penalties - duplicate_attestation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK + Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK @@ -3511,16 +3037,11 @@ ConsensusSpecPreset-minimal + Sync committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK + Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK ``` -## EF - Fulu - Finality [Preset: minimal] -```diff -+ [Valid] EF - Fulu - Finality - finality_no_updates_at_genesis [Preset: minimal] OK -+ [Valid] EF - Fulu - Finality - finality_rule_1 [Preset: minimal] OK -+ [Valid] EF - Fulu - Finality - finality_rule_2 [Preset: minimal] OK -+ [Valid] EF - Fulu - Finality - finality_rule_3 [Preset: minimal] OK -+ [Valid] EF - Fulu - Finality - finality_rule_4 [Preset: minimal] OK -``` ## EF - Fulu - Fork [Preset: minimal] ```diff ++ EF - Fulu - Fork - after_fork_deactivate_validators_from_electra_to_fulu [Preset: minimal] OK ++ EF - Fulu - Fork - after_fork_deactivate_validators_wo_block_from_electra_to_fulu [Preset: OK ++ EF - Fulu - Fork - after_fork_new_validator_active_from_electra_to_fulu [Preset: minimal] OK + EF - Fulu - Fork - fork_base_state [Preset: minimal] OK + EF - Fulu - Fork - fork_many_next_epoch [Preset: minimal] OK + EF - Fulu - Fork - fork_next_epoch [Preset: minimal] OK @@ -3535,6 +3056,9 @@ ConsensusSpecPreset-minimal + EF - Fulu - Fork - fulu_fork_random_large_validator_set [Preset: minimal] OK + EF - Fulu - Fork - fulu_fork_random_low_balances [Preset: minimal] OK + EF - Fulu - Fork - fulu_fork_random_misc_balances [Preset: minimal] OK ++ EF - Fulu - Fork - lookahead_consistency_at_fork [Preset: minimal] OK ++ EF - Fulu - Fork - lookahead_consistency_with_effective_balance_change_at_fork [Preset: mi OK ++ EF - Fulu - Fork - proposer_lookahead_init_at_fork_only_contains_active_validators [Preset OK ``` ## EF - Fulu - Operations - Attestation [Preset: minimal] ```diff @@ -3652,6 +3176,7 @@ ConsensusSpecPreset-minimal + [Valid] EF - Fulu - Operations - 
Consolidation Request - basic_consolidation_in_current_ OK + [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_in_new_cons OK + [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_source_has_ OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_target_has_ OK + [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_with_compou OK + [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_with_excess OK + [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_with_insuff OK @@ -3671,6 +3196,7 @@ ConsensusSpecPreset-minimal + [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_source_address OK + [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_source_has_pending_wi OK + [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_source_not_active_lon OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_source_pubkey_is_targ OK + [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_source_with_bls_crede OK + [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_target_with_bls_crede OK + [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_target_with_eth1_cred OK @@ -3704,12 +3230,15 @@ ConsensusSpecPreset-minimal + [Valid] EF - Fulu - Operations - Deposit - new_deposit_non_versioned_withdrawal_credenti OK + [Valid] EF - Fulu - Operations - Deposit - new_deposit_over_max OK + [Valid] EF - Fulu - Operations - Deposit - new_deposit_under_max OK ++ [Valid] EF - Fulu - Operations - Deposit - success_top_up_to_withdrawn_validator OK + [Valid] EF - Fulu - Operations - Deposit - top_up__less_effective_balance OK + [Valid] EF - Fulu - Operations - Deposit - top_up__max_effective_balance OK + [Valid] EF - Fulu - Operations - Deposit - top_up__zero_balance OK ``` ## EF - Fulu - Operations - Deposit Request [Preset: minimal] ```diff ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_extra_gwei OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_greater_than_ OK + [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_invalid_sig OK + [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_max_effective OK + [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_min_activatio OK @@ -3718,11 +3247,14 @@ ConsensusSpecPreset-minimal + [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_invali OK + [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_max_ef OK + [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_min_ac OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_still_ OK ``` ## EF - Fulu - Operations - Execution Payload [Preset: minimal] ```diff + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_everything_first_payloa OK + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_everything_regular_payl OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_execution_first_payload OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_execution_regular_paylo OK + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_parent_hash_first_paylo OK + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_parent_hash_regular_pay OK + 
[Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_pre_randao_regular_payl OK @@ -3733,6 +3265,8 @@ ConsensusSpecPreset-minimal + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_future_timestamp_regular_pa OK + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_past_timestamp_first_payloa OK + [Invalid] EF - Fulu - Operations - Execution Payload - invalid_past_timestamp_regular_payl OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_randomized_non_validated_ex OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_randomized_non_validated_ex OK + [Valid] EF - Fulu - Operations - Execution Payload - incorrect_blob_tx_type OK + [Valid] EF - Fulu - Operations - Execution Payload - incorrect_block_hash OK + [Valid] EF - Fulu - Operations - Execution Payload - incorrect_commitment OK @@ -3744,6 +3278,18 @@ ConsensusSpecPreset-minimal + [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_no_blobs_but_ OK + [Valid] EF - Fulu - Operations - Execution Payload - no_commitments_for_transactions OK + [Valid] EF - Fulu - Operations - Execution Payload - no_transactions_with_commitments OK ++ [Valid] EF - Fulu - Operations - Execution Payload - non_empty_extra_data_first_payload OK ++ [Valid] EF - Fulu - Operations - Execution Payload - non_empty_extra_data_regular_payloa OK ++ [Valid] EF - Fulu - Operations - Execution Payload - non_empty_transactions_first_payloa OK ++ [Valid] EF - Fulu - Operations - Execution Payload - non_empty_transactions_regular_payl OK ++ [Valid] EF - Fulu - Operations - Execution Payload - randomized_non_validated_execution_ OK ++ [Valid] EF - Fulu - Operations - Execution Payload - randomized_non_validated_execution_ OK ++ [Valid] EF - Fulu - Operations - Execution Payload - success_first_payload OK ++ [Valid] EF - Fulu - Operations - Execution Payload - success_first_payload_with_gap_slot OK ++ [Valid] EF - Fulu - Operations - Execution Payload - success_regular_payload OK ++ [Valid] EF - Fulu - Operations - Execution Payload - success_regular_payload_with_gap_sl OK ++ [Valid] EF - Fulu - Operations - Execution Payload - zero_length_transaction_first_paylo OK ++ [Valid] EF - Fulu - Operations - Execution Payload - zero_length_transaction_regular_pay OK + [Valid] EF - Fulu - Operations - Execution Payload - zeroed_commitment OK ``` ## EF - Fulu - Operations - Proposer Slashing [Preset: minimal] @@ -3813,6 +3359,7 @@ ConsensusSpecPreset-minimal + [Valid] EF - Fulu - Operations - Voluntary Exit - min_balance_exits_up_to_churn OK + [Valid] EF - Fulu - Operations - Voluntary Exit - success_exit_queue__min_churn OK + [Valid] EF - Fulu - Operations - Voluntary Exit - success_exit_queue__scaled_churn OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - voluntary_exit_with_pending_deposit OK ``` ## EF - Fulu - Operations - Withdrawal Request [Preset: minimal] ```diff @@ -3848,9 +3395,30 @@ ConsensusSpecPreset-minimal ``` ## EF - Fulu - Operations - Withdrawals [Preset: minimal] ```diff ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_few_ OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_a_lot_mixed_withdrawable_in_queue OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_a_lot_partially_withdrawable_too_ OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_address_full OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_address_partial OK ++ [Invalid] EF - Fulu - Operations - 
Withdrawals - invalid_incorrect_amount_full OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_amount_partial OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_withdrawal_index OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_many_incorrectly_full OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_many_incorrectly_partial OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_max_per_slot_full_withdrawals_and OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_max_per_slot_partial_withdrawals_ OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_non_withdrawable_non_empty_withdr OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_expected_full_withdrawal_and_ OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_expected_full_withdrawal_and_ OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_expected_partial_withdrawal_a OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_of_many_incorrectly_full OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_of_many_incorrectly_partial OK ++ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_two_expected_partial_withdrawal_a OK ++ [Valid] EF - Fulu - Operations - Withdrawals - all_withdrawal OK + [Valid] EF - Fulu - Operations - Withdrawals - full_pending_withdrawals_but_first_skippe OK + [Valid] EF - Fulu - Operations - Withdrawals - full_pending_withdrawals_but_first_skippe OK + [Valid] EF - Fulu - Operations - Withdrawals - full_pending_withdrawals_but_first_skippe OK ++ [Valid] EF - Fulu - Operations - Withdrawals - no_withdrawals_but_some_next_epoch OK + [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK + [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK + [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK @@ -3868,50 +3436,46 @@ ConsensusSpecPreset-minimal + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_next_epoch OK + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_no_excess_balance OK + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_one_skipped_one_effec OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_two_partial_withdrawa OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_two_partial_withdrawa OK + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_effective_sweep_ OK + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_ineffective_swee OK + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_ineffective_swee OK + [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_sweep_different_ OK + [Valid] EF - Fulu - Operations - Withdrawals - random_0 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_full_withdrawals_0 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_full_withdrawals_1 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_full_withdrawals_2 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_full_withdrawals_3 OK + [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_1 OK + [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_2 OK + [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_3 OK + [Valid] EF - Fulu - Operations - Withdrawals - 
random_partial_withdrawals_4 OK + [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_5 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_all_fully_withdrawable OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_all_partially_withdrawable OK + [Valid] EF - Fulu - Operations - Withdrawals - success_excess_balance_but_no_max_effecti OK + [Valid] EF - Fulu - Operations - Withdrawals - success_excess_balance_but_no_max_effecti OK + [Valid] EF - Fulu - Operations - Withdrawals - success_max_partial_withdrawable OK + [Valid] EF - Fulu - Operations - Withdrawals - success_max_plus_one_withdrawable OK + [Valid] EF - Fulu - Operations - Withdrawals - success_mixed_fully_and_partial_withdrawa OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_mixed_fully_and_partial_withdrawa OK + [Valid] EF - Fulu - Operations - Withdrawals - success_no_excess_balance OK + [Valid] EF - Fulu - Operations - Withdrawals - success_no_excess_balance_compounding OK + [Valid] EF - Fulu - Operations - Withdrawals - success_no_max_effective_balance OK + [Valid] EF - Fulu - Operations - Withdrawals - success_no_max_effective_balance_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_full_withdrawal OK + [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_active_a OK + [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_exited OK + [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_exited_a OK + [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_in_exit_ OK + [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_not_yet_ OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawal OK + [Valid] EF - Fulu - Operations - Withdrawals - success_two_partial_withdrawable OK -``` -## EF - Fulu - Random [Preset: minimal] -```diff -+ [Valid] EF - Fulu - Random - randomized_0 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_1 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_10 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_11 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_12 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_13 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_14 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_15 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_2 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_3 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_4 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_5 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_6 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_7 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_8 [Preset: minimal] OK -+ [Valid] EF - Fulu - Random - randomized_9 [Preset: minimal] OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_zero_expected_withdrawals OK ++ [Valid] EF - Fulu - Operations - Withdrawals - withdrawable_epoch_but_0_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balanc OK ++ [Valid] EF - Fulu - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balanc OK ``` ## EF - Fulu - Rewards [Preset: minimal] ```diff @@ -3966,8 +3530,8 @@ ConsensusSpecPreset-minimal + Testing Checkpoint OK + Testing ConsolidationRequest OK + Testing 
ContributionAndProof OK -+ Testing DataColumnIdentifier OK + Testing DataColumnSidecar OK ++ Testing DataColumnsByRootIdentifier OK + Testing Deposit OK + Testing DepositData OK + Testing DepositMessage OK @@ -4012,75 +3576,11 @@ ConsensusSpecPreset-minimal + Testing Withdrawal OK + Testing WithdrawalRequest OK ``` -## EF - Fulu - Sanity - Blocks [Preset: minimal] -```diff -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: mini OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [P OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Prese OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expected OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_proposer OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: minim OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: minima OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_same_slot_block_transition [Preset: minima OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pre OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sam OK -+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt_ OK -+ [Invalid] EF - Fulu - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - activate_and_partial_withdrawal_max_effective_bala OK -+ [Valid] EF - Fulu - Sanity - Blocks - activate_and_partial_withdrawal_overdeposit [Prese OK -+ [Valid] EF - Fulu - Sanity - Blocks - attestation [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - attester_slashing [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - balance_driven_status_transitions [Preset: minimal OK -+ [Valid] EF - Fulu - Sanity - Blocks - bls_change [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - deposit_and_bls_change [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - deposit_in_block [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - deposit_top_up [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - duplicate_attestation_same_block [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - empty_block_transition [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - empty_block_transition_large_validator_set [Preset OK -+ [Valid] EF - Fulu - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - empty_epoch_transition_large_validator_set [Preset OK -+ [Valid] EF - Fulu - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: min OK -+ [Valid] EF - Fulu - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - exit_and_bls_change [Preset: 
minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - high_proposer_index [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - historical_batch [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pres OK -+ [Valid] EF - Fulu - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pres OK -+ [Valid] EF - Fulu - Sanity - Blocks - multiple_different_proposer_slashings_same_block [ OK -+ [Valid] EF - Fulu - Sanity - Blocks - multiple_different_validator_exits_same_block [Pre OK -+ [Valid] EF - Fulu - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: mi OK -+ [Valid] EF - Fulu - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - proposer_slashing [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - skipped_slots [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__empty [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__full [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__half [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: m OK -+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: mi OK -+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: mi OK -+ [Valid] EF - Fulu - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset: OK -+ [Valid] EF - Fulu - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: minim OK -+ [Valid] EF - Fulu - Sanity - Blocks - voluntary_exit [Preset: minimal] OK -+ [Valid] EF - Fulu - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK -``` ## EF - Fulu - Sanity - Slots [Preset: minimal] ```diff + EF - Fulu - Slots - balance_change_affects_proposer [Preset: minimal] OK + EF - Fulu - Slots - double_empty_epoch [Preset: minimal] OK ++ EF - Fulu - Slots - effective_decrease_balance_updates_lookahead [Preset: minimal] OK + EF - Fulu - Slots - empty_epoch [Preset: minimal] OK + EF - Fulu - Slots - historical_accumulator [Preset: minimal] OK + EF - Fulu - Slots - multiple_pending_deposits_same_pubkey [Preset: minimal] OK @@ -4090,9 +3590,835 @@ ConsensusSpecPreset-minimal + EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_different_signature [Preset: min OK + EF - Fulu - Slots - over_epoch_boundary [Preset: minimal] OK + EF - Fulu - Slots - pending_consolidation [Preset: minimal] OK ++ EF - Fulu - Slots - pending_deposit_extra_gwei [Preset: minimal] OK + EF - Fulu - Slots - slots_1 [Preset: minimal] OK + EF - Fulu - Slots - slots_2 [Preset: minimal] OK ``` +## EF - Fulu - Transition [Preset: minimal] +```diff ++ EF - Fulu - Transition - higher_churn_limit_to_lower [Preset: minimal] OK ++ EF - Fulu - Transition - 
non_empty_historical_roots [Preset: minimal] OK ++ EF - Fulu - Transition - normal_transition [Preset: minimal] OK ++ EF - Fulu - Transition - simple_transition [Preset: minimal] OK ++ EF - Fulu - Transition - transition_attestation_from_previous_fork_with_new_range [Preset: OK ++ EF - Fulu - Transition - transition_missing_first_post_block [Preset: minimal] OK ++ EF - Fulu - Transition - transition_missing_last_pre_fork_block [Preset: minimal] OK ++ EF - Fulu - Transition - transition_only_blocks_post_fork [Preset: minimal] OK ++ EF - Fulu - Transition - transition_randomized_state [Preset: minimal] OK ++ EF - Fulu - Transition - transition_with_activation_at_fork_epoch [Preset: minimal] OK ++ EF - Fulu - Transition - transition_with_attester_slashing_right_after_fork [Preset: minim OK ++ EF - Fulu - Transition - transition_with_attester_slashing_right_before_fork [Preset: mini OK ++ EF - Fulu - Transition - transition_with_btec_right_after_fork [Preset: minimal] OK ++ EF - Fulu - Transition - transition_with_btec_right_before_fork [Preset: minimal] OK ++ EF - Fulu - Transition - transition_with_consolidation_request_right_after_fork [Preset: m OK ++ EF - Fulu - Transition - transition_with_deposit_request_right_after_fork [Preset: minimal OK ++ EF - Fulu - Transition - transition_with_deposit_right_after_fork [Preset: minimal] OK ++ EF - Fulu - Transition - transition_with_deposit_right_before_fork [Preset: minimal] OK ++ EF - Fulu - Transition - transition_with_finality [Preset: minimal] OK ++ EF - Fulu - Transition - transition_with_full_withdrawal_request_right_after_fork [Preset: OK ++ EF - Fulu - Transition - transition_with_leaking_at_fork [Preset: minimal] OK ++ EF - Fulu - Transition - transition_with_leaking_pre_fork [Preset: minimal] OK ++ EF - Fulu - Transition - transition_with_no_attestations_until_after_fork [Preset: minimal OK ++ EF - Fulu - Transition - transition_with_non_empty_activation_queue [Preset: minimal] OK ++ EF - Fulu - Transition - transition_with_one_fourth_exiting_validators_exit_at_fork [Prese OK ++ EF - Fulu - Transition - transition_with_one_fourth_exiting_validators_exit_post_fork [Pre OK ++ EF - Fulu - Transition - transition_with_one_fourth_slashed_active_validators_pre_fork [Pr OK ++ EF - Fulu - Transition - transition_with_proposer_slashing_right_after_fork [Preset: minim OK ++ EF - Fulu - Transition - transition_with_proposer_slashing_right_before_fork [Preset: mini OK ++ EF - Fulu - Transition - transition_with_random_half_participation [Preset: minimal] OK ++ EF - Fulu - Transition - transition_with_random_three_quarters_participation [Preset: mini OK ++ EF - Fulu - Transition - transition_with_voluntary_exit_right_after_fork [Preset: minimal] OK ++ EF - Fulu - Transition - transition_with_voluntary_exit_right_before_fork [Preset: minimal OK +``` +## EF - Gloas - Epoch Processing - Builder pending payments [Preset: minimal] +```diff ++ Builder pending payments - process_builder_pending_payments_above_quorum [Preset: minimal] OK ++ Builder pending payments - process_builder_pending_payments_below_quorum [Preset: minimal] OK ++ Builder pending payments - process_builder_pending_payments_empty_queue [Preset: minimal] OK ++ Builder pending payments - process_builder_pending_payments_equal_quorum [Preset: minimal] OK ++ Builder pending payments - process_builder_pending_payments_large_amount_churn_impact [Pre OK ++ Builder pending payments - process_builder_pending_payments_mixed_weights [Preset: minimal OK ++ Builder pending payments - 
process_builder_pending_payments_multiple_above_quorum [Preset: OK ++ Builder pending payments - process_builder_pending_payments_queue_rotation [Preset: minima OK +``` +## EF - Gloas - Epoch Processing - Effective balance updates [Preset: minimal] +```diff ++ Effective balance updates - effective_balance_hysteresis [Preset: minimal] OK ++ Effective balance updates - effective_balance_hysteresis_with_compounding_credentials [Pre OK +``` +## EF - Gloas - Epoch Processing - Eth1 data reset [Preset: minimal] +```diff ++ Eth1 data reset - eth1_vote_no_reset [Preset: minimal] OK ++ Eth1 data reset - eth1_vote_reset [Preset: minimal] OK +``` +## EF - Gloas - Epoch Processing - Historical summaries update [Preset: minimal] +```diff ++ Historical summaries update - historical_summaries_accumulator [Preset: minimal] OK +``` +## EF - Gloas - Epoch Processing - Inactivity [Preset: minimal] +```diff ++ Inactivity - all_zero_inactivity_scores_empty_participation [Preset: minimal] OK ++ Inactivity - all_zero_inactivity_scores_empty_participation_leaking [Preset: minimal] OK ++ Inactivity - all_zero_inactivity_scores_full_participation [Preset: minimal] OK ++ Inactivity - all_zero_inactivity_scores_full_participation_leaking [Preset: minimal] OK ++ Inactivity - all_zero_inactivity_scores_random_participation [Preset: minimal] OK ++ Inactivity - all_zero_inactivity_scores_random_participation_leaking [Preset: minimal] OK ++ Inactivity - genesis [Preset: minimal] OK ++ Inactivity - genesis_random_scores [Preset: minimal] OK ++ Inactivity - random_inactivity_scores_empty_participation [Preset: minimal] OK ++ Inactivity - random_inactivity_scores_empty_participation_leaking [Preset: minimal] OK ++ Inactivity - random_inactivity_scores_full_participation [Preset: minimal] OK ++ Inactivity - random_inactivity_scores_full_participation_leaking [Preset: minimal] OK ++ Inactivity - random_inactivity_scores_random_participation [Preset: minimal] OK ++ Inactivity - random_inactivity_scores_random_participation_leaking [Preset: minimal] OK ++ Inactivity - randomized_state [Preset: minimal] OK ++ Inactivity - randomized_state_leaking [Preset: minimal] OK ++ Inactivity - some_exited_full_random_leaking [Preset: minimal] OK ++ Inactivity - some_slashed_full_random [Preset: minimal] OK ++ Inactivity - some_slashed_full_random_leaking [Preset: minimal] OK ++ Inactivity - some_slashed_zero_scores_full_participation [Preset: minimal] OK ++ Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: minimal] OK +``` +## EF - Gloas - Epoch Processing - Justification & Finalization [Preset: minimal] +```diff ++ Justification & Finalization - 123_ok_support [Preset: minimal] OK ++ Justification & Finalization - 123_poor_support [Preset: minimal] OK ++ Justification & Finalization - 12_ok_support [Preset: minimal] OK ++ Justification & Finalization - 12_ok_support_messed_target [Preset: minimal] OK ++ Justification & Finalization - 12_poor_support [Preset: minimal] OK ++ Justification & Finalization - 234_ok_support [Preset: minimal] OK ++ Justification & Finalization - 234_poor_support [Preset: minimal] OK ++ Justification & Finalization - 23_ok_support [Preset: minimal] OK ++ Justification & Finalization - 23_poor_support [Preset: minimal] OK ++ Justification & Finalization - balance_threshold_with_exited_validators [Preset: minimal] OK +``` +## EF - Gloas - Epoch Processing - Participation flag updates [Preset: minimal] +```diff ++ Participation flag updates - all_zeroed [Preset: minimal] OK ++ Participation 
flag updates - current_epoch_zeroed [Preset: minimal] OK ++ Participation flag updates - current_filled [Preset: minimal] OK ++ Participation flag updates - filled [Preset: minimal] OK ++ Participation flag updates - large_random [Preset: minimal] OK ++ Participation flag updates - previous_epoch_zeroed [Preset: minimal] OK ++ Participation flag updates - previous_filled [Preset: minimal] OK ++ Participation flag updates - random_0 [Preset: minimal] OK ++ Participation flag updates - random_1 [Preset: minimal] OK ++ Participation flag updates - random_2 [Preset: minimal] OK ++ Participation flag updates - random_genesis [Preset: minimal] OK ++ Participation flag updates - slightly_larger_random [Preset: minimal] OK +``` +## EF - Gloas - Epoch Processing - Pending consolidations [Preset: minimal] +```diff ++ Pending consolidations - all_consolidation_cases_together [Preset: minimal] OK ++ Pending consolidations - basic_pending_consolidation [Preset: minimal] OK ++ Pending consolidations - consolidation_not_yet_withdrawable_validator [Preset: minimal] OK ++ Pending consolidations - pending_consolidation_balance_computation_compounding [Preset: mi OK ++ Pending consolidations - pending_consolidation_balance_computation_eth1 [Preset: minimal] OK ++ Pending consolidations - pending_consolidation_compounding_creds [Preset: minimal] OK ++ Pending consolidations - pending_consolidation_future_epoch [Preset: minimal] OK ++ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective [ OK ++ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective_c OK ++ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective [Pre OK ++ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective_comp OK ++ Pending consolidations - pending_consolidation_with_pending_deposit [Preset: minimal] OK ++ Pending consolidations - skip_consolidation_when_source_slashed [Preset: minimal] OK +``` +## EF - Gloas - Epoch Processing - Pending deposits [Preset: minimal] +```diff ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_max [Preset: m OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max [Pres OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max_next_ OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_under_max [Pre OK ++ Pending deposits - apply_pending_deposit_correct_sig_but_forked_state [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_effective_deposit_with_genesis_fork_version [Pres OK ++ Pending deposits - apply_pending_deposit_eth1_withdrawal_credentials [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_incorrect_sig_new_deposit [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_incorrect_sig_top_up [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_incorrect_withdrawal_credentials_top_up [Preset: OK ++ Pending deposits - apply_pending_deposit_ineffective_deposit_with_bad_fork_version [Preset OK ++ Pending deposits - apply_pending_deposit_key_validate_invalid_decompression [Preset: minim OK ++ Pending deposits - apply_pending_deposit_key_validate_invalid_subgroup [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_min_activation [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials [Preset: min OK ++ Pending deposits - 
apply_pending_deposit_non_versioned_withdrawal_credentials_over_min_act OK ++ Pending deposits - apply_pending_deposit_over_min_activation [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_over_min_activation_next_increment [Preset: minim OK ++ Pending deposits - apply_pending_deposit_success_top_up_to_withdrawn_validator [Preset: mi OK ++ Pending deposits - apply_pending_deposit_top_up__less_effective_balance [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_top_up__max_effective_balance_compounding [Preset OK ++ Pending deposits - apply_pending_deposit_top_up__min_activation_balance [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_top_up__min_activation_balance_compounding [Prese OK ++ Pending deposits - apply_pending_deposit_under_min_activation [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_with_previous_fork_version [Preset: minimal] OK ++ Pending deposits - ineffective_deposit_with_current_fork_version [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_balance_above_churn [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_balance_equal_churn [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_eth1_bridge_transition_complete [Preset: minim OK ++ Pending deposits - process_pending_deposits_eth1_bridge_transition_not_applied [Preset: mi OK ++ Pending deposits - process_pending_deposits_eth1_bridge_transition_pending [Preset: minima OK ++ Pending deposits - process_pending_deposits_limit_is_reached [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_mixture_of_skipped_and_above_churn [Preset: mi OK ++ Pending deposits - process_pending_deposits_multiple_for_new_validator [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_multiple_pending_deposits_above_churn [Preset: OK ++ Pending deposits - process_pending_deposits_multiple_pending_deposits_below_churn [Preset: OK ++ Pending deposits - process_pending_deposits_multiple_pending_one_skipped [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_multiple_skipped_deposits_exiting_validators [ OK ++ Pending deposits - process_pending_deposits_not_finalized [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_preexisting_churn [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_scaled_churn [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_skipped_deposit_exiting_validator [Preset: min OK ++ Pending deposits - process_pending_deposits_withdrawable_validator [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_withdrawable_validator_not_churned [Preset: mi OK +``` +## EF - Gloas - Epoch Processing - Proposer lookahead [Preset: minimal] +```diff ++ Proposer lookahead - proposer_lookahead_does_not_contain_exited_validators [Preset: minima OK ++ Proposer lookahead - proposer_lookahead_in_state_matches_computed_lookahead [Preset: minim OK +``` +## EF - Gloas - Epoch Processing - RANDAO mixes reset [Preset: minimal] +```diff ++ RANDAO mixes reset - updated_randao_mixes [Preset: minimal] OK +``` +## EF - Gloas - Epoch Processing - Registry updates [Preset: minimal] +```diff ++ Registry updates - activation_churn_limit__equal_to_activation_limit [Preset: minimal] OK ++ Registry updates - activation_churn_limit__greater_than_activation_limit [Preset: minimal] OK ++ Registry updates - activation_churn_limit__less_than_activation_limit [Preset: minimal] OK ++ Registry updates - activation_queue_activation_and_ejection__1 [Preset: 
minimal] OK ++ Registry updates - activation_queue_activation_and_ejection__churn_limit [Preset: minimal] OK ++ Registry updates - activation_queue_activation_and_ejection__exceed_churn_limit [Preset: m OK ++ Registry updates - activation_queue_activation_and_ejection__exceed_scaled_churn_limit [Pr OK ++ Registry updates - activation_queue_activation_and_ejection__scaled_churn_limit [Preset: m OK ++ Registry updates - activation_queue_efficiency_min [Preset: minimal] OK ++ Registry updates - activation_queue_efficiency_scaled [Preset: minimal] OK ++ Registry updates - activation_queue_eligibility__greater_than_min_activation_balance [Pres OK ++ Registry updates - activation_queue_eligibility__less_than_min_activation_balance [Preset: OK ++ Registry updates - activation_queue_eligibility__min_activation_balance [Preset: minimal] OK ++ Registry updates - activation_queue_eligibility__min_activation_balance_compounding_creds OK ++ Registry updates - activation_queue_eligibility__min_activation_balance_eth1_creds [Preset OK ++ Registry updates - activation_queue_no_activation_no_finality [Preset: minimal] OK ++ Registry updates - activation_queue_sorting [Preset: minimal] OK ++ Registry updates - activation_queue_to_activated_if_finalized [Preset: minimal] OK ++ Registry updates - add_to_activation_queue [Preset: minimal] OK ++ Registry updates - ejection [Preset: minimal] OK ++ Registry updates - ejection_past_churn_limit_min [Preset: minimal] OK ++ Registry updates - ejection_past_churn_limit_scaled [Preset: minimal] OK ++ Registry updates - invalid_large_withdrawable_epoch [Preset: minimal] OK +``` +## EF - Gloas - Epoch Processing - Rewards and penalties [Preset: minimal] +```diff ++ Rewards and penalties - almost_empty_attestations [Preset: minimal] OK ++ Rewards and penalties - almost_empty_attestations_with_leak [Preset: minimal] OK ++ Rewards and penalties - almost_full_attestations [Preset: minimal] OK ++ Rewards and penalties - almost_full_attestations_with_leak [Preset: minimal] OK ++ Rewards and penalties - attestations_some_slashed [Preset: minimal] OK ++ Rewards and penalties - duplicate_attestation [Preset: minimal] OK ++ Rewards and penalties - full_attestation_participation [Preset: minimal] OK ++ Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK ++ Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK ++ Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK ++ Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK ++ Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK ++ Rewards and penalties - random_fill_attestations [Preset: minimal] OK ++ Rewards and penalties - random_fill_attestations_with_leak [Preset: minimal] OK +``` +## EF - Gloas - Epoch Processing - Slashings [Preset: minimal] +```diff ++ Slashings - low_penalty [Preset: minimal] OK ++ Slashings - max_penalties [Preset: minimal] OK ++ Slashings - minimal_penalty [Preset: minimal] OK ++ Slashings - scaled_penalties [Preset: minimal] OK ++ Slashings - slashings_with_random_state [Preset: minimal] OK +``` +## EF - Gloas - Epoch Processing - Slashings reset [Preset: minimal] +```diff ++ Slashings reset - flush_slashings [Preset: minimal] OK +``` +## EF - Gloas - Epoch Processing - Sync committee updates [Preset: minimal] +```diff ++ Sync committee updates - 
sync_committees_no_progress_not_at_period_boundary [Preset: minim OK ++ Sync committee updates - sync_committees_progress_genesis [Preset: minimal] OK ++ Sync committee updates - sync_committees_progress_misc_balances_genesis [Preset: minimal] OK ++ Sync committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK ++ Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK +``` +## EF - Gloas - Fork [Preset: minimal] +```diff ++ EF - Gloas - Fork - after_fork_deactivate_validators_from_fulu_to_gloas [Preset: minimal] OK ++ EF - Gloas - Fork - after_fork_deactivate_validators_wo_block_from_fulu_to_gloas [Preset: OK ++ EF - Gloas - Fork - after_fork_new_validator_active_from_fulu_to_gloas [Preset: minimal] OK +``` +## EF - Gloas - Operations - Attestation [Preset: minimal] +```diff ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_after_max_inclusion_slot OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_attestation_data_index_not_zero OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_attestation_data_index_too_high OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_attestation_signature OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_bad_source_root OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_before_inclusion_delay OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_committee_index OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_correct_attestation_included_aft OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_current_source_root OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_empty_participants_seemingly_val OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_empty_participants_zeroes_sig OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_future_target_epoch OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_incorrect_head_and_target_includ OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_incorrect_head_included_after_ma OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_incorrect_target_included_after_ OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_index OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_mismatched_target_and_slot OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_new_source_epoch OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_nonset_bits_for_one_committee OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_nonset_committee_bits OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_nonset_multiple_committee_bits OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_old_source_epoch OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_old_target_epoch OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_previous_source_root OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_same_slot_attestation_index_one OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_source_root_is_target_root OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_too_few_aggregation_bits OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_too_many_aggregation_bits OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_too_many_committee_bits OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_wrong_index_for_committee_signat OK ++ [Invalid] EF - Gloas - 
Operations - Attestation - invalid_wrong_index_for_slot_0 OK ++ [Invalid] EF - Gloas - Operations - Attestation - invalid_wrong_index_for_slot_1 OK ++ [Valid] EF - Gloas - Operations - Attestation - at_max_inclusion_slot OK ++ [Valid] EF - Gloas - Operations - Attestation - builder_payment_weight_no_double_countin OK ++ [Valid] EF - Gloas - Operations - Attestation - builder_payment_weight_tracking OK ++ [Valid] EF - Gloas - Operations - Attestation - correct_attestation_included_at_max_incl OK ++ [Valid] EF - Gloas - Operations - Attestation - correct_attestation_included_at_min_incl OK ++ [Valid] EF - Gloas - Operations - Attestation - correct_attestation_included_at_one_epoc OK ++ [Valid] EF - Gloas - Operations - Attestation - correct_attestation_included_at_sqrt_epo OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_head_and_target_included_at_ep OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_head_and_target_included_at_sq OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_head_and_target_min_inclusion_ OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_head_included_at_max_inclusion OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_head_included_at_min_inclusion OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_de OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_target_included_at_epoch_delay OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_target_included_at_min_inclusi OK ++ [Valid] EF - Gloas - Operations - Attestation - incorrect_target_included_at_sqrt_epoch_ OK ++ [Valid] EF - Gloas - Operations - Attestation - matching_payload_false_historical_slot OK ++ [Valid] EF - Gloas - Operations - Attestation - matching_payload_gets_head_flag OK ++ [Valid] EF - Gloas - Operations - Attestation - matching_payload_true_historical_slot OK ++ [Valid] EF - Gloas - Operations - Attestation - matching_payload_true_same_slot OK ++ [Valid] EF - Gloas - Operations - Attestation - mismatched_payload_no_head_flag OK ++ [Valid] EF - Gloas - Operations - Attestation - multi_proposer_index_iterations OK ++ [Valid] EF - Gloas - Operations - Attestation - multiple_committees OK ++ [Valid] EF - Gloas - Operations - Attestation - one_basic_attestation OK ++ [Valid] EF - Gloas - Operations - Attestation - one_committee_with_gap OK ++ [Valid] EF - Gloas - Operations - Attestation - previous_epoch OK ++ [Valid] EF - Gloas - Operations - Attestation - valid_attestation_data_index_one_previou OK ++ [Valid] EF - Gloas - Operations - Attestation - valid_attestation_data_index_one_previou OK ++ [Valid] EF - Gloas - Operations - Attestation - valid_attestation_data_index_zero_previo OK ++ [Valid] EF - Gloas - Operations - Attestation - valid_same_slot_attestation_index_zero OK +``` +## EF - Gloas - Operations - Attester Slashing [Preset: minimal] +```diff ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_all_empty_indices OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att1_bad_extra_index OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att1_bad_replaced_index OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att1_duplicate_index_doubl OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att1_duplicate_index_norma OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att1_empty_indices OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - 
invalid_att1_high_index OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att2_bad_extra_index OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att2_bad_replaced_index OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att2_duplicate_index_doubl OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att2_duplicate_index_norma OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att2_empty_indices OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_att2_high_index OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_incorrect_sig_1 OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_incorrect_sig_1_and_2 OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_incorrect_sig_2 OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_no_double_or_surround OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_participants_already_slash OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_same_data OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_unsorted_att_1 OK ++ [Invalid] EF - Gloas - Operations - Attester Slashing - invalid_unsorted_att_2 OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - already_exited_long_ago OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - already_exited_recent OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - attestation_from_future OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - basic_double OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - basic_surround OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - low_balances OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - misc_balances OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - proposer_index_slashed OK ++ [Valid] EF - Gloas - Operations - Attester Slashing - with_effective_balance_disparity OK +``` +## EF - Gloas - Operations - BLS to execution change [Preset: minimal] +```diff ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_already_0x01 OK ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_bad_signature OK ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_current_fork_version OK ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_genesis_validators_r OK ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_incorrect_from_bls_p OK ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_previous_fork_versio OK ++ [Invalid] EF - Gloas - Operations - BLS to execution change - invalid_val_index_out_of_ran OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - genesis_fork_version OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - success OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - success_exited OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - success_in_activation_queue OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - success_in_exit_queue OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - success_not_activated OK ++ [Valid] EF - Gloas - Operations - BLS to execution change - success_withdrawable OK +``` +## EF - Gloas - Operations - Block Header [Preset: minimal] +```diff ++ [Invalid] EF - Gloas - Operations - Block Header - invalid_multiple_blocks_single_slot OK ++ [Invalid] EF - Gloas - 
Operations - Block Header - invalid_parent_root OK ++ [Invalid] EF - Gloas - Operations - Block Header - invalid_proposer_index OK ++ [Invalid] EF - Gloas - Operations - Block Header - invalid_proposer_slashed OK ++ [Invalid] EF - Gloas - Operations - Block Header - invalid_slot_block_header OK ++ [Valid] EF - Gloas - Operations - Block Header - basic_block_header OK +``` +## EF - Gloas - Operations - Consolidation Request [Preset: minimal] +```diff ++ [Valid] EF - Gloas - Operations - Consolidation Request - basic_consolidation_in_current OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - basic_consolidation_in_new_con OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - basic_consolidation_source_has OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - basic_consolidation_target_has OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - basic_consolidation_with_compo OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - basic_consolidation_with_exces OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - basic_consolidation_with_insuf OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - basic_consolidation_with_preex OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - basic_switch_to_compounding OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - consolidation_balance_larger_t OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - consolidation_balance_through_ OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - consolidation_churn_limit_bala OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_exceed_pending_conso OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_exited_source OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_exited_target OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_inactive_source OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_inactive_target OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_no_source_execution_ OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_not_enough_consolida OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_same_source_target OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_source_address OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_source_has_pending_w OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_source_not_active_lo OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_source_pubkey_is_tar OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_source_with_bls_cred OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_target_with_bls_cred OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_target_with_eth1_cre OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_unknown_source_pubke OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - incorrect_unknown_target_pubke OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_exited_s OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_inactive OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_not_auth OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_source_b OK 
++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_source_c OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_unknown_ OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_with_exc OK ++ [Valid] EF - Gloas - Operations - Consolidation Request - switch_to_compounding_with_pen OK +``` +## EF - Gloas - Operations - Deposit [Preset: minimal] +```diff ++ [Invalid] EF - Gloas - Operations - Deposit - invalid_bad_merkle_proof OK ++ [Invalid] EF - Gloas - Operations - Deposit - invalid_wrong_deposit_for_deposit_count OK ++ [Valid] EF - Gloas - Operations - Deposit - correct_sig_but_forked_state OK ++ [Valid] EF - Gloas - Operations - Deposit - effective_deposit_with_genesis_fork_version OK ++ [Valid] EF - Gloas - Operations - Deposit - incorrect_sig_new_deposit OK ++ [Valid] EF - Gloas - Operations - Deposit - incorrect_sig_top_up OK ++ [Valid] EF - Gloas - Operations - Deposit - incorrect_withdrawal_credentials_top_up OK ++ [Valid] EF - Gloas - Operations - Deposit - ineffective_deposit_with_bad_fork_version OK ++ [Valid] EF - Gloas - Operations - Deposit - ineffective_deposit_with_current_fork_versio OK ++ [Valid] EF - Gloas - Operations - Deposit - ineffective_deposit_with_previous_fork_versi OK ++ [Valid] EF - Gloas - Operations - Deposit - key_validate_invalid_decompression OK ++ [Valid] EF - Gloas - Operations - Deposit - key_validate_invalid_subgroup OK ++ [Valid] EF - Gloas - Operations - Deposit - new_deposit_eth1_withdrawal_credentials OK ++ [Valid] EF - Gloas - Operations - Deposit - new_deposit_max OK ++ [Valid] EF - Gloas - Operations - Deposit - new_deposit_non_versioned_withdrawal_credent OK ++ [Valid] EF - Gloas - Operations - Deposit - new_deposit_over_max OK ++ [Valid] EF - Gloas - Operations - Deposit - new_deposit_under_max OK ++ [Valid] EF - Gloas - Operations - Deposit - top_up__less_effective_balance OK ++ [Valid] EF - Gloas - Operations - Deposit - top_up__max_effective_balance OK ++ [Valid] EF - Gloas - Operations - Deposit - top_up__zero_balance OK +``` +## EF - Gloas - Operations - Deposit Request [Preset: minimal] +```diff ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_extra_gwei OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_greater_than OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_invalid_sig OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_max_effectiv OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_min_activati OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_set_start_in OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_set_start_in OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_top_up_inval OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_top_up_max_e OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_top_up_min_a OK ++ [Valid] EF - Gloas - Operations - Deposit Request - process_deposit_request_top_up_still OK +``` +## EF - Gloas - Operations - Execution Payload [Preset: minimal] +```diff ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_executio OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_invalid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - 
process_execution_payload_wrong_be OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_bl OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_bl OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_bu OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_ga OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_pa OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_pr OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_sl OK ++ [Invalid] EF - Gloas - Operations - Execution Payload - process_execution_payload_wrong_ti OK ++ [Valid] EF - Gloas - Operations - Execution Payload - process_execution_payload_large_pa OK ++ [Valid] EF - Gloas - Operations - Execution Payload - process_execution_payload_max_blob OK ++ [Valid] EF - Gloas - Operations - Execution Payload - process_execution_payload_self_bui OK ++ [Valid] EF - Gloas - Operations - Execution Payload - process_execution_payload_valid OK ++ [Valid] EF - Gloas - Operations - Execution Payload - process_execution_payload_with_blo OK ++ [Valid] EF - Gloas - Operations - Execution Payload - process_execution_payload_with_exe OK +``` +## EF - Gloas - Operations - Execution Payload Bid [Preset: minimal] +```diff ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Invalid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Valid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Valid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Valid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Valid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Valid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK ++ [Valid] EF - Gloas - Operations - Execution Payload Bid - process_execution_payload_bid_ OK +``` +## EF - Gloas - Operations - Payload Attestation [Preset: minimal] +```diff ++ [Invalid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_futu OK ++ [Invalid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_inva OK ++ [Invalid] EF - Gloas - Operations - Payload Attestation - 
process_payload_attestation_inva OK ++ [Invalid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_no_a OK ++ [Invalid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_too_ OK ++ [Valid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_part OK ++ [Valid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_payl OK ++ [Valid] EF - Gloas - Operations - Payload Attestation - process_payload_attestation_payl OK +``` +## EF - Gloas - Operations - Proposer Slashing [Preset: minimal] +```diff ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_different_proposer_indices OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_headers_are_same_sigs_are_ OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_headers_are_same_sigs_are_ OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_incorrect_proposer_index OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_incorrect_sig_1 OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_incorrect_sig_1_and_2 OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_incorrect_sig_1_and_2_swap OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_incorrect_sig_2 OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_proposer_is_not_activated OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_proposer_is_slashed OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_proposer_is_withdrawn OK ++ [Invalid] EF - Gloas - Operations - Proposer Slashing - invalid_slots_of_different_epochs OK ++ [Valid] EF - Gloas - Operations - Proposer Slashing - basic OK ++ [Valid] EF - Gloas - Operations - Proposer Slashing - block_header_from_future OK ++ [Valid] EF - Gloas - Operations - Proposer Slashing - builder_payment_deletion_current_e OK ++ [Valid] EF - Gloas - Operations - Proposer Slashing - builder_payment_deletion_previous_ OK ++ [Valid] EF - Gloas - Operations - Proposer Slashing - builder_payment_deletion_too_late OK ++ [Valid] EF - Gloas - Operations - Proposer Slashing - slashed_and_proposer_index_the_sam OK +``` +## EF - Gloas - Operations - Sync Aggregate [Preset: minimal] +```diff ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_bad_domain OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_extra_participant OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_infinite_signature_ OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_infinite_signature_ OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_missing_participant OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_no_participants OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_past_block OK ++ [Invalid] EF - Gloas - Operations - Sync Aggregate - invalid_signature_previous_committee OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - proposer_in_committee_with_participat OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - proposer_in_committee_without_partici OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - random_all_but_one_participating_with OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - random_high_participation_without_dup OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - random_low_participation_without_dupl OK ++ 
[Valid] EF - Gloas - Operations - Sync Aggregate - random_misc_balances_and_half_partici OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - random_only_one_participant_without_d OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - random_with_exits_without_duplicates OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_rewards_empty_particip OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_rewards_nonduplicate_c OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_rewards_not_full_parti OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_with_nonparticipating_ OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_with_nonparticipating_ OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_with_participating_exi OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - sync_committee_with_participating_wit OK ++ [Valid] EF - Gloas - Operations - Sync Aggregate - valid_signature_future_committee OK +``` +## EF - Gloas - Operations - Voluntary Exit [Preset: minimal] +```diff ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_incorrect_signature OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_validator_already_exited OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_validator_exit_in_future OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_validator_has_pending_withdra OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_validator_incorrect_validator OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_validator_not_active OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_validator_not_active_long_eno OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_f OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_f OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_f OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_f OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_ver OK ++ [Invalid] EF - Gloas - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_ver OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - basic OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - default_exit_epoch_subsequent_exit OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - min_balance_exit OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - min_balance_exits_above_churn OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - min_balance_exits_up_to_churn OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - success_exit_queue__min_churn OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - success_exit_queue__scaled_churn OK ++ [Valid] EF - Gloas - Operations - Voluntary Exit - voluntary_exit_with_pending_deposit OK +``` +## EF - Gloas - Operations - Withdrawal Request [Preset: minimal] +```diff ++ [Valid] EF - Gloas - Operations - Withdrawal Request - activation_epoch_less_than_shard_ OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - basic_partial_withdrawal_request OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - basic_partial_withdrawal_request_ OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - basic_partial_withdrawal_request_ OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - 
basic_withdrawal_request OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - basic_withdrawal_request_with_com OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - basic_withdrawal_request_with_fir OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - basic_withdrawal_request_with_ful OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - full_exit_request_has_partial_wit OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - incorrect_inactive_validator OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - incorrect_source_address OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - incorrect_withdrawal_credential_p OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - insufficient_balance OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - insufficient_effective_balance OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - no_compounding_credentials OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - no_excess_balance OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - on_withdrawal_request_initiated_e OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_activation_epo OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_incorrect_sour OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_incorrect_with OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_on_exit_initia OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_queue_full OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_request_with_h OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_request_with_h OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_request_with_l OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_request_with_p OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - partial_withdrawal_request_with_p OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - pending_withdrawals_consume_all_e OK ++ [Valid] EF - Gloas - Operations - Withdrawal Request - unknown_pubkey OK +``` +## EF - Gloas - Operations - Withdrawals [Preset: minimal] +```diff ++ [Valid] EF - Gloas - Operations - Withdrawals - full_pending_withdrawals_but_first_skipp OK ++ [Valid] EF - Gloas - Operations - Withdrawals - full_pending_withdrawals_but_first_skipp OK ++ [Valid] EF - Gloas - Operations - Withdrawals - full_pending_withdrawals_but_first_skipp OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK ++ [Valid] EF - Gloas - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_at_max OK ++ [Valid] EF - Gloas - 
Operations - Withdrawals - pending_withdrawals_at_max_mixed_with_sw OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_exiting_validator OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_low_effective_balanc OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_mixed_with_sweep_and OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_next_epoch OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_no_excess_balance OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_one_skipped_one_effe OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_two_partial_withdraw OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_two_partial_withdraw OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_with_effective_sweep OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_with_ineffective_swe OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_with_ineffective_swe OK ++ [Valid] EF - Gloas - Operations - Withdrawals - pending_withdrawals_with_sweep_different OK ++ [Valid] EF - Gloas - Operations - Withdrawals - random_0 OK ++ [Valid] EF - Gloas - Operations - Withdrawals - random_partial_withdrawals_1 OK ++ [Valid] EF - Gloas - Operations - Withdrawals - random_partial_withdrawals_2 OK ++ [Valid] EF - Gloas - Operations - Withdrawals - random_partial_withdrawals_3 OK ++ [Valid] EF - Gloas - Operations - Withdrawals - random_partial_withdrawals_4 OK ++ [Valid] EF - Gloas - Operations - Withdrawals - random_partial_withdrawals_5 OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_excess_balance_but_no_max_effect OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_excess_balance_but_no_max_effect OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_max_partial_withdrawable OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_max_plus_one_withdrawable OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_mixed_fully_and_partial_withdraw OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_no_excess_balance OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_no_excess_balance_compounding OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_no_max_effective_balance OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_no_max_effective_balance_compoun OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_one_partial_withdrawable_active_ OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_one_partial_withdrawable_exited OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_one_partial_withdrawable_exited_ OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_one_partial_withdrawable_in_exit OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_one_partial_withdrawable_not_yet OK ++ [Valid] EF - Gloas - Operations - Withdrawals - success_two_partial_withdrawable OK +``` +## EF - Gloas - Rewards [Preset: minimal] +```diff ++ EF - Gloas - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK ++ EF - Gloas - Rewards - empty [Preset: minimal] OK ++ EF - Gloas - Rewards - empty_leak [Preset: minimal] OK ++ EF - Gloas - Rewards - full_all_correct [Preset: minimal] OK ++ EF - Gloas - Rewards - full_but_partial_participation [Preset: minimal] OK ++ EF - Gloas - Rewards - full_but_partial_participation_leak [Preset: minimal] OK ++ EF - Gloas - Rewards - 
full_leak [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_0 [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_1 [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_2 [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_3 [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_4 [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_leak [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_low_balances_0 [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_low_balances_1 [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_misc_balances [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_seven_epoch_leak [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_ten_epoch_leak [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_without_leak_0 [Preset: minimal] OK ++ EF - Gloas - Rewards - full_random_without_leak_and_current_exit_0 [Preset: minimal] OK ++ EF - Gloas - Rewards - half_full [Preset: minimal] OK ++ EF - Gloas - Rewards - half_full_leak [Preset: minimal] OK ++ EF - Gloas - Rewards - quarter_full [Preset: minimal] OK ++ EF - Gloas - Rewards - quarter_full_leak [Preset: minimal] OK ++ EF - Gloas - Rewards - some_very_low_effective_balances_that_attested [Preset: minimal] OK ++ EF - Gloas - Rewards - some_very_low_effective_balances_that_attested_leak [Preset: minima OK ++ EF - Gloas - Rewards - some_very_low_effective_balances_that_did_not_attest [Preset: minim OK ++ EF - Gloas - Rewards - some_very_low_effective_balances_that_did_not_attest_leak [Preset: OK ++ EF - Gloas - Rewards - with_exited_validators [Preset: minimal] OK ++ EF - Gloas - Rewards - with_exited_validators_leak [Preset: minimal] OK ++ EF - Gloas - Rewards - with_not_yet_activated_validators [Preset: minimal] OK ++ EF - Gloas - Rewards - with_not_yet_activated_validators_leak [Preset: minimal] OK ++ EF - Gloas - Rewards - with_slashed_validators [Preset: minimal] OK ++ EF - Gloas - Rewards - with_slashed_validators_leak [Preset: minimal] OK +``` +## EF - Gloas - SSZ consensus objects [Preset: minimal] +```diff ++ Testing AggregateAndProof OK ++ Testing Attestation OK ++ Testing AttestationData OK ++ Testing AttesterSlashing OK ++ Testing BLSToExecutionChange OK ++ Testing BeaconBlock OK ++ Testing BeaconBlockBody OK ++ Testing BeaconBlockHeader OK ++ Testing BeaconState OK ++ Testing BlobIdentifier OK ++ Testing BlobSidecar OK ++ Testing BuilderPendingPayment OK ++ Testing BuilderPendingWithdrawal OK ++ Testing Checkpoint OK ++ Testing ConsolidationRequest OK ++ Testing ContributionAndProof OK ++ Testing DataColumnSidecar OK ++ Testing DataColumnsByRootIdentifier OK ++ Testing Deposit OK ++ Testing DepositData OK ++ Testing DepositMessage OK ++ Testing DepositRequest OK ++ Testing Eth1Block OK ++ Testing Eth1Data OK ++ Testing ExecutionPayload OK ++ Testing ExecutionPayloadBid OK ++ Testing ExecutionPayloadEnvelope OK ++ Testing ExecutionPayloadHeader OK ++ Testing ExecutionRequests OK ++ Testing Fork OK ++ Testing ForkChoiceNode OK ++ Testing ForkData OK ++ Testing HistoricalBatch OK ++ Testing HistoricalSummary OK ++ Testing IndexedAttestation OK ++ Testing IndexedPayloadAttestation OK ++ Testing LightClientBootstrap OK ++ Testing LightClientFinalityUpdate OK ++ Testing LightClientHeader OK ++ Testing LightClientOptimisticUpdate OK ++ Testing LightClientUpdate OK ++ Testing MatrixEntry OK ++ Testing PayloadAttestation OK ++ Testing PayloadAttestationData OK ++ Testing PayloadAttestationMessage OK ++ Testing PendingAttestation OK ++ Testing 
PendingConsolidation OK ++ Testing PendingDeposit OK ++ Testing PendingPartialWithdrawal OK ++ Testing PowBlock OK ++ Testing ProposerSlashing OK ++ Testing SignedAggregateAndProof OK ++ Testing SignedBLSToExecutionChange OK ++ Testing SignedBeaconBlock OK ++ Testing SignedBeaconBlockHeader OK ++ Testing SignedContributionAndProof OK ++ Testing SignedExecutionPayloadBid OK ++ Testing SignedExecutionPayloadEnvelope OK ++ Testing SignedVoluntaryExit OK ++ Testing SigningData OK ++ Testing SingleAttestation OK ++ Testing SyncAggregate OK ++ Testing SyncAggregatorSelectionData OK ++ Testing SyncCommittee OK ++ Testing SyncCommitteeContribution OK ++ Testing SyncCommitteeMessage OK ++ Testing Validator OK ++ Testing VoluntaryExit OK ++ Testing Withdrawal OK ++ Testing WithdrawalRequest OK +``` +## EF - Gloas - Sanity - Slots [Preset: minimal] +```diff ++ EF - Gloas - Slots - balance_change_affects_proposer [Preset: minimal] OK ++ EF - Gloas - Slots - double_empty_epoch [Preset: minimal] OK ++ EF - Gloas - Slots - effective_decrease_balance_updates_lookahead [Preset: minimal] OK ++ EF - Gloas - Slots - empty_epoch [Preset: minimal] OK ++ EF - Gloas - Slots - execution_payload_availability_reset_from_set [Preset: minimal] OK ++ EF - Gloas - Slots - execution_payload_availability_reset_from_unset [Preset: minimal] OK ++ EF - Gloas - Slots - historical_accumulator [Preset: minimal] OK ++ EF - Gloas - Slots - multiple_pending_deposits_same_pubkey [Preset: minimal] OK ++ EF - Gloas - Slots - multiple_pending_deposits_same_pubkey_above_upward_threshold [Preset: OK ++ EF - Gloas - Slots - multiple_pending_deposits_same_pubkey_below_upward_threshold [Preset: OK ++ EF - Gloas - Slots - multiple_pending_deposits_same_pubkey_compounding [Preset: minimal] OK ++ EF - Gloas - Slots - multiple_pending_deposits_same_pubkey_different_signature [Preset: mi OK ++ EF - Gloas - Slots - over_epoch_boundary [Preset: minimal] OK ++ EF - Gloas - Slots - pending_consolidation [Preset: minimal] OK ++ EF - Gloas - Slots - pending_deposit_extra_gwei [Preset: minimal] OK ++ EF - Gloas - Slots - slots_1 [Preset: minimal] OK ++ EF - Gloas - Slots - slots_2 [Preset: minimal] OK +``` +## EF - Gloas - Transition [Preset: minimal] +```diff ++ EF - Gloas - Transition - higher_churn_limit_to_lower [Preset: minimal] OK ++ EF - Gloas - Transition - non_empty_historical_roots [Preset: minimal] OK ++ EF - Gloas - Transition - normal_transition [Preset: minimal] OK ++ EF - Gloas - Transition - simple_transition [Preset: minimal] OK ++ EF - Gloas - Transition - transition_attestation_from_previous_fork_with_new_range [Preset OK ++ EF - Gloas - Transition - transition_missing_first_post_block [Preset: minimal] OK ++ EF - Gloas - Transition - transition_missing_last_pre_fork_block [Preset: minimal] OK ++ EF - Gloas - Transition - transition_only_blocks_post_fork [Preset: minimal] OK ++ EF - Gloas - Transition - transition_randomized_state [Preset: minimal] OK ++ EF - Gloas - Transition - transition_with_activation_at_fork_epoch [Preset: minimal] OK ++ EF - Gloas - Transition - transition_with_attester_slashing_right_after_fork [Preset: mini OK ++ EF - Gloas - Transition - transition_with_attester_slashing_right_before_fork [Preset: min OK ++ EF - Gloas - Transition - transition_with_btec_right_after_fork [Preset: minimal] OK ++ EF - Gloas - Transition - transition_with_btec_right_before_fork [Preset: minimal] OK ++ EF - Gloas - Transition - transition_with_deposit_right_after_fork [Preset: minimal] OK ++ EF - Gloas - Transition - 
transition_with_deposit_right_before_fork [Preset: minimal] OK ++ EF - Gloas - Transition - transition_with_finality [Preset: minimal] OK ++ EF - Gloas - Transition - transition_with_leaking_at_fork [Preset: minimal] OK ++ EF - Gloas - Transition - transition_with_leaking_pre_fork [Preset: minimal] OK ++ EF - Gloas - Transition - transition_with_no_attestations_until_after_fork [Preset: minima OK ++ EF - Gloas - Transition - transition_with_non_empty_activation_queue [Preset: minimal] OK ++ EF - Gloas - Transition - transition_with_one_fourth_exiting_validators_exit_at_fork [Pres OK ++ EF - Gloas - Transition - transition_with_one_fourth_exiting_validators_exit_post_fork [Pr OK ++ EF - Gloas - Transition - transition_with_one_fourth_slashed_active_validators_pre_fork [P OK ++ EF - Gloas - Transition - transition_with_proposer_slashing_right_after_fork [Preset: mini OK ++ EF - Gloas - Transition - transition_with_proposer_slashing_right_before_fork [Preset: min OK ++ EF - Gloas - Transition - transition_with_random_half_participation [Preset: minimal] OK ++ EF - Gloas - Transition - transition_with_random_three_quarters_participation [Preset: min OK ++ EF - Gloas - Transition - transition_with_voluntary_exit_right_after_fork [Preset: minimal OK ++ EF - Gloas - Transition - transition_with_voluntary_exit_right_before_fork [Preset: minima OK +``` ## EF - Light client - Data collection [Preset: minimal] ```diff + Light client - Data collection - minimal/altair/light_client/data_collection/pyspec_tests/ OK @@ -4104,6 +4430,7 @@ ConsensusSpecPreset-minimal + Light client - Data collection - minimal/capella/light_client/data_collection/pyspec_tests OK + Light client - Data collection - minimal/deneb/light_client/data_collection/pyspec_tests/l OK + Light client - Data collection - minimal/electra/light_client/data_collection/pyspec_tests OK ++ Light client - Data collection - minimal/fulu/light_client/data_collection/pyspec_tests/li OK ``` ## EF - Light client - Single merkle proof [Preset: minimal] ```diff @@ -4125,6 +4452,10 @@ ConsensusSpecPreset-minimal + Light client - Single merkle proof - minimal/electra/light_client/single_merkle_proof/Beac OK + Light client - Single merkle proof - minimal/electra/light_client/single_merkle_proof/Beac OK + Light client - Single merkle proof - minimal/electra/light_client/single_merkle_proof/Beac OK ++ Light client - Single merkle proof - minimal/fulu/light_client/single_merkle_proof/BeaconB OK ++ Light client - Single merkle proof - minimal/fulu/light_client/single_merkle_proof/BeaconS OK ++ Light client - Single merkle proof - minimal/fulu/light_client/single_merkle_proof/BeaconS OK ++ Light client - Single merkle proof - minimal/fulu/light_client/single_merkle_proof/BeaconS OK ``` ## EF - Light client - Sync [Preset: minimal] ```diff @@ -4163,6 +4494,10 @@ ConsensusSpecPreset-minimal + Light client - Sync - minimal/electra/light_client/sync/pyspec_tests/light_client_sync OK + Light client - Sync - minimal/electra/light_client/sync/pyspec_tests/light_client_sync_no_ OK + Light client - Sync - minimal/electra/light_client/sync/pyspec_tests/supply_sync_committee OK ++ Light client - Sync - minimal/fulu/light_client/sync/pyspec_tests/advance_finality_without OK ++ Light client - Sync - minimal/fulu/light_client/sync/pyspec_tests/light_client_sync OK ++ Light client - Sync - minimal/fulu/light_client/sync/pyspec_tests/light_client_sync_no_for OK ++ Light client - Sync - minimal/fulu/light_client/sync/pyspec_tests/supply_sync_committee_fr OK ``` ## EF - Light 
client - Update ranking [Preset: minimal] ```diff @@ -4171,9 +4506,15 @@ ConsensusSpecPreset-minimal + Light client - Update ranking - minimal/capella/light_client/update_ranking/pyspec_tests/u OK + Light client - Update ranking - minimal/deneb/light_client/update_ranking/pyspec_tests/upd OK + Light client - Update ranking - minimal/electra/light_client/update_ranking/pyspec_tests/u OK ++ Light client - Update ranking - minimal/fulu/light_client/update_ranking/pyspec_tests/upda OK ``` ## EF - Merkle proof [Preset: minimal] ```diff + Merkle proof - Single merkle proof - blob_kzg_commitments_merkle_proof__basic Skip + Merkle proof - Single merkle proof - blob_kzg_commitments_merkle_proof__max_blobs Skip + Merkle proof - Single merkle proof - blob_kzg_commitments_merkle_proof__multiple_blobs Skip + Merkle proof - Single merkle proof - blob_kzg_commitments_merkle_proof__random_block_1 Skip + Merkle proof - Single merkle proof - eip7805 Skip + Merkle proof - Single merkle proof - minimal/deneb/merkle_proof/single_merkle_proof/Beacon OK + Merkle proof - Single merkle proof - minimal/deneb/merkle_proof/single_merkle_proof/Beacon OK + Merkle proof - Single merkle proof - minimal/deneb/merkle_proof/single_merkle_proof/Beacon OK @@ -4186,10 +4527,6 @@ ConsensusSpecPreset-minimal + Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK + Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK + Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK -+ Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK -+ Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK -+ Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK -+ Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK ``` ## EF - Phase 0 - Epoch Processing - Effective balance updates [Preset: minimal] ```diff @@ -4256,8 +4593,8 @@ ConsensusSpecPreset-minimal + Rewards and penalties - duplicate_participants_different_attestation_3 [Preset: minimal] OK + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK ++ Rewards and penalties - full_attestations_default_balances_except_a_validator_with_one_gwe OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - full_attestations_random_incorrect_fields [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK @@ -4506,80 +4843,780 @@ ConsensusSpecPreset-minimal + EF - Phase 0 - Slots - slots_1 [Preset: minimal] OK + EF - Phase 0 - Slots - slots_2 [Preset: minimal] OK ``` -## EF - Phase0 - Finality [Preset: minimal] -```diff -+ [Valid] EF - Phase0 - Finality - finality_no_updates_at_genesis [Preset: minimal] OK -+ [Valid] EF - Phase0 - Finality - finality_rule_1 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Finality - finality_rule_2 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Finality - finality_rule_3 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Finality - finality_rule_4 [Preset: minimal] OK -``` -## EF - Phase0 - Random [Preset: minimal] 
-```diff -+ [Valid] EF - Phase0 - Random - randomized_0 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_1 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_10 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_11 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_12 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_13 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_14 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_15 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_2 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_3 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_4 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_5 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_6 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_7 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_8 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Random - randomized_9 [Preset: minimal] OK -``` -## EF - Phase0 - Sanity - Blocks [Preset: minimal] -```diff -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block [ OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: mi OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pre OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expect OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propos OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: min OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mini OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_proposal_for_genesis_slot [Preset: minim OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mini OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [P OK -+ [Invalid] EF - Phase0 - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - attestation [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - attester_slashing [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - balance_driven_status_transitions [Preset: minim OK -+ [Valid] EF - Phase0 - Sanity - Blocks - deposit_in_block [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - deposit_top_up [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - duplicate_attestation_same_block [Preset: minima OK -+ [Valid] EF - Phase0 - Sanity - Blocks - empty_block_transition [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - empty_block_transition_large_validator_set [Pres OK -+ [Valid] EF - Phase0 - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - 
empty_epoch_transition_large_validator_set [Pres OK -+ [Valid] EF - Phase0 - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: m OK -+ [Valid] EF - Phase0 - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - high_proposer_index [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - historical_batch [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: OK -+ [Valid] EF - Phase0 - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pre OK -+ [Valid] EF - Phase0 - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK -+ [Valid] EF - Phase0 - Sanity - Blocks - multiple_different_validator_exits_same_block [P OK -+ [Valid] EF - Phase0 - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - proposer_slashing [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - skipped_slots [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK -+ [Valid] EF - Phase0 - Sanity - Blocks - voluntary_exit [Preset: minimal] OK +## EF - altair - Finality [Preset: minimal] +```diff ++ [Valid] EF - altair - Finality - finality_no_updates_at_genesis [Preset: minimal] OK ++ [Valid] EF - altair - Finality - finality_rule_1 [Preset: minimal] OK ++ [Valid] EF - altair - Finality - finality_rule_2 [Preset: minimal] OK ++ [Valid] EF - altair - Finality - finality_rule_3 [Preset: minimal] OK ++ [Valid] EF - altair - Finality - finality_rule_4 [Preset: minimal] OK +``` +## EF - altair - Random [Preset: minimal] +```diff ++ [Valid] EF - altair - Random - randomized_0 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_1 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_10 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_11 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_12 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_13 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_14 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_15 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_2 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_3 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_4 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_5 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_6 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_7 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_8 [Preset: minimal] OK ++ [Valid] EF - altair - Random - randomized_9 [Preset: minimal] OK +``` +## EF - altair - Sanity - Blocks [Preset: minimal] +```diff ++ [Invalid] EF - altair - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK ++ [Invalid] EF - altair - Sanity - Blocks - 
invalid_duplicate_attester_slashing_same_block [ OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: mi OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pre OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expect OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propos OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: min OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mini OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mini OK ++ [Invalid] EF - altair - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [P OK ++ [Invalid] EF - altair - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - attestation [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - attester_slashing [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - balance_driven_status_transitions [Preset: minim OK ++ [Valid] EF - altair - Sanity - Blocks - deposit_in_block [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - deposit_top_up [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - duplicate_attestation_same_block [Preset: minima OK ++ [Valid] EF - altair - Sanity - Blocks - empty_block_transition [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - empty_block_transition_large_validator_set [Pres OK ++ [Valid] EF - altair - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - empty_epoch_transition_large_validator_set [Pres OK ++ [Valid] EF - altair - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: m OK ++ [Valid] EF - altair - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - high_proposer_index [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - historical_batch [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pr OK ++ [Valid] EF - altair - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: OK ++ [Valid] EF - altair - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pre OK ++ [Valid] EF - altair - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK ++ [Valid] EF - altair - Sanity - Blocks - multiple_different_validator_exits_same_block [P OK ++ [Valid] EF - 
altair - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - proposer_slashing [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - skipped_slots [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - sync_committee_committee__empty [Preset: minimal OK ++ [Valid] EF - altair - Sanity - Blocks - sync_committee_committee__full [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - sync_committee_committee__half [Preset: minimal] OK ++ [Valid] EF - altair - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: OK ++ [Valid] EF - altair - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: OK ++ [Valid] EF - altair - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK ++ [Valid] EF - altair - Sanity - Blocks - voluntary_exit [Preset: minimal] OK +``` +## EF - bellatrix - Finality [Preset: minimal] +```diff ++ [Valid] EF - bellatrix - Finality - finality_no_updates_at_genesis [Preset: minimal] OK ++ [Valid] EF - bellatrix - Finality - finality_rule_1 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Finality - finality_rule_2 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Finality - finality_rule_3 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Finality - finality_rule_4 [Preset: minimal] OK +``` +## EF - bellatrix - Random [Preset: minimal] +```diff ++ [Valid] EF - bellatrix - Random - randomized_0 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_1 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_10 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_11 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_12 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_13 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_14 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_15 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_2 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_3 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_4 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_5 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_6 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_7 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_8 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Random - randomized_9 [Preset: minimal] OK +``` +## EF - bellatrix - Sanity - Blocks [Preset: minimal] +```diff ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_duplicate_attester_slashing_same_bloc OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_blo OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [ OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_exp OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - 
invalid_incorrect_proposer_index_sig_from_pro OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minima OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: m OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_same_slot_block_transition [Preset: m OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - invalid_similar_proposer_slashings_same_block OK ++ [Invalid] EF - bellatrix - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - attestation [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - attester_slashing [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - balance_driven_status_transitions [Preset: mi OK ++ [Valid] EF - bellatrix - Sanity - Blocks - block_transition_randomized_payload [Preset: OK ++ [Valid] EF - bellatrix - Sanity - Blocks - deposit_in_block [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - deposit_top_up [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - duplicate_attestation_same_block [Preset: min OK ++ [Valid] EF - bellatrix - Sanity - Blocks - empty_block_transition [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - empty_block_transition_large_validator_set [P OK ++ [Valid] EF - bellatrix - Sanity - Blocks - empty_block_transition_no_tx [Preset: minimal OK ++ [Valid] EF - bellatrix - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - empty_epoch_transition_large_validator_set [P OK ++ [Valid] EF - bellatrix - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset OK ++ [Valid] EF - bellatrix - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal OK ++ [Valid] EF - bellatrix - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - high_proposer_index [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - historical_batch [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - inactivity_scores_full_participation_leaking OK ++ [Valid] EF - bellatrix - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - is_execution_enabled_false [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - multiple_attester_slashings_no_overlap [Prese OK ++ [Valid] EF - bellatrix - Sanity - Blocks - multiple_attester_slashings_partial_overlap [ OK ++ [Valid] EF - bellatrix - Sanity - Blocks - multiple_different_proposer_slashings_same_bl OK ++ [Valid] EF - bellatrix - Sanity - Blocks - multiple_different_validator_exits_same_block OK ++ [Valid] EF - bellatrix - Sanity - Blocks - proposer_after_inactive_index [Preset: minima OK ++ [Valid] EF - bellatrix - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - proposer_slashing [Preset: minimal] 
OK ++ [Valid] EF - bellatrix - Sanity - Blocks - skipped_slots [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK ++ [Valid] EF - bellatrix - Sanity - Blocks - sync_committee_committee__empty [Preset: mini OK ++ [Valid] EF - bellatrix - Sanity - Blocks - sync_committee_committee__full [Preset: minim OK ++ [Valid] EF - bellatrix - Sanity - Blocks - sync_committee_committee__half [Preset: minim OK ++ [Valid] EF - bellatrix - Sanity - Blocks - sync_committee_committee_genesis__empty [Pres OK ++ [Valid] EF - bellatrix - Sanity - Blocks - sync_committee_committee_genesis__full [Prese OK ++ [Valid] EF - bellatrix - Sanity - Blocks - sync_committee_committee_genesis__half [Prese OK ++ [Valid] EF - bellatrix - Sanity - Blocks - voluntary_exit [Preset: minimal] OK +``` +## EF - capella - Finality [Preset: minimal] +```diff ++ [Valid] EF - capella - Finality - finality_no_updates_at_genesis [Preset: minimal] OK ++ [Valid] EF - capella - Finality - finality_rule_1 [Preset: minimal] OK ++ [Valid] EF - capella - Finality - finality_rule_2 [Preset: minimal] OK ++ [Valid] EF - capella - Finality - finality_rule_3 [Preset: minimal] OK ++ [Valid] EF - capella - Finality - finality_rule_4 [Preset: minimal] OK +``` +## EF - capella - Random [Preset: minimal] +```diff ++ [Valid] EF - capella - Random - randomized_0 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_1 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_10 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_11 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_12 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_13 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_14 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_15 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_2 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_3 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_4 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_5 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_6 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_7 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_8 [Preset: minimal] OK ++ [Valid] EF - capella - Random - randomized_9 [Preset: minimal] OK +``` +## EF - capella - Sanity - Blocks [Preset: minimal] +```diff ++ [Invalid] EF - capella - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Prese OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: m OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pr OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expec OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propo OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_is_execution_enabled_false 
[Preset: min OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mi OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: min OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_same_slot_block_transition [Preset: min OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [ OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_ OK ++ [Invalid] EF - capella - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_is OK ++ [Invalid] EF - capella - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - activate_and_partial_withdrawal_max_effective_b OK ++ [Valid] EF - capella - Sanity - Blocks - activate_and_partial_withdrawal_overdeposit [Pr OK ++ [Valid] EF - capella - Sanity - Blocks - attestation [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - attester_slashing [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - balance_driven_status_transitions [Preset: mini OK ++ [Valid] EF - capella - Sanity - Blocks - block_transition_randomized_payload [Preset: mi OK ++ [Valid] EF - capella - Sanity - Blocks - bls_change [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - deposit_and_bls_change [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - deposit_in_block [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - deposit_top_up [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - duplicate_attestation_same_block [Preset: minim OK ++ [Valid] EF - capella - Sanity - Blocks - empty_block_transition [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - empty_block_transition_large_validator_set [Pre OK ++ [Valid] EF - capella - Sanity - Blocks - empty_block_transition_no_tx [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - empty_epoch_transition_large_validator_set [Pre OK ++ [Valid] EF - capella - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: OK ++ [Valid] EF - capella - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - exit_and_bls_change [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: mi OK ++ [Valid] EF - capella - Sanity - Blocks - high_proposer_index [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - historical_batch [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - inactivity_scores_full_participation_leaking [P OK ++ [Valid] EF - capella - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [P OK ++ [Valid] EF - capella - Sanity - Blocks - 
multiple_attester_slashings_no_overlap [Preset: OK ++ [Valid] EF - capella - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pr OK ++ [Valid] EF - capella - Sanity - Blocks - multiple_different_proposer_slashings_same_bloc OK ++ [Valid] EF - capella - Sanity - Blocks - multiple_different_validator_exits_same_block [ OK ++ [Valid] EF - capella - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: OK ++ [Valid] EF - capella - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - proposer_slashing [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - skipped_slots [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - sync_committee_committee__empty [Preset: minima OK ++ [Valid] EF - capella - Sanity - Blocks - sync_committee_committee__full [Preset: minimal OK ++ [Valid] EF - capella - Sanity - Blocks - sync_committee_committee__half [Preset: minimal OK ++ [Valid] EF - capella - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset OK ++ [Valid] EF - capella - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: OK ++ [Valid] EF - capella - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK ++ [Valid] EF - capella - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Pres OK ++ [Valid] EF - capella - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: mi OK ++ [Valid] EF - capella - Sanity - Blocks - voluntary_exit [Preset: minimal] OK ++ [Valid] EF - capella - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK +``` +## EF - deneb - Finality [Preset: minimal] +```diff ++ [Valid] EF - deneb - Finality - finality_no_updates_at_genesis [Preset: minimal] OK ++ [Valid] EF - deneb - Finality - finality_rule_1 [Preset: minimal] OK ++ [Valid] EF - deneb - Finality - finality_rule_2 [Preset: minimal] OK ++ [Valid] EF - deneb - Finality - finality_rule_3 [Preset: minimal] OK ++ [Valid] EF - deneb - Finality - finality_rule_4 [Preset: minimal] OK +``` +## EF - deneb - Random [Preset: minimal] +```diff ++ [Valid] EF - deneb - Random - randomized_0 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_1 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_10 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_11 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_12 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_13 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_14 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_15 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_2 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_3 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_4 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_5 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_6 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_7 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_8 [Preset: minimal] OK ++ [Valid] EF - deneb - Random - randomized_9 [Preset: minimal] OK +``` +## EF - deneb - Sanity - Blocks [Preset: minimal] +```diff ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK ++ [Invalid] EF - deneb - Sanity 
- Blocks - invalid_duplicate_attester_slashing_same_block [P OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: min OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [ OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pres OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_exceed_max_blobs_per_block [Preset: minim OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expecte OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propose OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: minim OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_max_blobs_per_block_two_txs [Preset: mini OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_one_blob_max_plus_one_txs [Preset: minima OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mini OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: minim OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_same_slot_block_transition [Preset: minim OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pr OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sa OK ++ [Invalid] EF - deneb - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt OK ++ [Invalid] EF - deneb - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - activate_and_partial_withdrawal_max_effective_bal OK ++ [Valid] EF - deneb - Sanity - Blocks - activate_and_partial_withdrawal_overdeposit [Pres OK ++ [Valid] EF - deneb - Sanity - Blocks - attestation [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - attester_slashing [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - balance_driven_status_transitions [Preset: minima OK ++ [Valid] EF - deneb - Sanity - Blocks - block_transition_randomized_payload [Preset: mini OK ++ [Valid] EF - deneb - Sanity - Blocks - bls_change [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - deposit_and_bls_change [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - deposit_in_block [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - deposit_top_up [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - duplicate_attestation_same_block [Preset: minimal OK ++ [Valid] EF - deneb - Sanity - Blocks - empty_block_transition [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - empty_block_transition_large_validator_set [Prese OK ++ [Valid] EF - deneb - Sanity - Blocks - empty_block_transition_no_tx [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - empty_epoch_transition_large_validator_set [Prese OK ++ [Valid] EF - deneb - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: mi OK ++ [Valid] EF - deneb - Sanity - Blocks - eth1_data_votes_consensus 
[Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - exit_and_bls_change [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: mini OK ++ [Valid] EF - deneb - Sanity - Blocks - high_proposer_index [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - historical_batch [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pre OK ++ [Valid] EF - deneb - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pre OK ++ [Valid] EF - deneb - Sanity - Blocks - max_blobs_per_block [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: m OK ++ [Valid] EF - deneb - Sanity - Blocks - multiple_attester_slashings_partial_overlap [Pres OK ++ [Valid] EF - deneb - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK ++ [Valid] EF - deneb - Sanity - Blocks - multiple_different_validator_exits_same_block [Pr OK ++ [Valid] EF - deneb - Sanity - Blocks - one_blob [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - one_blob_max_txs [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - one_blob_two_txs [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: m OK ++ [Valid] EF - deneb - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - proposer_slashing [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - skipped_slots [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - sync_committee_committee__empty [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - sync_committee_committee__full [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - sync_committee_committee__half [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: OK ++ [Valid] EF - deneb - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: m OK ++ [Valid] EF - deneb - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: m OK ++ [Valid] EF - deneb - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset OK ++ [Valid] EF - deneb - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: mini OK ++ [Valid] EF - deneb - Sanity - Blocks - voluntary_exit [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK ++ [Valid] EF - deneb - Sanity - Blocks - zero_blob [Preset: minimal] OK +``` +## EF - electra - Finality [Preset: minimal] +```diff ++ [Valid] EF - electra - Finality - finality_no_updates_at_genesis [Preset: minimal] OK ++ [Valid] EF 
- electra - Finality - finality_rule_1 [Preset: minimal] OK ++ [Valid] EF - electra - Finality - finality_rule_2 [Preset: minimal] OK ++ [Valid] EF - electra - Finality - finality_rule_3 [Preset: minimal] OK ++ [Valid] EF - electra - Finality - finality_rule_4 [Preset: minimal] OK +``` +## EF - electra - Random [Preset: minimal] +```diff ++ [Valid] EF - electra - Random - randomized_0 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_1 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_10 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_11 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_12 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_13 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_14 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_15 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_2 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_3 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_4 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_5 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_6 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_7 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_8 [Preset: minimal] OK ++ [Valid] EF - electra - Random - randomized_9 [Preset: minimal] OK +``` +## EF - electra - Sanity - Blocks [Preset: minimal] +```diff ++ [Invalid] EF - electra - Sanity - Blocks - deposit_transition__invalid_eth1_deposits_overl OK ++ [Invalid] EF - electra - Sanity - Blocks - deposit_transition__invalid_not_enough_eth1_dep OK ++ [Invalid] EF - electra - Sanity - Blocks - deposit_transition__invalid_too_many_eth1_depos OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Prese OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: m OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pr OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_exceed_max_blobs_per_block [Preset: min OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expec OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propo OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: min OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_max_blobs_per_block_two_txs [Preset: mi OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_one_blob_max_plus_one_txs [Preset: mini OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mi OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: min OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_same_slot_block_transition [Preset: min OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [ OK ++ 
[Invalid] EF - electra - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_ OK ++ [Invalid] EF - electra - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_is OK ++ [Invalid] EF - electra - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - activate_and_partial_withdrawal_max_effective_b OK ++ [Valid] EF - electra - Sanity - Blocks - activate_and_partial_withdrawal_overdeposit [Pr OK ++ [Valid] EF - electra - Sanity - Blocks - attestation [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - attester_slashing [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - balance_driven_status_transitions [Preset: mini OK ++ [Valid] EF - electra - Sanity - Blocks - basic_btec_and_el_withdrawal_request_in_same_bl OK ++ [Valid] EF - electra - Sanity - Blocks - basic_btec_before_el_withdrawal_request [Preset OK ++ [Valid] EF - electra - Sanity - Blocks - basic_el_withdrawal_request [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - block_transition_randomized_payload [Preset: mi OK ++ [Valid] EF - electra - Sanity - Blocks - bls_change [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - cl_exit_and_el_withdrawal_request_in_same_block OK ++ [Valid] EF - electra - Sanity - Blocks - consolidation_requests_when_pending_consolidati OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_and_bls_change [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_in_block [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_request_max_per_payload [Preset: minima OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_request_with_same_pubkey_different_with OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_top_up [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_transition__deposit_and_top_up_same_blo OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_transition__deposit_with_same_pubkey_di OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_transition__process_eth1_deposits [Pres OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_transition__process_eth1_deposits_up_to OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_transition__process_max_eth1_deposits [ OK ++ [Valid] EF - electra - Sanity - Blocks - deposit_transition__start_index_is_set [Preset: OK ++ [Valid] EF - electra - Sanity - Blocks - duplicate_attestation_same_block [Preset: minim OK ++ [Valid] EF - electra - Sanity - Blocks - effective_balance_increase_changes_lookahead [P OK ++ [Valid] EF - electra - Sanity - Blocks - empty_block_transition [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - empty_block_transition_large_validator_set [Pre OK ++ [Valid] EF - electra - Sanity - Blocks - empty_block_transition_no_tx [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - empty_epoch_transition_large_validator_set [Pre OK ++ [Valid] EF - electra - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: OK ++ [Valid] EF - electra - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - exit_and_bls_change [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - 
full_random_operations_1 [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: mi OK ++ [Valid] EF - electra - Sanity - Blocks - high_proposer_index [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - historical_batch [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - inactivity_scores_full_participation_leaking [P OK ++ [Valid] EF - electra - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [P OK ++ [Valid] EF - electra - Sanity - Blocks - max_blobs_per_block [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - multi_epoch_consolidation_chain [Preset: minima OK ++ [Valid] EF - electra - Sanity - Blocks - multiple_different_proposer_slashings_same_bloc OK ++ [Valid] EF - electra - Sanity - Blocks - multiple_different_validator_exits_same_block [ OK ++ [Valid] EF - electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_differe OK ++ [Valid] EF - electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_same_va OK ++ [Valid] EF - electra - Sanity - Blocks - one_blob [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - one_blob_max_txs [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - one_blob_two_txs [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: OK ++ [Valid] EF - electra - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - proposer_slashing [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - skipped_slots [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - switch_to_compounding_requests_when_pending_con OK ++ [Valid] EF - electra - Sanity - Blocks - switch_to_compounding_requests_when_too_little_ OK ++ [Valid] EF - electra - Sanity - Blocks - sync_committee_committee__empty [Preset: minima OK ++ [Valid] EF - electra - Sanity - Blocks - sync_committee_committee__full [Preset: minimal OK ++ [Valid] EF - electra - Sanity - Blocks - sync_committee_committee__half [Preset: minimal OK ++ [Valid] EF - electra - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset OK ++ [Valid] EF - electra - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: OK ++ [Valid] EF - electra - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK ++ [Valid] EF - electra - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Pres OK ++ [Valid] EF - electra - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: mi OK ++ [Valid] EF - electra - Sanity - Blocks - voluntary_exit [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - withdrawal_and_consolidation_effective_balance_ OK ++ [Valid] EF - electra - Sanity - Blocks - withdrawal_and_switch_to_compounding_request_sa OK ++ [Valid] EF - electra - Sanity - Blocks - withdrawal_and_withdrawal_request_same_validato OK ++ [Valid] EF - electra - Sanity - Blocks - 
withdrawal_requests_when_pending_withdrawal_que OK ++ [Valid] EF - electra - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK ++ [Valid] EF - electra - Sanity - Blocks - zero_blob [Preset: minimal] OK +``` +## EF - fulu - Finality [Preset: minimal] +```diff ++ [Valid] EF - fulu - Finality - finality_no_updates_at_genesis [Preset: minimal] OK ++ [Valid] EF - fulu - Finality - finality_rule_1 [Preset: minimal] OK ++ [Valid] EF - fulu - Finality - finality_rule_2 [Preset: minimal] OK ++ [Valid] EF - fulu - Finality - finality_rule_3 [Preset: minimal] OK ++ [Valid] EF - fulu - Finality - finality_rule_4 [Preset: minimal] OK +``` +## EF - fulu - Random [Preset: minimal] +```diff ++ [Valid] EF - fulu - Random - randomized_0 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_1 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_10 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_11 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_12 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_13 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_14 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_15 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_2 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_3 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_4 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_5 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_6 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_7 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_8 [Preset: minimal] OK ++ [Valid] EF - fulu - Random - randomized_9 [Preset: minimal] OK +``` +## EF - fulu - Sanity - Blocks [Preset: minimal] +```diff ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: mini OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [P OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Prese OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_exceed_max_blobs_per_block [Preset: minima OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expected OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_proposer OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: minima OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_max_blobs_per_block_two_txs [Preset: minim OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_one_blob_max_plus_one_txs [Preset: minimal OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: minim OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: minima OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_same_slot_block_transition [Preset: minima OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pre OK ++ 
[Invalid] EF - fulu - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sam OK ++ [Invalid] EF - fulu - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt_ OK ++ [Invalid] EF - fulu - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - activate_and_partial_withdrawal_max_effective_bala OK ++ [Valid] EF - fulu - Sanity - Blocks - activate_and_partial_withdrawal_overdeposit [Prese OK ++ [Valid] EF - fulu - Sanity - Blocks - attestation [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - attester_slashing [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - balance_driven_status_transitions [Preset: minimal OK ++ [Valid] EF - fulu - Sanity - Blocks - basic_btec_and_el_withdrawal_request_in_same_block OK ++ [Valid] EF - fulu - Sanity - Blocks - basic_btec_before_el_withdrawal_request [Preset: m OK ++ [Valid] EF - fulu - Sanity - Blocks - basic_el_withdrawal_request [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - block_transition_randomized_payload [Preset: minim OK ++ [Valid] EF - fulu - Sanity - Blocks - bls_change [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - cl_exit_and_el_withdrawal_request_in_same_block [P OK ++ [Valid] EF - fulu - Sanity - Blocks - consolidation_requests_when_pending_consolidation_ OK ++ [Valid] EF - fulu - Sanity - Blocks - deposit_and_bls_change [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - deposit_in_block [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - deposit_request_max_per_payload [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - deposit_request_with_same_pubkey_different_withdra OK ++ [Valid] EF - fulu - Sanity - Blocks - deposit_top_up [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - duplicate_attestation_same_block [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - effective_balance_increase_changes_lookahead [Pres OK ++ [Valid] EF - fulu - Sanity - Blocks - empty_block_transition [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - empty_block_transition_large_validator_set [Preset OK ++ [Valid] EF - fulu - Sanity - Blocks - empty_block_transition_no_tx [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - empty_epoch_transition_large_validator_set [Preset OK ++ [Valid] EF - fulu - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: min OK ++ [Valid] EF - fulu - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - exit_and_bls_change [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: minim OK ++ [Valid] EF - fulu - Sanity - Blocks - high_proposer_index [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - historical_batch [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pres OK ++ [Valid] EF - fulu - Sanity - Blocks - inactivity_scores_leaking 
[Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pres OK ++ [Valid] EF - fulu - Sanity - Blocks - max_blobs_per_block [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - multi_epoch_consolidation_chain [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - multiple_different_proposer_slashings_same_block [ OK ++ [Valid] EF - fulu - Sanity - Blocks - multiple_different_validator_exits_same_block [Pre OK ++ [Valid] EF - fulu - Sanity - Blocks - multiple_el_partial_withdrawal_requests_different_ OK ++ [Valid] EF - fulu - Sanity - Blocks - multiple_el_partial_withdrawal_requests_same_valid OK ++ [Valid] EF - fulu - Sanity - Blocks - one_blob [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - one_blob_max_txs [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - one_blob_two_txs [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: mi OK ++ [Valid] EF - fulu - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - proposer_slashing [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - skipped_slots [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - switch_to_compounding_requests_when_pending_consol OK ++ [Valid] EF - fulu - Sanity - Blocks - switch_to_compounding_requests_when_too_little_con OK ++ [Valid] EF - fulu - Sanity - Blocks - sync_committee_committee__empty [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - sync_committee_committee__full [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - sync_committee_committee__half [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: m OK ++ [Valid] EF - fulu - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: mi OK ++ [Valid] EF - fulu - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: mi OK ++ [Valid] EF - fulu - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset: OK ++ [Valid] EF - fulu - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: minim OK ++ [Valid] EF - fulu - Sanity - Blocks - voluntary_exit [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - withdrawal_and_consolidation_effective_balance_upd OK ++ [Valid] EF - fulu - Sanity - Blocks - withdrawal_and_switch_to_compounding_request_same_ OK ++ [Valid] EF - fulu - Sanity - Blocks - withdrawal_and_withdrawal_request_same_validator [ OK ++ [Valid] EF - fulu - Sanity - Blocks - withdrawal_requests_when_pending_withdrawal_queue_ OK ++ [Valid] EF - fulu - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK ++ [Valid] EF - fulu - Sanity - Blocks - zero_blob [Preset: minimal] OK +``` +## EF - gloas - Finality [Preset: minimal] +```diff ++ [Valid] EF - gloas - Finality - finality_no_updates_at_genesis [Preset: minimal] OK ++ [Valid] EF - gloas - Finality - finality_rule_1 [Preset: minimal] OK ++ [Valid] EF - gloas - Finality - finality_rule_2 [Preset: minimal] OK ++ [Valid] EF - gloas - Finality - finality_rule_3 [Preset: minimal] OK ++ [Valid] EF - gloas - Finality - finality_rule_4 [Preset: minimal] OK +``` +## EF - gloas - Sanity - Blocks [Preset: 
minimal] +```diff ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: min OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [ OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pres OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expecte OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propose OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mini OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: minim OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_same_slot_block_transition [Preset: minim OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pr OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sa OK ++ [Invalid] EF - gloas - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt OK ++ [Invalid] EF - gloas - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - activate_and_partial_withdrawal_max_effective_bal OK ++ [Valid] EF - gloas - Sanity - Blocks - activate_and_partial_withdrawal_overdeposit [Pres OK ++ [Valid] EF - gloas - Sanity - Blocks - attestation [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - attester_slashing [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - balance_driven_status_transitions [Preset: minima OK ++ [Valid] EF - gloas - Sanity - Blocks - bls_change [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - deposit_and_bls_change [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - deposit_in_block [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - deposit_top_up [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - duplicate_attestation_same_block [Preset: minimal OK ++ [Valid] EF - gloas - Sanity - Blocks - empty_block_transition [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - empty_block_transition_large_validator_set [Prese OK ++ [Valid] EF - gloas - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - empty_epoch_transition_large_validator_set [Prese OK ++ [Valid] EF - gloas - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: mi OK ++ [Valid] EF - gloas - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - exit_and_bls_change [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - full_random_operations_3 
[Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - high_proposer_index [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - historical_batch [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pre OK ++ [Valid] EF - gloas - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pre OK ++ [Valid] EF - gloas - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK ++ [Valid] EF - gloas - Sanity - Blocks - multiple_different_validator_exits_same_block [Pr OK ++ [Valid] EF - gloas - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: m OK ++ [Valid] EF - gloas - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - proposer_slashing [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - skipped_slots [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - sync_committee_committee__empty [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - sync_committee_committee__full [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - sync_committee_committee__half [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: OK ++ [Valid] EF - gloas - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: m OK ++ [Valid] EF - gloas - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: m OK ++ [Valid] EF - gloas - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset OK ++ [Valid] EF - gloas - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: mini OK ++ [Valid] EF - gloas - Sanity - Blocks - voluntary_exit [Preset: minimal] OK ++ [Valid] EF - gloas - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK +``` +## EF - phase0 - Finality [Preset: minimal] +```diff ++ [Valid] EF - phase0 - Finality - finality_no_updates_at_genesis [Preset: minimal] OK ++ [Valid] EF - phase0 - Finality - finality_rule_1 [Preset: minimal] OK ++ [Valid] EF - phase0 - Finality - finality_rule_2 [Preset: minimal] OK ++ [Valid] EF - phase0 - Finality - finality_rule_3 [Preset: minimal] OK ++ [Valid] EF - phase0 - Finality - finality_rule_4 [Preset: minimal] OK +``` +## EF - phase0 - Random [Preset: minimal] +```diff ++ [Valid] EF - phase0 - Random - randomized_0 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_1 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_10 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_11 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_12 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_13 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_14 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_15 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_2 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_3 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_4 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_5 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_6 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_7 [Preset: minimal] OK 
++ [Valid] EF - phase0 - Random - randomized_8 [Preset: minimal] OK ++ [Valid] EF - phase0 - Random - randomized_9 [Preset: minimal] OK +``` +## EF - phase0 - Sanity - Blocks [Preset: minimal] +```diff ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_duplicate_attester_slashing_same_block [ OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: mi OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Pre OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expect OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_propos OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: min OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mini OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_proposal_for_genesis_slot [Preset: minim OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mini OK ++ [Invalid] EF - phase0 - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [P OK ++ [Invalid] EF - phase0 - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - attestation [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - attester_slashing [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - balance_driven_status_transitions [Preset: minim OK ++ [Valid] EF - phase0 - Sanity - Blocks - deposit_in_block [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - deposit_top_up [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - duplicate_attestation_same_block [Preset: minima OK ++ [Valid] EF - phase0 - Sanity - Blocks - empty_block_transition [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - empty_block_transition_large_validator_set [Pres OK ++ [Valid] EF - phase0 - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - empty_epoch_transition_large_validator_set [Pres OK ++ [Valid] EF - phase0 - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: m OK ++ [Valid] EF - phase0 - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - high_proposer_index [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - historical_batch [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: OK ++ [Valid] EF - phase0 - Sanity - Blocks - 
multiple_attester_slashings_partial_overlap [Pre OK ++ [Valid] EF - phase0 - Sanity - Blocks - multiple_different_proposer_slashings_same_block OK ++ [Valid] EF - phase0 - Sanity - Blocks - multiple_different_validator_exits_same_block [P OK ++ [Valid] EF - phase0 - Sanity - Blocks - proposer_after_inactive_index [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - proposer_slashing [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - skipped_slots [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK ++ [Valid] EF - phase0 - Sanity - Blocks - voluntary_exit [Preset: minimal] OK ``` ## ForkChoice ```diff @@ -4846,6 +5883,7 @@ ConsensusSpecPreset-minimal ForkChoice - minimal/electra/fork_choice/should_override_forkchoice_update/pyspec_tests/sh Skip + ForkChoice - minimal/electra/fork_choice/withholding/pyspec_tests/withholding_attack OK + ForkChoice - minimal/electra/fork_choice/withholding/pyspec_tests/withholding_attack_unvia OK ++ ForkChoice - minimal/fulu/fork_choice/deposit_with_reorg/pyspec_tests/new_validator_deposi OK + ForkChoice - minimal/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_honest_at OK + ForkChoice - minimal/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_without_attest OK + ForkChoice - minimal/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_vanilla OK @@ -4854,10 +5892,13 @@ ConsensusSpecPreset-minimal + ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/discard_equivocations_slashed_ OK + ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/filtered_block_tree OK + ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/genesis OK ++ ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/proposer_boost_correct_head OK + ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/shorter_chain_but_heavier_weig OK + ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attestati OK + ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/voting_source_beyond_two_epoch OK + ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/voting_source_within_two_epoch OK + ForkChoice - minimal/fulu/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_root Skip + ForkChoice - minimal/fulu/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_root Skip + ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/basic OK + ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/incompatible_justification_upd OK + ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/incompatible_justification_upd OK @@ -4876,6 +5917,23 @@ ConsensusSpecPreset-minimal + ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_finalized_skip_slots OK + ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_finalized_skip_slots_ OK ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_future_block Skip ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_inde OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_inde OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_mism OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_mism OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_mism OK ++ ForkChoice - 
minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_mism OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_mism OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_mism OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_wron OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_wron OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_wron OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_wron OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_wron OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_wron OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__invalid_zero OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__not_availabl OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_peerdas__ok OK + ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/proposer_boost OK + ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/proposer_boost_is_first_block OK + ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/proposer_boost_root_same_slot_ OK @@ -4889,6 +5947,8 @@ ConsensusSpecPreset-minimal + ForkChoice - minimal/fulu/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_delayed_ju OK + ForkChoice - minimal/fulu/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_delayed_ju OK + ForkChoice - minimal/fulu/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_without_en OK + ForkChoice - minimal/fulu/fork_choice/should_override_forkchoice_update/pyspec_tests/shoul Skip + ForkChoice - minimal/fulu/fork_choice/should_override_forkchoice_update/pyspec_tests/shoul Skip + ForkChoice - minimal/fulu/fork_choice/withholding/pyspec_tests/withholding_attack OK + ForkChoice - minimal/fulu/fork_choice/withholding/pyspec_tests/withholding_attack_unviable OK ``` @@ -4898,4 +5958,5 @@ ConsensusSpecPreset-minimal + Sync - minimal/capella/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK + Sync - minimal/deneb/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK + Sync - minimal/electra/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK ++ Sync - minimal/fulu/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK ``` diff --git a/Makefile b/Makefile index a3821af672..d8b55adc2a 100644 --- a/Makefile +++ b/Makefile @@ -56,7 +56,6 @@ TOOLS_CORE_CUSTOMCOMPILE := \ libnimbus_lc.a TOOLS_CORE := \ - deposit_contract \ resttest \ mev_mock \ ncli \ @@ -80,7 +79,6 @@ TOOLS := $(TOOLS_CORE) nimbus_beacon_node TOOLS_DIRS := \ beacon_chain \ - beacon_chain/el \ ncli \ research \ tools @@ -115,6 +113,9 @@ endif # We don't need these `vendor/holesky` and `vendor/hoodi` files but # fetching them may trigger 'This repository is over its data quota' from GitHub +# +# MSYS_NO_PATHCONV=1: On Windows MSYS2, 1st path gets mangled without this flag! 
+GIT_SUBMODULE_ENV := MSYS_NO_PATHCONV=1 GIT_SUBMODULE_CONFIG := -c lfs.fetchexclude=/public-keys/all.txt,/metadata/genesis.ssz,/parsed/parsedConsensusGenesis.json ifeq ($(NIM_PARAMS),) @@ -133,11 +134,11 @@ $(error Git LFS not installed) endif endif -GIT_SUBMODULE_UPDATE := git $(GIT_SUBMODULE_CONFIG) submodule update --init --recursive +GIT_SUBMODULE_UPDATE := $(GIT_SUBMODULE_ENV) git $(GIT_SUBMODULE_CONFIG) submodule update --init --recursive .DEFAULT: +@ echo -e "Git submodules not found. Running '$(GIT_SUBMODULE_UPDATE)'.\n"; \ $(GIT_SUBMODULE_UPDATE) && \ - git submodule foreach --quiet 'git $(GIT_SUBMODULE_CONFIG) reset --quiet --hard' && \ + $(GIT_SUBMODULE_ENV) git submodule foreach --quiet 'git $(GIT_SUBMODULE_CONFIG) reset --quiet --hard' && \ echo # Now that the included *.mk files appeared, and are newer than this file, Make will restart itself: # https://www.gnu.org/software/make/manual/make.html#Remaking-Makefiles @@ -234,8 +235,8 @@ local-testnet-minimal: --signer-nodes 1 \ --remote-validators-count 512 \ --signer-type $(SIGNER_TYPE) \ - --deneb-fork-epoch 0 \ - --electra-fork-epoch 2 \ + --electra-fork-epoch 0 \ + --fulu-fork-epoch 100000 \ --stop-at-epoch 6 \ --disable-htop \ --enable-payload-builder \ @@ -263,8 +264,8 @@ local-testnet-mainnet: ./scripts/launch_local_testnet.sh \ --data-dir $@ \ --nodes 2 \ - --deneb-fork-epoch 0 \ - --electra-fork-epoch 2 \ + --electra-fork-epoch 0 \ + --fulu-fork-epoch 100000 \ --stop-at-epoch 6 \ --disable-htop \ --base-port $$(( $(MAINNET_TESTNET_BASE_PORT) + EXECUTOR_NUMBER * 400 + 0 )) \ @@ -298,7 +299,10 @@ XML_TEST_BINARIES := \ # test suite TEST_BINARIES := \ block_sim \ - test_libnimbus_lc + fork_choice \ + proto_array \ + test_libnimbus_lc \ + process_state .PHONY: $(TEST_BINARIES) $(XML_TEST_BINARIES) force_build_alone_all_tests # Preset-dependent tests @@ -383,6 +387,14 @@ block_sim: | build deps $(NIM_PARAMS) && \ echo -e $(BUILD_END_MSG) "build/$@" +process_state: | build deps + + echo -e $(BUILD_MSG) "build/$@" && \ + MAKE="$(MAKE)" V="$(V)" $(ENV_SCRIPT) scripts/compile_nim_program.sh \ + $@ \ + "beacon_chain/$@.nim" \ + $(NIM_PARAMS) && \ + echo -e $(BUILD_END_MSG) "build/$@" + DISABLE_TEST_FIXTURES_SCRIPT := 0 # This parameter passing scheme is ugly, but short. 
test: | $(XML_TEST_BINARIES) $(TEST_BINARIES) @@ -566,34 +578,6 @@ define CONNECT_TO_NETWORK_WITH_LIGHT_CLIENT --trusted-block-root="$(LC_TRUSTED_BLOCK_ROOT)" endef -define MAKE_DEPOSIT_DATA - build/nimbus_beacon_node deposits createTestnetDeposits \ - --network=$(1) \ - --new-wallet-file=build/data/shared_$(1)_$(NODE_ID)/wallet.json \ - --out-validators-dir=build/data/shared_$(1)_$(NODE_ID)/validators \ - --out-secrets-dir=build/data/shared_$(1)_$(NODE_ID)/secrets \ - --out-deposits-file=$(1)-deposits_data-$$(date +"%Y%m%d%H%M%S").json \ - --count=$(VALIDATORS) -endef - -define MAKE_DEPOSIT - build/nimbus_beacon_node deposits createTestnetDeposits \ - --network=$(1) \ - --out-deposits-file=nbc-$(1)-deposits.json \ - --new-wallet-file=build/data/shared_$(1)_$(NODE_ID)/wallet.json \ - --out-validators-dir=build/data/shared_$(1)_$(NODE_ID)/validators \ - --out-secrets-dir=build/data/shared_$(1)_$(NODE_ID)/secrets \ - --count=$(VALIDATORS) - - build/deposit_contract sendDeposits \ - $(2) \ - --deposit-contract=$$(cat vendor/$(1)/metadata/deposit_contract.txt) \ - --deposits-file=nbc-$(1)-deposits.json \ - --min-delay=$(DEPOSITS_DELAY) \ - --max-delay=$(DEPOSITS_DELAY) \ - --ask-for-key -endef - define CLEAN_NETWORK rm -rf build/data/shared_$(1)*/db rm -rf build/data/shared_$(1)*/dump @@ -623,9 +607,6 @@ sepolia-dev: | sepolia-build $(call CONNECT_TO_NETWORK_IN_DEV_MODE,sepolia,nimbus_beacon_node,$(SEPOLIA_WEB3_URL)) endif -sepolia-dev-deposit: | sepolia-build deposit_contract - $(call MAKE_DEPOSIT,sepolia,$(SEPOLIA_WEB3_URL)) - clean-sepolia: $(call CLEAN_NETWORK,sepolia) @@ -633,17 +614,12 @@ clean-sepolia: ### Gnosis chain binary ### -# TODO The `-d:gnosisChainBinary` override can be removed if the web3 library -# gains support for multiple "Chain Profiles" that consist of a set of -# consensus object (such as blocks and transactions) that are specific -# to the chain. 
gnosis-build gnosis-chain-build: | build deps + echo -e $(BUILD_MSG) "build/nimbus_beacon_node_gnosis" && \ MAKE="$(MAKE)" V="$(V)" $(ENV_SCRIPT) scripts/compile_nim_program.sh \ nimbus_beacon_node_gnosis \ beacon_chain/nimbus_beacon_node.nim \ $(NIM_PARAMS) \ - -d:gnosisChainBinary \ -d:const_preset=gnosis \ && \ echo -e $(BUILD_END_MSG) "build/nimbus_beacon_node_gnosis" @@ -654,7 +630,6 @@ gnosis-vc-build: | build deps nimbus_validator_client_gnosis \ beacon_chain/nimbus_validator_client.nim \ $(NIM_PARAMS) \ - -d:gnosisChainBinary \ -d:const_preset=gnosis \ && \ echo -e $(BUILD_END_MSG) "build/nimbus_validator_client_gnosis" @@ -670,9 +645,6 @@ gnosis-dev: | gnosis-build $(call CONNECT_TO_NETWORK_IN_DEV_MODE,gnosis,nimbus_beacon_node_gnosis,$(GNOSIS_WEB3_URLS)) endif -gnosis-dev-deposit: | gnosis-build deposit_contract - $(call MAKE_DEPOSIT,gnosis,$(GNOSIS_WEB3_URLS)) - clean-gnosis: $(call CLEAN_NETWORK,gnosis) @@ -690,10 +662,6 @@ gnosis-chain-dev: | gnosis-build $(call CONNECT_TO_NETWORK_IN_DEV_MODE,gnosis-chain,nimbus_beacon_node_gnosis,$(GNOSIS_WEB3_URLS)) endif -gnosis-chain-dev-deposit: | gnosis-build deposit_contract - echo `gnosis-chain-dev-deposit` is deprecated, use `gnosis-chain-dev-deposit` instead - $(call MAKE_DEPOSIT,gnosis-chain,$(GNOSIS_WEB3_URLS)) - clean-gnosis-chain: $(call CLEAN_NETWORK,gnosis-chain) diff --git a/README.md b/README.md index aa1717d1d7..99e86947a2 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@ [![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) +[![Release](https://img.shields.io/github/v/release/status-im/nimbus-eth2)](https://github.com/status-im/nimbus-eth2/releases) [![Discord: Nimbus](https://img.shields.io/badge/discord-nimbus-orange.svg)](https://discord.gg/XRxWahP) [![Status: #nimbus-general](https://img.shields.io/badge/status-nimbus--general-orange.svg)](https://join.status.im/nimbus-general) [![gitpoap badge](https://public-api.gitpoap.io/v1/repo/status-im/nimbus-eth2/badge)](https://www.gitpoap.io/gh/status-im/nimbus-eth2) @@ -130,7 +131,7 @@ _Alternatively, fire up our [experimental Vagrant instance with Nim pre-installe -The [generic instructions from the Nimbus repo](https://github.com/status-im/nimbus/#metric-visualisation) apply here as well. +The [generic instructions from the Nimbus repo](https://github.com/status-im/nimbus-eth1/#metric-visualisation) apply here as well. Specific steps: diff --git a/beacon_chain.nimble b/beacon_chain.nimble index 2a3bba39d3..14a4fcbd33 100644 --- a/beacon_chain.nimble +++ b/beacon_chain.nimble @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
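
The gnosis targets above now differ from the default build only through `-d:const_preset=gnosis`, since the `-d:gnosisChainBinary` override has been dropped. A minimal standalone sketch of the `{.strdefine.}` mechanism behind such a flag follows; the define name matches the flag used above, while the constant values are placeholders for this sketch rather than Nimbus's real preset tables.

```nim
# Standalone illustration only -- compile with, e.g.:
#   nim c -d:const_preset=gnosis preset_demo.nim
const const_preset {.strdefine.} = "mainnet"  # overridden by -d:const_preset=...

when const_preset == "gnosis":
  const SECONDS_PER_SLOT = 5    # placeholder value for this sketch
elif const_preset == "minimal":
  const SECONDS_PER_SLOT = 6    # placeholder value for this sketch
else:
  const SECONDS_PER_SLOT = 12   # placeholder value for this sketch

echo "preset=", const_preset, " SECONDS_PER_SLOT=", SECONDS_PER_SLOT
```
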
@@ -7,13 +7,13 @@ mode = ScriptMode.Verbose -version = "24.12.0" +version = "25.9.0" author = "Status Research & Development GmbH" description = "The Nimbus beacon chain node is a highly efficient Ethereum 2.0 client" license = "MIT or Apache License 2.0" requires( - "nim == 2.0.12", + "nim == 2.2.4", "https://github.com/status-im/NimYAML", "bearssl", "blscurve", diff --git a/beacon_chain/beacon_chain_db.nim b/beacon_chain/beacon_chain_db.nim index 3a80bea4cf..d1d490625a 100644 --- a/beacon_chain/beacon_chain_db.nim +++ b/beacon_chain/beacon_chain_db.nim @@ -14,20 +14,22 @@ import serialization, chronicles, snappy, eth/db/[kvstore, kvstore_sqlite3], ./networking/network_metadata, ./beacon_chain_db_immutable, - ./spec/[deposit_snapshots, - eth2_ssz_serialization, + ./spec/[eth2_ssz_serialization, eth2_merkleization, forks, presets, state_transition], - "."/[beacon_chain_db_light_client, filepath] + "."/[beacon_chain_db_light_client, + beacon_chain_db_quarantine, + db_utils, + filepath] from ./spec/datatypes/capella import BeaconState from ./spec/datatypes/deneb import TrustedSignedBeaconBlock export phase0, altair, eth2_ssz_serialization, eth2_merkleization, kvstore, - kvstore_sqlite3, deposit_snapshots + kvstore_sqlite3 logScope: topics = "bc_db" @@ -153,6 +155,10 @@ type ## ## See `summaries` for an index in the other direction. + quarantine: QuarantineDB + ## Pending data that passed basic checks including proposer signature + ## but that is not fully validated / trusted yet. + lcData: LightClientDataDB ## Persistent light client data to avoid expensive recomputations @@ -249,17 +255,18 @@ func subkey(root: Eth2Digest, slot: Slot): array[40, byte] = ret func blobkey(root: Eth2Digest, index: BlobIndex) : array[40, byte] = + # Note that this was botched. Data corresponding to the same block should be + # located close together, but instead, the logic groups data by `index`, i.e., + # all the index 0 blobs from all blocks come first, then all index 1 blobs etc var ret: array[40, byte] - ret[0..<8] = toBytes(index) + ret[0..<8] = toBytes(index) # Also botched, endian-dependent and should be BE ret[8..<40] = root.data - ret func columnkey(root: Eth2Digest, index: ColumnIndex) : array[40, byte] = var ret: array[40, byte] - ret[0..<8] = toBytes(index) - ret[8..<40] = root.data - + ret[0..<32] = root.data # 1. Group by block `root` + ret[32..<40] = toBytesBE(index) # 2. 
Order by `index` ret template expectDb(x: auto): untyped = @@ -496,7 +503,7 @@ proc new*(T: type BeaconChainDBV0, proc new*(T: type BeaconChainDB, db: SqStoreRef, - cfg: RuntimeConfig = defaultRuntimeConfig + cfg: RuntimeConfig ): BeaconChainDB = if not db.readOnly: # Remove the deposits table we used before we switched @@ -508,6 +515,12 @@ proc new*(T: type BeaconChainDB, if db.exec("DROP TABLE IF EXISTS validatorIndexFromPubKey;").isErr: debug "Failed to drop the validatorIndexFromPubKey table" + # 2025-06: Empty name table that was accidentally added before Fulu (#6677) + if db.exec("DROP TABLE IF EXISTS ``;").isErr: + debug "Failed to drop the `` table" + + debugGloasComment "use actual names when closer" + var genesisDepositsSeq = DbSeq[DepositData].init(db, "genesis_deposits").expectDb() @@ -516,43 +529,41 @@ proc new*(T: type BeaconChainDB, # V1 - expected-to-be small rows get without rowid optimizations keyValues = kvStore db.openKvStore("key_values", true).expectDb() - blocks = if cfg.FULU_FORK_EPOCH != FAR_FUTURE_EPOCH: [ - kvStore db.openKvStore("blocks").expectDb(), - kvStore db.openKvStore("altair_blocks").expectDb(), - kvStore db.openKvStore("bellatrix_blocks").expectDb(), - kvStore db.openKvStore("capella_blocks").expectDb(), - kvStore db.openKvStore("deneb_blocks").expectDb(), - kvStore db.openKvStore("electra_blocks").expectDb(), - kvStore db.openKvStore("fulu_blocks").expectDb()] - - else: [ + blocks = [ kvStore db.openKvStore("blocks").expectDb(), kvStore db.openKvStore("altair_blocks").expectDb(), kvStore db.openKvStore("bellatrix_blocks").expectDb(), kvStore db.openKvStore("capella_blocks").expectDb(), kvStore db.openKvStore("deneb_blocks").expectDb(), kvStore db.openKvStore("electra_blocks").expectDb(), - kvStore db.openKvStore("").expectDb()] + if cfg.FULU_FORK_EPOCH != FAR_FUTURE_EPOCH: + kvStore db.openKvStore("fulu_blocks").expectDb() + else: + nil, + if cfg.GLOAS_FORK_EPOCH != FAR_FUTURE_EPOCH: + kvStore db.openKvStore("foobar_not_real_name").expectDb() + else: + nil + ] stateRoots = kvStore db.openKvStore("state_roots", true).expectDb() - statesNoVal = if cfg.FULU_FORK_EPOCH != FAR_FUTURE_EPOCH: [ - kvStore db.openKvStore("state_no_validators").expectDb(), - kvStore db.openKvStore("altair_state_no_validators").expectDb(), - kvStore db.openKvStore("bellatrix_state_no_validators").expectDb(), - kvStore db.openKvStore("capella_state_no_validator_pubkeys").expectDb(), - kvStore db.openKvStore("deneb_state_no_validator_pubkeys").expectDb(), - kvStore db.openKvStore("electra_state_no_validator_pubkeys").expectDb(), - kvStore db.openKvStore("fulu_state_no_validator_pubkeys").expectDb()] - - else: [ - kvStore db.openKvStore("state_no_validators").expectDb(), - kvStore db.openKvStore("altair_state_no_validators").expectDb(), - kvStore db.openKvStore("bellatrix_state_no_validators").expectDb(), - kvStore db.openKvStore("capella_state_no_validator_pubkeys").expectDb(), - kvStore db.openKvStore("deneb_state_no_validator_pubkeys").expectDb(), - kvStore db.openKvStore("electra_state_no_validator_pubkeys").expectDb(), - kvStore db.openKvStore("").expectDb()] + statesNoVal = [ + kvStore db.openKvStore("state_no_validators").expectDb(), + kvStore db.openKvStore("altair_state_no_validators").expectDb(), + kvStore db.openKvStore("bellatrix_state_no_validators").expectDb(), + kvStore db.openKvStore("capella_state_no_validator_pubkeys").expectDb(), + kvStore db.openKvStore("deneb_state_no_validator_pubkeys").expectDb(), + kvStore 
db.openKvStore("electra_state_no_validator_pubkeys").expectDb(), + if cfg.FULU_FORK_EPOCH != FAR_FUTURE_EPOCH: + kvStore db.openKvStore("fulu_state_no_validator_pubkeys").expectDb() + else: + nil, + if cfg.GLOAS_FORK_EPOCH != FAR_FUTURE_EPOCH: + kvStore db.openKvStore("more_intentional_gibberish___").expectDb() + else: + nil + ] stateDiffs = kvStore db.openKvStore("state_diffs").expectDb() summaries = kvStore db.openKvStore("beacon_block_summaries", true).expectDb() @@ -593,6 +604,8 @@ proc new*(T: type BeaconChainDB, if cfg.FULU_FORK_EPOCH != FAR_FUTURE_EPOCH: columns = kvStore db.openKvStore("fulu_columns").expectDb() + let quarantine = db.initQuarantineDB().expectDb() + # Versions prior to 1.4.0 (altair) stored validators in `immutable_validators` # which stores validator keys in compressed format - this is # slow to load and has been superceded by `immutable_validators2` which uses @@ -634,12 +647,13 @@ proc new*(T: type BeaconChainDB, stateDiffs: stateDiffs, summaries: summaries, finalizedBlocks: finalizedBlocks, + quarantine: quarantine, lcData: lcData ) proc new*(T: type BeaconChainDB, dir: string, - cfg: RuntimeConfig = defaultRuntimeConfig, + cfg: RuntimeConfig, inMemory = false, readOnly = false ): BeaconChainDB = @@ -657,6 +671,9 @@ proc new*(T: type BeaconChainDB, dir, "nbc", readOnly = readOnly, manualCheckpoint = true).expectDb() BeaconChainDB.new(db, cfg) +template getQuarantineDB*(db: BeaconChainDB): QuarantineDB = + db.quarantine + template getLightClientDataDB*(db: BeaconChainDB): LightClientDataDB = db.lcData @@ -683,18 +700,6 @@ proc decodeSnappySSZ[T](data: openArray[byte], output: var T): bool = err = e.msg, typ = name(T), dataLen = data.len false -proc decodeSZSSZ[T](data: openArray[byte], output: var T): bool = - try: - let decompressed = decodeFramed(data, checkIntegrity = false) - readSszBytes(decompressed, output, updateRoot = false) - true - except CatchableError as e: - # If the data can't be deserialized, it could be because it's from a - # version of the software that uses a different SSZ encoding - warn "Unable to deserialize data, old database?", - err = e.msg, typ = name(T), dataLen = data.len - false - func encodeSSZ*(v: auto): seq[byte] = try: SSZ.encode(v) @@ -708,14 +713,6 @@ func encodeSnappySSZ(v: auto): seq[byte] = # In-memory encode shouldn't fail! raiseAssert err.msg -func encodeSZSSZ(v: auto): seq[byte] = - # https://github.com/google/snappy/blob/main/framing_format.txt - try: - encodeFramed(SSZ.encode(v)) - except CatchableError as err: - # In-memory encode shouldn't fail! 
- raiseAssert err.msg - proc getRaw(db: KvStoreRef, key: openArray[byte], T: type Eth2Digest): Opt[T] = var res: Opt[T] proc decode(data: openArray[byte]) = @@ -796,6 +793,7 @@ proc close*(db: BeaconChainDB) = if db.db == nil: return # Close things roughly in reverse order + db.quarantine.close() if not isNil(db.columns): discard db.columns.close() if not isNil(db.blobs): @@ -805,10 +803,12 @@ proc close*(db: BeaconChainDB) = discard db.summaries.close() discard db.stateDiffs.close() for kv in db.statesNoVal: - discard kv.close() + if kv != nil: + discard kv.close() discard db.stateRoots.close() for kv in db.blocks: - discard kv.close() + if kv != nil: + discard kv.close() discard db.keyValues.close() db.immutableValidatorsDb.close() @@ -829,20 +829,14 @@ proc putBeaconBlockSummary*( # Summaries are too simple / small to compress, store them as plain SSZ db.summaries.putSSZ(root.data, value) -proc putBlock*( - db: BeaconChainDB, - value: phase0.TrustedSignedBeaconBlock | altair.TrustedSignedBeaconBlock) = - db.withManyWrites: - db.blocks[type(value).kind].putSnappySSZ(value.root.data, value) - db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary()) - -proc putBlock*( - db: BeaconChainDB, - value: bellatrix.TrustedSignedBeaconBlock | - capella.TrustedSignedBeaconBlock | deneb.TrustedSignedBeaconBlock | - electra.TrustedSignedBeaconBlock | fulu.TrustedSignedBeaconBlock) = +proc putBlock*(db: BeaconChainDB, value: ForkyTrustedSignedBeaconBlock) = + const consensusFork = typeof(value).kind + doAssert db.blocks[consensusFork] != nil db.withManyWrites: - db.blocks[type(value).kind].putSZSSZ(value.root.data, value) + when consensusFork >= ConsensusFork.Bellatrix: + db.blocks[consensusFork].putSZSSZ(value.root.data, value) + else: + db.blocks[consensusFork].putSnappySSZ(value.root.data, value) db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary()) proc putBlobSidecar*( @@ -858,7 +852,7 @@ proc delBlobSidecar*( proc putDataColumnSidecar*( db: BeaconChainDB, - value: DataColumnSidecar) = + value: fulu.DataColumnSidecar) = let block_root = hash_tree_root(value.signed_block_header.message) db.columns.putSZSSZ(columnkey(block_root, value.index), value) @@ -881,48 +875,39 @@ proc updateImmutableValidators*( withdrawal_credentials: immutableValidator.withdrawal_credentials) db.immutableValidators.add immutableValidator -template toBeaconStateNoImmutableValidators(state: phase0.BeaconState): - Phase0BeaconStateNoImmutableValidators = - isomorphicCast[Phase0BeaconStateNoImmutableValidators](state) - -template toBeaconStateNoImmutableValidators(state: altair.BeaconState): - AltairBeaconStateNoImmutableValidators = - isomorphicCast[AltairBeaconStateNoImmutableValidators](state) - -template toBeaconStateNoImmutableValidators(state: bellatrix.BeaconState): - BellatrixBeaconStateNoImmutableValidators = - isomorphicCast[BellatrixBeaconStateNoImmutableValidators](state) - -template toBeaconStateNoImmutableValidators(state: capella.BeaconState): - CapellaBeaconStateNoImmutableValidators = - isomorphicCast[CapellaBeaconStateNoImmutableValidators](state) - -template toBeaconStateNoImmutableValidators(state: deneb.BeaconState): - DenebBeaconStateNoImmutableValidators = - isomorphicCast[DenebBeaconStateNoImmutableValidators](state) - -template toBeaconStateNoImmutableValidators(state: electra.BeaconState): - ElectraBeaconStateNoImmutableValidators = - isomorphicCast[ElectraBeaconStateNoImmutableValidators](state) - -template toBeaconStateNoImmutableValidators(state: 
fulu.BeaconState): - FuluBeaconStateNoImmutableValidators = - isomorphicCast[FuluBeaconStateNoImmutableValidators](state) +template BeaconStateNoImmutableValidators(kind: static ConsensusFork): auto = + when kind == ConsensusFork.Gloas: + typedesc[GloasBeaconStateNoImmutableValidators] + elif kind == ConsensusFork.Fulu: + typedesc[FuluBeaconStateNoImmutableValidators] + elif kind == ConsensusFork.Electra: + typedesc[ElectraBeaconStateNoImmutableValidators] + elif kind == ConsensusFork.Deneb: + typedesc[DenebBeaconStateNoImmutableValidators] + elif kind == ConsensusFork.Capella: + typedesc[CapellaBeaconStateNoImmutableValidators] + elif kind == ConsensusFork.Bellatrix: + typedesc[BellatrixBeaconStateNoImmutableValidators] + elif kind == ConsensusFork.Altair: + typedesc[AltairBeaconStateNoImmutableValidators] + elif kind == ConsensusFork.Phase0: + typedesc[Phase0BeaconStateNoImmutableValidators] + else: + {.error: "BeaconStateNoImmutableValidators does not support " & $kind.} -proc putState*( - db: BeaconChainDB, key: Eth2Digest, - value: phase0.BeaconState | altair.BeaconState) = - db.updateImmutableValidators(value.validators.asSeq()) - db.statesNoVal[type(value).kind].putSnappySSZ( - key.data, toBeaconStateNoImmutableValidators(value)) +template toBeaconStateNoImmutableValidators(state: ForkyBeaconState): auto = + isomorphicCast[typeof(state).kind.BeaconStateNoImmutableValidators](state) -proc putState*( - db: BeaconChainDB, key: Eth2Digest, - value: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | - electra.BeaconState | fulu.BeaconState) = +proc putState*(db: BeaconChainDB, key: Eth2Digest, value: ForkyBeaconState) = + const consensusFork = typeof(value).kind + doAssert db.statesNoVal[consensusFork] != nil db.updateImmutableValidators(value.validators.asSeq()) - db.statesNoVal[type(value).kind].putSZSSZ( - key.data, toBeaconStateNoImmutableValidators(value)) + when consensusFork >= ConsensusFork.Bellatrix: + db.statesNoVal[consensusFork].putSZSSZ( + key.data, toBeaconStateNoImmutableValidators(value)) + else: + db.statesNoVal[consensusFork].putSnappySSZ( + key.data, toBeaconStateNoImmutableValidators(value)) proc putState*(db: BeaconChainDB, state: ForkyHashedBeaconState) = db.withManyWrites: @@ -932,6 +917,7 @@ proc putState*(db: BeaconChainDB, state: ForkyHashedBeaconState) = # For testing rollback proc putCorruptState*( db: BeaconChainDB, fork: static ConsensusFork, key: Eth2Digest) = + doAssert db.statesNoVal[fork] != nil db.statesNoVal[fork].putSnappySSZ(key.data, Validator()) func stateRootKey(root: Eth2Digest, slot: Slot): array[40, byte] = @@ -950,6 +936,7 @@ proc putStateDiff*(db: BeaconChainDB, root: Eth2Digest, value: BeaconStateDiff) db.stateDiffs.putSnappySSZ(root.data, value) proc delBlock*(db: BeaconChainDB, fork: ConsensusFork, key: Eth2Digest): bool = + doAssert db.blocks[fork] != nil var deleted = false db.withManyWrites: discard db.summaries.del(key.data).expectDb() @@ -957,12 +944,15 @@ proc delBlock*(db: BeaconChainDB, fork: ConsensusFork, key: Eth2Digest): bool = deleted proc delState*(db: BeaconChainDB, fork: ConsensusFork, key: Eth2Digest) = + doAssert db.statesNoVal[fork] != nil discard db.statesNoVal[fork].del(key.data).expectDb() proc clearBlocks*(db: BeaconChainDB, fork: ConsensusFork): bool = + doAssert db.blocks[fork] != nil db.blocks[fork].clear().expectDb() proc clearStates*(db: BeaconChainDB, fork: ConsensusFork): bool = + doAssert db.statesNoVal[fork] != nil db.statesNoVal[fork].clear().expectDb() proc delStateRoot*(db: BeaconChainDB, 
root: Eth2Digest, slot: Slot) = @@ -980,38 +970,6 @@ proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) = proc putGenesisBlock*(db: BeaconChainDB, key: Eth2Digest) = db.keyValues.putRaw(subkey(kGenesisBlock), key) -proc putDepositContractSnapshot*( - db: BeaconChainDB, snapshot: DepositContractSnapshot) = - db.withManyWrites: - db.keyValues.putSnappySSZ(subkey(kDepositContractSnapshot), - snapshot) - # TODO: We currently store this redundant old snapshot in order - # to allow the users to rollback to a previous version - # of Nimbus without problems. It would be reasonable - # to remove this in Nimbus 23.2 - db.keyValues.putSnappySSZ(subkey(kOldDepositContractSnapshot), - snapshot.toOldDepositContractSnapshot) - -proc hasDepositContractSnapshot*(db: BeaconChainDB): bool = - expectDb(subkey(kDepositContractSnapshot) in db.keyValues) - -proc getDepositContractSnapshot*(db: BeaconChainDB): Opt[DepositContractSnapshot] = - result.ok(default DepositContractSnapshot) - let r = db.keyValues.getSnappySSZ( - subkey(kDepositContractSnapshot), result.get) - if r != GetResult.found: result.err() - -proc getUpgradableDepositSnapshot*(db: BeaconChainDB): Option[OldDepositContractSnapshot] = - var dcs: OldDepositContractSnapshot - let oldKey = subkey(kOldDepositContractSnapshot) - if db.keyValues.getSnappySSZ(oldKey, dcs) != GetResult.found: - # Old record is not present in the current database. - # We need to take a look in the v0 database as well. - if db.v0.backend.getSnappySSZ(oldKey, dcs) != GetResult.found: - return - - return some dcs - proc getPhase0Block( db: BeaconChainDBV0, key: Eth2Digest): Opt[phase0.TrustedSignedBeaconBlock] = # We only store blocks that we trust in the database @@ -1023,42 +981,26 @@ proc getPhase0Block( # set root after deserializing (so it doesn't get zeroed) result.get().root = key -proc getBlock*( - db: BeaconChainDB, key: Eth2Digest, - T: type phase0.TrustedSignedBeaconBlock): Opt[T] = - # We only store blocks that we trust in the database - result.ok(default(T)) - if db.blocks[T.kind].getSnappySSZ(key.data, result.get) != GetResult.found: - # During the initial releases phase0, we stored blocks in a different table - result = db.v0.getPhase0Block(key) - else: - # set root after deserializing (so it doesn't get zeroed) - result.get().root = key - -proc getBlock*( - db: BeaconChainDB, key: Eth2Digest, - T: type altair.TrustedSignedBeaconBlock): Opt[T] = - # We only store blocks that we trust in the database - result.ok(default(T)) - if db.blocks[T.kind].getSnappySSZ(key.data, result.get) == GetResult.found: - # set root after deserializing (so it doesn't get zeroed) - result.get().root = key - else: - result.err() - -proc getBlock*[ - X: bellatrix.TrustedSignedBeaconBlock | capella.TrustedSignedBeaconBlock | - deneb.TrustedSignedBeaconBlock | electra.TrustedSignedBeaconBlock | - fulu.TrustedSignedBeaconBlock]( - db: BeaconChainDB, key: Eth2Digest, - T: type X): Opt[T] = +proc getBlock*[X: ForkyTrustedSignedBeaconBlock]( + db: BeaconChainDB, key: Eth2Digest, T: typedesc[X]): Opt[T] = # We only store blocks that we trust in the database - result.ok(default(T)) - if db.blocks[T.kind].getSZSSZ(key.data, result.get) == GetResult.found: - # set root after deserializing (so it doesn't get zeroed) - result.get().root = key - else: - result.err() + const consensusFork = T.kind + if db.blocks[consensusFork] != nil: + result.ok(default(T)) + let getResult = + when consensusFork >= ConsensusFork.Bellatrix: + db.blocks[consensusFork].getSZSSZ(key.data, result.unsafeGet) + else: 
+ db.blocks[consensusFork].getSnappySSZ(key.data, result.unsafeGet) + if getResult != GetResult.found: + when consensusFork < ConsensusFork.Altair: + # During initial releases phase0, we stored blocks in a different table + result = db.v0.getPhase0Block(key) + else: + result.err() + else: + # set root after deserializing (so it doesn't get zeroed) + result.unsafeGet.root = key proc getPhase0BlockSSZ( db: BeaconChainDBV0, key: Eth2Digest, data: var seq[byte]): bool = @@ -1080,39 +1022,26 @@ proc getPhase0BlockSZ( db.backend.get(subkey(phase0.SignedBeaconBlock, key), decode).expectDb() and success -# SSZ implementations are separate so as to avoid unnecessary data copies -proc getBlockSSZ*( - db: BeaconChainDB, key: Eth2Digest, data: var seq[byte], - T: type phase0.TrustedSignedBeaconBlock): bool = - let dataPtr = addr data # Short-lived - var success = true - func decode(data: openArray[byte]) = - dataPtr[] = snappy.decode(data) - success = dataPtr[].len > 0 - db.blocks[ConsensusFork.Phase0].get(key.data, decode).expectDb() and success or - db.v0.getPhase0BlockSSZ(key, data) - -proc getBlockSSZ*( - db: BeaconChainDB, key: Eth2Digest, data: var seq[byte], - T: type altair.TrustedSignedBeaconBlock): bool = - let dataPtr = addr data # Short-lived - var success = true - func decode(data: openArray[byte]) = - dataPtr[] = snappy.decode(data) - success = dataPtr[].len > 0 - db.blocks[T.kind].get(key.data, decode).expectDb() and success - -proc getBlockSSZ*[ - X: bellatrix.TrustedSignedBeaconBlock | capella.TrustedSignedBeaconBlock | - deneb.TrustedSignedBeaconBlock | electra.TrustedSignedBeaconBlock | - fulu.TrustedSignedBeaconBlock]( - db: BeaconChainDB, key: Eth2Digest, data: var seq[byte], T: type X): bool = +proc getBlockSSZ*[X: ForkyTrustedSignedBeaconBlock]( + db: BeaconChainDB, key: Eth2Digest, + data: var seq[byte], T: typedesc[X]): bool = + const consensusFork = T.kind + if db.blocks[consensusFork] == nil: + return false let dataPtr = addr data # Short-lived var success = true func decode(data: openArray[byte]) = - dataPtr[] = decodeFramed(data, checkIntegrity = false) + when consensusFork >= ConsensusFork.Bellatrix: + dataPtr[] = decodeFramed(data, checkIntegrity = false) + else: + dataPtr[] = snappy.decode(data) success = dataPtr[].len > 0 - db.blocks[T.kind].get(key.data, decode).expectDb() and success + var res = + db.blocks[consensusFork].get(key.data, decode).expectDb() and success + when consensusFork < ConsensusFork.Altair: + # During initial releases phase0, we stored blocks in a different table + res = res or db.v0.getPhase0BlockSSZ(key, data) + res proc getBlockSSZ*( db: BeaconChainDB, key: Eth2Digest, data: var seq[byte], @@ -1131,7 +1060,7 @@ proc getBlobSidecar*(db: BeaconChainDB, root: Eth2Digest, index: BlobIndex, value: var BlobSidecar): bool = db.blobs.getSZSSZ(blobkey(root, index), value) == GetResult.found -proc getDataColumnSidecarSZ*(db: BeaconChainDB, root: Eth2Digest, +proc getDataColumnSidecarSZ*(db: BeaconChainDB, root: Eth2Digest, index: ColumnIndex, data: var seq[byte]): bool = let dataPtr = addr data # Short-lived func decode(data: openArray[byte]) = @@ -1139,39 +1068,31 @@ proc getDataColumnSidecarSZ*(db: BeaconChainDB, root: Eth2Digest, db.columns.get(columnkey(root, index), decode).expectDb() proc getDataColumnSidecar*(db: BeaconChainDB, root: Eth2Digest, index: ColumnIndex, - value: var DataColumnSidecar): bool = + value: var fulu.DataColumnSidecar): bool = + if db.columns == nil: # Fulu has not been scheduled; DB table does not exist + return false 
db.columns.getSZSSZ(columnkey(root, index), value) == GetResult.found -proc getBlockSZ*( - db: BeaconChainDB, key: Eth2Digest, data: var seq[byte], - T: type phase0.TrustedSignedBeaconBlock): bool = - let dataPtr = addr data # Short-lived - var success = true - func decode(data: openArray[byte]) = - dataPtr[] = snappy.encodeFramed(snappy.decode(data)) - success = dataPtr[].len > 0 - db.blocks[ConsensusFork.Phase0].get(key.data, decode).expectDb() and success or - db.v0.getPhase0BlockSZ(key, data) - -proc getBlockSZ*( - db: BeaconChainDB, key: Eth2Digest, data: var seq[byte], - T: type altair.TrustedSignedBeaconBlock): bool = +proc getBlockSZ*[X: ForkyTrustedSignedBeaconBlock]( + db: BeaconChainDB, key: Eth2Digest, + data: var seq[byte], T: typedesc[X]): bool = + const consensusFork = T.kind + if db.blocks[consensusFork] == nil: + return false let dataPtr = addr data # Short-lived var success = true func decode(data: openArray[byte]) = - dataPtr[] = snappy.encodeFramed(snappy.decode(data)) + when consensusFork >= ConsensusFork.Bellatrix: + assign(dataPtr[], data) + else: + dataPtr[] = snappy.encodeFramed(snappy.decode(data)) success = dataPtr[].len > 0 - db.blocks[T.kind].get(key.data, decode).expectDb() and success - -proc getBlockSZ*[ - X: bellatrix.TrustedSignedBeaconBlock | capella.TrustedSignedBeaconBlock | - deneb.TrustedSignedBeaconBlock | electra.TrustedSignedBeaconBlock | - fulu.TrustedSignedBeaconBlock]( - db: BeaconChainDB, key: Eth2Digest, data: var seq[byte], T: type X): bool = - let dataPtr = addr data # Short-lived - func decode(data: openArray[byte]) = - assign(dataPtr[], data) - db.blocks[T.kind].get(key.data, decode).expectDb() + var res = + db.blocks[consensusFork].get(key.data, decode).expectDb() and success + when consensusFork < ConsensusFork.Altair: + # During initial releases phase0, we stored blocks in a different table + res = res or db.v0.getPhase0BlockSZ(key, data) + res proc getBlockSZ*( db: BeaconChainDB, key: Eth2Digest, data: var seq[byte], @@ -1182,90 +1103,7 @@ proc getBlockSZ*( proc getStateOnlyMutableValidators( immutableValidators: openArray[ImmutableValidatorData2], store: KvStoreRef, key: openArray[byte], - output: var (phase0.BeaconState | altair.BeaconState), - rollback: RollbackProc): bool = - ## Load state into `output` - BeaconState is large so we want to avoid - ## re-allocating it if possible - ## Return `true` iff the entry was found in the database and `output` was - ## overwritten. 
- ## Rollback will be called only if output was partially written - if it was - ## not found at all, rollback will not be called - # TODO rollback is needed to deal with bug - use `noRollback` to ignore: - # https://github.com/nim-lang/Nim/issues/14126 - - let prevNumValidators = output.validators.len - - case store.getSnappySSZ(key, toBeaconStateNoImmutableValidators(output)) - of GetResult.found: - let numValidators = output.validators.len - doAssert immutableValidators.len >= numValidators - - for i in prevNumValidators ..< numValidators: - let - # Bypass hash cache invalidation - dstValidator = addr output.validators.data[i] - - assign( - dstValidator.pubkeyData, - HashedValidatorPubKey.init( - immutableValidators[i].pubkey.toPubKey())) - assign( - dstValidator.withdrawal_credentials, - immutableValidators[i].withdrawal_credentials) - output.validators.clearCaches(i) - - true - of GetResult.notFound: - false - of GetResult.corrupted: - rollback() - false - -proc getStateOnlyMutableValidators( - immutableValidators: openArray[ImmutableValidatorData2], - store: KvStoreRef, key: openArray[byte], - output: var bellatrix.BeaconState, rollback: RollbackProc): bool = - ## Load state into `output` - BeaconState is large so we want to avoid - ## re-allocating it if possible - ## Return `true` iff the entry was found in the database and `output` was - ## overwritten. - ## Rollback will be called only if output was partially written - if it was - ## not found at all, rollback will not be called - # TODO rollback is needed to deal with bug - use `noRollback` to ignore: - # https://github.com/nim-lang/Nim/issues/14126 - - let prevNumValidators = output.validators.len - - case store.getSZSSZ(key, toBeaconStateNoImmutableValidators(output)) - of GetResult.found: - let numValidators = output.validators.len - doAssert immutableValidators.len >= numValidators - - for i in prevNumValidators ..< numValidators: - # Bypass hash cache invalidation - let dstValidator = addr output.validators.data[i] - - assign( - dstValidator.pubkeyData, - HashedValidatorPubKey.init( - immutableValidators[i].pubkey.toPubKey())) - assign( - dstValidator.withdrawal_credentials, - immutableValidators[i].withdrawal_credentials) - output.validators.clearCaches(i) - - true - of GetResult.notFound: - false - of GetResult.corrupted: - rollback() - false - -proc getStateOnlyMutableValidators( - immutableValidators: openArray[ImmutableValidatorData2], - store: KvStoreRef, key: openArray[byte], - output: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState | - fulu.BeaconState), + output: var ForkyBeaconState, rollback: RollbackProc): bool = ## Load state into `output` - BeaconState is large so we want to avoid ## re-allocating it if possible @@ -1275,10 +1113,16 @@ proc getStateOnlyMutableValidators( ## not found at all, rollback will not be called # TODO rollback is needed to deal with bug - use `noRollback` to ignore: # https://github.com/nim-lang/Nim/issues/14126 + const consensusFork = typeof(output).kind + let + prevNumValidators = output.validators.len + getResult = + when consensusFork >= ConsensusFork.Bellatrix: + store.getSZSSZ(key, toBeaconStateNoImmutableValidators(output)) + else: + store.getSnappySSZ(key, toBeaconStateNoImmutableValidators(output)) - let prevNumValidators = output.validators.len - - case store.getSZSSZ(key, toBeaconStateNoImmutableValidators(output)) + case getResult of GetResult.found: let numValidators = output.validators.len doAssert immutableValidators.len >= numValidators @@ -1286,10 
+1130,11 @@ proc getStateOnlyMutableValidators( for i in prevNumValidators ..< numValidators: # Bypass hash cache invalidation let dstValidator = addr output.validators.data[i] - assign( - dstValidator.pubkeyData, - HashedValidatorPubKey.init( - immutableValidators[i].pubkey.toPubKey())) + dstValidator.pubkeyData.assign(HashedValidatorPubKey.init( + immutableValidators[i].pubkey.toPubKey())) + when consensusFork < ConsensusFork.Capella: + dstValidator.withdrawal_credentials.assign( + immutableValidators[i].withdrawal_credentials) output.validators.clearCaches(i) true @@ -1328,31 +1173,9 @@ proc getState( rollback() false -proc getState*( - db: BeaconChainDB, key: Eth2Digest, output: var phase0.BeaconState, - rollback: RollbackProc): bool = - ## Load state into `output` - BeaconState is large so we want to avoid - ## re-allocating it if possible - ## Return `true` iff the entry was found in the database and `output` was - ## overwritten. - ## Rollback will be called only if output was partially written - if it was - ## not found at all, rollback will not be called - # TODO rollback is needed to deal with bug - use `noRollback` to ignore: - # https://github.com/nim-lang/Nim/issues/14126 - type T = type(output) - - if not getStateOnlyMutableValidators( - db.immutableValidators, db.statesNoVal[T.kind], key.data, output, rollback): - db.v0.getState(db.immutableValidators, key, output, rollback) - else: - true - proc getState*( db: BeaconChainDB, key: Eth2Digest, - output: var (altair.BeaconState | bellatrix.BeaconState | - capella.BeaconState | deneb.BeaconState | electra.BeaconState | - fulu.BeaconState), - rollback: RollbackProc): bool = + output: var ForkyBeaconState, rollback: RollbackProc): bool = ## Load state into `output` - BeaconState is large so we want to avoid ## re-allocating it if possible ## Return `true` iff the entry was found in the database and `output` was @@ -1361,10 +1184,15 @@ proc getState*( ## not found at all, rollback will not be called # TODO rollback is needed to deal with bug - use `noRollback` to ignore: # https://github.com/nim-lang/Nim/issues/14126 - type T = type(output) - getStateOnlyMutableValidators( - db.immutableValidators, db.statesNoVal[T.kind], key.data, output, - rollback) + const consensusFork = typeof(output).kind + var res = + db.statesNoVal[consensusFork] != nil and + db.immutableValidators.getStateOnlyMutableValidators( + db.statesNoVal[consensusFork], key.data, output, rollback) + when consensusFork < ConsensusFork.Altair: + # During initial releases phase0, we stored states in a different table + res = res or db.v0.getState(db.immutableValidators, key, output, rollback) + res proc getState*( db: BeaconChainDB, fork: ConsensusFork, state_root: Eth2Digest, @@ -1422,23 +1250,24 @@ proc getGenesisBlock*(db: BeaconChainDB): Opt[Eth2Digest] = proc containsBlock*(db: BeaconChainDBV0, key: Eth2Digest): bool = db.backend.contains(subkey(phase0.SignedBeaconBlock, key)).expectDb() -proc containsBlock*( - db: BeaconChainDB, key: Eth2Digest, - T: type phase0.TrustedSignedBeaconBlock): bool = - db.blocks[T.kind].contains(key.data).expectDb() or - db.v0.containsBlock(key) - -proc containsBlock*[ - X: altair.TrustedSignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock | - capella.TrustedSignedBeaconBlock | deneb.TrustedSignedBeaconBlock | - electra.TrustedSignedBeaconBlock | fulu.TrustedSignedBeaconBlock]( - db: BeaconChainDB, key: Eth2Digest, T: type X): bool = - db.blocks[X.kind].contains(key.data).expectDb() +proc containsBlock*[X: 
ForkyTrustedSignedBeaconBlock]( + db: BeaconChainDB, key: Eth2Digest, T: typedesc[X]): bool = + const consensusFork = T.kind + var res = + db.blocks[consensusFork] != nil and + db.blocks[consensusFork].contains(key.data).expectDb() + when consensusFork < ConsensusFork.Altair: + # During initial releases phase0, we stored states in a different table + res = res or db.v0.containsBlock(key) + res proc containsBlock*(db: BeaconChainDB, key: Eth2Digest, fork: ConsensusFork): bool = case fork - of ConsensusFork.Phase0: containsBlock(db, key, phase0.TrustedSignedBeaconBlock) - else: db.blocks[fork].contains(key.data).expectDb() + of ConsensusFork.Phase0: + containsBlock(db, key, phase0.TrustedSignedBeaconBlock) + else: + db.blocks[fork] != nil and + db.blocks[fork].contains(key.data).expectDb() proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool = for fork in countdown(ConsensusFork.high, ConsensusFork.low): @@ -1454,13 +1283,15 @@ proc containsState*(db: BeaconChainDBV0, key: Eth2Digest): bool = proc containsState*(db: BeaconChainDB, fork: ConsensusFork, key: Eth2Digest, legacy: bool = true): bool = + if db.statesNoVal[fork] == nil: return false if db.statesNoVal[fork].contains(key.data).expectDb(): return true (legacy and fork == ConsensusFork.Phase0 and db.v0.containsState(key)) proc containsState*(db: BeaconChainDB, key: Eth2Digest, legacy: bool = true): bool = for fork in countdown(ConsensusFork.high, ConsensusFork.low): - if db.statesNoVal[fork].contains(key.data).expectDb(): return true + if db.statesNoVal[fork] != nil and + db.statesNoVal[fork].contains(key.data).expectDb(): return true (legacy and db.v0.containsState(key)) @@ -1539,10 +1370,11 @@ iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest): ) SELECT v FROM next; """ + static: doAssert BeaconBlockSummary.isFixedSize let stmt = expectDb db.db.prepareStmt( summariesQuery, array[32, byte], - array[sizeof(BeaconBlockSummary), byte], + array[BeaconBlockSummary.fixedPortionSize, byte], managed = false) defer: # in case iteration is stopped along the way @@ -1572,26 +1404,17 @@ iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest): # Backwards compat for reading old databases, or those that for whatever # reason lost a summary along the way.. 
- static: doAssert ConsensusFork.high == ConsensusFork.Fulu while true: - if db.v0.backend.getSnappySSZ( - subkey(BeaconBlockSummary, res.root), res.summary) == GetResult.found: - discard # Just yield below - elif (let blck = db.getBlock(res.root, phase0.TrustedSignedBeaconBlock); blck.isSome()): - res.summary = blck.get().message.toBeaconBlockSummary() - elif (let blck = db.getBlock(res.root, altair.TrustedSignedBeaconBlock); blck.isSome()): - res.summary = blck.get().message.toBeaconBlockSummary() - elif (let blck = db.getBlock(res.root, bellatrix.TrustedSignedBeaconBlock); blck.isSome()): - res.summary = blck.get().message.toBeaconBlockSummary() - elif (let blck = db.getBlock(res.root, capella.TrustedSignedBeaconBlock); blck.isSome()): - res.summary = blck.get().message.toBeaconBlockSummary() - elif (let blck = db.getBlock(res.root, deneb.TrustedSignedBeaconBlock); blck.isSome()): - res.summary = blck.get().message.toBeaconBlockSummary() - elif (let blck = db.getBlock(res.root, electra.TrustedSignedBeaconBlock); blck.isSome()): - res.summary = blck.get().message.toBeaconBlockSummary() - elif (let blck = db.getBlock(res.root, fulu.TrustedSignedBeaconBlock); blck.isSome()): - res.summary = blck.get().message.toBeaconBlockSummary() - else: + var found = false + withAll(ConsensusFork): + if not found: + let blck = db.getBlock(res.root, consensusFork.TrustedSignedBeaconBlock) + if blck.isSome: + res.summary = blck.unsafeGet.message.toBeaconBlockSummary() + found = true + found = found or db.v0.backend.getSnappySSZ( + subkey(BeaconBlockSummary, res.root), res.summary) == GetResult.found + if not found: break yield res diff --git a/beacon_chain/beacon_chain_db_immutable.nim b/beacon_chain/beacon_chain_db_immutable.nim index c95cd9629a..14e274fcbd 100644 --- a/beacon_chain/beacon_chain_db_immutable.nim +++ b/beacon_chain/beacon_chain_db_immutable.nim @@ -15,10 +15,11 @@ from ./spec/datatypes/capella import ExecutionPayloadHeader, HistoricalSummary, Withdrawal from ./spec/datatypes/deneb import ExecutionPayloadHeader from ./spec/datatypes/electra import - ExecutionPayloadHeader, PendingConsolidation, PendingDeposit, + PendingConsolidation, PendingDeposit, PendingPartialWithdrawal -from ./spec/datatypes/fulu import - ExecutionPayloadHeader +from ./spec/datatypes/gloas import + BuilderPendingPayment, BuilderPendingWithdrawal, ExecutionPayloadBid, + BUILDER_PENDING_WITHDRAWALS_LIMIT type # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate @@ -393,7 +394,7 @@ type next_sync_committee*: SyncCommittee # Execution - latest_execution_payload_header*: electra.ExecutionPayloadHeader + latest_execution_payload_header*: deneb.ExecutionPayloadHeader # Withdrawals next_withdrawal_index*: WithdrawalIndex @@ -477,7 +478,7 @@ type next_sync_committee*: SyncCommittee # Execution - latest_execution_payload_header*: fulu.ExecutionPayloadHeader + latest_execution_payload_header*: deneb.ExecutionPayloadHeader # Withdrawals next_withdrawal_index*: WithdrawalIndex @@ -502,3 +503,108 @@ type pending_consolidations*: HashList[PendingConsolidation, Limit PENDING_CONSOLIDATIONS_LIMIT] ## [New in Electra:EIP7251] + + # [New in Fulu:EIP7917] + proposer_lookahead*: + HashArray[Limit ((MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH), uint64] + + # Memory-representation-equivalent to a Gloas BeaconState for in-place SSZ + # reading and writing + GloasBeaconStateNoImmutableValidators* = object + # Versioning + genesis_time*: uint64 + genesis_validators_root*: Eth2Digest + slot*: Slot + 
fork*: Fork + + # History + latest_block_header*: BeaconBlockHeader + ## `latest_block_header.state_root == ZERO_HASH` temporarily + + block_roots*: HashArray[Limit SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] + ## Needed to process attestations, older to newer + + state_roots*: HashArray[Limit SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] + historical_roots*: HashList[Eth2Digest, Limit HISTORICAL_ROOTS_LIMIT] + ## Frozen in Capella, replaced by historical_summaries + + # Eth1 + eth1_data*: Eth1Data + eth1_data_votes*: + HashList[Eth1Data, Limit(EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH)] + eth1_deposit_index*: uint64 + + # Registry + validators*: + HashList[ValidatorStatusCapella, Limit VALIDATOR_REGISTRY_LIMIT] + balances*: HashList[Gwei, Limit VALIDATOR_REGISTRY_LIMIT] + + # Randomness + randao_mixes*: HashArray[Limit EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest] + + # Slashings + slashings*: HashArray[Limit EPOCHS_PER_SLASHINGS_VECTOR, Gwei] + ## Per-epoch sums of slashed effective balances + + # Participation + previous_epoch_participation*: EpochParticipationFlags + current_epoch_participation*: EpochParticipationFlags + + # Finality + justification_bits*: JustificationBits + ## Bit set for every recent justified epoch + + previous_justified_checkpoint*: Checkpoint + current_justified_checkpoint*: Checkpoint + finalized_checkpoint*: Checkpoint + + # Inactivity + inactivity_scores*: InactivityScores + + # Light client sync committees + current_sync_committee*: SyncCommittee + next_sync_committee*: SyncCommittee + + # Execution + latest_execution_payload_bid*: gloas.ExecutionPayloadBid + ## [Modified in Gloas:EIP7732] + + # Withdrawals + next_withdrawal_index*: WithdrawalIndex + next_withdrawal_validator_index*: uint64 + + # Deep history valid from Capella onwards + historical_summaries*: + HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT] + + deposit_requests_start_index*: uint64 # [New in Electra:EIP6110] + deposit_balance_to_consume*: Gwei # [New in Electra:EIP7251] + exit_balance_to_consume*: Gwei # [New in Electra:EIP7251] + earliest_exit_epoch*: Epoch # [New in Electra:EIP7251] + consolidation_balance_to_consume*: Gwei # [New in Electra:EIP7251] + earliest_consolidation_epoch*: Epoch # [New in Electra:EIP7251] + pending_deposits*: HashList[PendingDeposit, Limit PENDING_DEPOSITS_LIMIT] + ## [New in Electra:EIP7251] + + # [New in Electra:EIP7251] + pending_partial_withdrawals*: + HashList[PendingPartialWithdrawal, Limit PENDING_PARTIAL_WITHDRAWALS_LIMIT] + pending_consolidations*: + HashList[PendingConsolidation, Limit PENDING_CONSOLIDATIONS_LIMIT] + + # [New in Fulu:EIP7917] + proposer_lookahead*: + HashArray[Limit ((MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH), uint64] + + # [New in Gloas:EIP7732] + execution_payload_availability*: BitArray[int(SLOTS_PER_HISTORICAL_ROOT)] + # [New in Gloas:EIP7732] + builder_pending_payments*: + HashArray[Limit 2 * SLOTS_PER_EPOCH, BuilderPendingPayment] + # [New in Gloas:EIP7732] + builder_pending_withdrawals*: + HashList[BuilderPendingWithdrawal, Limit BUILDER_PENDING_WITHDRAWALS_LIMIT] + # [New in Gloas:EIP7732] + latest_block_hash*: Eth2Digest + # [New in Gloas:EIP7732] + latest_withdrawals_root*: Eth2Digest diff --git a/beacon_chain/beacon_chain_db_light_client.nim b/beacon_chain/beacon_chain_db_light_client.nim index e14a7b353c..4d1f1f0386 100644 --- a/beacon_chain/beacon_chain_db_light_client.nim +++ b/beacon_chain/beacon_chain_db_light_client.nim @@ -15,7 +15,7 @@ import # Beacon chain internals spec/datatypes/altair, 
spec/[eth2_ssz_serialization, helpers], - ./db_limits + ./db_utils logScope: topics = "lcdata" @@ -172,11 +172,6 @@ type ## Tracks the finalized sync committee periods for which complete data ## has been imported (from `dag.tail.slot`). -template disposeSafe(s: untyped): untyped = - if distinctBase(s) != nil: - s.dispose() - s = typeof(s)(nil) - proc initHeadersStore( backend: SqStoreRef, name, typeName: string): KvResult[LightClientHeaderStore] = diff --git a/beacon_chain/beacon_chain_db_quarantine.nim b/beacon_chain/beacon_chain_db_quarantine.nim new file mode 100644 index 0000000000..070924a847 --- /dev/null +++ b/beacon_chain/beacon_chain_db_quarantine.nim @@ -0,0 +1,222 @@ +# beacon_chain +# Copyright (c) 2022-2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} + +import + # Status libraries + chronicles, + eth/db/kvstore_sqlite3, + # Beacon chain internals + spec/helpers, + ./db_utils + +# Without this export compilation fails with error +# vendor\nim-chronicles\chronicles.nim(352, 21) Error: undeclared identifier: 'activeChroniclesStream' +# It actually is not needed, because chronicles is not used in this file, +# but because decodeSZSSZ() is generic and uses chronicles - generic expansion +# introduces this issue. +export chronicles + +logScope: topics = "qudata" + +type + ForkyDataSidecar* = deneb.BlobSidecar | fulu.DataColumnSidecar + + DataSidecarStore = object + getStmt: SqliteStmt[array[32, byte], seq[byte]] + putStmt: SqliteStmt[(array[32, byte], seq[byte]), void] + delStmt: SqliteStmt[array[32, byte], void] + countStmt: SqliteStmt[NoParams, int64] + + QuarantineDB* = ref object + backend: SqStoreRef + ## SQLite backend + + electraDataSidecar: DataSidecarStore + ## Proposer signature verified data blob sidecars. + fuluDataSidecar: DataSidecarStore + ## Proposer signature verified data column sidecars. + +template tableName(sidecar: typedesc[ForkyDataSidecar]): string = + when sidecar is deneb.BlobSidecar: + "electra_sidecars_quarantine" + elif sidecar is fulu.DataColumnSidecar: + "fulu_sidecars_quarantine" + else: + static: raiseAssert "Sidecar's fork is not supported" + +proc initDataSidecarStore( + backend: SqStoreRef, + name: string +): KvResult[DataSidecarStore] = + if not(backend.readOnly): + ? backend.exec("BEGIN TRANSACTION;") + ? backend.exec("DROP INDEX IF EXISTS `" & name & "_iblock_root`;") + ? backend.exec("DROP TABLE IF EXISTS `" & name & "`;") + ? backend.exec(""" + CREATE TABLE IF NOT EXISTS `""" & name & """` ( + `block_root` BLOB, -- `Eth2Digest` + `data_sidecar` BLOB -- `DataSidecar` (SZSSZ) + ); + """) + ? backend.exec(""" + CREATE INDEX IF NOT EXISTS `""" & name & """_iblock_root` + ON `""" & name & """`(block_root); + """) + ? backend.exec("COMMIT;") + + if not ? 
backend.hasTable(name): + return ok(DataSidecarStore()) + + let + getStmt = backend.prepareStmt(""" + SELECT `data_sidecar` FROM `""" & name & """` + WHERE `block_root` = ?; + """, array[32, byte], (seq[byte]), managed = false) + .expect("SQL query OK") + putStmt = backend.prepareStmt(""" + INSERT INTO `""" & name & """` ( + `block_root`, `data_sidecar` + ) VALUES (?, ?); + """, (array[32, byte], seq[byte]), void, managed = false).expect("SQL query OK") + delStmt = backend.prepareStmt(""" + DELETE FROM `""" & name & """` WHERE `block_root` == ?; + """, array[32, byte], void, managed = false).expect("SQL query OK") + countStmt = backend.prepareStmt(""" + SELECT COUNT(1) FROM `""" & name & """`; + """, NoParams, int64, managed = false).expect("SQL query OK") + + ok(DataSidecarStore( + getStmt: getStmt, + putStmt: putStmt, + delStmt: delStmt, + countStmt: countStmt + )) + +func close(store: var DataSidecarStore) = + if not(isNil(distinctBase(store.getStmt))): store.getStmt.disposeSafe() + if not(isNil(distinctBase(store.putStmt))): store.putStmt.disposeSafe() + if not(isNil(distinctBase(store.delStmt))): store.delStmt.disposeSafe() + if not(isNil(distinctBase(store.countStmt))): store.countStmt.disposeSafe() + +iterator sidecars*( + db: QuarantineDB, + T: typedesc[ForkyDataSidecar], + blockRoot: Eth2Digest +): T = + when T is deneb.BlobSidecar: + template statement: untyped = + db.electraDataSidecar.getStmt + template storeName: untyped = + "electraDataSidecar" + elif T is fulu.DataColumnSidecar: + template statement: untyped = + db.fuluDataSidecar.getStmt + template storeName: untyped = + "fuluDataSidecar" + else: + static: raiseAssert "Sidecar's fork is not supported" + + if not(isNil(distinctBase(statement))): + var row: statement.Result + for rowRes in statement.exec(blockRoot.data, row): + rowRes.expect("SQL query OK") + var res: T + if not(decodeSZSSZ(row, res)): + error "Quarantine store corrupted", store = storeName, + blockRoot + break + yield res + +proc putDataSidecars*[T: ForkyDataSidecar]( + db: QuarantineDB, + blockRoot: Eth2Digest, + dataSidecars: openArray[ref T] +) = + doAssert(not(db.backend.readOnly)) + + when T is deneb.BlobSidecar: + template statement: untyped = + db.electraDataSidecar.putStmt + elif T is fulu.DataColumnSidecar: + template statement: untyped = + db.fuluDataSidecar.putStmt + else: + static: raiseAssert "Sidecar's fork is not supported" + + if not(isNil(distinctBase(statement))): + db.backend.exec("BEGIN TRANSACTION;").expect("SQL query OK") + for sidecar in dataSidecars: + let blob = encodeSZSSZ(sidecar[]) + statement.exec((blockRoot.data, blob)). 
+ expect("SQL query OK") + db.backend.exec("COMMIT;").expect("SQL query OK") + +proc removeDataSidecars*( + db: QuarantineDB, + T: typedesc[ForkyDataSidecar], + blockRoot: Eth2Digest +) = + doAssert not(db.backend.readOnly) + + when T is deneb.BlobSidecar: + template statement: untyped = + db.electraDataSidecar.delStmt + elif T is fulu.DataColumnSidecar: + template statement: untyped = + db.fuluDataSidecar.delStmt + else: + static: raiseAssert "Sidecar's fork is not supported" + + if not(isNil(distinctBase(statement))): + statement.exec(blockRoot.data).expect("SQL query OK") + +proc sidecarsCount*( + db: QuarantineDB, + T: typedesc[ForkyDataSidecar], +): int64 = + var recordCount = 0'i64 + + when T is deneb.BlobSidecar: + template statement: untyped = + db.electraDataSidecar.countStmt + elif T is fulu.DataColumnSidecar: + template statement: untyped = + db.fuluDataSidecar.countStmt + else: + static: raiseAssert "Sidecar's fork is not supported" + + if not(isNil(distinctBase(statement))): + discard statement.exec do (res: int64): + recordCount = res + recordCount + +proc initQuarantineDB*( + backend: SqStoreRef, +): KvResult[QuarantineDB] = + # Please note that all quarantine tables are temporary, each time the node is + # restarted these tables will be wiped out completely. + # Therefore there is no need to maintain forward or backward compatibility + # guarantees. + let + electraDataSidecar = + ? backend.initDataSidecarStore(tableName(deneb.BlobSidecar)) + fuluDataSidecar = + ? backend.initDataSidecarStore(tableName(fulu.DataColumnSidecar)) + + ok QuarantineDB( + backend: backend, + electraDataSidecar: electraDataSidecar, + fuluDataSidecar: fuluDataSidecar + ) + +proc close*(db: QuarantineDB) = + if not(isNil(db.backend)): + db.electraDataSidecar.close() + db.fuluDataSidecar.close() + db[].reset() diff --git a/beacon_chain/beacon_chain_file.nim b/beacon_chain/beacon_chain_file.nim index 46b7a4bc31..9124b67c15 100644 --- a/beacon_chain/beacon_chain_file.nim +++ b/beacon_chain/beacon_chain_file.nim @@ -84,14 +84,9 @@ func getBlockForkCode(fork: ConsensusFork): uint64 = uint64(fork) func getBlobForkCode(fork: ConsensusFork): uint64 = - case fork - of ConsensusFork.Deneb: - uint64(MaxForksCount) - of ConsensusFork.Electra: + if fork >= ConsensusFork.Deneb: uint64(MaxForksCount) + uint64(fork) - uint64(ConsensusFork.Deneb) - of ConsensusFork.Fulu: - uint64(MaxForksCount) + uint64(fork) - uint64(ConsensusFork.Electra) - of ConsensusFork.Phase0 .. 
ConsensusFork.Capella: + else: raiseAssert "Blobs are not supported for the fork" proc init(t: typedesc[ChainFileError], k: ChainFileErrorType, diff --git a/beacon_chain/beacon_clock.nim b/beacon_chain/beacon_clock.nim index f8915e1376..214a993066 100644 --- a/beacon_chain/beacon_clock.nim +++ b/beacon_chain/beacon_clock.nim @@ -31,16 +31,25 @@ type ## # TODO consider NTP and network-adjusted timestamps as outlined here: # https://ethresear.ch/t/network-adjusted-timestamps/4187 + timeConfig: TimeConfig genesis: Time GetBeaconTimeFn* = proc(): BeaconTime {.gcsafe, raises: [].} -proc init*(T: type BeaconClock, genesis_time: uint64): Opt[T] = - # Since we'll be converting beacon time differences to nanoseconds, - # the time can't be outrageously far from now - if genesis_time > (getTime().toUnix().uint64 + 100'u64 * 365'u64 * 24'u64 * - 60'u64 * 60'u64) or - genesis_time < GENESIS_SLOT * SECONDS_PER_SLOT: +proc init*( + T: type BeaconClock, + timeConfig: TimeConfig, + genesis_time: uint64): Opt[T] = + let + SECONDS_PER_SLOT = timeConfig.SECONDS_PER_SLOT + MIN_GENESIS_TIME = GENESIS_SLOT * SECONDS_PER_SLOT + MAX_GENESIS_TIME = + # Since we'll be converting beacon time differences to nanoseconds, + # the time can't be outrageously far from now + getTime().toUnix().uint64 + + 100'u64 * 365'u64 * 24'u64 * 60'u64 * 60'u64 + if SECONDS_PER_SLOT notin MIN_SECONDS_PER_SLOT .. MAX_SECONDS_PER_SLOT or + genesis_time notin MIN_GENESIS_TIME .. MAX_GENESIS_TIME: Opt.none(BeaconClock) else: let @@ -49,7 +58,12 @@ proc init*(T: type BeaconClock, genesis_time: uint64): Opt[T] = # offset to genesis instead of applying it at every time conversion unixGenesisOffset = times.seconds(int(GENESIS_SLOT * SECONDS_PER_SLOT)) - Opt.some T(genesis: unixGenesis - unixGenesisOffset) + Opt.some T( + timeConfig: timeConfig, + genesis: unixGenesis - unixGenesisOffset) + +func timeConfig*(c: BeaconClock): TimeConfig = + c.timeConfig # Readonly func toBeaconTime*(c: BeaconClock, t: Time): BeaconTime = BeaconTime(ns_since_genesis: inNanoseconds(t - c.genesis)) @@ -74,40 +88,33 @@ proc fromNow*(c: BeaconClock, t: BeaconTime): tuple[inFuture: bool, offset: Dura proc fromNow*(c: BeaconClock, slot: Slot): tuple[inFuture: bool, offset: Duration] = c.fromNow(slot.start_beacon_time()) -proc durationToNextSlot*(c: BeaconClock): Duration = - let - currentTime = c.now() - currentSlot = currentTime.toSlot() +func durationOrZero*(d: tuple[inFuture: bool, offset: Duration]): Duration = + if d.inFuture: + d.offset + else: + ZeroDuration - if currentSlot.afterGenesis: - let nextSlot = currentSlot.slot + 1 - nanoseconds( - (nextSlot.start_beacon_time() - currentTime).nanoseconds) +func nextSlotStartTime*( + exSlot: tuple[afterGenesis: bool, slot: Slot], + timeConfig: TimeConfig): BeaconTime = + if exSlot.afterGenesis: + (exSlot.slot + 1).start_beacon_time() else: - # absoluteTime = BeaconTime(-currentTime.ns_since_genesis). 
let - absoluteTime = Slot(0).start_beacon_time() + - (Slot(0).start_beacon_time() - currentTime) - timeToNextSlot = absoluteTime - currentSlot.slot.start_beacon_time() - nanoseconds(timeToNextSlot.nanoseconds) - -proc durationToNextEpoch*(c: BeaconClock): Duration = - let - currentTime = c.now() - currentSlot = currentTime.toSlot() - - if currentSlot.afterGenesis: - let nextEpochSlot = (currentSlot.slot.epoch() + 1).start_slot() - nanoseconds( - (nextEpochSlot.start_beacon_time() - currentTime).nanoseconds) + genesisTime = GENESIS_SLOT.start_beacon_time() + timeDiff = exSlot.slot.start_beacon_time() - genesisTime + genesisTime - timeDiff + +func nextEpochStartTime*( + exSlot: tuple[afterGenesis: bool, slot: Slot], + timeConfig: TimeConfig): BeaconTime = + if exSlot.afterGenesis: + (exSlot.slot.epoch + 1).start_slot.start_beacon_time() else: - # absoluteTime = BeaconTime(-currentTime.ns_since_genesis). let - absoluteTime = Slot(0).start_beacon_time() + - (Slot(0).start_beacon_time() - currentTime) - timeToNextEpoch = absoluteTime - - currentSlot.slot.epoch().start_slot().start_beacon_time() - nanoseconds(timeToNextEpoch.nanoseconds) + genesisTime = GENESIS_SLOT.start_beacon_time() + timeDiff = exSlot.slot.epoch.start_slot.start_beacon_time() - genesisTime + genesisTime - timeDiff func saturate*(d: tuple[inFuture: bool, offset: Duration]): Duration = if d.inFuture: d.offset else: seconds(0) diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index 04d67e1505..418f52bcb2 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -17,18 +17,18 @@ import metrics, metrics/chronos_httpserver, # Local modules - "."/[beacon_clock, beacon_chain_db, conf, light_client], + "."/[beacon_clock, beacon_chain_db, conf, light_client, version], ./gossip_processing/[eth2_processor, block_processor, optimistic_processor], ./networking/eth2_network, ./el/el_manager, ./consensus_object_pools/[ blockchain_dag, blob_quarantine, block_quarantine, consensus_manager, - data_column_quarantine, attestation_pool, sync_committee_msg_pool, validator_change_pool, + attestation_pool, sync_committee_msg_pool, validator_change_pool, blockchain_list], ./spec/datatypes/[base, altair], ./spec/eth2_apis/dynamic_fee_recipients, ./spec/signatures_batch, - ./sync/[sync_manager, request_manager, sync_types], + ./sync/[sync_manager, request_manager, sync_types, validator_custody], ./validators/[ action_tracker, message_router, validator_monitor, validator_pool, keystore_management], @@ -56,6 +56,7 @@ type phase0AttSlashQueue*: AsyncEventQueue[phase0.AttesterSlashing] electraAttSlashQueue*: AsyncEventQueue[electra.AttesterSlashing] blobSidecarQueue*: AsyncEventQueue[BlobSidecarInfoObject] + columnSidecarQueue*: AsyncEventQueue[DataColumnSidecarInfoObject] finalQueue*: AsyncEventQueue[FinalizationInfoObject] reorgQueue*: AsyncEventQueue[ReorgInfoObject] contribQueue*: AsyncEventQueue[SignedContributionAndProof] @@ -81,7 +82,7 @@ type list*: ChainListRef quarantine*: ref Quarantine blobQuarantine*: ref BlobQuarantine - dataColumnQuarantine*: ref DataColumnQuarantine + dataColumnQuarantine*: ref ColumnQuarantine attestationPool*: ref AttestationPool syncCommitteeMsgPool*: ref SyncCommitteeMsgPool lightClientPool*: ref LightClientPool @@ -95,11 +96,11 @@ type eventBus*: EventBus vcProcess*: Process requestManager*: RequestManager + validatorCustody*: ValidatorCustodyRef syncManager*: SyncManager[Peer, PeerId] backfiller*: SyncManager[Peer, PeerId] untrustedManager*: SyncManager[Peer, PeerId] 
syncOverseer*: SyncOverseerRef - genesisSnapshotContent*: string processor*: ref Eth2Processor batchVerifier*: ref BatchVerifier blockProcessor*: ref BlockProcessor @@ -121,6 +122,7 @@ type lastValidAttestedBlock*: Opt[BlockSlot] shutdownEvent*: AsyncEvent +# TODO https://github.com/status-im/nim-stew/pull/258 template findIt*(s: openArray, predicate: untyped): int = var res = -1 for i, it {.inject.} in s: @@ -135,6 +137,9 @@ template rng*(node: BeaconNode): ref HmacDrbgContext = proc currentSlot*(node: BeaconNode): Slot = node.beaconClock.now.slotOrZero +func hasRestAllowedOrigin*(node: BeaconNode): bool = + node.config.restAllowedOrigin.isSome + func getPayloadBuilderAddress*(config: BeaconNodeConf): Opt[string] = if config.payloadBuilderEnable: Opt.some config.payloadBuilderUrl @@ -171,4 +176,5 @@ proc getPayloadBuilderClient*( socketFlags = {SocketFlags.TcpNoDelay} RestClientRef.new(payloadBuilderAddress.get, flags = flags, - socketFlags = socketFlags) + socketFlags = socketFlags, + userAgent = nimbusAgentStr) diff --git a/beacon_chain/beacon_node_light_client.nim b/beacon_chain/beacon_node_light_client.nim index 4c1a7b6535..416eb3eb2e 100644 --- a/beacon_chain/beacon_node_light_client.nim +++ b/beacon_chain/beacon_node_light_client.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -42,7 +42,9 @@ proc initLightClient*( signedBlock: ForkedSignedBeaconBlock ): Future[void] {.async: (raises: [CancelledError]).} = withBlck(signedBlock): - when consensusFork >= ConsensusFork.Bellatrix: + when consensusFork == ConsensusFork.Gloas: + debugGloasComment "" + elif consensusFork >= ConsensusFork.Bellatrix: if forkyBlck.message.is_execution_block: template payload(): auto = forkyBlck.message.body.execution_payload if not payload.block_hash.isZero: @@ -50,7 +52,7 @@ proc initLightClient*( forkyBlck.message) else: discard optimisticProcessor = initOptimisticProcessor( - getBeaconTime, optimisticHandler) + cfg.time, getBeaconTime, optimisticHandler) shouldInhibitSync = func(): bool = if isNil(node.syncOverseer): @@ -181,11 +183,13 @@ proc updateLightClientFromDag*(node: BeaconNode) = return var header: ForkedLightClientHeader withBlck(bdata): - const lcDataFork = lcDataForkAtConsensusFork(consensusFork) - when lcDataFork > LightClientDataFork.None: - header = ForkedLightClientHeader.init( - forkyBlck.toLightClientHeader(lcDataFork)) - else: raiseAssert "Unreachable" + debugGloasComment "" + when consensusFork != ConsensusFork.Gloas: + const lcDataFork = lcDataForkAtConsensusFork(consensusFork) + when lcDataFork > LightClientDataFork.None: + header = ForkedLightClientHeader.init( + forkyBlck.toLightClientHeader(lcDataFork)) + else: raiseAssert "Unreachable" let current_sync_committee = block: let tmpState = assignClone(node.dag.headState) node.dag.currentSyncCommitteeForPeriod(tmpState[], dagPeriod).valueOr: diff --git a/beacon_chain/beacon_node_status.nim b/beacon_chain/beacon_node_status.nim deleted file mode 100644 index 8abb5c575d..0000000000 --- a/beacon_chain/beacon_node_status.nim +++ /dev/null @@ -1,18 +0,0 @@ -# beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH -# Licensed and distributed under either of -# * 
MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
-# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
-# at your option. This file may not be copied, modified, or distributed except according to those terms.
-
-{.push raises: [].}
-
-type
-  # "state" is already taken by BeaconState
-  BeaconNodeStatus* = enum
-    Starting
-    Running
-    Stopping
-
-# this needs to be global, so it can be set in the Ctrl+C signal handler
-var bnStatus* = BeaconNodeStatus.Starting
diff --git a/beacon_chain/buildinfo.nim b/beacon_chain/buildinfo.nim
new file mode 100644
index 0000000000..2f99fe6a66
--- /dev/null
+++ b/beacon_chain/buildinfo.nim
@@ -0,0 +1,81 @@
+# beacon_chain
+# Copyright (c) 2018-2025 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [], gcsafe.}
+
+## This module implements the version tagging details of all binaries included
+## in the Nimbus release process (i.e. beacon_node, validator_client, etc)
+
+import std/[os, sequtils, strutils]
+
+proc gitFolderExists(path: string): bool {.compileTime.} =
+  # walk up parent folder to find `.git` folder
+  var currPath = path
+  while true:
+    if dirExists(currPath & "/.git"):
+      return true
+    let parts = splitPath(currPath)
+    if parts.tail.len == 0:
+      break
+    currPath = parts.head
+  false
+
+const
+  compileYear* = CompileDate[0 ..< 4] # YYYY-MM-DD (UTC)
+
+  GitRevisionOverride {.strdefine.} = ""
+
+  nimFullBanner* = staticExec("nim --version")
+
+template generateGitRevision*(repoPath: string): untyped =
+  # strip: remove spaces
+  # --short=8: ensure we get 8 chars of commit hash
+  # -C sourcePath: get the correct git hash no matter where the current dir is.
+  when GitRevisionOverride.len > 0:
+    static:
+      doAssert(
+        GitRevisionOverride.len == 8, "GitRevisionOverride must consist of 8 characters"
+      )
+      doAssert(
+        GitRevisionOverride.allIt(it in HexDigits),
+        "GitRevisionOverride should contain only hex chars",
+      )
+
+    GitRevisionOverride
+  else:
+    if gitFolderExists(repoPath):
+      # only using git if the parent dir is a git repo.
+      strip(
+        staticExec("git -C " & strutils.escape(repoPath) & " rev-parse --short=8 HEAD")
+      )
+    else:
+      # otherwise we use revision number given by build system.
+      # e.g. user download from release tarball, or Github zip download.
+ "00000000" + +func getNimGitHash(): string = + const gitPrefix = "git hash: " + let tmp = splitLines(nimFullBanner) + if tmp.len == 0: + return + for line in tmp: + if line.startsWith(gitPrefix) and line.len > 8 + gitPrefix.len: + result = line[gitPrefix.len ..< gitPrefix.len + 8] + +func nimBanner*(): string = + let gitHash = getNimGitHash() + let tmp = splitLines(nimFullBanner) + if gitHash.len > 0: + tmp[0] & " (" & gitHash & ")" + else: + tmp[0] + +when not defined(nimscript): + import metrics + declareGauge nimVersionGauge, + "Nim version info", ["version", "nim_commit"], name = "nim_version" + nimVersionGauge.set(1, labelValues = [NimVersion, getNimGitHash()]) diff --git a/beacon_chain/conf.nim b/beacon_chain/conf.nim index d12ba63bb8..bfb59798d3 100644 --- a/beacon_chain/conf.nim +++ b/beacon_chain/conf.nim @@ -5,22 +5,22 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import - std/[options, unicode, uri], + std/[options, os, unicode, uri], metrics, - + results, chronicles, chronicles/options as chroniclesOptions, confutils, confutils/defs, confutils/std/net, confutils/toml/defs as confTomlDefs, - confutils/toml/std/net as confTomlNet, - confutils/toml/std/uri as confTomlUri, + toml_serialization/std/net as confTomlNet, + toml_serialization/std/uri as confTomlUri, serialization/errors, stew/[io2, byteutils], unicodedb/properties, normalize, - eth/common/eth_types as commonEthTypes, eth/net/nat, - eth/p2p/discoveryv5/enr, - json_serialization, web3/[primitives, confutils_defs], + eth/net/nat, + eth/enr/enr, + json_serialization, json_serialization/std/net as jsnet, web3/confutils_defs, chronos/transports/common, kzg4844/kzg, ./spec/[engine_authentication, keystore, network, crypto], @@ -28,9 +28,8 @@ import ./networking/network_metadata, ./validators/slashing_protection_common, ./el/el_conf, - ./filepath + ./[filepath, nimbus_binary_common] -from std/os import getHomeDir, parentDir, `/` from std/strutils import parseBiggestUInt, replace from consensus_object_pools/block_pools_types_light_client import LightClientDataImportMode @@ -39,9 +38,9 @@ export uri, nat, enr, defaultEth2TcpPort, enabledLogLevel, defs, parseCmdArg, completeCmdArg, network_metadata, - el_conf, network, BlockHashOrNumber, - confTomlDefs, confTomlNet, confTomlUri, - LightClientDataImportMode + el_conf, network, + confTomlDefs, confTomlNet, confTomlUri, jsnet, + LightClientDataImportMode, slashing_protection_common, nimbus_binary_common declareGauge network_name, "network name", ["name"] @@ -52,7 +51,7 @@ const defaultSigningNodeRequestTimeout* = 60 defaultBeaconNode* = "http://127.0.0.1:" & $defaultEth2RestPort defaultBeaconNodeUri* = parseUri(defaultBeaconNode) - defaultGasLimit* = 36_000_000 + defaultGasLimit* = 60_000_000 defaultAdminListenAddressDesc* = $defaultAdminListenAddress defaultBeaconNodeDesc = $defaultBeaconNode @@ -84,7 +83,7 @@ type # status = "Displays status information about all deposits" exit = "Submits a validator voluntary exit" - SNStartUpCmd* = enum + SNStartUpCmd* {.pure.} = enum SNNoCommand RecordCmd* {.pure.} = enum @@ -99,22 +98,13 @@ type v2 both - StdoutLogKind* {.pure.} = enum - Auto = "auto" - Colors = "colors" - NoColors = "nocolors" - Json = "json" - None = "none" - HistoryMode* {.pure.} = enum Archive = "archive" Prune = "prune" - SlashProtCmd* = enum + 
SlashProtCmd* {.pure.} = enum `import` = "Import a EIP-3076 slashing protection interchange file" `export` = "Export a EIP-3076 slashing protection interchange file" - # migrateAll = "Export and remove the whole validator slashing protection DB." - # migrate = "Export and remove specified validators from Nimbus." ImportMethod* {.pure.} = enum Normal = "normal" @@ -159,12 +149,10 @@ type defaultValueDesc: "mainnet" name: "network" .}: Option[string] - dataDir* {. + dataDirFlag* {. desc: "The directory where nimbus will store all blockchain data" - defaultValue: config.defaultDataDir() - defaultValueDesc: "" abbr: "d" - name: "data-dir" .}: OutDir + name: "data-dir" .}: Option[OutDir] validatorsDirFlag* {. desc: "A directory containing validator keystores" @@ -202,7 +190,6 @@ type web3ForcePolling* {. hidden - desc: "Force the use of polling when determining the head block of Eth1 (obsolete)" name: "web3-force-polling" .}: Option[bool] web3Urls* {. @@ -374,7 +361,7 @@ type name: "genesis-state-url" .}: Option[Uri] finalizedDepositTreeSnapshot* {. - desc: "SSZ file specifying a recent finalized EIP-4881 deposit tree snapshot" + hidden name: "finalized-deposit-tree-snapshot" .}: Option[InputFile] finalizedCheckpointBlock* {. @@ -580,6 +567,34 @@ type defaultValue: false name: "dump" .}: bool + # Because certain EL (e.g., Geth) may return SYNCING/ACCEPTED even for + # execution payloads that have already been deemed INVALID in the past, + # this flag is needed to avoid optimistically importing beacon blocks + # that contain such payloads into fork choice when Nimbus is restarted. + # This helps manual recovery when the justified checkpoint is advanced + # optimistically based on attestations in blocks with invalid payloads, + # such as the botched Prague/Electra deployment onto the Holesky testnet. + # + # The recovery flow is as follows: + # (1) Upgrade to an EL version that correctly identifies the invalid block + # (2) Restart Nimbus with `--debug-invalidate-block-root` set to the first + # block known to have an invalid execution payload. Multiple blocks + # may be specified if necessary + # (3) If Nimbus is already synced to the canonical (but invalid) branch, + # wait until the EL informs Nimbus that this head is INVALID. + # Nimbus then rewinds back to the latest valid head + # (4) Restart Nimbus again, ensuring that `--debug-invalidate-block-root` + # is set up correctly. Nimbus will re-discover the invalid branch, + # but this time will not optimistically import it, preventing the + # invalid branch to become canonical in its local fork choice + # (5) Wait for a different branch to become canonical, and keep the + # `--debug-invalidate-block-root` flag present until finality has + # advanced beyond the problematic chain segment + invalidBlockRoots* {. + hidden + desc: "List of beacon block roots that, if the EL responds with SYNCING/ACCEPTED, are treated as if their execution payload was INVALID" + name: "debug-invalidate-block-root" .}: seq[Eth2Digest] + directPeers* {. desc: "The list of privileged, secure and known peers to connect and maintain the connection to. This requires a not random netkey-file. In the multiaddress format like: /ip4/
/tcp//p2p/, or enr format (enr:-xx). Peering agreements are established out of band and must be reciprocal" name: "direct-peer" .}: seq[string] @@ -631,7 +646,7 @@ type # https://github.com/prysmaticlabs/prysm/pull/10312 suggestedFeeRecipient* {. desc: "Suggested fee recipient" - name: "suggested-fee-recipient" .}: Option[Address] + name: "suggested-fee-recipient" .}: Option[Eth1Address] suggestedGasLimit* {. desc: "Suggested gas limit" @@ -895,12 +910,10 @@ type desc: "Specifies a path for the written JSON log file (deprecated)" name: "log-file" .}: Option[OutFile] - dataDir* {. + dataDirFlag* {. desc: "The directory where nimbus will store all blockchain data" - defaultValue: config.defaultDataDir() - defaultValueDesc: "" abbr: "d" - name: "data-dir" .}: OutDir + name: "data-dir" .}: Option[OutDir] doppelgangerDetection* {. # TODO This description is shared between the BN and the VC. @@ -964,7 +977,7 @@ type # https://github.com/prysmaticlabs/prysm/pull/10312 suggestedFeeRecipient* {. desc: "Suggested fee recipient" - name: "suggested-fee-recipient" .}: Option[Address] + name: "suggested-fee-recipient" .}: Option[Eth1Address] suggestedGasLimit* {. desc: "Suggested gas limit" @@ -1072,16 +1085,19 @@ type desc: "Specifies a path for the written JSON log file" name: "log-file" .}: Option[OutFile] + eth2Network* {. + desc: "The Eth2 network to join" + defaultValueDesc: "mainnet" + name: "network" .}: Option[string] + nonInteractive* {. desc: "Do not display interactive prompts. Quit on missing configuration" name: "non-interactive" .}: bool - dataDir* {. + dataDirFlag* {. desc: "The directory where nimbus will store validator's keys" - defaultValue: config.defaultDataDir() - defaultValueDesc: "" abbr: "d" - name: "data-dir" .}: OutDir + name: "data-dir" .}: Option[OutDir] validatorsDirFlag* {. desc: "A directory containing validator keystores" @@ -1094,7 +1110,7 @@ type expectedFeeRecipient* {. desc: "Signatures for blocks will require proofs of the specified " & "fee recipient" - name: "expected-fee-recipient".}: Option[Address] + name: "expected-fee-recipient".}: Option[Eth1Address] serverIdent* {. desc: "Server identifier which will be used in HTTP Host header" @@ -1133,28 +1149,76 @@ type AnyConf* = BeaconNodeConf | ValidatorClientConf | SigningNodeConf - Address = primitives.Address +proc loadEth2Network*(eth2Network: Option[string]): Eth2NetworkMetadata = + let metadata = + if eth2Network.isSome: + getMetadataForNetwork(eth2Network.get) + else: + when IsGnosisSupported: + getMetadataForNetwork("gnosis") + elif IsMainnetSupported: + getMetadataForNetwork("mainnet") + else: + # Presumably other configurations can have other defaults, but for now + # this simplifies the flow + fatal "Must specify network on non-mainnet node" + quit 1 + + network_name.set(2, labelValues = [metadata.cfg.name()]) -proc defaultDataDir*[Conf](config: Conf): string = - let dataDir = when defined(windows): - "AppData" / "Roaming" / "Nimbus" - elif defined(macosx): - "Library" / "Application Support" / "Nimbus" + metadata + +template loadEth2Network*(config: BeaconNodeConf|SigningNodeConf): Eth2NetworkMetadata = + loadEth2Network(config.eth2Network) + +proc shortNetworkName*(eth2Network: Option[string]): string = + # Given an eth2Network configuration, figure out a good canonical name for the + # network that can be used for directories etc. 
+ if eth2Network.isSome() and + eth2Network.get() in + ["mainnet", "minimal", "gnosis", "chiado", "hoodi", "holesky", "sepolia"]: + eth2Network.get() + else: + eth2Network.loadEth2Network().cfg.name() + +proc legacyDataDir*(): Opt[string] = + let dir = + getHomeDir() / ( + when defined(windows): + "AppData" / "Roaming" / "Nimbus" / "BeaconNode" + elif defined(macosx): + "Library" / "Application Support" / "Nimbus" / "BeaconNode" + else: + ".cache" / "nimbus" / "BeaconNode" + ) + + if dirExists(dir): + Opt.some(dir) else: - ".cache" / "nimbus" + Opt.none(string) - getHomeDir() / dataDir / "BeaconNode" +proc defaultDataDir*(config: BeaconNodeConf): string = + defaultDataDir("", config.eth2Network.shortNetworkName()) -func dumpDir(config: AnyConf): string = +proc defaultDataDir*(_: ValidatorClientConf): string = + defaultDataDir("vc", "") + +proc defaultDataDir*(config: SigningNodeConf): string = + defaultDataDir("sn", config.eth2Network.shortNetworkName()) + +proc dataDir*(config: AnyConf): OutDir = + config.dataDirFlag.get(OutDir legacyDataDir().valueOr(defaultDataDir(config))) + +proc dumpDir(config: AnyConf): string = config.dataDir / "dump" -func dumpDirInvalid*(config: AnyConf): string = +proc dumpDirInvalid*(config: AnyConf): string = config.dumpDir / "invalid" # things that failed validation -func dumpDirIncoming*(config: AnyConf): string = +proc dumpDirIncoming*(config: AnyConf): string = config.dumpDir / "incoming" # things that couldn't be validated (missingparent etc) -func dumpDirOutgoing*(config: AnyConf): string = +proc dumpDirOutgoing*(config: AnyConf): string = config.dumpDir / "outgoing" # things we produced proc createDumpDirs*(config: BeaconNodeConf) = @@ -1189,13 +1253,6 @@ func parseCmdArg*(T: type GraffitiBytes, input: string): T func completeCmdArg*(T: type GraffitiBytes, input: string): seq[string] = return @[] -func parseCmdArg*(T: type BlockHashOrNumber, input: string): T - {.raises: [ValueError].} = - init(BlockHashOrNumber, input) - -func completeCmdArg*(T: type BlockHashOrNumber, input: string): seq[string] = - return @[] - func parseCmdArg*(T: type Uri, input: string): T {.raises: [ValueError].} = parseUri(input) @@ -1273,16 +1330,16 @@ proc parseCmdArg*(T: type enr.Record, p: string): T {.raises: [ValueError].} = func completeCmdArg*(T: type enr.Record, val: string): seq[string] = return @[] -func validatorsDir*[Conf](config: Conf): string = +proc validatorsDir*[Conf](config: Conf): string = string config.validatorsDirFlag.get(InputDir(config.dataDir / "validators")) -func secretsDir*[Conf](config: Conf): string = +proc secretsDir*[Conf](config: Conf): string = string config.secretsDirFlag.get(InputDir(config.dataDir / "secrets")) -func walletsDir*(config: BeaconNodeConf): string = +proc walletsDir*(config: BeaconNodeConf): string = string config.walletsDirFlag.get(InputDir(config.dataDir / "wallets")) -func eraDir*(config: BeaconNodeConf): string = +proc eraDir*(config: BeaconNodeConf): string = # The era directory should be shared between networks of the same type.. 
string config.eraDirFlag.get(InputDir(config.dataDir / "era")) @@ -1407,39 +1464,13 @@ proc readValue*(r: var TomlReader, a: var WalletName) except CatchableError: r.raiseUnexpectedValue("string expected") -proc readValue*(r: var TomlReader, a: var Address) +proc readValue*(r: var TomlReader, a: var Eth1Address) {.raises: [SerializationError].} = try: - a = parseCmdArg(Address, r.readValue(string)) + a = parseCmdArg(Eth1Address, r.readValue(string)) except CatchableError: r.raiseUnexpectedValue("string expected") -proc loadEth2Network*(eth2Network: Option[string]): Eth2NetworkMetadata = - const defaultName = - when const_preset == "gnosis": - "gnosis" - elif const_preset == "mainnet": - "mainnet" - else: - "(unspecified)" - network_name.set(2, labelValues = [eth2Network.get(otherwise = defaultName)]) - - if eth2Network.isSome: - getMetadataForNetwork(eth2Network.get) - else: - when const_preset == "gnosis": - getMetadataForNetwork("gnosis") - elif const_preset == "mainnet": - getMetadataForNetwork("mainnet") - else: - # Presumably other configurations can have other defaults, but for now - # this simplifies the flow - fatal "Must specify network on non-mainnet node" - quit 1 - -template loadEth2Network*(config: BeaconNodeConf): Eth2NetworkMetadata = - loadEth2Network(config.eth2Network) - func defaultFeeRecipient*(conf: AnyConf): Opt[Eth1Address] = if conf.suggestedFeeRecipient.isSome: Opt.some conf.suggestedFeeRecipient.get @@ -1479,12 +1510,12 @@ func configJwtSecretOpt*(jwtSecret: Option[InputFile]): Opt[InputFile] = proc loadJwtSecret*( rng: var HmacDrbgContext, - config: BeaconNodeConf, + config: auto, allowCreate: bool): Opt[seq[byte]] = rng.loadJwtSecret( string(config.dataDir), config.jwtSecret.configJwtSecretOpt, allowCreate) -proc engineApiUrls*(config: BeaconNodeConf): seq[EngineApiUrl] = +proc engineApiUrls*(config: auto): seq[EngineApiUrl] = let elUrls = if config.noEl: return newSeq[EngineApiUrl]() elif config.elUrls.len == 0 and config.web3Urls.len == 0: diff --git a/beacon_chain/conf_common.nim b/beacon_chain/conf_common.nim deleted file mode 100644 index 4610f6fbbb..0000000000 --- a/beacon_chain/conf_common.nim +++ /dev/null @@ -1,53 +0,0 @@ -# beacon_chain -# Copyright (c) 2023 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. - -{.push raises: [].} - -import std/os -import "."/[conf, conf_light_client] -import results, confutils, confutils/defs, confutils/std/net, - confutils/toml/defs as confTomlDefs, - confutils/toml/std/net as confTomlNet, - confutils/toml/std/uri as confTomlUri - -proc makeBannerAndConfig*(clientId, copyright, banner, specVersion: string, - environment: openArray[string], - ConfType: type): Result[ConfType, string] = - let - version = clientId & "\p" & copyright & "\p\p" & - "eth2 specification v" & specVersion & "\p\p" & - banner - cmdLine = if len(environment) == 0: commandLineParams() - else: @environment - - # TODO for some reason, copyrights are printed when doing `--help` - {.push warning[ProveInit]: off.} - let config = try: - ConfType.load( - version = version, # but a short version string makes more sense... 
- copyrightBanner = clientId, - cmdLine = cmdLine, - secondarySources = proc ( - config: ConfType, sources: auto - ) {.raises: [ConfigurationError], gcsafe.} = - if config.configFile.isSome: - sources.addConfigFile(Toml, config.configFile.get) - ) - except CatchableError as exc: - # We need to log to stderr here, because logging hasn't been configured yet - var msg = "Failure while loading the configuration:\p" & exc.msg & "\p" - if (exc[] of ConfigurationError) and not(isNil(exc.parent)) and - (exc.parent[] of TomlFieldReadingError): - let fieldName = ((ref TomlFieldReadingError)(exc.parent)).field - if fieldName in ["el", "web3-url", "bootstrap-node", - "direct-peer", "validator-monitor-pubkey"]: - msg &= "Since the '" & fieldName & "' option is allowed to " & - "have more than one value, please make sure to supply " & - "a properly formatted TOML array\p" - return err(msg) - {.pop.} - ok(config) diff --git a/beacon_chain/conf_light_client.nim b/beacon_chain/conf_light_client.nim index 3c174c75e4..648599ba87 100644 --- a/beacon_chain/conf_light_client.nim +++ b/beacon_chain/conf_light_client.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -9,7 +9,7 @@ import json_serialization/std/net, - ./conf + ./conf, ./nimbus_binary_common export net, conf @@ -37,12 +37,10 @@ type LightClientConf* = object name: "log-file" .}: Option[OutFile] # Storage - dataDir* {. + dataDirFlag* {. desc: "The directory where nimbus will store all blockchain data" - defaultValue: config.defaultDataDir() - defaultValueDesc: "" abbr: "d" - name: "data-dir" .}: OutDir + name: "data-dir" .}: Option[OutDir] # Network eth2Network* {. 
@@ -149,22 +147,11 @@ type LightClientConf* = object defaultValue: 0 name: "debug-stop-at-epoch" .}: uint64 +proc defaultDataDir*(config: LightClientConf): string = + defaultDataDir("", config.eth2Network.shortNetworkName()) + +proc dataDir*(config: LightClientConf): OutDir = + config.dataDirFlag.get(OutDir legacyDataDir().valueOr(defaultDataDir(config))) + template databaseDir*(config: LightClientConf): string = config.dataDir.databaseDir - -template loadJwtSecret*( - rng: var HmacDrbgContext, - config: LightClientConf, - allowCreate: bool): Option[seq[byte]] = - rng.loadJwtSecret(string(config.dataDir), config.jwtSecret, allowCreate) - -proc engineApiUrls*(config: LightClientConf): seq[EngineApiUrl] = - let elUrls = if config.noEl: - return newSeq[EngineApiUrl]() - elif config.elUrls.len == 0 and config.web3Urls.len == 0: - @[getDefaultEngineApiUrl(config.jwtSecret)] - else: - config.elUrls - - (elUrls & config.web3Urls).toFinalEngineApiUrls( - config.jwtSecret.configJwtSecretOpt) diff --git a/beacon_chain/consensus_object_pools/README.md b/beacon_chain/consensus_object_pools/README.md index 7f3129b9f9..d4cc1e807b 100644 --- a/beacon_chain/consensus_object_pools/README.md +++ b/beacon_chain/consensus_object_pools/README.md @@ -4,7 +4,7 @@ This folder holds the various consensus object pools needed for a blockchain cli Object in those pools have passed the "gossip validation" filter according to specs: -- blocks: https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_block +- blocks: https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/p2p-interface.md#beacon_block - aggregate attestations: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof - unaggregated attestation: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id - voluntary exits: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#voluntary_exit diff --git a/beacon_chain/consensus_object_pools/attestation_pool.nim b/beacon_chain/consensus_object_pools/attestation_pool.nim index af9c5e3887..0d8e63e726 100644 --- a/beacon_chain/consensus_object_pools/attestation_pool.nim +++ b/beacon_chain/consensus_object_pools/attestation_pool.nim @@ -5,21 +5,22 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} import # Status libraries metrics, chronicles, stew/byteutils, # Internal - ../spec/[ - beaconstate, eth2_merkleization, forks, state_transition_epoch, validator], + ../spec/[eth2_merkleization, forks, validator], "."/[spec_cache, blockchain_dag, block_quarantine], ../fork_choice/fork_choice, ../beacon_clock from std/algorithm import sort -from std/sequtils import keepItIf, maxIndex +from std/sequtils import keepItIf +from ../spec/beaconstate import check_attestation, dependent_root +from ../spec/state_transition_epoch import compute_unrealized_finality export blockchain_dag, fork_choice @@ -36,18 +37,15 @@ type OnSingleAttestationCallback = proc(data: SingleAttestation) {.gcsafe, raises: [].} - Validation[CVBType] = object + ElectraValidation = object ## Validations collect a set of signatures for a distinct attestation - in ## eth2, a single bit is used to keep track of which signatures have been ## added to the aggregate meaning that only non-overlapping aggregates may ## be further combined. - aggregation_bits: CVBType + aggregation_bits: ElectraCommitteeValidatorsBits aggregate_signature: AggregateSignature - Phase0Validation = Validation[CommitteeValidatorsBits] - ElectraValidation = Validation[ElectraCommitteeValidatorsBits] - - AttestationEntry[CVBType] = object + AttestationEntry = object ## Each entry holds the known signatures for a particular, distinct vote ## For electra+, the data has been changed to hold the committee index data: AttestationData @@ -56,20 +54,15 @@ type ## On the attestation subnets, only attestations with a single vote are ## allowed - these can be collected separately to top up aggregates with - ## here we collect them by mapping index in committee to a vote - aggregates: seq[Validation[CVBType]] + aggregates: seq[ElectraValidation] - Phase0AttestationEntry = AttestationEntry[CommitteeValidatorsBits] - ElectraAttestationEntry = AttestationEntry[ElectraCommitteeValidatorsBits] + ElectraAttestationEntry = AttestationEntry - AttestationTable[CVBType] = Table[Eth2Digest, AttestationEntry[CVBType]] + AttestationTable = Table[Eth2Digest, ElectraAttestationEntry] ## Depending on the world view of the various validators, they may have ## voted on different states - this map keeps track of each vote keyed by ## getAttestationCandidateKey() - CandidateIdxType {.pure.} = enum - phase0Idx - electraIdx - AttestationPool* = object ## The attestation pool keeps track of all attestations that potentially ## could be added to a block during block production. @@ -77,13 +70,7 @@ type ## "free" attestations with those found in past blocks - these votes ## are tracked separately in the fork choice. 
- phase0Candidates: array[ATTESTATION_LOOKBACK.int, - AttestationTable[CommitteeValidatorsBits]] ## \ - ## We keep one item per slot such that indexing matches slot number - ## together with startingSlot - - electraCandidates: array[ATTESTATION_LOOKBACK.int, - AttestationTable[ElectraCommitteeValidatorsBits]] ## \ + electraCandidates: array[ATTESTATION_LOOKBACK.int, AttestationTable] ## \ ## We keep one item per slot such that indexing matches slot number ## together with startingSlot @@ -99,7 +86,6 @@ type nextAttestationEpoch*: seq[tuple[subnet: Epoch, aggregate: Epoch]] ## \ ## sequence based on validator indices - onPhase0AttestationAdded: OnPhase0AttestationCallback onSingleAttestationAdded: OnSingleAttestationCallback CandidateKey = tuple @@ -191,7 +177,6 @@ proc init*(T: type AttestationPool, dag: ChainDAGRef, dag: dag, quarantine: quarantine, forkChoice: forkChoice, - onPhase0AttestationAdded: onPhase0Attestation, onSingleAttestationAdded: onSingleAttestation ) @@ -208,26 +193,19 @@ proc addForkChoiceVotes( # hopefully the fork choice will heal itself over time. error "Couldn't add attestation to fork choice, bug?", err = v.error() -func candidateIdx( - pool: AttestationPool, slot: Slot, candidateIdxType: CandidateIdxType): - Opt[int] = - static: doAssert pool.phase0Candidates.len == pool.electraCandidates.len - - let poolLength = if candidateIdxType == CandidateIdxType.electraIdx: - pool.electraCandidates.lenu64 else: pool.phase0Candidates.lenu64 +func candidateIdx(pool: AttestationPool, slot: Slot): Opt[int] = + const poolLength = pool.electraCandidates.lenu64 - if slot >= pool.startingSlot and - slot < (pool.startingSlot + poolLength): + if slot >= pool.startingSlot and slot < (pool.startingSlot + poolLength): Opt.some(int(slot mod poolLength)) else: Opt.none(int) proc updateCurrent(pool: var AttestationPool, wallSlot: Slot) = - if wallSlot + 1 < pool.phase0Candidates.lenu64: + if wallSlot + 1 < pool.electraCandidates.lenu64: return # Genesis - static: doAssert pool.phase0Candidates.len == pool.electraCandidates.len - let newStartingSlot = wallSlot + 1 - pool.phase0Candidates.lenu64 + let newStartingSlot = wallSlot + 1 - pool.electraCandidates.lenu64 if newStartingSlot < pool.startingSlot: error "Current slot older than attestation pool view, clock reset?", @@ -237,20 +215,17 @@ proc updateCurrent(pool: var AttestationPool, wallSlot: Slot) = # As time passes we'll clear out any old attestations as they are no longer # viable to be included in blocks - if newStartingSlot - pool.startingSlot >= pool.phase0Candidates.lenu64(): + if newStartingSlot - pool.startingSlot >= pool.electraCandidates.lenu64(): # In case many slots passed since the last update, avoid iterating over # the same indices over and over - pool.phase0Candidates.reset() pool.electraCandidates.reset() else: for i in pool.startingSlot..newStartingSlot: - pool.phase0Candidates[i.uint64 mod pool.phase0Candidates.lenu64].reset() pool.electraCandidates[i.uint64 mod pool.electraCandidates.lenu64].reset() pool.startingSlot = newStartingSlot -func oneIndex( - bits: CommitteeValidatorsBits | ElectraCommitteeValidatorsBits): Opt[int] = +func oneIndex(bits: ElectraCommitteeValidatorsBits): Opt[int] = # Find the index of the set bit, iff one bit is set var res = Opt.none(int) for idx in 0.. maxAttestationSlot: # Around genesis.. 
- break - - let - slot = Slot(maxAttestationSlot - i) - candidateIdx = pool.candidateIdx(slot, CandidateIdxType.phase0Idx) - - if candidateIdx.isNone(): - # Passed the collection horizon - shouldn't happen because it's based on - # ATTESTATION_LOOKBACK - break - - for _, entry in pool.phase0Candidates[candidateIdx.get()].mpairs(): - entry.updateAggregates() - - for j in 0.. 0 and res.lenu64() < MAX_ATTESTATIONS: - let entryCacheKey = block: - # Find the candidate with the highest score - slot is used as a - # tie-breaker so that more recent attestations are added first - let - candidate = - # Fast path for when all remaining candidates fit - if candidates.lenu64 < MAX_ATTESTATIONS: candidates.len - 1 - else: maxIndex(candidates) - (_, _, entry, j) = candidates[candidate] - - candidates.del(candidate) # careful, `del` reorders candidates - - res.add(entry[].toAttestation(entry[].aggregates[j])) - - # Update cache so that the new votes are taken into account when updating - # the score below - attCache.add(entry[].data, entry[].aggregates[j].aggregation_bits) - - entry[].data.getAttestationCacheKey - - block: - # Because we added some votes, it's quite possible that some candidates - # are no longer interesting - update the scores of the existing candidates - for it in candidates.mitems(): - # Aggregates not on the same (slot, committee) pair don't change scores - if it.entry[].data.getAttestationCacheKey != entryCacheKey: - continue - - it.score = attCache.score( - it.entry[].data, - it.entry[].aggregates[it.validation].aggregation_bits) - - candidates.keepItIf: - # Only keep candidates that might add coverage - it.score > 0 - - let - packingDur = Moment.now() - startPackingTick - - debug "Packed attestations for block", - newBlockSlot, packingDur, totalCandidates, attestations = res.len() - attestation_pool_block_attestation_packing_time.set( - packingDur.toFloatSeconds()) - - res - -proc getAttestationsForBlock*(pool: var AttestationPool, - state: ForkedHashedBeaconState, - cache: var StateCache): seq[phase0.Attestation] = - withState(state): - when consensusFork < ConsensusFork.Electra: - pool.getAttestationsForBlock(forkyState, cache) - else: - default(seq[phase0.Attestation]) - -proc getElectraAttestationsForBlock*( +proc getAttestationsForBlock*( pool: var AttestationPool, - state: electra.HashedBeaconState | fulu.HashedBeaconState, - cache: var StateCache): seq[electra.Attestation] = + state: electra.HashedBeaconState | fulu.HashedBeaconState | + gloas.HashedBeaconState, + cache: var StateCache, +): seq[electra.Attestation] = let newBlockSlot = state.data.slot.uint64 if newBlockSlot < MIN_ATTESTATION_INCLUSION_DELAY: @@ -948,7 +709,7 @@ proc getElectraAttestationsForBlock*( candidates: seq[tuple[ score: int, slot: Slot, entry: ptr ElectraAttestationEntry, validation: int]] - attCache = AttestationCache[ElectraCommitteeValidatorsBits].init(state, cache) + attCache = AttestationCache.init(state, cache) for i in 0.. maxAttestationSlot: # Around genesis.. @@ -956,7 +717,7 @@ proc getElectraAttestationsForBlock*( let slot = Slot(maxAttestationSlot - i) - candidateIdx = pool.candidateIdx(slot, CandidateIdxType.electraIdx) + candidateIdx = pool.candidateIdx(slot) if candidateIdx.isNone(): # Passed the collection horizon - shouldn't happen because it's based on @@ -1010,8 +771,7 @@ proc getElectraAttestationsForBlock*( # # For each round, we'll look for the best attestation and add it to the result # then re-score the other candidates. 
- var - candidatesPerBlock: OrderedTable[CandidateKey, seq[electra.Attestation]] + var candidatesPerBlock: OrderedTable[CandidateKey, seq[electra.Attestation]] let totalCandidates = candidates.len() while candidates.len > 0 and candidatesPerBlock.lenu64() < @@ -1087,17 +847,8 @@ proc getElectraAttestationsForBlock*( res -proc getElectraAttestationsForBlock*( - pool: var AttestationPool, state: ForkedHashedBeaconState, - cache: var StateCache): seq[electra.Attestation] = - withState(state): - when consensusFork >= ConsensusFork.Electra: - pool.getElectraAttestationsForBlock(forkyState, cache) - else: - default(seq[electra.Attestation]) - func bestValidation( - aggregates: openArray[Phase0Validation | ElectraValidation]): (int, int) = + aggregates: openArray[ElectraValidation]): (int, int) = # Look for best validation based on number of votes in the aggregate doAssert aggregates.len() > 0, "updateAggregates should have created at least one aggregate" @@ -1116,9 +867,7 @@ func getElectraAggregatedAttestation*( pool: var AttestationPool, slot: Slot, attestationDataRoot: Eth2Digest, committeeIndex: CommitteeIndex): Opt[electra.Attestation] = - - let - candidateIdx = pool.candidateIdx(slot, CandidateIdxType.electraIdx) + let candidateIdx = pool.candidateIdx(slot) if candidateIdx.isNone: return Opt.none(electra.Attestation) @@ -1144,9 +893,9 @@ func getElectraAggregatedAttestation*( # "Set `attestation.committee_bits = committee_bits`, where `committee_bits` # has the same value as in each individual attestation." implies that cannot # be used here, because otherwise they wouldn't have the same value. It thus - # leaves the cross-committee aggregation for getElectraAttestationsForBlock, - # which does do this. - let candidateIdx = pool.candidateIdx(slot, CandidateIdxType.electraIdx) + # leaves the cross-committee aggregation for getAttestationsForBlock() which + # does do this. 
+ let candidateIdx = pool.candidateIdx(slot) if candidateIdx.isNone: return Opt.none(electra.Attestation) @@ -1166,49 +915,9 @@ func getElectraAggregatedAttestation*( res func getPhase0AggregatedAttestation*( - pool: var AttestationPool, slot: Slot, attestation_data_root: Eth2Digest): - Opt[phase0.Attestation] = - let - candidateIdx = pool.candidateIdx(slot, CandidateIdxType.phase0Idx) - if candidateIdx.isNone: - return Opt.none(phase0.Attestation) - - pool.phase0Candidates[candidateIdx.get].withValue( - attestation_data_root, entry): - entry[].updateAggregates() - - let (bestIndex, _) = bestValidation(entry[].aggregates) - - # Found the right hash, no need to look further - return Opt.some(entry[].toAttestation(entry[].aggregates[bestIndex])) - + _: var AttestationPool, _: Slot, _: Eth2Digest): Opt[phase0.Attestation] = Opt.none(phase0.Attestation) -func getPhase0AggregatedAttestation*( - pool: var AttestationPool, slot: Slot, index: CommitteeIndex): - Opt[phase0.Attestation] = - ## Select the attestation that has the most votes going for it in the given - ## slot/index - ## https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#construct-aggregate - let candidateIdx = pool.candidateIdx(slot, CandidateIdxType.phase0Idx) - if candidateIdx.isNone: - return Opt.none(phase0.Attestation) - - var res: Opt[phase0.Attestation] - for _, entry in pool.phase0Candidates[candidateIdx.get].mpairs(): - doAssert entry.data.slot == slot - if index != entry.data.index: - continue - - entry.updateAggregates() - - let (bestIndex, best) = bestValidation(entry.aggregates) - - if res.isNone() or best > res.get().aggregation_bits.countOnes(): - res = Opt.some(entry.toAttestation(entry.aggregates[bestIndex])) - - res - type BeaconHead* = object blck*: BlockRef safeExecutionBlockHash*, finalizedExecutionBlockHash*: Eth2Digest diff --git a/beacon_chain/consensus_object_pools/blob_quarantine.nim b/beacon_chain/consensus_object_pools/blob_quarantine.nim index 12da2ca4ff..7c374f9a59 100644 --- a/beacon_chain/consensus_object_pools/blob_quarantine.nim +++ b/beacon_chain/consensus_object_pools/blob_quarantine.nim @@ -5,42 +5,299 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
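# A rough usage sketch of the reworked sidecar quarantine introduced below
# (hypothetical caller: `cfg`, `quarantineDb`, `blockRoot`, `blobSidecar` and
# `signedBlock` are assumed to already be in scope, and the disk multiplier
# value is arbitrary; only the init/put/hasSidecars/popSidecars names are
# taken from this diff):
proc onBlobSeen(data: BlobSidecarInfoObject) {.gcsafe, raises: [].} =
  discard # e.g. forward to the event stream

var blobQuarantine = BlobQuarantine.init(
  cfg, quarantineDb, maxDiskSizeMultipler = 4, onBlobSeen)

# Sidecars arrive one by one from gossip or req/resp, keyed by block root
blobQuarantine.put(blockRoot, blobSidecar)

# Once the block itself is known, check coverage and pop everything;
# popSidecars() returns Opt.none() while sidecars are still missing and
# clears both the memory and the disk entries on success
if blobQuarantine.hasSidecars(blockRoot, signedBlock):
  let sidecars = blobQuarantine.popSidecars(blockRoot, signedBlock)
  doAssert sidecars.isSome()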
-{.push raises: [].} +{.push raises: [], gcsafe.} import - std/tables, - ../spec/helpers + stew/bitops2, + std/[sets, tables], + results, metrics, + ../spec/[presets, helpers, column_map], + ../beacon_chain_db_quarantine -from std/sequtils import mapIt -from std/strutils import join +from ../spec/datatypes/deneb import SignedBeaconBlock +from ../spec/datatypes/electra import SignedBeaconBlock +from ../spec/datatypes/fulu import SignedBeaconBlock +from ../spec/datatypes/gloas import SignedBeaconBlock -func maxBlobs(MAX_BLOBS_PER_BLOCK_ELECTRA: uint64): uint64 = +export results + +declareGauge blob_quarantine_memory_slots_total, "Total count of available memory slots inside blob quarantine" +declareGauge blob_quarantine_memory_slots_occupied, "Number of occupied memory slots inside blob quarantine" +declareGauge blob_quarantine_database_slots_total, "Total count of availble database slots inside blob quarantine" +declareGauge blob_quarantine_database_slots_occupied, "Number of occupied database slots inside blob quarantine" + +type + SidecarHolderKind {.pure.} = enum + Empty, Loaded, Unloaded + + SidecarHolder[A] = object + index: uint64 + proposer_index: uint64 + slot: Slot + case kind: SidecarHolderKind + of SidecarHolderKind.Empty: + discard + of SidecarHolderKind.Unloaded: + discard + of SidecarHolderKind.Loaded: + data: ref A + + RootTableRecord[A] = object + sidecars: seq[SidecarHolder[A]] + slot: Slot + unloaded: int + count: int + + SidecarQuarantine[A, B] = object + minEpochsForSidecarsRequests: uint64 + maxMemSidecarsCount: int + memSidecarsCount: int + maxDiskSidecarsCount: int + diskSidecarsCount: int + maxSidecarsPerBlockCount: int + custodyColumns*: seq[ColumnIndex] + custodyMap: ColumnMap + roots: Table[Eth2Digest, RootTableRecord[A]] + memUsage: OrderedSet[Eth2Digest] + diskUsage: OrderedSet[Eth2Digest] + indexMap: seq[int] + db: QuarantineDB + onSidecarCallback*: B + + OnBlobSidecarCallback* = proc( + data: BlobSidecarInfoObject) {.gcsafe, raises: [].} + OnDataColumnSidecarCallback* = proc( + data: DataColumnSidecarInfoObject) {.gcsafe, raises: [].} + + BlobQuarantine* = + SidecarQuarantine[BlobSidecar, OnBlobSidecarCallback] + ColumnQuarantine* = + SidecarQuarantine[fulu.DataColumnSidecar, OnDataColumnSidecarCallback] + +func isEmpty[A](holder: SidecarHolder[A]): bool = + holder.kind == SidecarHolderKind.Empty + +func isUnloaded[A](holder: SidecarHolder[A]): bool = + holder.kind == SidecarHolderKind.Unloaded + +func isLoaded[A](holder: SidecarHolder[A]): bool = + holder.kind == SidecarHolderKind.Loaded + +func maxSidecars*(maxSidecarsPerBlock: uint64): int = # Same limit as `MaxOrphans` in `block_quarantine`; # blobs may arrive before an orphan is tagged `blobless` - 3 * SLOTS_PER_EPOCH * MAX_BLOBS_PER_BLOCK_ELECTRA + 3 * int(SLOTS_PER_EPOCH) * int(maxSidecarsPerBlock) -type - BlobQuarantine* = object - maxBlobs: uint64 - blobs*: - OrderedTable[(Eth2Digest, BlobIndex, KzgCommitment), ref BlobSidecar] - onBlobSidecarCallback*: OnBlobSidecarCallback +func init[A, B]( + t: typedesc[RootTableRecord], + q: SidecarQuarantine[A, B] +): RootTableRecord[A] = + RootTableRecord[A]( + sidecars: newSeq[SidecarHolder[A]](q.maxSidecarsPerBlockCount), + count: 0, unloaded: 0, slot: FAR_FUTURE_SLOT) + +func len*[A, B](quarantine: SidecarQuarantine[A, B]): int = + quarantine.memSidecarsCount + quarantine.diskSidecarsCount + +func lenMemory*[A, B](quarantine: SidecarQuarantine[A, B]): int = + quarantine.memSidecarsCount + +func lenDisk*[A, B](quarantine: SidecarQuarantine[A, B]): int = + 
quarantine.diskSidecarsCount
+
+proc removeRoot[A, B]( + quarantine: var SidecarQuarantine[A, B], + blockRoot: Eth2Digest +) = + # This procedure removes all the sidecars associated with `blockRoot` from + # memory and from disk. + var + rootRecord: RootTableRecord[A] + sidecarsOnDisk = 0 + + if quarantine.roots.pop(blockRoot, rootRecord): + for index in 0 ..< len(rootRecord.sidecars): + case rootRecord.sidecars[index].kind + of SidecarHolderKind.Empty: + discard + of SidecarHolderKind.Loaded: + rootRecord.sidecars[index].data = nil + dec(quarantine.memSidecarsCount) + blob_quarantine_memory_slots_occupied.set( + int64(quarantine.memSidecarsCount)) + of SidecarHolderKind.Unloaded: + dec(quarantine.diskSidecarsCount) + blob_quarantine_database_slots_occupied.set( + int64(quarantine.diskSidecarsCount)) + inc(sidecarsOnDisk) + + if sidecarsOnDisk > 0 and quarantine.maxMemSidecarsCount > 0: + quarantine.db.removeDataSidecars(A, blockRoot) + quarantine.diskUsage.excl(blockRoot) + + quarantine.memUsage.excl(blockRoot)
+
+proc remove*[A, B]( + quarantine: var SidecarQuarantine[A, B], + blockRoot: Eth2Digest +) = + ## Remove all the data columns or blobs related to the block root ``blockRoot`` + ## from the quarantine ``quarantine``. + ## + ## The function does nothing if ``blockRoot`` is not part of the quarantine. + quarantine.removeRoot(blockRoot)
+
+func getOldestInMemoryRoot[A, B]( + quarantine: SidecarQuarantine[A, B] +): Eth2Digest = + var oldestRoot: Eth2Digest + for blockRoot in quarantine.memUsage: + oldestRoot = blockRoot + break + oldestRoot
+
+func getOldestOnDiskRoot[A, B]( + quarantine: SidecarQuarantine[A, B] +): Eth2Digest = + var oldestRoot: Eth2Digest + for blockRoot in quarantine.diskUsage: + oldestRoot = blockRoot + break + oldestRoot
+
+func fitsInMemory[A, B](quarantine: SidecarQuarantine[A, B], count: int): bool = + quarantine.memSidecarsCount + count <= quarantine.maxMemSidecarsCount
+
+func fitsOnDisk[A, B](quarantine: SidecarQuarantine[A, B], count: int): bool = + quarantine.diskSidecarsCount + count <= quarantine.maxDiskSidecarsCount
+
+proc pruneInMemoryRoot[A, B](quarantine: var SidecarQuarantine[A, B]) = + # Remove all the blobs related to the oldest block root from the memory + # storage of quarantine ``quarantine``. + if len(quarantine.memUsage) == 0: + return + quarantine.remove(quarantine.getOldestInMemoryRoot())
+
+proc pruneOnDiskRoot[A, B](quarantine: var SidecarQuarantine[A, B]) = + # Remove all the blobs related to the oldest block root from the disk + # storage of quarantine ``quarantine``.
+ if len(quarantine.diskUsage) == 0: + return + quarantine.remove(quarantine.getOldestOnDiskRoot()) + +func getIndex(quarantine: BlobQuarantine, index: BlobIndex): int = + quarantine.indexMap[int(index)] + +func getIndex(quarantine: ColumnQuarantine, index: ColumnIndex): int = + quarantine.indexMap[int(index)] + +template slot(b: BlobSidecar|fulu.DataColumnSidecar): Slot = + b.signed_block_header.message.slot + +template proposer_index(b: BlobSidecar|fulu.DataColumnSidecar): uint64 = + b.signed_block_header.message.proposer_index - BlobFetchRecord* = object - block_root*: Eth2Digest - indices*: seq[BlobIndex] +func unload[A](holder: var SidecarHolder[A]): ref A = + doAssert(holder.kind == SidecarHolderKind.Loaded) + let res = holder.data + holder.data = nil + holder = SidecarHolder[A]( + kind: SidecarHolderKind.Unloaded, + slot: holder.slot, + index: holder.index, + proposer_index: holder.proposer_index, + ) + res - OnBlobSidecarCallback = proc( - data: BlobSidecarInfoObject) {.gcsafe, raises: [].} +func load[A](holder: var SidecarHolder[A], sidecar: ref A) = + holder = SidecarHolder[A]( + kind: SidecarHolderKind.Loaded, + slot: holder.slot, + index: holder.index, + proposer_index: holder.proposer_index, + data: sidecar + ) -func shortLog*(x: seq[BlobIndex]): string = - "<" & x.mapIt($it).join(", ") & ">" +proc unloadRoot[A, B](quarantine: var SidecarQuarantine[A, B]) = + doAssert(len(quarantine.memUsage) > 0) -func shortLog*(x: seq[BlobFetchRecord]): string = - "[" & x.mapIt(shortLog(it.block_root) & shortLog(it.indices)).join(", ") & "]" + if quarantine.maxDiskSidecarsCount == 0: + # Disk storage is disabled, so we use should prune memory storage instead. + quarantine.pruneInMemoryRoot() + return -func put*(quarantine: var BlobQuarantine, blobSidecar: ref BlobSidecar) = - if quarantine.blobs.lenu64 >= quarantine.maxBlobs: + let blockRoot = quarantine.getOldestInMemoryRoot() + + quarantine.roots.withValue(blockRoot, record): + if not(quarantine.fitsOnDisk(record[].count)): + quarantine.pruneOnDiskRoot() + # Pruning on disk also removes sidecars from memory, so this could be + # enough + return + + var res: seq[ref A] + for index in 0 ..< len(record[].sidecars): + if record[].sidecars[index].kind == SidecarHolderKind.Loaded: + res.add(record[].sidecars[index].unload()) + dec(quarantine.memSidecarsCount) + inc(quarantine.diskSidecarsCount) + blob_quarantine_memory_slots_occupied.set( + int64(quarantine.memSidecarsCount)) + blob_quarantine_database_slots_occupied.set( + int64(quarantine.diskSidecarsCount)) + inc(record[].unloaded) + + if len(res) > 0: + quarantine.db.putDataSidecars(blockRoot, res) + quarantine.memUsage.excl(blockRoot) + quarantine.diskUsage.incl(blockRoot) + +proc loadRoot[A, B](quarantine: var SidecarQuarantine[A, B], + blockRoot: Eth2Digest, + record: var RootTableRecord[A]) = + for sidecar in quarantine.db.sidecars(A, blockRoot): + let index = quarantine.getIndex(sidecar.index) + doAssert(index >= 0, "Incorrect sidecar index [" & $sidecar.index & "]") + doAssert(record.sidecars[index].isUnloaded(), + "Database storage is inconsistent") + record.sidecars[index].load(newClone(sidecar)) + dec(record.unloaded) + doAssert(record.unloaded == 0, "Record's unload counter should be zero") + +proc put[A, B](record: var RootTableRecord[A], q: var SidecarQuarantine[A, B], + sidecars: openArray[ref A]) = + for sidecar in sidecars: + # Sidecar should pass validation before being added to quarantine, + # so we assume that + # 1. sidecar.index is < MAX_BLOBS_PER_BLOCK for `deneb` and. 
+ # 2. sidecar.index is < MAX_BLOBS_PER_BLOCK_ELECTRA for `electra`. + # 3. sidecar.index is in custody columns set for `fulu`. + let index = q.getIndex(sidecar.index) + doAssert(index >= 0, "Incorrect sidecar index [" & $sidecar.index & "]") + + if isEmpty(record.sidecars[index]): + inc(q.memSidecarsCount) + blob_quarantine_memory_slots_occupied.set(int64(q.memSidecarsCount)) + inc(record.count) + record.slot = sidecar[].slot() + + record.sidecars[index] = SidecarHolder[A]( + kind: SidecarHolderKind.Loaded, + slot: sidecar[].slot(), + index: uint64(sidecar[].index), + proposer_index: sidecar[].proposer_index(), + data: sidecar + ) + +proc put*[A, B]( + quarantine: var SidecarQuarantine[A, B], + blockRoot: Eth2Digest, + sidecar: ref A +) = + ## Function adds blob or data column sidecar associated with block root + ## ``blockRoot`` to the quarantine ``quarantine``. + while not(quarantine.fitsInMemory(1)): # FIFO if full. For example, sync manager and request manager can race to # put blobs in at the same time, so one gets blob insert -> block resolve # -> blob insert sequence, which leaves garbage blobs. @@ -49,66 +306,541 @@ func put*(quarantine: var BlobQuarantine, blobSidecar: ref BlobSidecar) = # blobs which are correctly signed, point to either correct block roots or a # block root which isn't ever seen, and then are for any reason simply never # used. - var oldest_blob_key: (Eth2Digest, BlobIndex, KzgCommitment) - for k in quarantine.blobs.keys: - oldest_blob_key = k - break - quarantine.blobs.del oldest_blob_key - let block_root = hash_tree_root(blobSidecar.signed_block_header.message) - discard quarantine.blobs.hasKeyOrPut( - (block_root, blobSidecar.index, blobSidecar.kzg_commitment), blobSidecar) - -func hasBlob*( + quarantine.unloadRoot() + + let rootRecord = RootTableRecord.init(quarantine) + quarantine.roots.mgetOrPut(blockRoot, rootRecord).put( + quarantine, [sidecar]) + quarantine.memUsage.incl(blockRoot) + +proc put*[A, B]( + quarantine: var SidecarQuarantine[A, B], + blockRoot: Eth2Digest, + sidecars: openArray[ref A] +) = + ## Function adds number of blobs or data columns sidecars associated to single + ## block with root ``blockRoot`` to the quarantine ``quarantine``. + if len(sidecars) == 0: + return + + while not(quarantine.fitsInMemory(len(sidecars))): + # FIFO if full. For example, sync manager and request manager can race to + # put blobs in at the same time, so one gets blob insert -> block resolve + # -> blob insert sequence, which leaves garbage blobs. + # + # This also therefore automatically garbage-collects otherwise valid garbage + # blobs which are correctly signed, point to either correct block roots or a + # block root which isn't ever seen, and then are for any reason simply never + # used. 
+ quarantine.unloadRoot() + + let rootRecord = RootTableRecord.init(quarantine) + + quarantine.roots.mgetOrPut(blockRoot, rootRecord).put( + quarantine, sidecars) + quarantine.memUsage.incl(blockRoot) + +template hasSidecarImpl( + blockRoot: Eth2Digest, + slot: Slot, + proposerIndex: uint64, + sidecarIndex: typed +): bool = + let rootRecord = quarantine.roots.getOrDefault(blockRoot) + if rootRecord.count == 0: + return false + let index = quarantine.getIndex(index) + if (index == -1) or rootRecord.sidecars[index].isEmpty(): + return false + if (rootRecord.sidecars[index].proposer_index != proposer_index) or + (rootRecord.sidecars[index].slot != slot): + return false + true + +template hasSidecarImpl( + blockRoot: Eth2Digest, + sidecarIndex: typed +): bool = + let rootRecord = quarantine.roots.getOrDefault(blockRoot) + if rootRecord.count == 0: + return false + let index = quarantine.getIndex(sidecarIndex) + (index != -1) and not rootRecord.sidecars[index].isEmpty() + +func hasSidecar*( quarantine: BlobQuarantine, + blockRoot: Eth2Digest, slot: Slot, proposer_index: uint64, index: BlobIndex, - kzg_commitment: KzgCommitment): bool = - for blob_sidecar in quarantine.blobs.values: - template block_header: untyped = blob_sidecar.signed_block_header.message - if block_header.slot == slot and - block_header.proposer_index == proposer_index and - blob_sidecar.index == index and - blob_sidecar.kzg_commitment == kzg_commitment: - return true - false - -func popBlobs*( - quarantine: var BlobQuarantine, digest: Eth2Digest, - blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock | - fulu.SignedBeaconBlock): - seq[ref BlobSidecar] = - var r: seq[ref BlobSidecar] = @[] - for idx, kzg_commitment in blck.message.body.blob_kzg_commitments: - var b: ref BlobSidecar - if quarantine.blobs.pop((digest, BlobIndex idx, kzg_commitment), b): - r.add(b) - r - -func hasBlobs*(quarantine: BlobQuarantine, +): bool = + ## Function returns ``true``if quarantine has blob corresponding to specific + ## ``block root``, ``index``, ``slot`` and ``proposer_index``. + hasSidecarImpl(blockRoot, slot, proposer_index, index) + +func hasSidecar*( + quarantine: ColumnQuarantine, + blockRoot: Eth2Digest, + slot: Slot, + proposer_index: uint64, + index: ColumnIndex +): bool = + ## Function returns ``true``if quarantine has column corresponding to specific + ## ``index``, ``slot`` and ``proposer_index``. + hasSidecarImpl(blockRoot, slot, proposer_index, index) + +func hasSidecar*( + quarantine: ColumnQuarantine, + blockRoot: Eth2Digest, + index: ColumnIndex +): bool = + hasSidecarImpl(blockRoot, index) + +func hasSidecars*( + quarantine: BlobQuarantine, + blockRoot: Eth2Digest, blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock | - fulu.SignedBeaconBlock): bool = - # Having a fulu SignedBeaconBlock is incorrect atm, but - # shall be fixed once data columns are rebased to fulu - for idx, kzg_commitment in blck.message.body.blob_kzg_commitments: - if (blck.root, BlobIndex idx, kzg_commitment) notin quarantine.blobs: - return false + fulu.SignedBeaconBlock +): bool = + ## Function returns ``true`` if quarantine has all the blobs for block + ## ``blck`` with block root ``blockRoot``. + if len(blck.message.body.blob_kzg_commitments) == 0: + return true + + let record = quarantine.roots.getOrDefault(blockRoot) + if record.count == 0: + # block root not found. + return false + + if record.count < len(blck.message.body.blob_kzg_commitments): + # Quarantine does not hold enough blob sidecars. 
+ return false + true
+
+func hasSidecars*( + quarantine: ColumnQuarantine, + blockRoot: Eth2Digest, + blck: fulu.SignedBeaconBlock +): bool = + ## Function returns ``true`` if quarantine has all the columns for block + ## ``blck`` with block root ``blockRoot``. + if len(blck.message.body.blob_kzg_commitments) == 0: + return true + + let record = quarantine.roots.getOrDefault(blockRoot) + if len(record.sidecars) == 0: + # block root not found, record.sidecars sequence was not initialized. + return false + + let + supernode = (len(quarantine.custodyColumns) == NUMBER_OF_COLUMNS) + columnsCount = + if supernode: + (NUMBER_OF_COLUMNS div 2 + 1) + else: + len(quarantine.custodyColumns) + + if record.count < columnsCount: + # Quarantine does not hold enough column sidecars. + return false + true
+
+func hasSidecars*( + quarantine: BlobQuarantine, + blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock | + fulu.SignedBeaconBlock +): bool = + ## Function returns ``true`` if quarantine has all the blobs for block + ## ``blck`` with block root ``blck.root``. + hasSidecars(quarantine, blck.root, blck)
+
+func hasSidecars*( + quarantine: ColumnQuarantine, + blck: fulu.SignedBeaconBlock +): bool = + ## Function returns ``true`` if quarantine has all the columns for block + ## ``blck`` with block root ``blck.root``. + hasSidecars(quarantine, blck.root, blck)
+
+proc popSidecars*( + quarantine: var BlobQuarantine, + blockRoot: Eth2Digest, + blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock +): Opt[seq[ref BlobSidecar]] = + ## Function returns sequence of blob sidecars for block root ``blockRoot`` and + ## block ``blck``. + ## If some of the blob sidecars are missing, Opt.none() is returned. + ## If the block does not have any blob sidecars, Opt.some([]) is returned. + + when blck is gloas.SignedBeaconBlock: + quarantine.remove(blockRoot) + return Opt.some(default(seq[ref BlobSidecar])) + + let sidecarsCount = len(blck.message.body.blob_kzg_commitments) + if sidecarsCount == 0: + # Block does not have any blob sidecars. + quarantine.remove(blockRoot) + return Opt.some(default(seq[ref BlobSidecar])) + + var record = quarantine.roots.getOrDefault(blockRoot) + if len(record.sidecars) == 0: + # block root not found, record.sidecars sequence was not initialized. + return Opt.none(seq[ref BlobSidecar]) + + if record.count < sidecarsCount: + # Quarantine does not hold enough blob sidecars. + return Opt.none(seq[ref BlobSidecar]) + + if record.unloaded > 0: + # Quarantine unloaded some blobs to disk, we should load them back. + quarantine.loadRoot(blockRoot, record) + + var sidecars: seq[ref BlobSidecar] + for bindex in 0 ..< len(blck.message.body.blob_kzg_commitments): + let index = quarantine.getIndex(BlobIndex(bindex)) + doAssert(record.sidecars[index].isLoaded(), + "Record should only have loaded values at this point") + sidecars.add(record.sidecars[index].data) + + # popSidecars() should remove all the artifacts from the quarantine in both + # memory and disk. + quarantine.removeRoot(blockRoot) + + Opt.some(sidecars)
+
+proc popSidecars*( + quarantine: var ColumnQuarantine, + blockRoot: Eth2Digest, + blck: fulu.SignedBeaconBlock +): Opt[seq[ref fulu.DataColumnSidecar]] = + ## Function returns sequence of column sidecars for block root ``blockRoot`` + ## and block ``blck``. + ## If some of the column sidecars are missing, Opt.none() is returned. + ## If the block does not have any column sidecars, Opt.some([]) is returned.
+ let sidecarsCount = len(blck.message.body.blob_kzg_commitments) + if sidecarsCount == 0: + # Block does not have any blob sidecars. + quarantine.remove(blockRoot) + return Opt.some(default(seq[ref fulu.DataColumnSidecar])) + + var record = quarantine.roots.getOrDefault(blockRoot) + if len(record.sidecars) == 0: + # block root not found, record.sidecars sequence was not allocated. + return Opt.none(seq[ref fulu.DataColumnSidecar]) + + let + supernode = (len(quarantine.custodyColumns) == NUMBER_OF_COLUMNS) + columnsCount = + if supernode: + (NUMBER_OF_COLUMNS div 2 + 1) + else: + len(quarantine.custodyColumns) + + if record.count < columnsCount: + # Quarantine does not hold enough column sidecars. + return Opt.none(seq[ref fulu.DataColumnSidecar]) + + if record.unloaded > 0: + # Quarantine unloaded some blobs to disk, we should load it back. + quarantine.loadRoot(blockRoot, record) + + var sidecars: seq[ref fulu.DataColumnSidecar] + if supernode: + for sidecar in record.sidecars: + # Supernode could have some of the columns not filled. + if not(sidecar.isEmpty()): + doAssert(sidecar.isLoaded(), + "Sidecars should be loaded at this moment") + sidecars.add(sidecar.data) + if len(sidecars) >= (NUMBER_OF_COLUMNS div 2 + 1): + break + + doAssert(len(sidecars) >= (NUMBER_OF_COLUMNS div 2 + 1), + "Incorrect amount of sidecars in record") + else: + for cindex in quarantine.custodyColumns: + let index = quarantine.getIndex(cindex) + doAssert(record.sidecars[index].isLoaded(), + "Sidecars should be loaded at this moment") + sidecars.add(record.sidecars[index].data) + + doAssert(len(sidecars) == len(quarantine.custodyColumns), + "Incorrect amount of sidecars in record") + + # popSidecars() should remove all the artifacts from the quarantine in both + # memory and disk. + quarantine.removeRoot(blockRoot) + + Opt.some(sidecars) + +proc popSidecars*( + quarantine: var BlobQuarantine, blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock | - fulu.SignedBeaconBlock): BlobFetchRecord = - var indices: seq[BlobIndex] - for i in 0.. 0: + @peerCustodyColumns + else: + quarantine.custodyColumns + if len(record.sidecars) == 0: + var columnsRequested = 0 + for column in columns: + if columnsRequested >= columnsCount: + # We don't need to request more than (NUMBER_OF_COLUMNS div 2 + 1) + # columns. + break + res.add(column) + inc(columnsRequested) + else: + if record.count >= columnsCount: + return + DataColumnsByRootIdentifier( + block_root: blockRoot, indices: DataColumnIndices(res)) + var columnsRequested = 0 + for column in columns: + if record.count + columnsRequested >= columnsCount: + # We don't need to request more than (NUMBER_OF_COLUMNS div 2 + 1) + # columns. 
+ break + let index = quarantine.getIndex(column) + if (index == -1) or record.sidecars[index].isEmpty(): + res.add(column) + inc(columnsRequested) + else: + let peerMap = + if len(peerCustodyColumns) > 0: + ColumnMap.init(peerCustodyColumns) + else: + ColumnMap.init(quarantine.custodyColumns) + if len(record.sidecars) == 0: + for column in (peerMap and quarantine.custodyMap).items(): + res.add(column) + else: + for column in (peerMap and quarantine.custodyMap).items(): + let index = quarantine.getIndex(column) + if (index == -1) or (record.sidecars[index].isEmpty()): + res.add(column) + + DataColumnsByRootIdentifier( + block_root: blockRoot, indices: DataColumnIndices(res)) + +proc pruneAfterFinalization*( + quarantine: var BlobQuarantine, + epoch: Epoch, + backfillNeeded: bool +) = + let + startEpoch = + if backfillNeeded: + # Because BlobQuarantine could be used as temporary storage for incoming + # blob sidecars, we should not prune blobs which are behind + # `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epoch. Otherwise we will not + # be able to backfill blobs. + if epoch < quarantine.minEpochsForSidecarsRequests: + Epoch(0) + else: + epoch - quarantine.minEpochsForSidecarsRequests + else: + epoch + epochSlot = (startEpoch + 1).start_slot() + + var rootsToRemove: seq[Eth2Digest] + for mkey, mrecord in quarantine.roots.mpairs(): + if (mrecord.count > 0) and (mrecord.slot < epochSlot): + rootsToRemove.add(mkey) + + for root in rootsToRemove: + quarantine.removeRoot(root) + +proc pruneAfterFinalization*( + quarantine: var ColumnQuarantine, + epoch: Epoch, + backfillNeeded: bool +) = + let + startEpoch = + if backfillNeeded: + # Because ColumnQuarantine could be used as temporary storage for + # incoming data column sidecars, we should not prune data columns which + # are behind `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` epoch. + # Otherwise we will not be able to backfill data columns. 
+ if epoch < quarantine.minEpochsForSidecarsRequests: + Epoch(0) + else: + epoch - quarantine.minEpochsForSidecarsRequests + else: + epoch + epochSlot = (startEpoch + 1).start_slot() + + var rootsToRemove: seq[Eth2Digest] + for mkey, mrecord in quarantine.roots.mpairs(): + if (mrecord.count > 0) and (mrecord.slot < epochSlot): + rootsToRemove.add(mkey) + + for root in rootsToRemove: + quarantine.removeRoot(root) + +template onBlobSidecarCallback*( + quarantine: BlobQuarantine +): OnBlobSidecarCallback = + quarantine.onSidecarCallback + +template onDataColumnSidecarCallback*( + quarantine: ColumnQuarantine +): OnDataColumnSidecarCallback = + quarantine.onSidecarCallback + +proc init*( + T: typedesc[BlobQuarantine], cfg: RuntimeConfig, - onBlobSidecarCallback: OnBlobSidecarCallback): T = - T(maxBlobs: cfg.MAX_BLOBS_PER_BLOCK_ELECTRA.maxBlobs(), - onBlobSidecarCallback: onBlobSidecarCallback) + database: QuarantineDB, + maxDiskSizeMultipler: int, + onBlobSidecarCallback: OnBlobSidecarCallback +): BlobQuarantine = + # BlobSidecars maps are trivial, but still useful + var indexMap = newSeqUninit[int](cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + for index in 0 ..< len(indexMap): + indexMap[index] = index + + let size = maxSidecars(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + + blob_quarantine_memory_slots_total.set(int64(size)) + blob_quarantine_database_slots_total.set( + int64(size) * int64(maxDiskSizeMultipler)) + blob_quarantine_memory_slots_occupied.set(0'i64) + blob_quarantine_database_slots_occupied.set(0'i64) + + BlobQuarantine( + minEpochsForSidecarsRequests: + cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + maxSidecarsPerBlockCount: + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA), + maxMemSidecarsCount: size, + maxDiskSidecarsCount: size * maxDiskSizeMultipler, + memSidecarsCount: 0, + diskSidecarsCount: 0, + indexMap: indexMap, + onSidecarCallback: onBlobSidecarCallback, + db: database + ) + +proc init*( + T: typedesc[ColumnQuarantine], + cfg: RuntimeConfig, + custodyColumns: openArray[ColumnIndex], + database: QuarantineDB, + maxDiskSizeMultipler: int, + onDataColumnSidecarCallback: OnDataColumnSidecarCallback +): ColumnQuarantine = + doAssert(len(custodyColumns) <= NUMBER_OF_COLUMNS) + var indexMap = newSeqUninit[int](NUMBER_OF_COLUMNS) + if len(custodyColumns) < NUMBER_OF_COLUMNS: + for i in 0 ..< len(indexMap): + indexMap[i] = -1 + for index, item in custodyColumns.pairs(): + doAssert(item < uint64(NUMBER_OF_COLUMNS)) + indexMap[int(item)] = index + + let size = maxSidecars(NUMBER_OF_COLUMNS) + + blob_quarantine_memory_slots_total.set(int64(size)) + blob_quarantine_database_slots_total.set( + int64(size) * int64(maxDiskSizeMultipler)) + blob_quarantine_memory_slots_occupied.set(0'i64) + blob_quarantine_database_slots_occupied.set(0'i64) + + ColumnQuarantine( + minEpochsForSidecarsRequests: + cfg.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS, + maxSidecarsPerBlockCount: len(custodyColumns), + maxMemSidecarsCount: size, + maxDiskSidecarsCount: size * maxDiskSizeMultipler, + memSidecarsCount: 0, + diskSidecarsCount: 0, + indexMap: indexMap, + custodyColumns: @custodyColumns, + custodyMap: ColumnMap.init(custodyColumns), + db: database, + onSidecarCallback: onDataColumnSidecarCallback + ) + +func updateColumnQuarantine*( + quarantine: ref ColumnQuarantine, + cfg: RuntimeConfig, + custodyColumns: openArray[ColumnIndex]) = + doAssert(len(custodyColumns) <= NUMBER_OF_COLUMNS) + var indexMap = newSeqUninit[int](NUMBER_OF_COLUMNS) + if len(custodyColumns) < NUMBER_OF_COLUMNS: + for i in 0 ..< len(indexMap): + indexMap[i] 
= -1 + for index, item in custodyColumns.pairs(): + doAssert(item < uint64(NUMBER_OF_COLUMNS)) + indexMap[int(item)] = index + + quarantine.maxSidecarsPerBlockCount = len(custodyColumns) + quarantine.indexMap = indexMap + quarantine.custodyColumns = @custodyColumns + quarantine.custodyMap = ColumnMap.init(custodyColumns) diff --git a/beacon_chain/consensus_object_pools/block_clearance.nim b/beacon_chain/consensus_object_pools/block_clearance.nim index b42125bc2c..59c124d40c 100644 --- a/beacon_chain/consensus_object_pools/block_clearance.nim +++ b/beacon_chain/consensus_object_pools/block_clearance.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import chronicles, @@ -32,25 +32,27 @@ proc addResolvedHeadBlock( dag: ChainDAGRef, state: var ForkedHashedBeaconState, trustedBlock: ForkyTrustedSignedBeaconBlock, - executionValid: bool, + optimisticStatus: OptimisticStatus, parent: BlockRef, cache: var StateCache, - onBlockAdded: OnForkyBlockAdded, + onBlockAdded: OnBlockAdded, stateDataDur, sigVerifyDur, stateVerifyDur: Duration ): BlockRef = doAssert state.matches_block_slot( trustedBlock.root, trustedBlock.message.slot), "Given state must have the new block applied" + const consensusFork = typeof(trustedBlock).kind let blockRoot = trustedBlock.root - blockRef = BlockRef.init( - blockRoot, executionValid = executionValid, trustedBlock.message) + blockRef = BlockRef.init(blockRoot, optimisticStatus, trustedBlock.message) startTick = Moment.now() link(parent, blockRef) - if executionValid: - dag.markBlockVerified(blockRef) + if optimisticStatus == OptimisticStatus.valid: + # Since the new block has a valid payload, its ancestors also do but this + # might be the first time we learn of it + parent.markExecutionValid(true) dag.forkBlocks.incl(KeyedBlockRef.init(blockRef)) @@ -81,7 +83,7 @@ proc addResolvedHeadBlock( debug "Block resolved", blockRoot = shortLog(blockRoot), blck = shortLog(trustedBlock.message), - executionValid, heads = dag.heads.len(), + optimisticStatus, heads = dag.heads.len(), stateDataDur, sigVerifyDur, stateVerifyDur, putBlockDur = putBlockTick - startTick, epochRefDur = epochRefTick - putBlockTick @@ -99,12 +101,15 @@ proc addResolvedHeadBlock( # Notify others of the new block before processing the quarantine, such that # notifications for parents happens before those of the children if onBlockAdded != nil: - let unrealized = withState(state): + let unrealized = when consensusFork >= ConsensusFork.Altair: - forkyState.data.compute_unrealized_finality() + state.forky(consensusFork).data.compute_unrealized_finality() else: - forkyState.data.compute_unrealized_finality(cache) - onBlockAdded(blockRef, trustedBlock, epochRef, unrealized) + state.forky(consensusFork).data.compute_unrealized_finality(cache) + onBlockAdded( + blockRef, trustedBlock, state.forky(consensusFork).data, epochRef, unrealized + ) + if not(isNil(dag.onBlockAdded)): dag.onBlockAdded(ForkedTrustedSignedBeaconBlock.init(trustedBlock)) @@ -134,27 +139,6 @@ proc checkStateTransition( else: ok() -proc advanceClearanceState*(dag: ChainDAGRef) = - # When the chain is synced, the most likely block to be produced is the block - # right after head - we can exploit this assumption and advance the state - # to that slot before the block arrives, thus allowing us to do the expensive - # 
epoch transition ahead of time. - # Notably, we use the clearance state here because that's where the block will - # first be seen - later, this state will be copied to the head state! - let advanced = withState(dag.clearanceState): - forkyState.data.slot > forkyState.data.latest_block_header.slot - if not advanced: - let - startTick = Moment.now() - next = getStateField(dag.clearanceState, slot) + 1 - var - cache = StateCache() - info = ForkedEpochInfo() - dag.advanceSlots(dag.clearanceState, next, true, cache, info, - dag.updateFlags) - debug "Prepared clearance state for next block", - next, updateStateDur = Moment.now() - startTick - proc checkHeadBlock*( dag: ChainDAGRef, signedBlock: ForkySignedBeaconBlock): Result[BlockRef, VerifierError] = @@ -225,7 +209,7 @@ proc checkHeadBlock*( proc addHeadBlockWithParent*( dag: ChainDAGRef, verifier: var BatchVerifier, signedBlock: ForkySignedBeaconBlock, parent: BlockRef, - executionValid: bool, onBlockAdded: OnForkyBlockAdded + optimisticStatus: OptimisticStatus, onBlockAdded: OnBlockAdded ): Result[BlockRef, VerifierError] = ## Try adding a block to the chain, verifying first that it passes the state ## transition function and contains correct cryptographic signature. @@ -307,7 +291,7 @@ proc addHeadBlockWithParent*( ok addResolvedHeadBlock( dag, dag.clearanceState, signedBlock.asTrusted(), - executionValid, + optimisticStatus, parent, cache, onBlockAdded, stateDataDur = stateDataTick - startTick, @@ -344,8 +328,7 @@ proc addBackfillBlock*( info "Invalid genesis block signature" return err(VerifierError.Invalid) else: - let proposerKey = dag.validatorKey(blck.proposer_index) - if proposerKey.isNone(): + let proposerKey = dag.validatorKey(blck.proposer_index).valueOr: # We've verified that the block root matches our expectations by following # the chain of parents all the way from checkpoint. 
If all those blocks # were valid, the proposer_index in this block must also be valid, and we @@ -362,7 +345,7 @@ proc addBackfillBlock*( getStateField(dag.headState, genesis_validators_root), blck.slot, signedBlock.root, - proposerKey.get(), + proposerKey, signedBlock.signature): info "Block signature verification failed" return err(VerifierError.Invalid) @@ -452,24 +435,6 @@ proc addBackfillBlock*( ok() -template BlockAdded(kind: static ConsensusFork): untyped = - when kind == ConsensusFork.Fulu: - OnFuluBlockAdded - elif kind == ConsensusFork.Electra: - OnElectraBlockAdded - elif kind == ConsensusFork.Deneb: - OnDenebBlockAdded - elif kind == ConsensusFork.Capella: - OnCapellaBlockAdded - elif kind == ConsensusFork.Bellatrix: - OnBellatrixBlockAdded - elif kind == ConsensusFork.Altair: - OnAltairBlockAdded - elif kind == ConsensusFork.Phase0: - OnPhase0BlockAdded - else: - static: raiseAssert "Unreachable" - proc verifyBlockProposer*( verifier: var BatchVerifier, fork: Fork, @@ -489,73 +454,57 @@ proc verifyBlockProposer*( proc addBackfillBlockData*( dag: ChainDAGRef, + consensusFork: static ConsensusFork, bdata: BlockData, onStateUpdated: OnStateUpdated, - onBlockAdded: OnForkedBlockAdded + onBlockAdded: OnBlockAdded, ): Result[void, VerifierError] = var cache = StateCache() + template forkyBlck: untyped = bdata.blck.forky(consensusFork) + let + parent = checkHeadBlock(dag, forkyBlck).valueOr: + if error == VerifierError.Duplicate: + return ok() + return err(error) + startTick = Moment.now() + clearanceBlock = BlockSlotId.init(parent.bid, forkyBlck.message.slot) + updateFlags1 = dag.updateFlags + # TODO (cheatfate): {skipLastStateRootCalculation} flag here could + # improve performance by 100%, but this approach needs some + # improvements, which is unclear. + + if not updateState(dag, dag.clearanceState, clearanceBlock, true, cache, + updateFlags1): + error "Unable to load clearance state for parent block, " & + "database corrupt?", clearanceBlock = shortLog(clearanceBlock) + return err(VerifierError.MissingParent) - withBlck(bdata.blck): - let - parent = checkHeadBlock(dag, forkyBlck).valueOr: - if error == VerifierError.Duplicate: - return ok() - return err(error) - startTick = Moment.now() - clearanceBlock = BlockSlotId.init(parent.bid, forkyBlck.message.slot) - updateFlags1 = dag.updateFlags - # TODO (cheatfate): {skipLastStateRootCalculation} flag here could - # improve performance by 100%, but this approach needs some - # improvements, which is unclear. - - if not updateState(dag, dag.clearanceState, clearanceBlock, true, cache, - updateFlags1): - error "Unable to load clearance state for parent block, " & - "database corrupt?", clearanceBlock = shortLog(clearanceBlock) - return err(VerifierError.MissingParent) + let proposerVerifyTick = Moment.now() + + if not(isNil(onStateUpdated)): + ? onStateUpdated(forkyBlck.message.slot) - let proposerVerifyTick = Moment.now() - - if not(isNil(onStateUpdated)): - ? onStateUpdated(forkyBlck.message.slot) - - let - stateDataTick = Moment.now() - updateFlags2 = - dag.updateFlags + {skipBlsValidation, skipStateRootValidation} - - ? 
checkStateTransition(dag, forkyBlck.asSigVerified(), cache, updateFlags2) - - let stateVerifyTick = Moment.now() - - if bdata.blob.isSome(): - for blob in bdata.blob.get(): - dag.db.putBlobSidecar(blob[]) - - type Trusted = typeof forkyBlck.asTrusted() - - proc onBlockAddedHandler( - blckRef: BlockRef, - trustedBlock: Trusted, - epochRef: EpochRef, - unrealized: FinalityCheckpoints - ) {.gcsafe, raises: [].} = - onBlockAdded( - blckRef, - ForkedTrustedSignedBeaconBlock.init(trustedBlock), - epochRef, - unrealized) - - let blockHandler: BlockAdded(consensusFork) = onBlockAddedHandler - - discard addResolvedHeadBlock( - dag, dag.clearanceState, - forkyBlck.asTrusted(), - true, - parent, cache, - blockHandler, - proposerVerifyTick - startTick, - stateDataTick - proposerVerifyTick, - stateVerifyTick - stateDataTick) + let + stateDataTick = Moment.now() + updateFlags2 = + dag.updateFlags + {skipBlsValidation, skipStateRootValidation} + + ? checkStateTransition(dag, forkyBlck.asSigVerified(), cache, updateFlags2) + + let stateVerifyTick = Moment.now() + + if bdata.blob.isSome(): + for blob in bdata.blob.get(): + dag.db.putBlobSidecar(blob[]) + + discard addResolvedHeadBlock( + dag, dag.clearanceState, + forkyBlck.asTrusted(), + OptimisticStatus.notValidated, + parent, cache, + onBlockAdded, + proposerVerifyTick - startTick, + stateDataTick - proposerVerifyTick, + stateVerifyTick - stateDataTick) ok() diff --git a/beacon_chain/consensus_object_pools/block_dag.nim b/beacon_chain/consensus_object_pools/block_dag.nim index bdb8017b6d..606211e1f0 100644 --- a/beacon_chain/consensus_object_pools/block_dag.nim +++ b/beacon_chain/consensus_object_pools/block_dag.nim @@ -1,11 +1,11 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import chronicles, @@ -14,6 +14,13 @@ import export chronicles, forks type + OptimisticStatus* {.pure.} = enum + # A simplified version of `PayloadStatusV1` + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/sync/optimistic.md#helpers + notValidated = "NOT_VALIDATED" + valid = "VALID" + invalidated = "INVALIDATED" + BlockRef* = ref object ## Node in object graph guaranteed to lead back to finalized head, and to ## have a corresponding entry in database. 
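# The OptimisticStatus enum above is a simplified PayloadStatusV1; a hedged
# sketch of how an engine API payload status string could collapse onto it
# (hypothetical helper, only the enum values come from the code above):
func toOptimisticStatus(payloadStatus: string): OptimisticStatus =
  case payloadStatus
  of "VALID": OptimisticStatus.valid
  of "INVALID", "INVALID_BLOCK_HASH": OptimisticStatus.invalidated
  else: OptimisticStatus.notValidated # SYNCING / ACCEPTED stay optimistic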
@@ -31,7 +38,7 @@ type ## Root that can be used to retrieve block data from database executionBlockHash*: Opt[Eth2Digest] - executionValid*: bool + optimisticStatus*: OptimisticStatus parent*: BlockRef ##\ ## Not nil, except for the finalized head @@ -50,33 +57,50 @@ template root*(blck: BlockRef): Eth2Digest = blck.bid.root template slot*(blck: BlockRef): Slot = blck.bid.slot func init*( - T: type BlockRef, root: Eth2Digest, - executionBlockHash: Opt[Eth2Digest], executionValid: bool, slot: Slot): - BlockRef = + T: type BlockRef, + root: Eth2Digest, + executionBlockHash: Opt[Eth2Digest], + optimisticStatus: OptimisticStatus, + slot: Slot, +): BlockRef = BlockRef( bid: BlockId(root: root, slot: slot), - executionBlockHash: executionBlockHash, executionValid: executionValid) + executionBlockHash: executionBlockHash, + optimisticStatus: optimisticStatus, + ) func init*( - T: type BlockRef, root: Eth2Digest, executionValid: bool, + T: type BlockRef, root: Eth2Digest, _: OptimisticStatus, blck: phase0.SomeBeaconBlock | altair.SomeBeaconBlock | phase0.TrustedBeaconBlock | altair.TrustedBeaconBlock): BlockRef = # Use same formal parameters for simplicity, but it's impossible for these # blocks to be optimistic. - BlockRef.init(root, Opt.some ZERO_HASH, executionValid = true, blck.slot) + BlockRef.init(root, Opt.some ZERO_HASH, OptimisticStatus.valid, blck.slot) func init*( - T: type BlockRef, root: Eth2Digest, executionValid: bool, + T: type BlockRef, root: Eth2Digest, optimisticStatus: OptimisticStatus, blck: bellatrix.SomeBeaconBlock | bellatrix.TrustedBeaconBlock | capella.SomeBeaconBlock | capella.TrustedBeaconBlock | deneb.SomeBeaconBlock | deneb.TrustedBeaconBlock | electra.SomeBeaconBlock | electra.TrustedBeaconBlock | fulu.SomeBeaconBlock | fulu.TrustedBeaconBlock): BlockRef = BlockRef.init( - root, Opt.some blck.body.execution_payload.block_hash, - executionValid = - executionValid or blck.body.execution_payload.block_hash == ZERO_HASH, - blck.slot) + root, Opt.some blck.body.execution_payload.block_hash, optimisticStatus, blck.slot + ) + +func init*( + T: type BlockRef, root: Eth2Digest, optimisticStatus: OptimisticStatus, + blck: gloas.SomeBeaconBlock | gloas.TrustedBeaconBlock): BlockRef = + BlockRef.init( + root, + Opt.some blck.body.signed_execution_payload_bid.message.block_hash, + if optimisticStatus == OptimisticStatus.valid or + blck.body.signed_execution_payload_bid.message.block_hash.isZero: + OptimisticStatus.valid + else: + optimisticStatus, + blck.slot, + ) func parent*(bs: BlockSlot): BlockSlot = ## Return a blockslot representing the previous slot, using the parent block @@ -228,5 +252,23 @@ func shortLog*(v: BlockSlot): string = else: # There was a gap - log it shortLog(v.blck) & "@" & $v.slot +func executionValid*(blck: BlockRef): bool = + blck.optimisticStatus == OptimisticStatus.valid + +proc markExecutionValid*(blck: BlockRef, valid: bool) = + ## Mark a block as having a valid or invalid excecution payload + if not valid: + blck.optimisticStatus = OptimisticStatus.invalidated + debug "Optimistic status updated", blck = shortLog(blck), valid + else: + # Being valid implies that the ancestors are also valid + var cur = blck + + while cur != nil and cur.optimisticStatus == OptimisticStatus.notValidated: + cur.optimisticStatus = OptimisticStatus.valid + debug "Optimistic status updated", blck = shortLog(cur), valid + + cur = cur.parent + chronicles.formatIt BlockSlot: shortLog(it) chronicles.formatIt BlockRef: shortLog(it) diff --git 
a/beacon_chain/consensus_object_pools/block_pools_types.nim b/beacon_chain/consensus_object_pools/block_pools_types.nim index 459a768a4e..68aed2ad64 100644 --- a/beacon_chain/consensus_object_pools/block_pools_types.nim +++ b/beacon_chain/consensus_object_pools/block_pools_types.nim @@ -5,27 +5,25 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import # Standard library std/[tables, hashes], # Status libraries chronicles, + results, # Internals ../spec/[signatures_batch, forks, helpers], ".."/[beacon_chain_db, era_db], ../validators/validator_monitor, ./block_dag, block_pools_types_light_client -from ../spec/datatypes/capella import TrustedSignedBeaconBlock -from ../spec/datatypes/deneb import TrustedSignedBeaconBlock - from "."/vanity_logs/vanity_logs import LogProc, VanityLogs export sets, tables, hashes, helpers, beacon_chain_db, era_db, block_dag, - block_pools_types_light_client, validator_monitor, LogProc, VanityLogs + block_pools_types_light_client, validator_monitor, LogProc, VanityLogs, results # ChainDAG and types related to forming a DAG of blocks, keeping track of their # relationships and allowing various forms of lookups @@ -131,6 +129,11 @@ type era*: EraDB + eaSlot*: Slot + ## Earliest available slot is the earliest slot at which the BN can + ## guarantee serving blocks (and sidecars which are a subset of slots + ## pertaining to the DA retention window of sidecars). + validatorMonitor*: ref ValidatorMonitor forkBlocks*: HashSet[KeyedBlockRef] @@ -292,24 +295,9 @@ type blck*: ForkedSignedBeaconBlock blob*: Opt[BlobSidecars] - OnBlockAdded*[T: ForkyTrustedSignedBeaconBlock] = proc( - blckRef: BlockRef, blck: T, epochRef: EpochRef, - unrealized: FinalityCheckpoints) {.gcsafe, raises: [].} - OnPhase0BlockAdded* = OnBlockAdded[phase0.TrustedSignedBeaconBlock] - OnAltairBlockAdded* = OnBlockAdded[altair.TrustedSignedBeaconBlock] - OnBellatrixBlockAdded* = OnBlockAdded[bellatrix.TrustedSignedBeaconBlock] - OnCapellaBlockAdded* = OnBlockAdded[capella.TrustedSignedBeaconBlock] - OnDenebBlockAdded* = OnBlockAdded[deneb.TrustedSignedBeaconBlock] - OnElectraBlockAdded* = OnBlockAdded[electra.TrustedSignedBeaconBlock] - OnFuluBlockAdded* = OnBlockAdded[fulu.TrustedSignedBeaconBlock] - - OnForkyBlockAdded* = - OnPhase0BlockAdded | OnAltairBlockAdded | OnBellatrixBlockAdded | - OnCapellaBlockAdded | OnDenebBlockAdded | OnElectraBlockAdded | - OnFuluBlockAdded - - OnForkedBlockAdded* = proc( - blckRef: BlockRef, blck: ForkedTrustedSignedBeaconBlock, epochRef: EpochRef, + OnBlockAdded*[consensusFork: static ConsensusFork] = proc( + blckRef: BlockRef, blck: consensusFork.TrustedSignedBeaconBlock, + state: consensusFork.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints) {.gcsafe, raises: [].} OnStateUpdated* = proc( @@ -322,7 +310,7 @@ type epoch_transition*: bool previous_duty_dependent_root*: Eth2Digest current_duty_dependent_root*: Eth2Digest - optimistic* {.serializedFieldName: "execution_optimistic".}: Option[bool] + optimistic* {.serializedFieldName: "execution_optimistic".}: Opt[bool] ReorgInfoObject* = object slot*: Slot @@ -331,41 +319,23 @@ type new_head_block*: Eth2Digest old_head_state*: Eth2Digest new_head_state*: Eth2Digest - optimistic* {.serializedFieldName: "execution_optimistic".}: Option[bool] + optimistic* {.serializedFieldName: 
"execution_optimistic".}: Opt[bool] FinalizationInfoObject* = object block_root* {.serializedFieldName: "block".}: Eth2Digest state_root* {.serializedFieldName: "state".}: Eth2Digest epoch*: Epoch - optimistic* {.serializedFieldName: "execution_optimistic".}: Option[bool] + optimistic* {.serializedFieldName: "execution_optimistic".}: Opt[bool] EventBeaconBlockObject* = object slot*: Slot block_root* {.serializedFieldName: "block".}: Eth2Digest - optimistic* {.serializedFieldName: "execution_optimistic".}: Option[bool] + optimistic* {.serializedFieldName: "execution_optimistic".}: Opt[bool] EventBeaconBlockGossipObject* = object slot*: Slot block_root* {.serializedFieldName: "block".}: Eth2Digest -template OnBlockAddedCallback*(kind: static ConsensusFork): auto = - when kind == ConsensusFork.Fulu: - typedesc[OnFuluBlockAdded] - elif kind == ConsensusFork.Electra: - typedesc[OnElectraBlockAdded] - elif kind == ConsensusFork.Deneb: - typedesc[OnDenebBlockAdded] - elif kind == ConsensusFork.Capella: - typedesc[OnCapellaBlockAdded] - elif kind == ConsensusFork.Bellatrix: - typedesc[OnBellatrixBlockAdded] - elif kind == ConsensusFork.Altair: - typedesc[OnAltairBlockAdded] - elif kind == ConsensusFork.Phase0: - typedesc[OnPhase0BlockAdded] - else: - static: raiseAssert "Unreachable" - func proposer_dependent_slot*(epochRef: EpochRef): Slot = epochRef.key.epoch.proposer_dependent_slot() @@ -396,6 +366,9 @@ func horizon*(dag: ChainDAGRef): Slot = else: GENESIS_SLOT +func earliestAvailableSlot*(dag: ChainDAGRef): Slot = + dag.eaSlot + template epoch*(e: EpochRef): Epoch = e.key.epoch func shortLog*(v: EpochKey): string = @@ -479,7 +452,7 @@ func init*(t: typedesc[FinalizationInfoObject], blockRoot: Eth2Digest, func init*(t: typedesc[EventBeaconBlockObject], v: ForkedTrustedSignedBeaconBlock, - optimistic: Option[bool]): EventBeaconBlockObject = + optimistic: Opt[bool]): EventBeaconBlockObject = withBlck(v): EventBeaconBlockObject( slot: forkyBlck.message.slot, @@ -493,4 +466,4 @@ func init*(t: typedesc[EventBeaconBlockGossipObject], EventBeaconBlockGossipObject( slot: forkyBlck.message.slot, block_root: forkyBlck.root - ) \ No newline at end of file + ) diff --git a/beacon_chain/consensus_object_pools/block_pools_types_light_client.nim b/beacon_chain/consensus_object_pools/block_pools_types_light_client.nim index b8ea3e6386..aac795c101 100644 --- a/beacon_chain/consensus_object_pools/block_pools_types_light_client.nim +++ b/beacon_chain/consensus_object_pools/block_pools_types_light_client.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -25,6 +25,9 @@ type OnDemand = "on-demand" ## Like `full`, but import on demand instead of on start. 
+ LightClientVerifierError* {.pure.} = enum + Invalid, MissingParent, UnviableFork, Duplicate + OnLightClientFinalityUpdateCallback* = proc(data: ForkedLightClientFinalityUpdate) {.gcsafe, raises: [].} OnLightClientOptimisticUpdateCallback* = diff --git a/beacon_chain/consensus_object_pools/block_quarantine.nim b/beacon_chain/consensus_object_pools/block_quarantine.nim index ccee1527b1..e08a9b04c0 100644 --- a/beacon_chain/consensus_object_pools/block_quarantine.nim +++ b/beacon_chain/consensus_object_pools/block_quarantine.nim @@ -5,12 +5,12 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import std/tables, - chronicles, - ../spec/forks + chronicles, chronos, + ../spec/[block_id, forks, presets] export tables, forks @@ -21,9 +21,7 @@ const ## Arbitrary MaxOrphans = SLOTS_PER_EPOCH * 3 ## Enough for finalization in an alternative fork - MaxBlobless = SLOTS_PER_EPOCH - ## Arbitrary - MaxColumnless = SLOTS_PER_EPOCH + MaxSidecarless = SLOTS_PER_EPOCH * 128 ## Arbitrary MaxUnviables = 16 * 1024 ## About a day of blocks - most likely not needed but it's quite cheap.. @@ -52,18 +50,18 @@ type ## to be dropped. An orphan block may also be "blobless" (see ## below) - if so, upon resolving the parent, it should be ## added to the blobless table, after verifying its signature. - - blobless*: OrderedTable[Eth2Digest, ForkedSignedBeaconBlock] - ## Blocks that we don't have blobs for. When we have received - ## all blobs for this block, we can proceed to resolving the - ## block as well. A blobless block inserted into this table must + orphansEvent*: AsyncEvent + ## Asynchronous event which will be set, when new block appears in + ## orphans table. + + sidecarless*: OrderedTable[Eth2Digest, ForkedSignedBeaconBlock] + ## Blocks that we don't have sidecars (BlobSidecar/DataColumnSidecar) for. + ## When we have received all sidecars for this block, we can proceed to + ## resolving the block as well. Block inserted into this table must ## have a resolved parent (i.e., it is not an orphan). - - columnless*: OrderedTable[Eth2Digest, ForkedSignedBeaconBlock] - ## Blocks that we don't have columns for. When we have received - ## all columns for this block, we can proceed to resolving the - ## block as well. A columnless block inserted into this table must - ## have a resolved parent (i.e., it is not an orphan) + sidecarlessEvent*: AsyncEvent + ## Asynchronous event which will be set, when new block appears in + ## sidecarless table. unviable*: OrderedTable[Eth2Digest, tuple[]] ## Unviable blocks are those that come from a history that does not @@ -79,12 +77,26 @@ type ## only those we have observed, been able to verify as unviable and fit ## in this cache. + last_block_slot*: Opt[BlockId] + ## Stores the latest sidecarless block root and slot, in order to quickly + ## fetch the latest info without having to traverse sidecarless + ## quarantine. missing*: Table[Eth2Digest, MissingBlock] ## Roots of blocks that we would like to have (either parent_root of ## unresolved blocks or block roots of attestations) + missingEvent*: AsyncEvent + ## Asynchronous event which will be set, when new block appears in + ## missing table. 
-func init*(T: type Quarantine): T = - T() + cfg*: RuntimeConfig + +func init*(T: type Quarantine, cfg: RuntimeConfig): T = + T( + cfg: cfg, + sidecarlessEvent: newAsyncEvent(), + missingEvent: newAsyncEvent(), + orphansEvent: newAsyncEvent() + ) func checkMissing*(quarantine: var Quarantine, max: int): seq[FetchRecord] = ## Return a list of blocks that we should try to resolve from other client - @@ -106,7 +118,7 @@ func checkMissing*(quarantine: var Quarantine, max: int): seq[FetchRecord] = if result.len >= max: break -func addMissing*(quarantine: var Quarantine, root: Eth2Digest) = +proc addMissing*(quarantine: var Quarantine, root: Eth2Digest) = ## Schedule the download a the given block if quarantine.missing.len >= MaxMissingItems: return @@ -129,19 +141,16 @@ func addMissing*(quarantine: var Quarantine, root: Eth2Digest) = # Add if it's not there, but don't update missing counter if not found: discard quarantine.missing.hasKeyOrPut(r, MissingBlock()) + quarantine.missingEvent.fire() return func removeOrphan*( quarantine: var Quarantine, signedBlock: ForkySignedBeaconBlock) = quarantine.orphans.del((signedBlock.root, signedBlock.signature)) -func removeBlobless*( - quarantine: var Quarantine, signedBlock: ForkySignedBeaconBlock) = - quarantine.blobless.del(signedBlock.root) - -func removeColumnless*( +func removeSidecarless*( quarantine: var Quarantine, signedBlock: ForkySignedBeaconBlock) = - quarantine.columnless.del(signedBlock.root) + quarantine.sidecarless.del(signedBlock.root) func isViable( finalizedSlot: Slot, slot: Slot): bool = @@ -189,7 +198,7 @@ func removeUnviableOrphanTree( checked -func removeUnviableBloblessTree( +func removeUnviableSidecarlessTree( quarantine: var Quarantine, toCheck: var seq[Eth2Digest], tbl: var OrderedTable[Eth2Digest, ForkedSignedBeaconBlock]) = @@ -223,7 +232,7 @@ func addUnviable*(quarantine: var Quarantine, root: Eth2Digest) = quarantine.cleanupUnviable() var toCheck = @[root] var checked = quarantine.removeUnviableOrphanTree(toCheck, quarantine.orphans) - quarantine.removeUnviableBloblessTree(checked, quarantine.blobless) + quarantine.removeUnviableSidecarlessTree(checked, quarantine.sidecarless) quarantine.unviable[root] = () @@ -238,29 +247,17 @@ func cleanupOrphans(quarantine: var Quarantine, finalizedSlot: Slot) = quarantine.addUnviable k[0] quarantine.orphans.del k -func cleanupBlobless(quarantine: var Quarantine, finalizedSlot: Slot) = +func cleanupSidecarless(quarantine: var Quarantine, finalizedSlot: Slot) = var toDel: seq[Eth2Digest] - for k, v in quarantine.blobless: + for k, v in quarantine.sidecarless: withBlck(v): if not isViable(finalizedSlot, forkyBlck.message.slot): toDel.add k for k in toDel: quarantine.addUnviable k - quarantine.blobless.del k - -func cleanupColumnless(quarantine: var Quarantine, finalizedSlot: Slot) = - var toDel: seq[Eth2Digest] - - for k, v in quarantine.columnless: - withBlck(v): - if not isViable(finalizedSlot, forkyBlck.message.slot): - toDel.add k - - for k in toDel: - quarantine.addUnviable k - quarantine.columnless.del k + quarantine.sidecarless.del k func clearAfterReorg*(quarantine: var Quarantine) = ## Clear missing and orphans to start with a fresh slate in case of a reorg @@ -268,6 +265,28 @@ func clearAfterReorg*(quarantine: var Quarantine) = quarantine.missing.reset() quarantine.orphans.reset() +func pruneAfterFinalization*( + quarantine: var Quarantine, + epoch: Epoch, + needsBackfill: bool +) = + let + startEpoch = + if needsBackfill: + # Because Quarantine could be used as temporary storage 
for blocks which + # do not have sidecars yet, we should not prune blocks which are behind + # `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epoch. Otherwise we will not + # be able to backfill these blocks properly. + if epoch < quarantine.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: + Epoch(0) + else: + epoch - quarantine.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS + else: + epoch + slot = startEpoch.start_slot() + + quarantine.cleanupSidecarless(slot) + # Typically, blocks will arrive in mostly topological order, with some # out-of-order block pairs. Therefore, it is unhelpful to use either a # FIFO or LIFO discpline, and since by definition each block gets used @@ -279,9 +298,11 @@ func clearAfterReorg*(quarantine: var Quarantine) = # for future slots are rejected before reaching quarantine, this usually # will be a block for the last couple of slots for which the parent is a # likely imminent arrival. -func addOrphan*( - quarantine: var Quarantine, finalizedSlot: Slot, - signedBlock: ForkedSignedBeaconBlock): Result[void, cstring] = +proc addOrphan*( + quarantine: var Quarantine, + finalizedSlot: Slot, + signedBlock: ForkedSignedBeaconBlock +): Result[void, cstring] = ## Adds block to quarantine's `orphans` and `missing` lists. if not isViable(finalizedSlot, getForkedBlockField(signedBlock, slot)): @@ -312,9 +333,10 @@ func addOrphan*( oldest_orphan_key = k break quarantine.orphans.del oldest_orphan_key - quarantine.blobless.del oldest_orphan_key[0] + quarantine.sidecarless.del oldest_orphan_key[0] quarantine.orphans[(signedBlock.root, signedBlock.signature)] = signedBlock + quarantine.orphansEvent.fire() ok() @@ -332,75 +354,67 @@ iterator pop*(quarantine: var Quarantine, root: Eth2Digest): toRemove.add(k) yield v -proc addBlobless*( - quarantine: var Quarantine, finalizedSlot: Slot, +proc addSidecarless( + quarantine: var Quarantine, finalizedSlot: Opt[Slot], signedBlock: deneb.SignedBeaconBlock | electra.SignedBeaconBlock | - fulu.SignedBeaconBlock): bool = - - if not isViable(finalizedSlot, signedBlock.message.slot): - quarantine.addUnviable(signedBlock.root) - return false - - quarantine.cleanupBlobless(finalizedSlot) - - if quarantine.blobless.lenu64 >= MaxBlobless: - var oldest_blobless_key: Eth2Digest - for k in quarantine.blobless.keys: - oldest_blobless_key = k + fulu.SignedBeaconBlock | gloas.SignedBeaconBlock +): bool = + if finalizedSlot.isSome(): + if not isViable(finalizedSlot.get(), signedBlock.message.slot): + quarantine.addUnviable(signedBlock.root) + return false + + if quarantine.sidecarless.lenu64 >= MaxSidecarless: + var oldestKey: Eth2Digest + for k in quarantine.sidecarless.keys: + oldestKey = k break - quarantine.blobless.del oldest_blobless_key + quarantine.sidecarless.del(oldestKey) - debug "block quarantine: Adding blobless", blck = shortLog(signedBlock) - quarantine.blobless[signedBlock.root] = + debug "Block without sidecars has been added to the quarantine", + block_root = shortLog(signedBlock.root) + quarantine.sidecarless[signedBlock.root] = ForkedSignedBeaconBlock.init(signedBlock) + quarantine.last_block_slot = + Opt.some(BlockId(slot: signedBlock.message.slot, root: signedBlock.root)) quarantine.missing.del(signedBlock.root) + quarantine.sidecarlessEvent.fire() true -proc addColumnless*( - quarantine: var Quarantine, finalizedSlot: Slot, - signedBlock: fulu.SignedBeaconBlock): bool = - - if not isViable(finalizedSlot, signedBlock.message.slot): - quarantine.addUnviable(signedBlock.root) - return false - - quarantine.cleanupColumnless(finalizedSlot) - - if 
quarantine.columnless.lenu64 >= MaxColumnless: - var oldest_columnless_key: Eth2Digest - for k in quarantine.columnless.keys: - oldest_columnless_key = k - break - quarantine.blobless.del oldest_columnless_key - - debug "block quarantine: Adding columnless", blck = shortLog(signedBlock) - quarantine.columnless[signedBlock.root] = - ForkedSignedBeaconBlock.init(signedBlock) - quarantine.missing.del(signedBlock.root) - true - -func popBlobless*( +proc addSidecarless*( + quarantine: var Quarantine, finalizedSlot: Slot, + signedBlock: deneb.SignedBeaconBlock | electra.SignedBeaconBlock | + fulu.SignedBeaconBlock | gloas.SignedBeaconBlock +): bool = + quarantine.addSidecarless(Opt.some(finalizedSlot), signedBlock) + +proc addSidecarless*( + quarantine: var Quarantine, + signedBlock: deneb.SignedBeaconBlock | electra.SignedBeaconBlock | + fulu.SignedBeaconBlock | gloas.SignedBeaconBlock +) = + discard quarantine.addSidecarless(Opt.none(Slot), signedBlock) + +func popSidecarless*( quarantine: var Quarantine, - root: Eth2Digest): Opt[ForkedSignedBeaconBlock] = + root: Eth2Digest +): Opt[ForkedSignedBeaconBlock] = var blck: ForkedSignedBeaconBlock - if quarantine.blobless.pop(root, blck): + if quarantine.sidecarless.pop(root, blck): Opt.some(blck) else: Opt.none(ForkedSignedBeaconBlock) -func popColumnless*( +func getColumnless*( quarantine: var Quarantine, root: Eth2Digest): Opt[ForkedSignedBeaconBlock] = - var blck: ForkedSignedBeaconBlock - if quarantine.columnless.pop(root, blck): - Opt.some(blck) - else: + try: + Opt.some(quarantine.sidecarless[root]) + except KeyError: Opt.none(ForkedSignedBeaconBlock) -iterator peekBlobless*(quarantine: var Quarantine): ForkedSignedBeaconBlock = - for k, v in quarantine.blobless.mpairs(): - yield v - -iterator peekColumnless*(quarantine: var Quarantine): ForkedSignedBeaconBlock = - for k, v in quarantine.columnless.mpairs(): +iterator peekSidecarless*( + quarantine: var Quarantine +): ForkedSignedBeaconBlock = + for k, v in quarantine.sidecarless.mpairs(): yield v diff --git a/beacon_chain/consensus_object_pools/blockchain_dag.nim b/beacon_chain/consensus_object_pools/blockchain_dag.nim index 5296b80a59..e6faa3485e 100644 --- a/beacon_chain/consensus_object_pools/blockchain_dag.nim +++ b/beacon_chain/consensus_object_pools/blockchain_dag.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
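# ---------------------------------------------------------------------------
# A minimal sketch of the retention-window arithmetic used by the new
# `pruneAfterFinalization` in block_quarantine above: while backfill is still
# needed, sidecar-less blocks are kept for MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
# epochs, with a saturating subtraction so early epochs cannot underflow.
# Names and the constant below are local stand-ins (the real value comes from
# `RuntimeConfig`), and plain uint64 stands in for `Epoch`.
const minEpochsForSidecarRequests = 4096'u64  # assumed mainnet-style value

func pruneStartEpoch(finalizedEpoch: uint64, needsBackfill: bool): uint64 =
  ## Epoch whose start slot is handed to `cleanupSidecarless` above.
  if needsBackfill:
    if finalizedEpoch < minEpochsForSidecarRequests:
      0'u64
    else:
      finalizedEpoch - minEpochsForSidecarRequests
  else:
    finalizedEpoch

when isMainModule:
  doAssert pruneStartEpoch(10'u64, needsBackfill = true) == 0'u64
  doAssert pruneStartEpoch(10_000'u64, needsBackfill = true) == 5_904'u64
  doAssert pruneStartEpoch(10_000'u64, needsBackfill = false) == 10_000'u64
# ---------------------------------------------------------------------------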
-{.push raises: [].} +{.push raises: [], gcsafe.} import std/[algorithm, sequtils, tables, sets], @@ -48,6 +48,8 @@ declareGauge beacon_processed_deposits_total, "Number of total deposits included declareCounter beacon_dag_state_replay_seconds, "Time spent replaying states" +declareGauge beacon_head_execution_number, "Execuction block number of the beacon head block" + const EPOCHS_PER_STATE_SNAPSHOT* = 32 ## When finality happens, we prune historical states from the database except @@ -266,8 +268,11 @@ proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] = # When we only have a digest, we don't know which fork it's from so we try # them one by one - this should be used sparingly - static: doAssert high(ConsensusFork) == ConsensusFork.Fulu - if (let blck = db.getBlock(root, fulu.TrustedSignedBeaconBlock); + static: doAssert high(ConsensusFork) == ConsensusFork.Gloas + if (let blck = db.getBlock(root, gloas.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + elif (let blck = db.getBlock(root, fulu.TrustedSignedBeaconBlock); blck.isSome()): ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) elif (let blck = db.getBlock(root, electra.TrustedSignedBeaconBlock); @@ -898,6 +903,15 @@ proc updateBeaconMetrics( beacon_active_validators.set(active_validators) beacon_current_active_validators.set(active_validators) + beacon_head_execution_number.set( + when consensusFork >= ConsensusFork.Bellatrix and + consensusFork < ConsensusFork.Gloas: + debugGloasComment "handle correctly for gloas" + forkyState.data.latest_execution_payload_header.block_number.toGaugeValue + else: + 0'u64.toGaugeValue + ) + import blockchain_dag_light_client export @@ -976,47 +990,10 @@ proc applyBlock( updateFlags: UpdateFlags): Result[void, cstring] = loadStateCache(dag, cache, bid, getStateField(state, slot).epoch) - discard case dag.cfg.consensusForkAtEpoch(bid.slot.epoch) - of ConsensusFork.Phase0: - let data = getBlock(dag, bid, phase0.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Altair: - let data = getBlock(dag, bid, altair.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Bellatrix: - let data = getBlock(dag, bid, bellatrix.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Capella: - let data = getBlock(dag, bid, capella.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Deneb: - let data = getBlock(dag, bid, deneb.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Electra: - let data = getBlock(dag, bid, electra.TrustedSignedBeaconBlock).valueOr: + withConsensusFork(dag.cfg.consensusForkAtEpoch(bid.slot.epoch)): + let data = getBlock(dag, bid, consensusFork.TrustedSignedBeaconBlock).valueOr: return err("Block load failed") - ? 
state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Fulu: - let data = getBlock(dag, bid, fulu.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( + discard ? state_transition( dag.cfg, state, data, cache, info, updateFlags + {slotProcessed}, noRollback) @@ -1094,11 +1071,21 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB, for blck in db.getAncestorSummaries(head.root): # The execution block root gets filled in as needed. Nonfinalized Bellatrix # and later blocks are loaded as optimistic, which gets adjusted that first - # `VALID` fcU from an EL plus markBlockVerified. Pre-merge blocks still get + # `VALID` fcU from an EL plus markExecutionValid. Pre-merge blocks still get # marked as `VALID`. - let newRef = BlockRef.init( - blck.root, Opt.none Eth2Digest, executionValid = false, - blck.summary.slot) + let newRef = + if cfg.consensusForkAtEpoch(blck.summary.slot.epoch) >= ConsensusFork.Bellatrix: + BlockRef.init( + blck.root, + Opt.none Eth2Digest, + OptimisticStatus.notValidated, + blck.summary.slot, + ) + else: + BlockRef.init( + blck.root, Opt.some ZERO_HASH, OptimisticStatus.valid, blck.summary.slot + ) + if headRef == nil: headRef = newRef @@ -1181,6 +1168,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB, of ConsensusFork.Deneb: denebFork(cfg) of ConsensusFork.Electra: electraFork(cfg) of ConsensusFork.Fulu: fuluFork(cfg) + of ConsensusFork.Gloas: gloasFork(cfg) stateFork = getStateField(dag.headState, fork) # Here, we check only the `current_version` field because the spec @@ -1362,6 +1350,9 @@ template genesis_validators_root*(dag: ChainDAGRef): Eth2Digest = proc genesisBlockRoot*(dag: ChainDAGRef): Eth2Digest = dag.db.getGenesisBlock().expect("DB must be initialized with genesis block") +func forkDigestAtEpoch*(dag: ChainDAGRef, epoch: Epoch): ForkDigest = + dag.forkDigests[].atEpoch(epoch, dag.cfg) + func getEpochRef*( dag: ChainDAGRef, state: ForkedHashedBeaconState, cache: var StateCache): EpochRef = ## Get a cached `EpochRef` or construct one based on the given state - always @@ -1461,7 +1452,10 @@ proc computeRandaoMix( bdata: ForkedTrustedSignedBeaconBlock): Opt[Eth2Digest] = ## Compute the requested RANDAO mix for `bdata` without `state`, if possible. withBlck(bdata): - when consensusFork >= ConsensusFork.Bellatrix: + debugGloasComment "" + when consensusFork == ConsensusFork.Gloas: + return Opt.none(Eth2Digest) + elif consensusFork >= ConsensusFork.Bellatrix: if forkyBlck.message.is_execution_block: var mix = eth2digest(forkyBlck.message.body.randao_reveal.toRaw()) mix.data.mxor forkyBlck.message.body.execution_payload.prev_randao.data @@ -1933,7 +1927,6 @@ proc pruneBlockSlot(dag: ChainDAGRef, bs: BlockSlot) = # Update light client data dag.deleteLightClientData(bs.blck.bid) - bs.blck.executionValid = true dag.forkBlocks.excl(KeyedBlockRef.init(bs.blck)) discard dag.db.delBlock( dag.cfg.consensusForkAtEpoch(bs.blck.slot.epoch), bs.blck.root) @@ -1993,26 +1986,7 @@ func is_optimistic*(dag: ChainDAGRef, bid: BlockId): bool = # it could have been orphaned or the DB is slightly inconsistent. 
# Report it as optimistic until it becomes reachable or gets deleted return true - not blck.executionValid - -proc markBlockVerified*(dag: ChainDAGRef, blck: BlockRef) = - var cur = blck - - while true: - cur.executionValid = true - - debug "markBlockVerified", blck = shortLog(cur) - - if cur.parent.isNil: - break - - cur = cur.parent - - # Always check at least as far back as the parent so that when a new block - # is added with executionValid already set, it stil sets the ancestors, to - # the next valid in the chain. - if cur.executionValid: - return + blck.optimisticStatus != OptimisticStatus.valid iterator syncSubcommittee*( syncCommittee: openArray[ValidatorIndex], @@ -2274,6 +2248,9 @@ proc pruneHistory*(dag: ChainDAGRef, startup = false) = # that. break + # eaSlot would be the earliest slot for which we can reliably + # serve a block (and sidecars if it's within the DA retention window) + dag.eaSlot = bid.slot + 1 cur = dag.parent(bid) # TODO There have been varied reports of startup pruning causing long @@ -2300,24 +2277,37 @@ proc pruneHistory*(dag: ChainDAGRef, startup = false) = if dag.db.clearBlocks(fork): break -proc loadExecutionBlockHash*( - dag: ChainDAGRef, bid: BlockId): Opt[Eth2Digest] = +proc loadExecutionBlockHash*(dag: ChainDAGRef, bid: BlockId): Opt[Eth2Digest] = let blockData = dag.getForkedBlock(bid).valueOr: # Besides database inconsistency issues, this is hit with checkpoint sync. - # The initial `BlockRef` is creted before the checkpoint block is loaded. + # The initial `BlockRef` is created before the checkpoint block is loaded. # It is backfilled later, so return `none` and keep retrying. return Opt.none(Eth2Digest) withBlck(blockData): - when consensusFork >= ConsensusFork.Bellatrix: + debugGloasComment " " + when consensusFork == ConsensusFork.Gloas: + Opt.some ZERO_HASH + elif consensusFork >= ConsensusFork.Bellatrix: Opt.some forkyBlck.message.body.execution_payload.block_hash else: Opt.some ZERO_HASH -proc loadExecutionBlockHash*( - dag: ChainDAGRef, blck: BlockRef): Opt[Eth2Digest] = +proc loadExecutionBlockHash*(dag: ChainDAGRef, blck: BlockRef): Opt[Eth2Digest] = if blck.executionBlockHash.isNone: + # Execution block hashes are loaded lazily during startup blck.executionBlockHash = dag.loadExecutionBlockHash(blck.bid) + + if blck.executionBlockHash == static(Opt.some(ZERO_HASH)): + # The block belongs to Bellatrix+ but the merge has not yet happened + # meaning that its ancestors are also pre-merge + blck.markExecutionValid(true) + + var cur = blck.parent + while cur != nil and cur.executionBlockHash.isNone: + cur.executionBlockHash = blck.executionBlockHash + cur = cur.parent + blck.executionBlockHash from std/packedsets import PackedSet, incl, items @@ -2371,7 +2361,55 @@ func checkCompoundingChanges( # Since it tracks head, it's possible reorgs trigger reporting the same # validator indices multiple times; this is fine. 
withState(state): - anyIt(vis, forkyState.data.validators[it].has_compounding_withdrawal_credential) + anyIt(vis, has_compounding_withdrawal_credential( + consensusFork, forkyState.data.validators[it])) + +func trackVanityState( + dag: ChainDAGRef, knownValidators: openArray[ValidatorIndex]): auto = + ( + lastHeadKind: dag.headState.kind, + lastHeadEpoch: getStateField(dag.headState, slot).epoch, + lastKnownValidatorsChangeStatuses: + dag.headState.getBlsToExecutionChangeStatuses(knownValidators), + lastKnownCompoundingChangeStatuses: + dag.headState.getCompoundingStatuses(knownValidators) + ) + +proc processVanityLogs(dag: ChainDAGRef, vanityState: auto) = + if dag.headState.kind > vanityState.lastHeadKind: + proc logForkUpgrade(consensusFork: ConsensusFork, handler: LogProc) = + if handler != nil and + dag.headState.kind >= consensusFork and + vanityState.lastHeadKind < consensusFork: + handler() + + # Policy: Retain back through Mainnet's second latest fork. + ConsensusFork.Deneb.logForkUpgrade( + dag.vanityLogs.onUpgradeToDeneb) + ConsensusFork.Electra.logForkUpgrade( + dag.vanityLogs.onUpgradeToElectra) + ConsensusFork.Fulu.logForkUpgrade( + dag.vanityLogs.onUpgradeToFulu) + else: + if dag.vanityLogs.onBlobParametersUpdate != nil and + dag.headState.kind >= ConsensusFork.Fulu: + let headEpoch = getStateField(dag.headState, slot).epoch + if headEpoch > vanityState.lastHeadEpoch: + for entry in dag.cfg.BLOB_SCHEDULE: + if headEpoch >= entry.EPOCH: + if vanityState.lastHeadEpoch < entry.EPOCH: + dag.vanityLogs.onBlobParametersUpdate() + break + + if dag.vanityLogs.onKnownBlsToExecutionChange != nil and + checkBlsToExecutionChanges( + dag.headState, vanityState.lastKnownValidatorsChangeStatuses): + dag.vanityLogs.onKnownBlsToExecutionChange() + + if dag.vanityLogs.onKnownCompoundingChange != nil and + checkCompoundingChanges( + dag.headState, vanityState.lastKnownCompoundingChangeStatuses): + dag.vanityLogs.onKnownCompoundingChange() proc updateHead*( dag: ChainDAGRef, newHead: BlockRef, quarantine: var Quarantine, @@ -2410,11 +2448,7 @@ proc updateHead*( let lastHeadStateRoot = getStateRoot(dag.headState) - lastHeadKind = dag.headState.kind - lastKnownValidatorsChangeStatuses = getBlsToExecutionChangeStatuses( - dag.headState, knownValidators) - lastKnownCompoundingChangeStatuses = getCompoundingStatuses( - dag.headState, knownValidators) + vanityState = dag.trackVanityState(knownValidators) # Start off by making sure we have the right state - updateState will try # to use existing in-memory states to make this smooth @@ -2430,32 +2464,7 @@ proc updateHead*( quit 1 dag.head = newHead - - if dag.headState.kind > lastHeadKind: - proc logForkUpgrade(consensusFork: ConsensusFork, handler: LogProc) = - if handler != nil and - dag.headState.kind >= consensusFork and - lastHeadKind < consensusFork: - handler() - - # Policy: Retain back through Mainnet's second latest fork. 
- ConsensusFork.Capella.logForkUpgrade( - dag.vanityLogs.onUpgradeToCapella) - ConsensusFork.Deneb.logForkUpgrade( - dag.vanityLogs.onUpgradeToDeneb) - ConsensusFork.Electra.logForkUpgrade( - dag.vanityLogs.onUpgradeToElectra) - - if dag.vanityLogs.onKnownBlsToExecutionChange != nil and - checkBlsToExecutionChanges( - dag.headState, lastKnownValidatorsChangeStatuses): - dag.vanityLogs.onKnownBlsToExecutionChange() - - if dag.vanityLogs.onKnownCompoundingChange != nil and - checkCompoundingChanges( - dag.headState, lastKnownCompoundingChangeStatuses): - dag.vanityLogs.onKnownCompoundingChange() - + dag.processVanityLogs(vanityState) dag.db.putHeadBlock(newHead.root) updateBeaconMetrics(dag.headState, dag.head.bid, cache) @@ -2488,7 +2497,7 @@ proc updateHead*( justified = shortLog(getStateField( dag.headState, current_justified_checkpoint)), finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)), - isOptHead = not newHead.executionValid + optStatus = newHead.optimisticStatus if not(isNil(dag.onReorgHappened)): let @@ -2510,7 +2519,7 @@ proc updateHead*( justified = shortLog(getStateField( dag.headState, current_justified_checkpoint)), finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)), - isOptHead = not newHead.executionValid + optStatus = newHead.optimisticStatus if not(isNil(dag.onHeadChanged)): let @@ -2662,7 +2671,7 @@ proc getProposer*( proc getProposalState*( dag: ChainDAGRef, head: BlockRef, slot: Slot, cache: var StateCache): - Result[ref ForkedHashedBeaconState, cstring] = + Opt[ref ForkedHashedBeaconState] = ## Return a state suitable for making proposals for the given head and slot - ## in particular, the state can be discarded after use and does not have a ## state root set @@ -2682,7 +2691,7 @@ proc getProposalState*( error "Cannot get proposal state - skipping block production, database corrupt?", head = shortLog(head), slot - return err("Cannot create proposal state") + return err() else: loadStateCache(dag, cache, head.bid, slot.epoch) diff --git a/beacon_chain/consensus_object_pools/blockchain_dag_light_client.nim b/beacon_chain/consensus_object_pools/blockchain_dag_light_client.nim index 8c22bd5cef..0892f5ef8a 100644 --- a/beacon_chain/consensus_object_pools/blockchain_dag_light_client.nim +++ b/beacon_chain/consensus_object_pools/blockchain_dag_light_client.nim @@ -1,11 +1,11 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import # Status libraries @@ -118,7 +118,11 @@ func lightClientHeader( blck: ForkyTrustedSignedBeaconBlock): ForkedLightClientHeader = const lcDataFork = max( lcDataForkAtConsensusFork(typeof(blck).kind), LightClientDataFork.Altair) - ForkedLightClientHeader.init(blck.toLightClientHeader(lcDataFork)) + debugGloasComment "..." 
+ when kind(typeof(blck)) == ConsensusFork.Gloas: + default(ForkedLightClientHeader) + else: + ForkedLightClientHeader.init(blck.toLightClientHeader(lcDataFork)) func sync_aggregate( blck: ForkyTrustedSignedBeaconBlock): SyncAggregate = @@ -246,7 +250,9 @@ proc initLightClientBootstrapForPeriod( res.err() continue withStateAndBlck(tmpState[], bdata): - when consensusFork >= ConsensusFork.Altair: + when consensusFork == ConsensusFork.Gloas: + debugGloasComment "..." + elif consensusFork >= ConsensusFork.Altair: const lcDataFork = lcDataForkAtConsensusFork(consensusFork) if not dag.lcDataStore.db.hasSyncCommittee(period): dag.lcDataStore.db.putSyncCommittee( @@ -397,7 +403,9 @@ proc initLightClientUpdateForPeriod( dag.handleUnexpectedLightClientError(bid.slot) return err() withStateAndBlck(updatedState, bdata): - when consensusFork >= ConsensusFork.Altair: + when consensusFork >= ConsensusFork.Gloas: + debugGloasComment "" + elif consensusFork >= ConsensusFork.Altair: const lcDataFork = lcDataForkAtConsensusFork(consensusFork) update = ForkedLightClientUpdate.init(lcDataFork.LightClientUpdate( attested_header: forkyBlck.toLightClientHeader(lcDataFork), @@ -418,12 +426,14 @@ proc initLightClientUpdateForPeriod( dag.handleUnexpectedLightClientError(finalizedBid.slot) return err() withBlck(bdata): - withForkyUpdate(update): - when lcDataFork > LightClientDataFork.None: - when lcDataFork >= lcDataForkAtConsensusFork(consensusFork): - forkyUpdate.finalized_header = - forkyBlck.toLightClientHeader(lcDataFork) - else: raiseAssert "Unreachable" + debugGloasComment "" + when consensusFork != ConsensusFork.Gloas: + withForkyUpdate(update): + when lcDataFork > LightClientDataFork.None: + when lcDataFork >= lcDataForkAtConsensusFork(consensusFork): + forkyUpdate.finalized_header = + forkyBlck.toLightClientHeader(lcDataFork) + else: raiseAssert "Unreachable" let bdata = dag.getExistingForkedBlock(signatureBid).valueOr: dag.handleUnexpectedLightClientError(signatureBid.slot) return err() @@ -719,7 +729,8 @@ proc createLightClientBootstrap( tmpState[], period) dag.lcDataStore.db.putSyncCommittee(period, syncCommittee) withBlck(bdata): - when consensusFork >= ConsensusFork.Altair: + debugGloasComment "" + when consensusFork >= ConsensusFork.Altair and consensusFork != ConsensusFork.Gloas: const lcDataFork = lcDataForkAtConsensusFork(consensusFork) dag.lcDataStore.db.putHeader( forkyBlck.toLightClientHeader(lcDataFork)) @@ -1099,7 +1110,8 @@ proc getLightClientBootstrap*( debug "LC bootstrap unavailable: Block not found", blockRoot return default(ForkedLightClientBootstrap) withBlck(bdata): - when consensusFork >= ConsensusFork.Altair: + debugGloasComment "" + when consensusFork >= ConsensusFork.Altair and consensusFork != ConsensusFork.Gloas: const lcDataFork = lcDataForkAtConsensusFork(consensusFork) let header = forkyBlck.toLightClientHeader(lcDataFork) diff --git a/beacon_chain/consensus_object_pools/blockchain_list.nim b/beacon_chain/consensus_object_pools/blockchain_list.nim index b7677f8b0b..a9cb1704d7 100644 --- a/beacon_chain/consensus_object_pools/blockchain_list.nim +++ b/beacon_chain/consensus_object_pools/blockchain_list.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). 
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -142,7 +142,7 @@ proc store*(clist: ChainListRef, signedBlock: ForkedSignedBeaconBlock, proc checkBlobs(signedBlock: ForkedSignedBeaconBlock, blobsOpt: Opt[BlobSidecars]): Result[void, VerifierError] = withBlck(signedBlock): - when consensusFork >= ConsensusFork.Deneb: + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: if blobsOpt.isSome(): let blobs = blobsOpt.get() diff --git a/beacon_chain/consensus_object_pools/consensus_manager.nim b/beacon_chain/consensus_object_pools/consensus_manager.nim index 86528b515a..b16d7c68a1 100644 --- a/beacon_chain/consensus_object_pools/consensus_manager.nim +++ b/beacon_chain/consensus_object_pools/consensus_manager.nim @@ -5,25 +5,19 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import - chronicles, chronos, web3/[primitives, engine_api_types], + chronicles, chronos, ../spec/datatypes/base, + ../spec/beaconstate, ../consensus_object_pools/[blockchain_dag, block_quarantine, attestation_pool], ../el/el_manager, ../beacon_clock, ./common_tools -from ../el/engine_api_conversions import asBlockHash -from ../spec/beaconstate import - get_expected_withdrawals, has_eth1_withdrawal_credential -from ../spec/datatypes/capella import Withdrawal from ../spec/eth2_apis/dynamic_fee_recipients import DynamicFeeRecipientsStore, getDynamicFeeRecipient -from ../validators/keystore_management import - KeymanagerHost, getPerValidatorDefaultFeeRecipient, getSuggestedFeeRecipient, - getSuggestedGasLimit from ../validators/action_tracker import ActionTracker, getNextProposalSlot logScope: topics = "cman" @@ -60,6 +54,11 @@ type # Tracking last proposal forkchoiceUpdated payload information # ---------------------------------------------------------------- optimisticHead: tuple[bid: BlockId, execution_block_hash: Eth2Digest] + optimisticHeadStatus: OptimisticStatus + ## forkchoiceUpdated response about the optimistic head + + forkchoiceInflight: bool + ## True when there's an async `forkchoiceUpdated` in flight # Initialization # ------------------------------------------------------------------------------ @@ -90,11 +89,23 @@ func new*(T: type ConsensusManager, # Consensus Management # ----------------------------------------------------------------------------------- +func to*(v: PayloadExecutionStatus, T: type OptimisticStatus): T = + case v + of PayloadExecutionStatus.valid: + OptimisticStatus.valid + of PayloadExecutionStatus.syncing, PayloadExecutionStatus.accepted: + OptimisticStatus.notValidated + of invalid, invalid_block_hash: + OptimisticStatus.invalidated + proc checkExpectedBlock(self: var ConsensusManager) = if self.expectedBlockReceived == nil: return - if self.dag.head.slot < self.expectedSlot: + if self.dag.head.slot < self.expectedSlot or not self.dag.head.executionValid: + # Don't trigger `expectBlock` if the head is optimistic - this gives the + # `forkchoiceUpdated` call time to maybe update the optimistic status before + # it's time to validate return self.expectedBlockReceived.complete(true) @@ -137,7 +148,8 @@ func shouldSyncOptimistically*( true func shouldSyncOptimistically*(self: ConsensusManager, wallSlot: Slot): bool = - if self.optimisticHead.execution_block_hash.isZero: + if 
self.optimisticHeadStatus == OptimisticStatus.invalidated or + self.optimisticHead.execution_block_hash.isZero: return false shouldSyncOptimistically( @@ -151,73 +163,20 @@ func optimisticHead*(self: ConsensusManager): BlockId = func optimisticExecutionBlockHash*(self: ConsensusManager): Eth2Digest = self.optimisticHead.execution_block_hash -func setOptimisticHead*( +proc setOptimisticHead*( self: var ConsensusManager, bid: BlockId, execution_block_hash: Eth2Digest) = - self.optimisticHead = (bid: bid, execution_block_hash: execution_block_hash) - -proc updateExecutionClientHead*( - self: ref ConsensusManager, - newHead: BeaconHead -): Future[Opt[void]] {.async: (raises: [CancelledError]).} = - let headExecutionBlockHash = - self.dag.loadExecutionBlockHash(newHead.blck).valueOr: - # `BlockRef` are only created for blocks that have passed - # execution block hash validation, either explicitly in - # `block_processor.storeBlock`, or implicitly, e.g., through - # checkpoint sync. With checkpoint sync, the checkpoint block - # is initially not available, so if there is a reorg to it, - # this may be triggered. Such a reorg could happen if the first - # imported chain is completely invalid (after the checkpoint block) - # and is subsequently pruned, in which case checkpoint block is head. - # Because execution block hash validation has already passed, - # we can treat this as `SYNCING`. - warn "Failed to load head execution block hash", head = newHead.blck - return Opt[void].ok() - - if headExecutionBlockHash.isZero: - # Blocks without execution payloads can't be optimistic. - self.dag.markBlockVerified(newHead.blck) - return Opt[void].ok() - - template callForkchoiceUpdated(attributes: untyped): auto = - await self.elManager.forkchoiceUpdated( - headBlockHash = headExecutionBlockHash, - safeBlockHash = newHead.safeExecutionBlockHash, - finalizedBlockHash = newHead.finalizedExecutionBlockHash, - payloadAttributes = Opt.none attributes) - - # Can't use dag.head here because it hasn't been updated yet - let - consensusFork = - self.dag.cfg.consensusForkAtEpoch(newHead.blck.bid.slot.epoch) - (payloadExecutionStatus, _) = withConsensusFork(consensusFork): - when consensusFork >= ConsensusFork.Bellatrix: - callForkchoiceUpdated(consensusFork.PayloadAttributes) - else: - callForkchoiceUpdated(PayloadAttributesV1) - - case payloadExecutionStatus - of PayloadExecutionStatus.valid: - self.dag.markBlockVerified(newHead.blck) - of PayloadExecutionStatus.invalid, PayloadExecutionStatus.invalid_block_hash: - self.attestationPool[].forkChoice.mark_root_invalid(newHead.blck.root) - self.quarantine[].addUnviable(newHead.blck.root) - return Opt.none(void) - of PayloadExecutionStatus.accepted, PayloadExecutionStatus.syncing: - # Don't do anything. Either newHead.blck.executionValid was already false, - # in which case it'd be superfluous to set it to false again, or the block - # was marked as `VALID` in the `newPayload` path already, in which case it - # is fine to keep it as valid here. Conceptually, were this to be lines of - # code, it'd be something like - # if newHead.blck.executionValid: - # do nothing because of latter case - # else: - # do nothing because it's a no-op - # So, either way, do nothing. 
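# ---------------------------------------------------------------------------
# A minimal, self-contained model of the PayloadExecutionStatus ->
# OptimisticStatus folding introduced for consensus_manager above; the enum
# names here are local stand-ins for the real engine-API / fork-choice types.
type
  PayloadStatusSketch = enum
    psValid, psInvalid, psSyncing, psAccepted, psInvalidBlockHash
  OptimisticStatusSketch = enum
    osValid, osNotValidated, osInvalidated

func toOptimistic(v: PayloadStatusSketch): OptimisticStatusSketch =
  case v
  of psValid: osValid
  of psSyncing, psAccepted: osNotValidated     # head stays optimistic
  of psInvalid, psInvalidBlockHash: osInvalidated

when isMainModule:
  doAssert toOptimistic(psSyncing) == osNotValidated
  doAssert toOptimistic(psInvalidBlockHash) == osInvalidated
# ---------------------------------------------------------------------------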
- discard - - return Opt[void].ok() + if self.optimisticHeadStatus == OptimisticStatus.invalidated: + # If the light client was wrong in the past, either the execution client or + # the light client has been compromised and we shouldn't trust either until + # a restart + warn "Ignoring optimistic head update due to previous invalidity", + bid, execution_block_hash + else: + let newHead = (bid: bid, execution_block_hash: execution_block_hash) + if self.optimisticHead != newHead: + self.optimisticHead = newHead + self.optimisticHeadStatus = OptimisticStatus.notValidated func getKnownValidatorsForBlsChangeTracking( self: ConsensusManager, newHead: BlockRef): seq[ValidatorIndex] = @@ -258,11 +217,6 @@ proc updateHead*(self: var ConsensusManager, wallSlot: Slot) = warn "Head selection failed, using previous head", head = shortLog(self.dag.head), wallSlot return - executionBlockHash = self.dag.loadExecutionBlockHash(newHead.blck) - - if executionBlockHash.isSome and executionBlockHash.unsafeGet.isZero: - # Blocks without execution payloads can't be optimistic. - self.dag.markBlockVerified(newHead.blck) self.updateHead(newHead.blck) @@ -328,128 +282,276 @@ proc getFeeRecipient*( proc getGasLimit*(self: ConsensusManager, pubkey: ValidatorPubKey): uint64 = getGasLimit(self.validatorsDir, self.defaultGasLimit, pubkey) -proc runProposalForkchoiceUpdated*( - self: ref ConsensusManager, wallSlot: Slot): Future[Opt[void]] {.async: (raises: [CancelledError]).} = +proc prepareNextSlot*( + self: ref ConsensusManager, proposalSlot: Slot, deadline: DeadlineFuture +) {.async: (raises: [CancelledError]).} = + ## Send a "warm-up" forkchoiceUpdated to the execution client, assuming that + ## `clearanceState` has been updated to the expected epoch of the proposal - + ## at the same time, ensure that the clearance state is ready for the next + ## block + + # When the chain is synced, the most likely block to be produced is the block + # right after head - we can exploit this assumption and advance the state + # to that slot before the block arrives, thus allowing us to do the expensive + # epoch transition ahead of time. + # Notably, we use the clearance state here because that's what the clearance + # function uses to validate the incoming block (or the one that's about to be + # produced) let - nextWallSlot = wallSlot + 1 - (validatorIndex, nextProposer) = self.checkNextProposer(wallSlot).valueOr: - return err() - debug "runProposalForkchoiceUpdated: expected to be proposing next slot", - nextWallSlot, validatorIndex, nextProposer - - # In Capella and later, computing correct withdrawals would mean creating a - # proposal state. Instead, only do that at proposal time. - if nextWallSlot.is_epoch: - debug "runProposalForkchoiceUpdated: not running early fcU for epoch-aligned proposal slot", - nextWallSlot, validatorIndex, nextProposer - return err() + dag = self.dag + head = dag.head + nextBsi = BlockSlotId.init(head.bid, proposalSlot) + startTick = Moment.now() + + var cache = StateCache() + if not dag.updateState(dag.clearanceState, nextBsi, true, cache, dag.updateFlags): + # This should never happen since we're basically advancing the slots of the + # head state + warn "Cannot prepare clearance state for next block - bug?" 
+ return + + debug "Prepared clearance state for next block", + nextBsi, updateStateDur = Moment.now() - startTick + + if self.forkchoiceInflight: + debug "Skipping proposal fcU, forkchoiceUpdated already in flight", proposalSlot + return + + let + preSlot = proposalSlot - 1 + (validatorIndex, nextProposer) = self.checkNextProposer(preSlot).valueOr: + debug "Skipping proposal fcU, no proposers registered", head, proposalSlot + return + + self.forkchoiceInflight = true + defer: + self.forkchoiceInflight = false # Approximately lines up with validator_duties version. Used optimistically/ # opportunistically, so mismatches are fine if not too frequent. - let - timestamp = withState(self.dag.headState): - compute_timestamp_at_slot(forkyState.data, nextWallSlot) - # If the current head block still forms the basis of the eventual proposal - # state, then its `get_randao_mix` will remain unchanged as well, as it is - # constant until the next block. - randomData = withState(self.dag.headState): - get_randao_mix(forkyState.data, get_current_epoch(forkyState.data)).data - feeRecipient = self[].getFeeRecipient( - nextProposer, Opt.some(validatorIndex), nextWallSlot.epoch) - beaconHead = self.attestationPool[].getBeaconHead(self.dag.head) - headBlockHash = ? self.dag.loadExecutionBlockHash(beaconHead.blck) - - if headBlockHash.isZero: - return err() - - let safeBlockHash = beaconHead.safeExecutionBlockHash - - withState(self.dag.headState): - template callForkchoiceUpdated(fcPayloadAttributes: auto) = + withState(dag.clearanceState): + when consensusFork == ConsensusFork.Gloas: + debugGloasComment "well, likely can't keep reusing V3 much longer" + elif consensusFork in ConsensusFork.Bellatrix .. ConsensusFork.Fulu: + debug "Sending proposal fcU", proposalSlot, validatorIndex, nextProposer + let + timestamp = compute_timestamp_at_slot(forkyState.data, proposalSlot) + # If the current head block still forms the basis of the eventual proposal + # state, then its `get_randao_mix` will remain unchanged as well, as it is + # constant until the next block. 
+ prevRandao = get_randao_mix(forkyState.data, get_current_epoch(forkyState.data)) + feeRecipient = self[].getFeeRecipient( + nextProposer, Opt.some(validatorIndex), proposalSlot.epoch + ) + beaconHead = self.attestationPool[].getBeaconHead(head) + headBlockHash = dag.loadExecutionBlockHash(beaconHead.blck).valueOr: + return + + if headBlockHash.isZero: + return + + when consensusFork >= ConsensusFork.Deneb: + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/prague.md + # does not define any new forkchoiceUpdated, so reuse V3 from Dencun + let attributes = PayloadAttributesV3( + timestamp: Quantity timestamp, + prevRandao: Bytes32 prevRandao.to(Hash32), + suggestedFeeRecipient: feeRecipient, + withdrawals: toEngineWithdrawals get_expected_withdrawals(forkyState.data), + parentBeaconBlockRoot: beaconHead.blck.bid.root.to(Hash32), + ) + elif consensusFork >= ConsensusFork.Capella: + let attributes = PayloadAttributesV2( + timestamp: Quantity timestamp, + prevRandao: Bytes32 prevRandao.to(Hash32), + suggestedFeeRecipient: feeRecipient, + withdrawals: toEngineWithdrawals get_expected_withdrawals(forkyState.data), + ) + else: + let attributes = PayloadAttributesV1( + timestamp: Quantity timestamp, + prevRandao: Bytes32 prevRandao.to(Hash32), + suggestedFeeRecipient: feeRecipient, + ) + let (status, _) = await self.elManager.forkchoiceUpdated( - headBlockHash, safeBlockHash, + headBlockHash, + beaconHead.safeExecutionBlockHash, beaconHead.finalizedExecutionBlockHash, - payloadAttributes = Opt.some fcPayloadAttributes) - debug "Fork-choice updated for proposal", status - - static: doAssert high(ConsensusFork) == ConsensusFork.Fulu - debugFuluComment "Will Fulu need fcuV4? Or there shall be a new fcuV introduced in Fulu? We don't know" - when consensusFork >= ConsensusFork.Deneb: - # https://github.com/ethereum/execution-apis/blob/90a46e9137c89d58e818e62fa33a0347bba50085/src/engine/prague.md - # does not define any new forkchoiceUpdated, so reuse V3 from Dencun - callForkchoiceUpdated(PayloadAttributesV3( - timestamp: Quantity timestamp, - prevRandao: FixedBytes[32] randomData, - suggestedFeeRecipient: feeRecipient, - withdrawals: - toEngineWithdrawals get_expected_withdrawals(forkyState.data), - parentBeaconBlockRoot: beaconHead.blck.bid.root.to(Hash32))) - elif consensusFork >= ConsensusFork.Capella: - callForkchoiceUpdated(PayloadAttributesV2( - timestamp: Quantity timestamp, - prevRandao: FixedBytes[32] randomData, - suggestedFeeRecipient: feeRecipient, - withdrawals: - toEngineWithdrawals get_expected_withdrawals(forkyState.data))) + Opt.some(attributes), + deadline, + false, + ) + debug "Fork-choice updated for proposal", status, headBlockHash, attributes + elif consensusFork in ConsensusFork.Phase0 .. 
ConsensusFork.Altair: + discard else: - callForkchoiceUpdated(PayloadAttributesV1( - timestamp: Quantity timestamp, - prevRandao: FixedBytes[32] randomData, - suggestedFeeRecipient: feeRecipient)) + {.error: "Unknown consensus fork " & $consensusFork.} - ok() +proc forkchoiceUpdated*( + self: ref ConsensusManager, + slot: Slot, + headBlockHash, safeBlockHash, finalizedBlockHash: Eth2Digest, + deadline: DeadlineFuture, + retry: bool, +): Future[PayloadExecutionStatus] {.async: (raises: [CancelledError]).} = + ## Call non-proposer version of forkchoiceUpdated using the given slot to + ## select the correct PayloadAttributes version + withConsensusFork(self[].dag.cfg.consensusForkAtEpoch(slot.epoch)): + when consensusFork >= ConsensusFork.Bellatrix: + if headBlockHash.isZero: + # Merge not yet activated + PayloadExecutionStatus.valid + else: + let (status, _) = await self.elManager.forkchoiceUpdated( + headBlockHash, + safeBlockHash, + finalizedBlockHash, + Opt.none consensusFork.PayloadAttributes, + deadline, + retry, + ) + status + else: + PayloadExecutionStatus.valid -proc updateHeadWithExecution*( - self: ref ConsensusManager, initialNewHead: BeaconHead, - getBeaconTimeFn: GetBeaconTimeFn) {.async: (raises: [CancelledError]).} = - ## Trigger fork choice and update the DAG with the new head block - ## This does not automatically prune the DAG after finalization - ## `pruneFinalized` must be called for pruning. +proc forkchoiceUpdated( + self: ref ConsensusManager, + head: BeaconHead, + wallSlot: Slot, + deadline: DeadlineFuture, + retry: bool, +): Future[bool] {.async: (raises: [CancelledError]).} = + ## Send forkchoiceUpdated to the client, return false iff the head was invalid + ## and true otherwise + + if self[].shouldSyncOptimistically(wallSlot): + # No point retrying for optimistic slots since there will be a new attempt + # "soon" + # However, we will make the call even if the optimistic head hasn't changed + # since the last slot since the finalized / safe blocks might have changed + let status = await self.forkchoiceUpdated( + self.optimisticHead.bid.slot, self.optimisticHead.execution_block_hash, + head.safeExecutionBlockHash, head.finalizedExecutionBlockHash, deadline, false, + ) + + self.optimisticHeadStatus = status.to(OptimisticStatus) + + case self.optimisticHeadStatus + of OptimisticStatus.valid, OptimisticStatus.notValidated: + true + of OptimisticStatus.invalidated: + warn "Light execution payload invalid - the execution client or the light client data is faulty", + payloadExecutionStatus = status, + optimisticBlockHash = self.optimisticHead.execution_block_hash + false + else: + let + headExecutionBlockHash = self.dag.loadExecutionBlockHash(head.blck).valueOr: + # `BlockRef` are only created for blocks that have passed + # execution block hash validation, either explicitly in + # `block_processor.storeBlock`, or implicitly, e.g., through + # checkpoint sync. With checkpoint sync, the checkpoint block + # is initially not available, so if there is a reorg to it, + # this may be triggered. Such a reorg could happen if the first + # imported chain is completely invalid (after the checkpoint block) + # and is subsequently pruned, in which case checkpoint block is head. + # Because execution block hash validation has already passed, + # we can treat this as `SYNCING`. 
+ warn "Failed to load head execution block hash", head = head.blck + return true + status = await self.forkchoiceUpdated( + head.blck.slot, headExecutionBlockHash, head.safeExecutionBlockHash, + head.finalizedExecutionBlockHash, deadline, retry, + ) + + case status.to(OptimisticStatus) + of OptimisticStatus.valid: + head.blck.markExecutionValid(true) + true + of OptimisticStatus.notValidated: + if head.blck.optimisticStatus != OptimisticStatus.notValidated: + info "Previously validated block not accepted as new head by execution client", + blck = head.blck, + prevStatus = head.blck.optimisticStatus, + payloadExecutionStatus = status + true + of OptimisticStatus.invalidated: + if head.blck.executionValid: + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/sync/optimistic.md#transitioning-from-valid---invalidated-or-invalidated---valid + warn "Previously valid execution payload turned invalid during fork choice update - check execution client for faults and restart the beacon node", + blck = head.blck, + prevStatus = head.blck.optimisticStatus, + payloadExecutionStatus = status + + head.blck.markExecutionValid(false) + self.attestationPool[].forkChoice.mark_root_invalid(head.blck.root) + self.quarantine[].addUnviable(head.blck.root) + false + +proc updateExecutionHead*( + self: ref ConsensusManager, + deadline: DeadlineFuture, + retry: bool, + getBeaconTimeFn: GetBeaconTimeFn, +) {.async: (raises: [CancelledError]).} = + ## Update the execution client with consensus information from the latest + ## head selection. + ## + ## In the case that we were optimistically synced and the execution client has + ## determined that the payload was invalid, we will also attempt to update + ## the consensus head towards a valid / nonValidated block by rerunning + ## fork choice with the new information about invalid blocks in mind. + + if self.forkchoiceInflight: + return - # Grab the new head according to our latest attestation data - try: - # Ensure dag.updateHead has most current information - var - attempts = 0 - newHead = initialNewHead - while (await self.updateExecutionClientHead(newHead)).isErr: - # This proc is called on every new block; guarantee timely return - inc attempts - const maxAttempts = 5 - if attempts >= maxAttempts: - warn "updateHeadWithExecution: too many attempts to recover from invalid payload", - attempts, maxAttempts, newHead, initialNewHead - break - - # Select new head for next attempt - let - wallTime = getBeaconTimeFn() - nextHead = self.attestationPool[].selectOptimisticHead(wallTime).valueOr: - warn "Head selection failed after invalid block, using previous head", - newHead, wallSlot = wallTime.slotOrZero - break - warn "updateHeadWithExecution: attempting to recover from invalid payload", - attempts, maxAttempts, newHead, initialNewHead, nextHead - newHead = nextHead + self.forkchoiceInflight = true + defer: + self.forkchoiceInflight = false + + var + attempts = 0 + wallTime = getBeaconTimeFn() + head = self.attestationPool[].getBeaconHead(self.dag.head) + + while not (await self.forkchoiceUpdated(head, wallTime.slotOrZero(), deadline, retry)): + # Each failed call to forkchoiceUpdated that fails should reveal new + # information about the suggested new head - a side effect of the failure is + # that the block should be marked as invalid and removed from fork choice + # consideration, meaning that a new fork choice should select either an + # earlier block or a different fork (as attestations keep coming in). 
+ # + # When light client data is available, we might also run into the case where + # the optimistic head is broken - this is very bad and light client head + # will simply be ignored until the next restart. + + if deadline.finished: + # We will try again soon .. hopefully with a new head + warn "Deadline expired while looking for valid payload", attempts, head + break + + # Select new head for next attempt + wallTime = getBeaconTimeFn() + let nextHead = self.attestationPool[].selectOptimisticHead(wallTime).valueOr: + warn "Head selection failed after invalid block, using previous head", + head, wallSlot = wallTime.slotOrZero + break + + warn "updateHeadWithExecution: attempting to recover from invalid payload", + attempts, head, nextHead + + head = nextHead # Store the new head in the chain DAG - this may cause epochs to be # justified and finalized self.dag.updateHead( - newHead.blck, self.quarantine[], - self[].getKnownValidatorsForBlsChangeTracking(newHead.blck)) - - # If this node should propose next slot, start preparing payload. Both - # fcUs are useful: the updateExecutionClientHead(newHead) call updates - # the head state (including optimistic status) that self.dagUpdateHead - # needs while runProposalForkchoiceUpdated requires RANDAO information - # from the head state corresponding to the `newHead` block, which only - # self.dag.updateHead(...) sets up. - discard await self.runProposalForkchoiceUpdated(getBeaconTimeFn().slotOrZero) - - self[].checkExpectedBlock() - except CatchableError as exc: - debug "updateHeadWithExecution error", - error = exc.msg + head.blck, + self.quarantine[], + self[].getKnownValidatorsForBlsChangeTracking(head.blck), + ) + + attempts += 1 proc pruneStateCachesAndForkChoice*(self: var ConsensusManager) = ## Prune unneeded and invalidated data after finalization @@ -460,4 +562,4 @@ proc pruneStateCachesAndForkChoice*(self: var ConsensusManager) = # Cleanup DAG & fork choice if we have a finalized head if self.dag.needStateCachesAndForkChoicePruning(): self.dag.pruneStateCachesDAG() - self.attestationPool[].prune() \ No newline at end of file + self.attestationPool[].prune() diff --git a/beacon_chain/consensus_object_pools/data_column_quarantine.nim b/beacon_chain/consensus_object_pools/data_column_quarantine.nim deleted file mode 100644 index 63c53a1ff4..0000000000 --- a/beacon_chain/consensus_object_pools/data_column_quarantine.nim +++ /dev/null @@ -1,189 +0,0 @@ -# beacon_chain -# Copyright (c) 2018-2025 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. 
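# ---------------------------------------------------------------------------
# A minimal sketch, using stand-in types, of the head re-selection loop in
# `updateExecutionHead` above: when the execution client reports the current
# head as invalid, fork choice is re-run and the next-best candidate is tried.
# Here a simple attempt budget stands in for the deadline check in the real
# code; candidate labels and the verdict predicate are hypothetical.
proc selectValidHead(
    candidates: seq[string],
    isValid: proc(head: string): bool,
    maxAttempts: int): string =
  var attempts = 0
  for head in candidates:
    if attempts >= maxAttempts:
      break
    inc attempts
    if isValid(head):
      return head
  ""  # no acceptable head within the budget; caller keeps the previous head

when isMainModule:
  let elSaysOnlyBIsValid = proc(h: string): bool = h == "B"  # hypothetical verdicts
  doAssert selectValidHead(@["C", "B", "A"], elSaysOnlyBIsValid, maxAttempts = 5) == "B"
  doAssert selectValidHead(@["C", "A"], elSaysOnlyBIsValid, maxAttempts = 1) == ""
# ---------------------------------------------------------------------------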
- -{.push raises: [].} - -import - std/tables, - ../spec/datatypes/fulu, - ../spec/helpers - -from std/sequtils import mapIt -from std/strutils import join - -const - MaxDataColumns = 3 * SLOTS_PER_EPOCH * NUMBER_OF_COLUMNS - ## Same limit as `MaxOrphans` in `block_quarantine` - ## data columns may arrive before an orphan is tagged `columnless` - -type - DataColumnQuarantine* = object - data_columns*: - OrderedTable[DataColumnIdentifier, ref DataColumnSidecar] - supernode*: bool - custody_columns*: seq[ColumnIndex] - onDataColumnSidecarCallback*: OnDataColumnSidecarCallback - - DataColumnFetchRecord* = object - block_root*: Eth2Digest - indices*: seq[ColumnIndex] - - OnDataColumnSidecarCallback = proc(data: DataColumnSidecar) {.gcsafe, raises: [].} - -func init*(T: type DataColumnQuarantine): T = - T() - -func shortLog*(x: seq[DataColumnFetchRecord]): string = - "[" & x.mapIt(shortLog(it.block_root) & shortLog(it.indices)).join(", ") & "]" - -func put*(quarantine: var DataColumnQuarantine, - dataColumnSidecar: ref DataColumnSidecar) = - if quarantine.data_columns.len >= static(MaxDataColumns.int): - # FIFO if full. For example, sync manager and request manager can race - # to put data columns in at the same time, so one gets data column - # insert -> block resolve -> data column insert, which leaves - # garbage data columns. - # - # This also therefore automatically garbage-collects otherwise valid - # data columns that are correctly signed, point to either correct block - # root which isn't ever seen, and then for any reason simply never used. - var oldest_column_key: DataColumnIdentifier - for k in quarantine.data_columns.keys: - oldest_column_key = k - break - quarantine.data_columns.del(oldest_column_key) - let block_root = - hash_tree_root(dataColumnSidecar.signed_block_header.message) - discard quarantine.data_columns.hasKeyOrPut( - DataColumnIdentifier(block_root: block_root, - index: dataColumnSidecar.index), - dataColumnSidecar) - -func hasDataColumn*( - quarantine: DataColumnQuarantine, - slot: Slot, - proposer_index: uint64, - index: ColumnIndex): bool = - for data_column_sidecar in quarantine.data_columns.values: - template block_header: untyped = - data_column_sidecar.signed_block_header.message - if block_header.slot == slot and - block_header.proposer_index == proposer_index and - data_column_sidecar.index == index: - return true - false - -func peekColumnIndices*(quarantine: DataColumnQuarantine, - blck: fulu.SignedBeaconBlock): - seq[ColumnIndex] = - # Peeks into the currently received column indices - # from quarantine, necessary data availability checks - var indices: seq[ColumnIndex] - for col_idx in quarantine.custody_columns: - if quarantine.data_columns.hasKey( - DataColumnIdentifier(block_root: blck.root, - index: ColumnIndex col_idx)): - indices.add(col_idx) - indices - -func gatherDataColumns*(quarantine: DataColumnQuarantine, - digest: Eth2Digest): - seq[ref DataColumnSidecar] = - # Returns the current data columns queried by a block header - var columns: seq[ref DataColumnSidecar] - for i in quarantine.custody_columns: - let dc_identifier = - DataColumnIdentifier( - block_root: digest, - index: i) - if quarantine.data_columns.hasKey(dc_identifier): - let value = - quarantine.data_columns.getOrDefault(dc_identifier, - default(ref DataColumnSidecar)) - columns.add(value) - columns - -func popDataColumns*( - quarantine: var DataColumnQuarantine, digest: Eth2Digest, - blck: fulu.SignedBeaconBlock): - seq[ref DataColumnSidecar] = - var r: DataColumnSidecars - for idx 
in quarantine.custody_columns: - var c: ref DataColumnSidecar - if quarantine.data_columns.pop( - DataColumnIdentifier(block_root: digest, - index: idx), - c): - r.add(c) - r - -func hasMissingDataColumns*(quarantine: DataColumnQuarantine, - blck: fulu.SignedBeaconBlock): bool = - # `hasMissingDataColumns` consists of the data columns that, - # have been missed over gossip, also in case of a supernode, - # the method would return missing columns when the supernode - # has not received data columns upto the requisite limit (i.e 50% - # of NUMBER_OF_COLUMNS). - - # This method shall be actively used by the `RequestManager` to - # root request columns over RPC. - var col_counter = 0 - for idx in quarantine.custody_columns: - let dc_identifier = - DataColumnIdentifier( - block_root: blck.root, - index: idx) - if dc_identifier notin quarantine.data_columns: - inc col_counter - if quarantine.supernode and col_counter != NUMBER_OF_COLUMNS: - return false - elif quarantine.supernode == false and - col_counter != max(SAMPLES_PER_SLOT, CUSTODY_REQUIREMENT): - return false - else: - return true - -func hasEnoughDataColumns*(quarantine: DataColumnQuarantine, - blck: fulu.SignedBeaconBlock): bool = - # `hasEnoughDataColumns` dictates whether there is `enough` - # data columns for a block to be enqueued, ideally for a supernode - # if it receives atleast 50%+ gossip and RPC - - # Once 50%+ columns are available we can use this function to - # check it, and thereby check column reconstructability, right from - # gossip validation, consequently populating the quarantine with - # rest of the data columns. - if quarantine.supernode: - let - collectedColumns = quarantine.gatherDataColumns(blck.root) - if collectedColumns.len >= (quarantine.custody_columns.len div 2): - return true - else: - for i in quarantine.custody_columns: - let dc_identifier = - DataColumnIdentifier( - block_root: blck.root, - index: i) - if dc_identifier notin quarantine.data_columns: - return false - else: - return true - -func dataColumnFetchRecord*(quarantine: DataColumnQuarantine, - blck: fulu.SignedBeaconBlock): - DataColumnFetchRecord = - var indices: seq[ColumnIndex] - for i in quarantine.custody_columns: - let - idx = ColumnIndex(i) - dc_id = DataColumnIdentifier( - block_root: blck.root, - index: idx) - if not quarantine.data_columns.hasKey( - dc_id): - indices.add(idx) - DataColumnFetchRecord(block_root: blck.root, indices: indices) diff --git a/beacon_chain/consensus_object_pools/spec_cache.nim b/beacon_chain/consensus_object_pools/spec_cache.nim index e014317047..9640238aa7 100644 --- a/beacon_chain/consensus_object_pools/spec_cache.nim +++ b/beacon_chain/consensus_object_pools/spec_cache.nim @@ -27,7 +27,7 @@ logScope: topics = "spec_cache" func count_active_validators*(shufflingRef: ShufflingRef): uint64 = shufflingRef.shuffled_active_validator_indices.lenu64 -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#get_committee_count_per_slot +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#get_committee_count_per_slot func get_committee_count_per_slot*(shufflingRef: ShufflingRef): uint64 = get_committee_count_per_slot(count_active_validators(shufflingRef)) @@ -66,7 +66,7 @@ func get_beacon_committee*( committees_per_slot * SLOTS_PER_EPOCH ) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#get_beacon_committee +# 
https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#get_beacon_committee func get_beacon_committee_len*( shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex): uint64 = ## Return the number of members in the beacon committee at ``slot`` for ``index``. @@ -229,14 +229,14 @@ func get_attesting_indices_one*(shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex, bits: CommitteeValidatorsBits | ElectraCommitteeValidatorsBits): - Option[ValidatorIndex] = + Opt[ValidatorIndex] = # A variation on get_attesting_indices that returns the validator index only # if only one validator index is set - var res = none(ValidatorIndex) + var res = Opt.none(ValidatorIndex) for validator_index in get_attesting_indices( shufflingRef, slot, committee_index, bits): - if res.isSome(): return none(ValidatorIndex) - res = some(validator_index) + if res.isSome(): return Opt.none(ValidatorIndex) + res = Opt.some(validator_index) res func get_attesting_indices_one*(shufflingRef: ShufflingRef, diff --git a/beacon_chain/consensus_object_pools/validator_change_pool.nim b/beacon_chain/consensus_object_pools/validator_change_pool.nim index f8c9bc68ee..9ea7a7450b 100644 --- a/beacon_chain/consensus_object_pools/validator_change_pool.nim +++ b/beacon_chain/consensus_object_pools/validator_change_pool.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2020-2024 Status Research & Development GmbH +# Copyright (c) 2020-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -136,7 +136,7 @@ func addValidatorChangeMessage( subpool.addLast(validatorChangeMessage) doAssert subpool.lenu64 <= bound -iterator getValidatorIndices(proposer_slashing: ProposerSlashing): uint64 = +iterator getValidatorIndices*(proposer_slashing: ProposerSlashing): uint64 = yield proposer_slashing.signed_header_1.message.proposer_index iterator getValidatorIndices(voluntary_exit: SignedVoluntaryExit): uint64 = @@ -259,11 +259,11 @@ proc getValidatorChangeMessagesForBlock( if not validateValidatorChangeMessage(cfg, state, validator_change_message): continue - var skip = false + # Filter messages that don't affect any new validators + var skip = true for slashed_index in getValidatorIndices(validator_change_message): - if seen.containsOrIncl(slashed_index): - skip = true - break + if not seen.containsOrIncl(slashed_index): + skip = false if skip: continue @@ -273,31 +273,34 @@ proc getValidatorChangeMessagesForBlock( proc getBeaconBlockValidatorChanges*( pool: var ValidatorChangePool, cfg: RuntimeConfig, state: ForkyBeaconState): BeaconBlockValidatorChanges = - var - indices: HashSet[uint64] - res: BeaconBlockValidatorChanges - - getValidatorChangeMessagesForBlock( - pool.phase0_attester_slashings, cfg, state, indices, - res.phase0_attester_slashings) - getValidatorChangeMessagesForBlock( - pool.proposer_slashings, cfg, state, indices, res.proposer_slashings) - getValidatorChangeMessagesForBlock( - pool.voluntary_exits, cfg, state, indices, res.voluntary_exits) - - when typeof(state).kind >= ConsensusFork.Capella: - # Prioritize these + var res: BeaconBlockValidatorChanges + + # Exits (with priority on slashings) + block: + var indices: HashSet[uint64] + when typeof(state).kind >= ConsensusFork.Electra: + getValidatorChangeMessagesForBlock( + 
pool.electra_attester_slashings, cfg, state, indices, + res.electra_attester_slashings) + else: + getValidatorChangeMessagesForBlock( + pool.phase0_attester_slashings, cfg, state, indices, + res.phase0_attester_slashings) getValidatorChangeMessagesForBlock( - pool.bls_to_execution_changes_api, cfg, state, indices, - res.bls_to_execution_changes) - + pool.proposer_slashings, cfg, state, indices, res.proposer_slashings) getValidatorChangeMessagesForBlock( - pool.bls_to_execution_changes_gossip, cfg, state, indices, - res.bls_to_execution_changes) + pool.voluntary_exits, cfg, state, indices, res.voluntary_exits) - when typeof(state).kind >= ConsensusFork.Electra: - getValidatorChangeMessagesForBlock( - pool.electra_attester_slashings, cfg, state, indices, - res.electra_attester_slashings) + # Credential changes (can be combined with exit) + when typeof(state).kind >= ConsensusFork.Capella: + block: + var indices: HashSet[uint64] + # Prioritize those from API + getValidatorChangeMessagesForBlock( + pool.bls_to_execution_changes_api, cfg, state, indices, + res.bls_to_execution_changes) + getValidatorChangeMessagesForBlock( + pool.bls_to_execution_changes_gossip, cfg, state, indices, + res.bls_to_execution_changes) res diff --git a/beacon_chain/consensus_object_pools/vanity_logs/fulu/color.ans b/beacon_chain/consensus_object_pools/vanity_logs/fulu/color.ans new file mode 100644 index 0000000000..90bf43981a --- /dev/null +++ b/beacon_chain/consensus_object_pools/vanity_logs/fulu/color.ans @@ -0,0 +1,25 @@ + : : : : : : |`-. /-"| |""""""". + : : ./"""`: : : .-'""""\ |. \/ .| | [] | +.:....:....../ ::::::\ ...........:....:../:::::. \..|:|\../|:|.|...... /..... + : : |.::::::::`\.---""""""""""--/::::: . . | |:| `' |:| |:::[]::\ +.:....:......| .... ::: .""""""mmm""""".. : . |..|_| |_|.|________|.... + : : `\ . .' mmmmmMmmmmmm `. /' .------.. .--. + : : :| .::. "mmm...: .mmmm" :. | |..----..| | | + : : :`\ .'.::..: ...:::... } ::, <' |:| |:| | | + : : :./:..:.\.... ..' `. _.../ : : `. |:`----':| |:: """"\ + : : / :::: : \ 0 \ . / 0 /' :: ::`. `--------' `--------' + : : / :: : : """// . . """ : :: \ |-------. .------.. +.:....:......./ :: :: :..'' .: :: \..| /\ |.| .----. |.... + : : / :: : . . . . :: :: \ |.. "" < | | | | +.:....:.....{ :: :: :' : :' :::' :: \..|::|""\ :|.|:`----':|.... + : : ,{ :::: `.: .__ .. ___ .:::: \ |__| |_| `--------' + : : _/:: :::: /' ' \_`---' _} ' :::: \ .--------. |""""""". + : _.:/: : : _-""""(```. `\ /' ...''|---.__ \ | .______| | [] | + :-' : :' .--'_-'""""\```... |". . ... )--. |---.|: ...| |...... / +N : :' : /`` /' __..---.::__-:-_` ::.---. `. `.-_ |::---^--. |:::[]::\ +1 : : : .' : / .' ::::::`--' `\ ` | `.--------' |________| +M ::' : .':. { .'.'.'. } ::`. ::. \ .-------. +B : : | `-,_ `::::::::' .',: |:: :|............| _____|.... +U : : be. `\ """"" ::' :: : | `\..\__ +S : :: at. . `\ ::. .. :: .'':`............_\_ :::\..... + : `::. scribe... `: : ::: `\ .:: .:' |________| diff --git a/beacon_chain/consensus_object_pools/vanity_logs/fulu/mono.txt b/beacon_chain/consensus_object_pools/vanity_logs/fulu/mono.txt new file mode 100644 index 0000000000..fc9e5c3e1c --- /dev/null +++ b/beacon_chain/consensus_object_pools/vanity_logs/fulu/mono.txt @@ -0,0 +1,25 @@ + : : : : : : |`-. /-"| |""""""". + : : ./"""`: : : .-'""""\ |. \/ .| | [] | +.:....:....../ ::::::\ ...........:....:../:::::. \..|:|\../|:|.|...... /..... + : : |.::::::::`\.---""""""""""--/::::: . . | |:| `' |:| |:::[]::\ +.:....:......| .... ::: .""""""mmm""""".. : . |..|_| |_|.|________|.... + : : `\ . 
.' mmmmmMmmmmmm `. /' .------.. .--. + : : :| .::. "mmm...: .mmmm" :. | |..----..| | | + : : :`\ .'.::..: ...:::... } ::, <' |:| |:| | | + : : :./:..:.\.... ..' `. _.../ : : `. |:`----':| |:: """"\ + : : / :::: : \ 0 \ . / 0 /' :: ::`. `--------' `--------' + : : / :: : : """// . . """ : :: \ |-------. .------.. +.:....:......./ :: :: :..'' .: :: \..| /\ |.| .----. |.... + : : / :: : . . . . :: :: \ |.. "" < | | | | +.:....:.....{ :: :: :' : :' :::' :: \..|::|""\ :|.|:`----':|.... + : : ,{ :::: `.: .__ .. ___ .:::: \ |__| |_| `--------' + : : _/:: :::: /' ' \_`---' _} ' :::: \ .--------. |""""""". + : _.:/: : : _-""""(```. `\ /' ...''|---.__ \ | .______| | [] | + :-' : :' .--'_-'""""\```... |". . ... )--. |---.|: ...| |...... / +N : :' : /`` /' __..---.::__-:-_` ::.---. `. `.-_ |::---^--. |:::[]::\ +1 : : : .' : / .' ::::::`--' `\ ` | `.--------' |________| +M ::' : .':. { .'.'.'. } ::`. ::. \ .-------. +B : : | `-,_ `::::::::' .',: |:: :|............| _____|.... +U : : be. `\ """"" ::' :: : | `\..\__ +S : :: at. . `\ ::. .. :: .'':`............_\_ :::\..... + : `::. scribe... `: : ::: `\ .:: .:' |________| diff --git a/beacon_chain/consensus_object_pools/vanity_logs/vanity_logs.nim b/beacon_chain/consensus_object_pools/vanity_logs/vanity_logs.nim index e3e9a1a797..a86a79ac5f 100644 --- a/beacon_chain/consensus_object_pools/vanity_logs/vanity_logs.nim +++ b/beacon_chain/consensus_object_pools/vanity_logs/vanity_logs.nim @@ -15,10 +15,6 @@ type LogProc* = proc() {.gcsafe, raises: [].} VanityLogs* = object - # Gets displayed on upgrade to Capella. May be displayed multiple times - # in case of chain reorgs around the upgrade. - onUpgradeToCapella*: LogProc - # Gets displayed on when a BLS to execution change message for a validator # known by this node appears in a head block onKnownBlsToExecutionChange*: LogProc @@ -35,13 +31,20 @@ type # known in a head block. onKnownCompoundingChange*: LogProc + # Gets displayed on upgrade to Fulu. May be displayed multiple times + # in case of chain reorgs around the upgrade. + onUpgradeToFulu*: LogProc + + # Gets displayed on a blob parameters update. + # May be displayed multiple times in case of chain reorgs. + onBlobParametersUpdate*: LogProc + # Created by https://beatscribe.com (beatscribe#1008 on Discord) # These need to be the main body of the log not to be reformatted or escaped. 
# # Policy: Retain retired art files in the directory, but don't link them anymore proc capellaMono*() = notice "\n" & staticRead("capella" / "mono.txt") -proc capellaColor*() = notice "\n" & staticRead("capella" / "color.ans") proc capellaBlink*() = notice "\n" & staticRead("capella" / "blink.ans") proc denebMono*() = notice "\n" & staticRead("deneb" / "mono.txt") @@ -50,3 +53,6 @@ proc denebColor*() = notice "\n" & staticRead("deneb" / "color.ans") proc electraMono*() = notice "\n" & staticRead("electra" / "mono.txt") proc electraColor*() = notice "\n" & staticRead("electra" / "color.ans") proc electraBlink*() = notice "\n" & staticRead("electra" / "blink.ans") + +proc fuluMono*() = notice "\n" & staticRead("fulu" / "mono.txt") +proc fuluColor*() = notice "\n" & staticRead("fulu" / "color.ans") diff --git a/beacon_chain/db_limits.nim b/beacon_chain/db_limits.nim deleted file mode 100644 index 567b24eeee..0000000000 --- a/beacon_chain/db_limits.nim +++ /dev/null @@ -1,16 +0,0 @@ -# beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. - -{.push raises: [].} - -import spec/datatypes/constants - -# No `uint64` support in Sqlite -template isSupportedBySQLite*(slot: Slot): bool = - slot <= int64.high.Slot -template isSupportedBySQLite*(period: SyncCommitteePeriod): bool = - period <= int64.high.SyncCommitteePeriod diff --git a/beacon_chain/db_utils.nim b/beacon_chain/db_utils.nim new file mode 100644 index 0000000000..a83667552f --- /dev/null +++ b/beacon_chain/db_utils.nim @@ -0,0 +1,46 @@ +# beacon_chain +# Copyright (c) 2022-2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} + +import + chronicles, + snappy, + spec/datatypes/constants, + spec/eth2_ssz_serialization + +# No `uint64` support in Sqlite +template isSupportedBySQLite*(slot: Slot): bool = + slot <= int64.high.Slot +template isSupportedBySQLite*(period: SyncCommitteePeriod): bool = + period <= int64.high.SyncCommitteePeriod + +template disposeSafe*(s: untyped): untyped = + if distinctBase(s) != nil: + s.dispose() + s = typeof(s)(nil) + +proc decodeSZSSZ*[T]( + data: openArray[byte], output: var T, updateRoot = false): bool = + try: + let decompressed = decodeFramed(data, checkIntegrity = false) + readSszBytes(decompressed, output, updateRoot) + true + except CatchableError as e: + # If the data can't be deserialized, it could be because it's from a + # version of the software that uses a different SSZ encoding + warn "Unable to deserialize data, old database?", + err = e.msg, typ = name(T), dataLen = data.len + false + +func encodeSZSSZ*(v: auto): seq[byte] = + # https://github.com/google/snappy/blob/main/framing_format.txt + try: + encodeFramed(SSZ.encode(v)) + except CatchableError as err: + # In-memory encode shouldn't fail! 
+ raiseAssert err.msg diff --git a/beacon_chain/deposits.nim b/beacon_chain/deposits.nim index 040d90db2b..be7491335b 100644 --- a/beacon_chain/deposits.nim +++ b/beacon_chain/deposits.nim @@ -27,10 +27,10 @@ type of ValidatorStorageKind.Identifier: ident: ValidatorIdent -static: doAssert(high(ConsensusFork) == ConsensusFork.Fulu, +static: doAssert(high(ConsensusFork) == ConsensusFork.Gloas, "Update OptionalForks constant!") const - OptionalForks* = {ConsensusFork.Electra, ConsensusFork.Fulu} + OptionalForks* = {ConsensusFork.Fulu, ConsensusFork.Gloas} ## When a new ConsensusFork is added and before this fork is activated on ## `mainnet`, it should be part of `OptionalForks`. ## In this case, the client will ignore missing _VERSION @@ -208,14 +208,19 @@ proc restValidatorExit(config: BeaconNodeConf) {.async.} = quit 1 let currentEpoch = block: + if config.eth2Network.isNone: + fatal "Please specify the intended network for the exits" + quit 1 let - beaconClock = BeaconClock.init(genesis.genesis_time).valueOr: + metadata = config.loadEth2Network() + genesisTime = genesis.genesis_time + beaconClock = BeaconClock.init(metadata.cfg.time, genesisTime).valueOr: error "Server returned invalid genesis time", genesis quit 1 time = getTime() slot = beaconClock.toSlot(time).slot - Epoch(slot.uint64 div 32) + slot.epoch let exitAtEpoch = if config.exitAtEpoch.isSome: Epoch config.exitAtEpoch.get @@ -370,7 +375,7 @@ proc doDeposits*(config: BeaconNodeConf, rng: var HmacDrbgContext) {. case config.depositsCmd of DepositsCmd.createTestnetDeposits: if config.eth2Network.isNone: - fatal "Please specify the intended testnet for the deposits" + fatal "Please specify the intended network for the deposits" quit 1 let metadata = config.loadEth2Network() var seed: KeySeed diff --git a/beacon_chain/el/deposit_contract.nim b/beacon_chain/el/deposit_contract.nim deleted file mode 100644 index 6aa87a3c1d..0000000000 --- a/beacon_chain/el/deposit_contract.nim +++ /dev/null @@ -1,310 +0,0 @@ -# beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. - -{.push raises: [].} - -import - std/[os, sequtils, strutils, options, json, terminal], - chronos, chronicles, confutils, stint, json_serialization, - ../filepath, - ../networking/network_metadata, - web3, web3/confutils_defs, eth/common/keys, eth/p2p/discoveryv5/random2, - stew/[io2, byteutils], - ../spec/eth2_merkleization, - ../spec/datatypes/base, - ../validators/keystore_management - -# Compiled version of /scripts/depositContract.v.py in this repo -# The contract was compiled in Remix (https://remix.ethereum.org/) with vyper (remote) compiler. -const contractCode = - hexToSeqByte staticRead "deposit_contract_code.txt" - -type - Eth1Address = web3.Address - - StartUpCommand {.pure.} = enum - deploy - sendEth - generateSimulationDeposits - sendDeposits - - CliConfig = object - web3Url* {. - defaultValue: "", - desc: "URL of the Web3 server to observe Eth1" - name: "web3-url" }: string - - privateKey* {. - defaultValue: "" - desc: "Private key of the controlling account" - name: "private-key" }: string - - askForKey {. 
- defaultValue: false - desc: "Ask for an Eth1 private key interactively" - name: "ask-for-key" }: bool - - eth2Network* {. - desc: "The Eth2 network preset to use" - name: "network" }: Option[string] - - case cmd* {.command.}: StartUpCommand - of deploy: - discard - - of sendEth: - toAddress {.name: "to".}: Eth1Address - valueEth {.name: "eth".}: string - - of generateSimulationDeposits: - simulationDepositsCount {. - desc: "The number of validator keystores to generate" - name: "count" }: Natural - - outValidatorsDir {. - desc: "A directory to store the generated validator keystores" - name: "out-validators-dir" }: OutDir - - outSecretsDir {. - desc: "A directory to store the generated keystore password files" - name: "out-secrets-dir" }: OutDir - - outDepositsFile {. - desc: "A LaunchPad deposits file to write" - name: "out-deposits-file" }: OutFile - - threshold {. - defaultValue: 1 - desc: "Used to generate distributed keys" - name: "threshold" }: uint32 - - remoteValidatorsCount {. - defaultValue: 0 - desc: "The number of distributed validators validator" - name: "remote-validators-count" }: uint32 - - remoteSignersUrls {. - desc: "URLs of the remote signers" - name: "remote-signer" }: seq[string] - - of sendDeposits: - depositsFile {. - desc: "A LaunchPad deposits file" - name: "deposits-file" }: InputFile - - depositContractAddress {. - desc: "Address of the deposit contract" - name: "deposit-contract" }: Eth1Address - - minDelay {. - defaultValue: 0.0 - desc: "Minimum possible delay between making two deposits (in seconds)" - name: "min-delay" }: float - - maxDelay {. - defaultValue: 0.0 - desc: "Maximum possible delay between making two deposits (in seconds)" - name: "max-delay" }: float - -type - PubKeyBytes = DynamicBytes[48, 48] - WithdrawalCredentialsBytes = DynamicBytes[32, 32] - SignatureBytes = DynamicBytes[96, 96] - -contract(DepositContract): - proc deposit(pubkey: PubKeyBytes, - withdrawalCredentials: WithdrawalCredentialsBytes, - signature: SignatureBytes, - deposit_data_root: FixedBytes[32]) - -proc deployContract*(web3: Web3, code: seq[byte]): Future[ReceiptObject] {.async.} = - let tr = TransactionArgs( - `from`: Opt.some web3.defaultAccount, - data: Opt.some code, - gas: Opt.some Quantity(3000000), - gasPrice: Opt.some Quantity(1)) - - let r = await web3.send(tr) - result = await web3.getMinedTransactionReceipt(r) - -proc sendEth(web3: Web3, to: Eth1Address, valueEth: int): Future[TxHash] = - let tr = TransactionArgs( - `from`: Opt.some web3.defaultAccount, - # TODO: Force json-rpc to generate 'data' field - # should not be needed anymore, new execution-api schema - # is using `input` field - data: Opt.some(newSeq[byte]()), - gas: Opt.some Quantity(3000000), - gasPrice: Opt.some Quantity(1), - value: Opt.some(valueEth.u256 * 1000000000000000000.u256), - to: Opt.some(to)) - web3.send(tr) - -type - DelayGenerator* = proc(): chronos.Duration {.gcsafe, raises: [].} - -proc ethToWei(eth: UInt256): UInt256 = - eth * 1000000000000000000.u256 - -proc initWeb3(web3Url, privateKey: string): Future[Web3] {.async.} = - result = await newWeb3(web3Url) - if privateKey.len != 0: - result.privateKey = Opt.some(PrivateKey.fromHex(privateKey)[]) - else: - let accounts = await result.provider.eth_accounts() - doAssert(accounts.len > 0) - result.defaultAccount = accounts[0] - -# TODO: async functions should note take `seq` inputs because -# this leads to full copies. 
-proc sendDeposits*(deposits: seq[LaunchPadDeposit], - web3Url, privateKey: string, - depositContractAddress: Eth1Address, - delayGenerator: DelayGenerator = nil) {.async.} = - notice "Sending deposits", - web3 = web3Url, - depositContract = depositContractAddress - - var web3 = await initWeb3(web3Url, privateKey) - let gasPrice = int(await web3.provider.eth_gasPrice()) * 2 - let depositContract = web3.contractSender(DepositContract, - Address depositContractAddress) - for i in 4200 ..< deposits.len: - let dp = deposits[i] as DepositData - - while true: - try: - let tx = depositContract.deposit( - PubKeyBytes(@(dp.pubkey.toRaw())), - WithdrawalCredentialsBytes(@(dp.withdrawal_credentials.data)), - SignatureBytes(@(dp.signature.toRaw())), - FixedBytes[32](hash_tree_root(dp).data)) - - let status = await tx.send(value = 32.u256.ethToWei, gasPrice = gasPrice) - - info "Deposit sent", tx = $status - - if delayGenerator != nil: - await sleepAsync(delayGenerator()) - - break - except CatchableError: - await sleepAsync(60.seconds) - web3 = await initWeb3(web3Url, privateKey) - -{.pop.} # TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError -proc main() {.async.} = - var conf = try: CliConfig.load() - except CatchableError as exc: - raise exc - except Exception as exc: # TODO fix confutils - raiseAssert exc.msg - - let rng = HmacDrbgContext.new() - - if conf.cmd == StartUpCommand.generateSimulationDeposits: - let - mnemonic = generateMnemonic(rng[]) - seed = getSeed(mnemonic, KeystorePass.init "") - cfg = getRuntimeConfig(conf.eth2Network) - threshold = if conf.remoteSignersUrls.len > 0: conf.threshold - else: 0 - - if conf.remoteValidatorsCount > 0 and - conf.remoteSignersUrls.len == 0: - fatal "Please specify at least one remote signer URL" - quit 1 - - if (let res = secureCreatePath(string conf.outValidatorsDir); res.isErr): - warn "Could not create validators folder", - path = string conf.outValidatorsDir, err = ioErrorMsg(res.error) - - if (let res = secureCreatePath(string conf.outSecretsDir); res.isErr): - warn "Could not create secrets folder", - path = string conf.outSecretsDir, err = ioErrorMsg(res.error) - - let deposits = generateDeposits( - cfg, - rng[], - seed, - 0, conf.simulationDepositsCount, - string conf.outValidatorsDir, - string conf.outSecretsDir, - conf.remoteSignersUrls, - threshold, - conf.remoteValidatorsCount, - KeystoreMode.Fast) - - if deposits.isErr: - fatal "Failed to generate deposits", err = deposits.error - quit 1 - - let launchPadDeposits = - mapIt(deposits.value, LaunchPadDeposit.init(cfg, it)) - - Json.saveFile(string conf.outDepositsFile, launchPadDeposits) - notice "Deposit data written", filename = conf.outDepositsFile - quit 0 - - var deposits: seq[LaunchPadDeposit] - if conf.cmd == StartUpCommand.sendDeposits: - deposits = Json.loadFile(string conf.depositsFile, seq[LaunchPadDeposit]) - - if conf.askForKey: - var - privateKey: string # TODO consider using a SecretString type - reasonForKey = "" - - if conf.cmd == StartUpCommand.sendDeposits: - let - depositsWord = if deposits.len > 1: "deposits" else: "deposit" - totalEthNeeded = 32 * deposits.len - reasonForKey = " in order to make your $1 (you'll need access to $2 ETH)" % - [depositsWord, $totalEthNeeded] - - echo "Please enter your Goerli Eth1 private key in hex form (e.g. 
0x1a2...f3c)" & - reasonForKey - - if not readPasswordFromStdin("> ", privateKey): - error "Failed to read an Eth1 private key from standard input" - - if privateKey.len > 0: - conf.privateKey = privateKey - - let web3 = await initWeb3(conf.web3Url, conf.privateKey) - - case conf.cmd - of StartUpCommand.deploy: - let receipt = await web3.deployContract(contractCode) - echo receipt.contractAddress.get, ";", receipt.blockHash - - of StartUpCommand.sendEth: - echo await sendEth(web3, conf.toAddress, conf.valueEth.parseInt) - - of StartUpCommand.sendDeposits: - var delayGenerator: DelayGenerator - if not (conf.maxDelay > 0.0): - conf.maxDelay = conf.minDelay - elif conf.minDelay > conf.maxDelay: - echo "The minimum delay should not be larger than the maximum delay" - quit 1 - - if conf.maxDelay > 0.0: - delayGenerator = proc (): chronos.Duration = - let - minDelay = (conf.minDelay*1000).int64 - maxDelay = (conf.maxDelay*1000).int64 - chronos.milliseconds (rng[].rand(maxDelay - minDelay) + minDelay) - - await sendDeposits(deposits, conf.web3Url, conf.privateKey, - conf.depositContractAddress, delayGenerator) - - of StartUpCommand.generateSimulationDeposits: - # This is handled above before the case statement - discard - -when isMainModule: waitFor main() diff --git a/beacon_chain/el/deposit_contract.nim.cfg b/beacon_chain/el/deposit_contract.nim.cfg deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/beacon_chain/el/el_conf.nim b/beacon_chain/el/el_conf.nim index 1e2dbc97c7..51832a89cd 100644 --- a/beacon_chain/el/el_conf.nim +++ b/beacon_chain/el/el_conf.nim @@ -1,18 +1,20 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. +{.push raises: [], gcsafe.} + import std/[options, uri], results, chronicles, confutils, confutils/toml/defs as confTomlDefs, - confutils/toml/std/net as confTomlNet, - confutils/toml/std/uri as confTomlUri, json_serialization, # for logging toml_serialization, toml_serialization/lexer, + toml_serialization/std/net as confTomlNet, + toml_serialization/std/uri as confTomlUri, ../spec/engine_authentication from std/strutils import toLowerAscii, split, startsWith diff --git a/beacon_chain/el/el_manager.nim b/beacon_chain/el/el_manager.nim index c88defa1ec..96d9dd4f20 100644 --- a/beacon_chain/el/el_manager.nim +++ b/beacon_chain/el/el_manager.nim @@ -5,10 +5,10 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} import - std/[strformat, typetraits, json, sequtils], + std/json, # Nimble packages: chronos, metrics, chronicles/timings, json_rpc/[client, errors], @@ -16,20 +16,20 @@ import eth/common/eth_types, results, kzg4844/[kzg_abi, kzg], - stew/[assign2, byteutils, objects], + stew/objects, # Local modules: - ../spec/[eth2_merkleization, forks], + ../spec/forks, ../networking/network_metadata, - ".."/beacon_node_status, - "."/[el_conf, engine_api_conversions, eth1_chain] + "."/[el_conf, engine_api_conversions] +from std/sequtils import anyIt, filterIt, mapIt from std/times import getTime, inSeconds, initTime, `-` +from std/typetraits import distinctBase from ../spec/engine_authentication import getSignedIatToken -from ../spec/helpers import bytes_to_uint64 from ../spec/state_transition_block import kzg_commitment_to_versioned_hash export - eth1_chain, el_conf, engine_api, base + el_conf, engine_api, base logScope: topics = "elman" @@ -39,141 +39,63 @@ const [100.milliseconds, 200.milliseconds, 500.milliseconds, 1.seconds] type - FixedBytes[N: static int] = web3.FixedBytes[N] - PubKeyBytes = DynamicBytes[48, 48] - WithdrawalCredentialsBytes = DynamicBytes[32, 32] - SignatureBytes = DynamicBytes[96, 96] - Int64LeBytes = DynamicBytes[8, 8] - WithoutTimeout* = distinct int - - DeadlineObject* = object - # TODO (cheatfate): This object declaration could be removed when - # `Raising()` macro starts to support procedure arguments. - future*: Future[void].Raising([CancelledError]) + WithoutTimeout = distinct int + + DeadlineFuture* = Future[void].Raising([CancelledError]) SomeEnginePayloadWithValue = BellatrixExecutionPayloadWithValue | GetPayloadV2Response | GetPayloadV3Response | - GetPayloadV4Response - -contract(DepositContract): - proc deposit(pubkey: PubKeyBytes, - withdrawalCredentials: WithdrawalCredentialsBytes, - signature: SignatureBytes, - deposit_data_root: FixedBytes[32]) - - proc get_deposit_root(): FixedBytes[32] - proc get_deposit_count(): Int64LeBytes - - proc DepositEvent(pubkey: PubKeyBytes, - withdrawalCredentials: WithdrawalCredentialsBytes, - amount: Int64LeBytes, - signature: SignatureBytes, - index: Int64LeBytes) {.event.} + GetPayloadV4Response | + GetPayloadV5Response const noTimeout = WithoutTimeout(0) - hasDepositRootChecks = defined(has_deposit_root_checks) - - targetBlocksPerLogsRequest = 1000'u64 - # TODO - # - # This is currently set to 1000, because this was the default maximum - # value in Besu circa our 22.3.0 release. Previously, we've used 5000, - # but this was effectively forcing the fallback logic in `syncBlockRange` - # to always execute multiple requests before getting a successful response. - # - # Besu have raised this default to 5000 in https://github.com/hyperledger/besu/pull/5209 - # which is expected to ship in their next release. - # - # Full deposits sync time with various values for this parameter: - # - # Blocks per request | Geth running on the same host | Geth running on a more distant host - # ---------------------------------------------------------------------------------------- - # 1000 | 11m 20s | 22m - # 5000 | 5m 20s | 15m 40s - # 100000 | 4m 10s | not tested - # - # The number of requests scales linearly with the parameter value as you would expect. - # - # These results suggest that it would be reasonable for us to get back to 5000 once the - # Besu release is well-spread within their userbase. 
# Engine API timeouts - engineApiConnectionTimeout = 5.seconds # How much we wait before giving up connecting to the Engine API - web3RequestsTimeout* = 8.seconds # How much we wait for eth_* requests (e.g. eth_getBlockByHash) + engineApiConnectionTimeout = 5.seconds # How long we wait before giving up connecting to the Engine API + web3RequestsTimeout = 8.seconds # How long we wait for eth_* requests - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#request-2 - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/shanghai.md#request-2 + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/paris.md#request-2 + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/shanghai.md#request-2 GETPAYLOAD_TIMEOUT = 1.seconds + GETBLOBS_TIMEOUT = 250.milliseconds + connectionStateChangeHysteresisThreshold = 15 ## How many unsuccesful/successful requests we must see ## before declaring the connection as degraded/restored type - NextExpectedPayloadParams* = object - headBlockHash*: Eth2Digest - safeBlockHash*: Eth2Digest - finalizedBlockHash*: Eth2Digest - payloadAttributes*: PayloadAttributesV3 - - ELManagerState* {.pure.} = enum - Running, Closing, Closed + NextExpectedPayloadParams = object + headBlockHash: Eth2Digest + safeBlockHash: Eth2Digest + finalizedBlockHash: Eth2Digest + payloadAttributes: PayloadAttributesV3 ELManager* = ref object eth1Network: Opt[Eth1Network] ## If this value is supplied the EL manager will check whether ## all configured EL nodes are connected to the same network. - depositContractAddress*: Eth1Address - depositContractBlockNumber: uint64 - depositContractBlockHash: Hash32 - - blocksPerLogsRequest: uint64 - ## This value is used to dynamically adjust the number of - ## blocks we are trying to download at once during deposit - ## syncing. By default, the value is set to the constant - ## `targetBlocksPerLogsRequest`, but if the EL is failing - ## to serve this number of blocks per single `eth_getLogs` - ## request, we temporarily lower the value until the request - ## succeeds. The failures are generally expected only in - ## periods in the history for very high deposit density. - elConnections: seq[ELConnection] ## All active EL connections - eth1Chain: Eth1Chain - ## At larger distances, this chain consists of all blocks - ## with deposits. Within the relevant voting period, it - ## also includes blocks without deposits because we must - ## vote for a block only if it's part of our known history. + checkChainIdLoopFut: Future[void] + nextExpectedPayloadParams: Opt[NextExpectedPayloadParams] - syncTargetBlock: Opt[Eth1BlockNumber] - - chainSyncingLoopFut: Future[void] - exchangeTransitionConfigurationLoopFut: Future[void] - managerState: ELManagerState - - nextExpectedPayloadParams*: Option[NextExpectedPayloadParams] - - EtcStatus {.pure.} = enum + ChainIdStatus {.pure.} = enum notExchangedYet mismatch match - DepositContractSyncStatus {.pure.} = enum - unknown - notSynced - synced - ELConnectionState {.pure.} = enum NeverTested Working Degraded - ELConnection* = ref object + ELConnection = ref object engineUrl: EngineApiUrl web3: Opt[Web3] @@ -184,45 +106,14 @@ type connectingFut: Future[Result[Web3, string]].Raising([CancelledError]) ## This future will be replaced when the connection is lost. - etcStatus: EtcStatus - ## The latest status of the `exchangeTransitionConfiguration` - ## exchange. 
+ chainIdStatus: ChainIdStatus + ## The latest status of the `checkChainId` exchange. state: ELConnectionState hysteresisCounter: int - - depositContractSyncStatus: DepositContractSyncStatus - ## Are we sure that this EL has synced the deposit contract? - lastPayloadId: Opt[Bytes8] - FullBlockId* = object - number: Eth1BlockNumber - hash: Hash32 - - DataProviderFailure* = object of CatchableError - CorruptDataProvider* = object of DataProviderFailure - DataProviderTimeout* = object of DataProviderFailure - DataProviderConnectionFailure* = object of DataProviderFailure - - DisconnectHandler* = proc () {.gcsafe, raises: [].} - - DepositEventHandler* = proc ( - pubkey: PubKeyBytes, - withdrawalCredentials: WithdrawalCredentialsBytes, - amount: Int64LeBytes, - signature: SignatureBytes, - merkleTreeIndex: Int64LeBytes, - j: JsonNode) {.gcsafe, raises: [].} - -declareCounter failed_web3_requests, - "Failed web3 requests" - -declareGauge eth1_latest_head, - "The highest Eth1 block number observed on the network" - -declareGauge eth1_synced_head, - "Block number of the highest synchronized block according to follow distance" + DataProviderTimeout* = object of CatchableError declareCounter engine_api_responses, "Number of successful requests to the newPayload Engine API end-point", @@ -241,10 +132,7 @@ declareCounter engine_api_last_minute_forkchoice_updates_sent, "Number of last minute requests to the forkchoiceUpdated Engine API end-point just before block proposals", labels = ["url"] -proc init*(t: typedesc[DeadlineObject], d: Duration): DeadlineObject = - DeadlineObject(future: sleepAsync(d)) - -proc variedSleep*( +proc variedSleep( counter: var int, durations: openArray[Duration] ): Future[void] {.async: (raises: [CancelledError], raw: true).} = @@ -271,14 +159,14 @@ proc close(connection: ELConnection): Future[void] {.async: (raises: []).} = debug "Failed to close execution layer", error = $exc.name, reason = $exc.msg -proc increaseCounterTowardsStateChange(connection: ELConnection): bool = +func increaseCounterTowardsStateChange(connection: ELConnection): bool = result = connection.hysteresisCounter >= connectionStateChangeHysteresisThreshold if result: connection.hysteresisCounter = 0 else: inc connection.hysteresisCounter -proc decreaseCounterTowardsStateChange(connection: ELConnection) = +func decreaseCounterTowardsStateChange(connection: ELConnection) = if connection.hysteresisCounter > 0: # While we increase the counter by 1, we decreate it by 20% in order # to require a steady and affirmative change instead of allowing @@ -378,26 +266,14 @@ func raiseIfNil(web3block: BlockObject): BlockObject {.raises: [ValueError].} = raise newException(ValueError, "EL returned 'null' result for block") web3block -template cfg(m: ELManager): auto = - m.eth1Chain.cfg - -func hasJwtSecret*(m: ELManager): bool = +func hasJwtSecret(m: ELManager): bool = for c in m.elConnections: if c.engineUrl.jwtSecret.isSome: return true - -func isSynced*(m: ELManager): bool = - m.syncTargetBlock.isSome and - m.eth1Chain.blocks.len > 0 and - m.syncTargetBlock.get <= m.eth1Chain.blocks[^1].number - -template eth1ChainBlocks*(m: ELManager): Deque[Eth1Block] = - m.eth1Chain.blocks + false # TODO: Add cfg validation # MIN_GENESIS_ACTIVE_VALIDATOR_COUNT should be larger than SLOTS_PER_EPOCH -# doAssert SECONDS_PER_ETH1_BLOCK * cfg.ETH1_FOLLOW_DISTANCE < GENESIS_DELAY, -# "Invalid configuration: GENESIS_DELAY is set too low" func isConnected(connection: ELConnection): bool = connection.web3.isSome @@ -455,26 +331,7 @@ proc 
connectedRpcClient(connection: ELConnection): Future[RpcClient] {. connection.web3.get.provider -proc getBlockByHash( - rpcClient: RpcClient, - hash: Hash32 -): Future[BlockObject] {.async: (raises: [CatchableError]).} = - await rpcClient.eth_getBlockByHash(hash, false) - -proc getBlockByNumber*( - rpcClient: RpcClient, - number: Eth1BlockNumber -): Future[BlockObject] {.async: (raises: [CatchableError]).} = - let hexNumber = try: - let num = distinctBase(number) - &"0x{num:X}" # No leading 0's! - except ValueError as exc: - # Since the format above is valid, failing here should not be possible - raiseAssert exc.msg - - await rpcClient.eth_getBlockByNumber(hexNumber, false) - -func areSameAs(expectedParams: Option[NextExpectedPayloadParams], +func areSameAs(expectedParams: Opt[NextExpectedPayloadParams], latestHead, latestSafe, latestFinalized: Eth2Digest, timestamp: uint64, randomData: Eth2Digest, @@ -511,7 +368,7 @@ proc getPayloadFromSingleEL( consensusHead: Eth2Digest, headBlock, safeBlock, finalizedBlock: Eth2Digest, timestamp: uint64, - randomData: Eth2Digest, + prevRandao: Eth2Digest, suggestedFeeRecipient: Eth1Address, withdrawals: seq[WithdrawalV1] ): Future[GetPayloadResponseType] {.async: (raises: [CatchableError]).} = @@ -531,7 +388,7 @@ proc getPayloadFromSingleEL( finalizedBlockHash: finalizedBlock.asBlockHash), Opt.some PayloadAttributesV1( timestamp: Quantity timestamp, - prevRandao: FixedBytes[32] randomData.data, + prevRandao: Bytes32 prevRandao.to(Hash32), suggestedFeeRecipient: suggestedFeeRecipient)) elif GetPayloadResponseType is engine_api.GetPayloadV2Response: let response = await rpcClient.forkchoiceUpdated( @@ -541,13 +398,15 @@ proc getPayloadFromSingleEL( finalizedBlockHash: finalizedBlock.asBlockHash), Opt.some PayloadAttributesV2( timestamp: Quantity timestamp, - prevRandao: FixedBytes[32] randomData.data, + prevRandao: Bytes32 prevRandao.to(Hash32), suggestedFeeRecipient: suggestedFeeRecipient, withdrawals: withdrawals)) elif GetPayloadResponseType is engine_api.GetPayloadV3Response or - GetPayloadResponseType is engine_api.GetPayloadV4Response: - # https://github.com/ethereum/execution-apis/blob/90a46e9137c89d58e818e62fa33a0347bba50085/src/engine/prague.md + GetPayloadResponseType is engine_api.GetPayloadV4Response or + GetPayloadResponseType is engine_api.GetPayloadV5Response: + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/prague.md # does not define any new forkchoiceUpdated, so reuse V3 from Dencun + # https://github.com/ethereum/execution-apis/blob/5d634063ccfd897a6974ea589c00e2c1d889abc9/src/engine/osaka.md let response = await rpcClient.forkchoiceUpdated( ForkchoiceStateV1( headBlockHash: headBlock.asBlockHash, @@ -555,7 +414,7 @@ proc getPayloadFromSingleEL( finalizedBlockHash: finalizedBlock.asBlockHash), Opt.some PayloadAttributesV3( timestamp: Quantity timestamp, - prevRandao: FixedBytes[32] randomData.data, + prevRandao: Bytes32 prevRandao.to(Hash32), suggestedFeeRecipient: suggestedFeeRecipient, withdrawals: withdrawals, parentBeaconBlockRoot: consensusHead.to(Hash32))) @@ -564,7 +423,7 @@ proc getPayloadFromSingleEL( if response.payloadStatus.status != PayloadExecutionStatus.valid or response.payloadId.isNone: - raise newException(CatchableError, "Head block is not a valid payload") + raise newException(CatchableError, "Head block is not a valid payload; " & $response) # Give the EL some time to assemble the block await sleepAsync(chronos.milliseconds 500) @@ -598,7 +457,7 @@ template EngineApiResponseType*(T: type 
electra.ExecutionPayloadForSigning): typ engine_api.GetPayloadV4Response template EngineApiResponseType*(T: type fulu.ExecutionPayloadForSigning): type = - engine_api.GetPayloadV4Response + engine_api.GetPayloadV5Response template toEngineWithdrawals*(withdrawals: seq[capella.Withdrawal]): seq[WithdrawalV1] = mapIt(withdrawals, toEngineWithdrawal(it)) @@ -618,18 +477,19 @@ proc getPayload*( consensusHead: Eth2Digest, headBlock, safeBlock, finalizedBlock: Eth2Digest, timestamp: uint64, - randomData: Eth2Digest, + prevRandao: Eth2Digest, suggestedFeeRecipient: Eth1Address, withdrawals: seq[capella.Withdrawal] ): Future[Opt[PayloadType]] {.async: (raises: [CancelledError]).} = if m.elConnections.len == 0: - return err() + notice "No engine configured, using empty payload" + return Opt.none(PayloadType) let engineApiWithdrawals = toEngineWithdrawals withdrawals isFcUpToDate = m.nextExpectedPayloadParams.areSameAs( headBlock, safeBlock, finalizedBlock, timestamp, - randomData, suggestedFeeRecipient, engineApiWithdrawals) + prevRandao, suggestedFeeRecipient, engineApiWithdrawals) # `getPayloadFromSingleEL` may introduce additional latency const extraProcessingOverhead = 500.milliseconds @@ -644,7 +504,7 @@ proc getPayload*( m.elConnections.mapIt( it.getPayloadFromSingleEL(EngineApiResponseType(PayloadType), isFcUpToDate, consensusHead, headBlock, safeBlock, finalizedBlock, - timestamp, randomData, suggestedFeeRecipient, engineApiWithdrawals)) + timestamp, prevRandao, suggestedFeeRecipient, engineApiWithdrawals)) let timeoutExceeded = try: @@ -722,110 +582,14 @@ proc getPayload*( requests.filterIt(not(it.finished())).mapIt(it.cancelAndWait()) await noCancel allFutures(pending) - when PayloadType.kind == ConsensusFork.Fulu: - if bestPayloadIdx.isSome(): - return ok(requests[bestPayloadIdx.get()].value().asConsensusTypeFulu) - else: - if bestPayloadIdx.isSome(): - return ok(requests[bestPayloadIdx.get()].value().asConsensusType) + if bestPayloadIdx.isSome(): + return ok(requests[bestPayloadIdx.get()].value().asConsensusType) if timeoutExceeded: break err() -proc waitELToSyncDeposits( - connection: ELConnection, - minimalRequiredBlock: Hash32 -) {.async: (raises: [CancelledError]).} = - var rpcClient: RpcClient = nil - - if connection.depositContractSyncStatus == DepositContractSyncStatus.synced: - return - - var attempt = 0 - - while true: - if isNil(rpcClient): - rpcClient = await connection.connectedRpcClient() - - try: - discard raiseIfNil await connection.engineApiRequest( - rpcClient.getBlockByHash(minimalRequiredBlock), - "getBlockByHash", Moment.now(), - web3RequestsTimeout, failureAllowed = true) - connection.depositContractSyncStatus = DepositContractSyncStatus.synced - return - except CancelledError as exc: - trace "waitELToSyncDepositContract interrupted", - url = connection.engineUrl.url - raise exc - except CatchableError as exc: - connection.depositContractSyncStatus = DepositContractSyncStatus.notSynced - if attempt == 0: - warn "Failed to obtain the most recent known block from the " & - "execution layer node (the node is probably not synced)", - url = connection.engineUrl.url, - blk = minimalRequiredBlock, - reason = exc.msg - elif attempt mod 60 == 0: - # This warning will be produced every 30 minutes - warn "Still failing to obtain the most recent known block from the " & - "execution layer node (the node is probably still not synced)", - url = connection.engineUrl.url, - blk = minimalRequiredBlock, - reason = exc.msg - inc(attempt) - await sleepAsync(seconds(30)) - rpcClient = 
nil - -func networkHasDepositContract(m: ELManager): bool = - not m.cfg.DEPOSIT_CONTRACT_ADDRESS.isDefaultValue - -func mostRecentKnownBlock(m: ELManager): Hash32 = - if m.eth1Chain.finalizedDepositsMerkleizer.getChunkCount() > 0: - m.eth1Chain.finalizedBlockHash.asBlockHash - else: - m.depositContractBlockHash - -proc selectConnectionForChainSyncing( - m: ELManager -): Future[ELConnection] {.async: (raises: [CancelledError, - DataProviderConnectionFailure]).} = - doAssert m.elConnections.len > 0 - - let pendingConnections = m.elConnections.mapIt( - if m.networkHasDepositContract: - FutureBase waitELToSyncDeposits(it, m.mostRecentKnownBlock) - else: - FutureBase connectedRpcClient(it)) - - while true: - var pendingFutures = pendingConnections - try: - discard await race(pendingFutures) - except ValueError: - raiseAssert "pendingFutures should not be empty at this moment" - except CancelledError as exc: - let pending = pendingConnections.filterIt(not(it.finished())). - mapIt(it.cancelAndWait()) - await noCancel allFutures(pending) - raise exc - - pendingFutures.reset() - for index, future in pendingConnections.pairs(): - if future.completed(): - let pending = pendingConnections.filterIt(not(it.finished())). - mapIt(it.cancelAndWait()) - await noCancel allFutures(pending) - return m.elConnections[index] - elif not(future.finished()): - pendingFutures.add(future) - - if len(pendingFutures) == 0: - raise newException(DataProviderConnectionFailure, - "Unable to establish connection for chain syncing") - proc sendNewPayloadToSingleEL( connection: ELConnection, payload: engine_api.ExecutionPayloadV1 @@ -844,24 +608,31 @@ proc sendNewPayloadToSingleEL( connection: ELConnection, payload: engine_api.ExecutionPayloadV3, versioned_hashes: seq[engine_api.VersionedHash], - parent_beacon_block_root: FixedBytes[32] + parent_beacon_block_root: Hash32 ): Future[PayloadStatusV1] {.async: (raises: [CatchableError]).} = let rpcClient = await connection.connectedRpcClient() await rpcClient.engine_newPayloadV3( - payload, versioned_hashes, Hash32 parent_beacon_block_root) + payload, versioned_hashes, parent_beacon_block_root) proc sendNewPayloadToSingleEL( connection: ELConnection, payload: engine_api.ExecutionPayloadV3, versioned_hashes: seq[engine_api.VersionedHash], - parent_beacon_block_root: FixedBytes[32], + parent_beacon_block_root: Hash32, executionRequests: seq[seq[byte]] ): Future[PayloadStatusV1] {.async: (raises: [CatchableError]).} = let rpcClient = await connection.connectedRpcClient() await rpcClient.engine_newPayloadV4( - payload, versioned_hashes, Hash32 parent_beacon_block_root, + payload, versioned_hashes, parent_beacon_block_root, executionRequests) +proc sendGetBlobsV2toSingleEl( + connection: ELConnection, + versioned_hashes: seq[engine_api.VersionedHash] +): Future[GetBlobsV2Response] {.async: (raises: [CatchableError]).} = + let rpcClient = await connection.connectedRpcClient() + await rpcClient.engine_getBlobsV2(versioned_hashes) + type StatusRelation = enum newStatusIsPreferable @@ -959,7 +730,7 @@ proc processResponse( url2 = connections[idx].engineUrl.url, status2 = status -proc couldBeBetter(d: ELConsensusViolationDetector): bool = +func couldBeBetter(d: ELConsensusViolationDetector): bool = const SyncingOrAccepted = { PayloadExecutionStatus.syncing, @@ -969,10 +740,7 @@ proc couldBeBetter(d: ELConsensusViolationDetector): bool = return false if d.selectedStatus.isNone(): return true - if d.selectedStatus.get() in SyncingOrAccepted: - true - else: - false + d.selectedStatus.get() in 
SyncingOrAccepted proc lazyWait(futures: seq[FutureBase]) {.async: (raises: []).} = block: @@ -990,69 +758,117 @@ proc lazyWait(futures: seq[FutureBase]) {.async: (raises: []).} = if len(pending) > 0: await noCancel allFutures(pending) +proc sendGetBlobsV2*( + m: ELManager, + blck: fulu.SignedBeaconBlock | gloas.SignedBeaconBlock, +): Future[Opt[seq[BlobAndProofV2]]] {.async: (raises: [CancelledError]).} = + if m.elConnections.len == 0: + return err() + + when blck is gloas.SignedBeaconBlock: + debugGloasComment "handle correctly for Gloas?" + return err() + else: + let deadline = sleepAsync(GETBLOBS_TIMEOUT) + + var bestIdx: Opt[int] + + while true: + let requests = m.elConnections.mapIt( + sendGetBlobsV2toSingleEl(it, + mapIt(blck.message.body.blob_kzg_commitments, + kzg_commitment_to_versioned_hash(it)) + ) + ) + + let timeoutExceeded = + try: + await allFutures(requests).wait(deadline) + false + except AsyncTimeoutError: + true + except CancelledError as exc: + # cancel anything still running, then re-raise + await noCancel allFutures( + requests.filterIt(not it.finished()).mapIt(it.cancelAndWait()) + ) + raise exc + + for idx, req in requests: + if req.finished(): + # choose the first successful (not failed) response + if req.error.isNil and bestIdx.isNone: + bestIdx = Opt.some(idx) + else: + # finished == false + let errmsg = + if req.error.isNil: "request still pending" + else: req.error.msg + warn "Timeout while getting blobs & proofs", + url = m.elConnections[idx].engineUrl.url, + reason = errmsg + + await noCancel allFutures( + requests.filterIt(not it.finished()).mapIt(it.cancelAndWait()) + ) + + if bestIdx.isSome(): + let chosen = requests[bestIdx.get()] + # chosen is finished; but could still be an error, so guard again + if chosen.error.isNil: + return ok(chosen.value()) + else: + warn "Chosen EL failed unexpectedly", reason = chosen.error.msg + if timeoutExceeded: + break + + err() + proc sendNewPayload*( m: ELManager, blck: SomeForkyBeaconBlock, - deadlineObj: DeadlineObject, - maxRetriesCount: int -): Future[PayloadExecutionStatus] {.async: (raises: [CancelledError]).} = - doAssert maxRetriesCount > 0 + deadline: DeadlineFuture, + retry: bool, +): Future[Opt[PayloadExecutionStatus]] {.async: (raises: [CancelledError]).} = + if m.elConnections.len == 0: + info "No execution client configured; cannot process block payloads", + executionPayload = shortLog(blck.body.execution_payload) + return Opt.none(PayloadExecutionStatus) + + const consensusFork = typeof(blck).kind let startTime = Moment.now() - deadline = deadlineObj.future - payload = blck.body.asEngineExecutionPayload + payload = blck.body.execution_payload.asEngineExecutionPayload + + when consensusFork >= ConsensusFork.Deneb: + let + versioned_hashes = blck.body.blob_kzg_commitments.asEngineVersionedHashes() + parent_root = blck.parent_root.to(Hash32) + + when consensusFork >= ConsensusFork.Electra: + let execution_requests = blck.body.execution_requests.asEngineExecutionRequests() + var responseProcessor = ELConsensusViolationDetector.init() sleepCounter = 0 - retriesCount = 0 while true: block mainLoop: - let - requests = m.elConnections.mapIt: - let req = - when typeof(blck).kind >= ConsensusFork.Electra: - # https://github.com/ethereum/execution-apis/blob/4140e528360fea53c34a766d86a000c6c039100e/src/engine/prague.md#engine_newpayloadv4 - let - versioned_hashes = mapIt( - blck.body.blob_kzg_commitments, - engine_api.VersionedHash(kzg_commitment_to_versioned_hash(it))) - # 
https://github.com/ethereum/execution-apis/blob/7c9772f95c2472ccfc6f6128dc2e1b568284a2da/src/engine/prague.md#request - # "Each list element is a `requests` byte array as defined by - # EIP-7685. The first byte of each element is the `request_type` - # and the remaining bytes are the `request_data`. Elements of - # the list MUST be ordered by `request_type` in ascending order. - # Elements with empty `request_data` MUST be excluded from the - # list." - execution_requests = block: - var requests: seq[seq[byte]] - for request_type, request_data in - [SSZ.encode(blck.body.execution_requests.deposits), - SSZ.encode(blck.body.execution_requests.withdrawals), - SSZ.encode(blck.body.execution_requests.consolidations)]: - if request_data.len > 0: - requests.add @[request_type.byte] & request_data - requests - - sendNewPayloadToSingleEL( - it, payload, versioned_hashes, - FixedBytes[32] blck.parent_root.data, execution_requests) - elif typeof(blck).kind == ConsensusFork.Deneb: - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.1/specs/deneb/beacon-chain.md#process_execution_payload - # Verify the execution payload is valid - # [Modified in Deneb] Pass `versioned_hashes` to Execution Engine - let versioned_hashes = mapIt( - blck.body.blob_kzg_commitments, - engine_api.VersionedHash(kzg_commitment_to_versioned_hash(it))) - sendNewPayloadToSingleEL( - it, payload, versioned_hashes, - FixedBytes[32] blck.parent_root.data) - elif typeof(blck).kind in [ConsensusFork.Bellatrix, ConsensusFork.Capella]: - sendNewPayloadToSingleEL(it, payload) - else: - static: doAssert false - engineApiRequest(it, req, "newPayload", startTime, noTimeout) + let requests = m.elConnections.mapIt: + let req = + when consensusFork >= ConsensusFork.Electra: + sendNewPayloadToSingleEL( + it, payload, versioned_hashes, parent_root, execution_requests + ) + elif consensusFork >= ConsensusFork.Deneb: + sendNewPayloadToSingleEL(it, payload, versioned_hashes, parent_root) + elif consensusFork >= ConsensusFork.Bellatrix: + sendNewPayloadToSingleEL(it, payload) + else: + {.error: "Unsupported fork " & $consensusFork.} + + engineApiRequest(it, req, "newPayload", startTime, noTimeout) var pendingRequests = requests @@ -1087,8 +903,9 @@ proc sendNewPayload*( pendingRequests.filterIt(not(it.finished())). mapIt(it.cancelAndWait()) await noCancel allFutures(pending) - return PayloadExecutionStatus.invalid - elif responseProcessor.selectedResponse.isSome(): + return Opt.some PayloadExecutionStatus.invalid + + if responseProcessor.selectedResponse.isSome(): if (len(pendingRequests) == 0) or not(responseProcessor.couldBeBetter()): # We spawn task which will wait for all other responses which are @@ -1096,7 +913,7 @@ proc sendNewPayload*( # cancelled. asyncSpawn lazyWait(pendingRequests.mapIt(FutureBase(it))) return - requests[responseProcessor.selectedResponse.get].value().status + Opt.some requests[responseProcessor.selectedResponse.get].value().status if timeoutExceeded: # Timeout exceeded, cancelling all pending requests. @@ -1104,26 +921,18 @@ proc sendNewPayload*( pendingRequests.filterIt(not(it.finished())). mapIt(it.cancelAndWait()) await noCancel allFutures(pending) - return PayloadExecutionStatus.syncing + return Opt.none(PayloadExecutionStatus) if len(pendingRequests) == 0: # All requests failed. 
- inc(retriesCount) - if retriesCount == maxRetriesCount: - return PayloadExecutionStatus.syncing + if not retry: + return Opt.none(PayloadExecutionStatus) # To avoid continous spam of requests when EL node is offline we # going to sleep until next attempt. await variedSleep(sleepCounter, SleepDurations) break mainLoop -proc sendNewPayload*( - m: ELManager, - blck: SomeForkyBeaconBlock -): Future[PayloadExecutionStatus] {. - async: (raises: [CancelledError], raw: true).} = - sendNewPayload(m, blck, DeadlineObject.init(NEWPAYLOAD_TIMEOUT), high(int)) - proc forkchoiceUpdatedForSingleEL( connection: ELConnection, state: ref ForkchoiceStateV1, @@ -1152,13 +961,11 @@ proc forkchoiceUpdated*( payloadAttributes: Opt[PayloadAttributesV1] | Opt[PayloadAttributesV2] | Opt[PayloadAttributesV3], - deadlineObj: DeadlineObject, - maxRetriesCount: int + deadline: DeadlineFuture, + retry: bool, ): Future[(PayloadExecutionStatus, Opt[Hash32])] {. async: (raises: [CancelledError]).} = - doAssert not headBlockHash.isZero - doAssert maxRetriesCount > 0 # Allow finalizedBlockHash to be 0 to avoid sync deadlocks. # @@ -1214,7 +1021,6 @@ proc forkchoiceUpdated*( safeBlockHash: safeBlockHash.asBlockHash, finalizedBlockHash: finalizedBlockHash.asBlockHash) startTime = Moment.now - deadline = deadlineObj.future var responseProcessor = ELConsensusViolationDetector.init() @@ -1264,13 +1070,11 @@ proc forkchoiceUpdated*( # matches, and similarly that if the fcU fails or times out for other # reasons, the expected payload params remain synchronized with # EL state. - assign( - m.nextExpectedPayloadParams, - some NextExpectedPayloadParams( - headBlockHash: headBlockHash, - safeBlockHash: safeBlockHash, - finalizedBlockHash: finalizedBlockHash, - payloadAttributes: payloadAttributesV3)) + m.nextExpectedPayloadParams = Opt.some NextExpectedPayloadParams( + headBlockHash: headBlockHash, + safeBlockHash: safeBlockHash, + finalizedBlockHash: finalizedBlockHash, + payloadAttributes: payloadAttributesV3) template getSelected: untyped = let data = requests[responseProcessor.selectedResponse.get].value() @@ -1302,7 +1106,7 @@ proc forkchoiceUpdated*( # All requests failed, we will continue our attempts until deadline # is not finished. 
inc(retriesCount) - if retriesCount == maxRetriesCount: + if not retry: return (PayloadExecutionStatus.syncing, Opt.none Hash32) # To avoid continous spam of requests when EL node is offline we @@ -1320,20 +1124,16 @@ proc forkchoiceUpdated*( async: (raises: [CancelledError], raw: true).} = forkchoiceUpdated( m, headBlockHash, safeBlockHash, finalizedBlockHash, - payloadAttributes, DeadlineObject.init(FORKCHOICEUPDATED_TIMEOUT), - high(int)) - -# TODO can't be defined within exchangeConfigWithSingleEL -func `==`(x, y: Quantity): bool {.borrow.} + payloadAttributes, sleepAsync(FORKCHOICEUPDATED_TIMEOUT), true) -proc exchangeConfigWithSingleEL( +proc checkChainIdWithSingleEL( m: ELManager, connection: ELConnection ) {.async: (raises: [CancelledError]).} = let rpcClient = await connection.connectedRpcClient() if m.eth1Network.isSome and - connection.etcStatus == EtcStatus.notExchangedYet: + connection.chainIdStatus == ChainIdStatus.notExchangedYet: try: let providerChain = await connection.engineApiRequest( @@ -1351,7 +1151,7 @@ proc exchangeConfigWithSingleEL( url = connection.engineUrl, expectedChain = distinctBase(expectedChain), actualChain = distinctBase(providerChain) - connection.etcStatus = EtcStatus.mismatch + connection.chainIdStatus = ChainIdStatus.mismatch return except CancelledError as exc: debug "Configuration exchange was interrupted" @@ -1361,15 +1161,15 @@ proc exchangeConfigWithSingleEL( # endpoint has been otherwise working. debug "Failed to obtain eth_chainId", reason = exc.msg - connection.etcStatus = EtcStatus.match + connection.chainIdStatus = ChainIdStatus.match -proc exchangeTransitionConfiguration*( +proc checkChainId( m: ELManager ) {.async: (raises: [CancelledError]).} = if m.elConnections.len == 0: return - let requests = m.elConnections.mapIt(m.exchangeConfigWithSingleEL(it)) + let requests = m.elConnections.mapIt(m.checkChainIdWithSingleEL(it)) try: await allFutures(requests).wait(3.seconds) except AsyncTimeoutError: @@ -1402,351 +1202,14 @@ proc exchangeTransitionConfiguration*( warn "Failed to exchange configuration with the configured EL end-points", completed = finished, failed = failed, timed_out = len(pending) -template readJsonField(logEvent, field: untyped, ValueType: type): untyped = - if logEvent.field.isNone: - raise newException(CatchableError, - "Web3 provider didn't return needed logEvent field " & astToStr(field)) - logEvent.field.get - -template init[N: static int](T: type DynamicBytes[N, N]): T = - T newSeq[byte](N) - -proc fetchTimestamp( - connection: ELConnection, - rpcClient: RpcClient, - blk: Eth1Block -) {.async: (raises: [CatchableError]).} = - debug "Fetching block timestamp", blockNum = blk.number - - let web3block = raiseIfNil await connection.engineApiRequest( - rpcClient.getBlockByHash(blk.hash.asBlockHash), - "getBlockByHash", Moment.now(), web3RequestsTimeout) - - blk.timestamp = Eth1BlockTimestamp(web3block.timestamp) - -func depositEventsToBlocks( - depositsList: openArray[JsonString] -): seq[Eth1Block] {.raises: [CatchableError].} = - var lastEth1Block: Eth1Block - - for logEventData in depositsList: - let - logEvent = JrpcConv.decode(logEventData.string, LogObject) - blockNumber = Eth1BlockNumber readJsonField(logEvent, blockNumber, Quantity) - blockHash = readJsonField(logEvent, blockHash, Hash32) - - if lastEth1Block == nil or lastEth1Block.number != blockNumber: - lastEth1Block = Eth1Block( - hash: blockHash.asEth2Digest, - number: blockNumber - # The `timestamp` is set in `syncBlockRange` immediately - # after calling this 
function, because we don't want to - # make this function `async` - ) - - result.add lastEth1Block - - var - pubkey = init PubKeyBytes - withdrawalCredentials = init WithdrawalCredentialsBytes - amount = init Int64LeBytes - signature = init SignatureBytes - index = init Int64LeBytes - - var offset = 0 - offset += decode(logEvent.data, 0, offset, pubkey) - offset += decode(logEvent.data, 0, offset, withdrawalCredentials) - offset += decode(logEvent.data, 0, offset, amount) - offset += decode(logEvent.data, 0, offset, signature) - offset += decode(logEvent.data, 0, offset, index) - - if pubkey.len != 48 or - withdrawalCredentials.len != 32 or - amount.len != 8 or - signature.len != 96 or - index.len != 8: - raise newException(CorruptDataProvider, - "Web3 provider supplied invalid deposit logs") - - lastEth1Block.deposits.add DepositData( - pubkey: ValidatorPubKey.init(pubkey.toArray), - withdrawal_credentials: Eth2Digest(data: withdrawalCredentials.toArray), - amount: bytes_to_uint64(amount.toArray).Gwei, - signature: ValidatorSig.init(signature.toArray)) - -type - DepositContractDataStatus = enum - Fetched - VerifiedCorrect - DepositRootIncorrect - DepositRootUnavailable - DepositCountIncorrect - DepositCountUnavailable - -when hasDepositRootChecks: - const - contractCallTimeout = 60.seconds - - proc fetchDepositContractData( - connection: ELConnection, - rpcClient: RpcClient, - depositContract: Sender[DepositContract], - blk: Eth1Block - ): Future[DepositContractDataStatus] {.async: (raises: [CancelledError]).} = - let - startTime = Moment.now() - deadline = sleepAsync(contractCallTimeout) - depositRootFut = - depositContract.get_deposit_root.call(blockNumber = blk.number) - rawCountFut = - depositContract.get_deposit_count.call(blockNumber = blk.number) - engineFut1 = connection.engineApiRequest( - depositRootFut, "get_deposit_root", startTime, deadline, - failureAllowed = true) - engineFut2 = connection.engineApiRequest( - rawCountFut, "get_deposit_count", startTime, deadline, - failureAllowed = true) - - try: - await allFutures(engineFut1, engineFut2) - except CancelledError as exc: - var pending: seq[Future[void]] - if not(engineFut1.finished()): - pending.add(engineFut1.cancelAndWait()) - if not(engineFut2.finished()): - pending.add(engineFut2.cancelAndWait()) - await noCancel allFutures(pending) - raise exc - - var res: DepositContractDataStatus - - try: - # `engineFut1` could hold timeout exception `DataProviderTimeout`. - discard engineFut1.read() - let fetchedRoot = asEth2Digest(depositRootFut.read()) - if blk.depositRoot.isZero: - blk.depositRoot = fetchedRoot - res = Fetched - elif blk.depositRoot == fetchedRoot: - res = VerifiedCorrect - else: - res = DepositRootIncorrect - except CatchableError as exc: - debug "Failed to fetch deposits root", block_number = blk.number, - reason = exc.msg - res = DepositRootUnavailable - - try: - # `engineFut2` could hold timeout exception `DataProviderTimeout`. 
- discard engineFut2.read() - let fetchedCount = bytes_to_uint64(rawCountFut.read().toArray) - if blk.depositCount == 0: - blk.depositCount = fetchedCount - elif blk.depositCount != fetchedCount: - res = DepositCountIncorrect - except CatchableError as exc: - debug "Failed to fetch deposits count", block_number = blk.number, - reason = exc.msg - res = DepositCountUnavailable - res - -template trackFinalizedState*(m: ELManager, - finalizedEth1Data: Eth1Data, - finalizedStateDepositIndex: uint64): bool = - trackFinalizedState(m.eth1Chain, finalizedEth1Data, finalizedStateDepositIndex) - -template getBlockProposalData*(m: ELManager, - state: ForkedHashedBeaconState, - finalizedEth1Data: Eth1Data, - finalizedStateDepositIndex: uint64): - BlockProposalEth1Data = - getBlockProposalData( - m.eth1Chain, state, finalizedEth1Data, finalizedStateDepositIndex) - func new*(T: type ELConnection, engineUrl: EngineApiUrl): T = - ELConnection( - engineUrl: engineUrl, - depositContractSyncStatus: DepositContractSyncStatus.unknown) - -proc new*(T: type ELManager, - cfg: RuntimeConfig, - depositContractBlockNumber: uint64, - depositContractBlockHash: Eth2Digest, - db: BeaconChainDB, + ELConnection(engineUrl: engineUrl) + +func new*(T: type ELManager, engineApiUrls: seq[EngineApiUrl], eth1Network: Opt[Eth1Network]): T = - let - eth1Chain = Eth1Chain.init( - cfg, db, depositContractBlockNumber, depositContractBlockHash) - - debug "Initializing ELManager", - depositContractBlockNumber, - depositContractBlockHash - - T(eth1Chain: eth1Chain, - depositContractAddress: cfg.DEPOSIT_CONTRACT_ADDRESS, - depositContractBlockNumber: depositContractBlockNumber, - depositContractBlockHash: depositContractBlockHash.asBlockHash, - elConnections: mapIt(engineApiUrls, ELConnection.new(it)), - eth1Network: eth1Network, - blocksPerLogsRequest: targetBlocksPerLogsRequest, - managerState: ELManagerState.Running) - -proc stop(m: ELManager) {.async: (raises: []).} = - if m.managerState notin {ELManagerState.Closing, ELManagerState.Closed}: - m.managerState = ELManagerState.Closing - var pending: seq[Future[void].Raising([])] - if not(m.chainSyncingLoopFut.isNil()) and - not(m.chainSyncingLoopFut.finished()): - pending.add(m.chainSyncingLoopFut.cancelAndWait()) - if not(m.exchangeTransitionConfigurationLoopFut.isNil()) and - not(m.exchangeTransitionConfigurationLoopFut.finished()): - pending.add(m.exchangeTransitionConfigurationLoopFut.cancelAndWait()) - for connection in m.elConnections: - pending.add(connection.close()) - await noCancel allFutures(pending) - m.managerState = ELManagerState.Closed - -const - votedBlocksSafetyMargin = 50 - -func earliestBlockOfInterest( - m: ELManager, - latestEth1BlockNumber: Eth1BlockNumber): Eth1BlockNumber = - let blocksOfInterestRange = - SLOTS_PER_ETH1_VOTING_PERIOD + - (2 * m.cfg.ETH1_FOLLOW_DISTANCE) + - votedBlocksSafetyMargin - - if latestEth1BlockNumber > blocksOfInterestRange.Eth1BlockNumber: - latestEth1BlockNumber - blocksOfInterestRange - else: - 0.Eth1BlockNumber - -proc syncBlockRange( - m: ELManager, - connection: ELConnection, - rpcClient: RpcClient, - depositContract: Sender[DepositContract], - fromBlock, toBlock, - fullSyncFromBlock: Eth1BlockNumber -) {.async: (raises: [CatchableError]).} = - doAssert m.eth1Chain.blocks.len > 0 - - var currentBlock = fromBlock - while currentBlock <= toBlock: - var - depositLogs: seq[JsonString] - maxBlockNumberRequested: Eth1BlockNumber - backoff = 100 - - while true: - maxBlockNumberRequested = - min(toBlock, currentBlock + 
m.blocksPerLogsRequest - 1) - - debug "Obtaining deposit log events", - fromBlock = currentBlock, - toBlock = maxBlockNumberRequested, - backoff - - debug.logTime "Deposit logs obtained": - # Reduce all request rate until we have a more general solution - # for dealing with Infura's rate limits - await sleepAsync(milliseconds(backoff)) - - depositLogs = - try: - await connection.engineApiRequest( - depositContract.getJsonLogs( - DepositEvent, - fromBlock = Opt.some blockId(currentBlock), - toBlock = Opt.some blockId(maxBlockNumberRequested)), - "getLogs", Moment.now(), 30.seconds) - except CancelledError as exc: - debug "Request for deposit logs was interrupted" - raise exc - except CatchableError as exc: - debug "Request for deposit logs failed", reason = exc.msg - inc failed_web3_requests - backoff = (backoff * 3) div 2 - m.blocksPerLogsRequest = m.blocksPerLogsRequest div 2 - if m.blocksPerLogsRequest == 0: - m.blocksPerLogsRequest = 1 - raise exc - continue - m.blocksPerLogsRequest = min( - (m.blocksPerLogsRequest * 3 + 1) div 2, - targetBlocksPerLogsRequest) - - currentBlock = maxBlockNumberRequested + 1 - break - - let blocksWithDeposits = depositEventsToBlocks(depositLogs) - - for i in 0 ..< blocksWithDeposits.len: - let blk = blocksWithDeposits[i] - if blk.number > fullSyncFromBlock: - try: - await fetchTimestamp(connection, rpcClient, blk) - except CancelledError as exc: - debug "Request for block timestamp was interrupted", - block_number = blk.number - raise exc - except CatchableError as exc: - debug "Request for block timestamp failed", - block_number = blk.number, reason = exc.msg - - let lastBlock = m.eth1Chain.blocks.peekLast - for n in max(lastBlock.number + 1, fullSyncFromBlock) ..< blk.number: - debug "Obtaining block without deposits", blockNum = n - let noDepositsBlock = - try: - raiseIfNil await connection.engineApiRequest( - rpcClient.getBlockByNumber(n), - "getBlockByNumber", Moment.now(), web3RequestsTimeout) - except CancelledError as exc: - debug "The process of obtaining the block was interrupted", - block_number = n - raise exc - except CatchableError as exc: - debug "Request for block failed", block_number = n, - reason = exc.msg - raise exc - - m.eth1Chain.addBlock( - lastBlock.makeSuccessorWithoutDeposits(noDepositsBlock)) - eth1_synced_head.set noDepositsBlock.number.toGaugeValue - - m.eth1Chain.addBlock blk - eth1_synced_head.set blk.number.toGaugeValue - - if blocksWithDeposits.len > 0: - let lastIdx = blocksWithDeposits.len - 1 - template lastBlock: auto = blocksWithDeposits[lastIdx] - - let status = - when hasDepositRootChecks: - await fetchDepositContractData( - connection, rpcClient, depositContract, lastBlock) - else: - DepositRootUnavailable - - when hasDepositRootChecks: - debug "Deposit contract state verified", - status = $status, - ourCount = lastBlock.depositCount, - ourRoot = lastBlock.depositRoot - - case status - of DepositRootIncorrect, DepositCountIncorrect: - raise newException(CorruptDataProvider, - "The deposit log events disagree with the deposit contract state") - else: - discard - - info "Eth1 sync progress", - blockNumber = lastBlock.number, - depositsProcessed = lastBlock.depositCount + T(elConnections: mapIt(engineApiUrls, ELConnection.new(it)), + eth1Network: eth1Network) func hasConnection*(m: ELManager): bool = m.elConnections.len > 0 @@ -1754,233 +1217,21 @@ func hasConnection*(m: ELManager): bool = func hasAnyWorkingConnection*(m: ELManager): bool = m.elConnections.anyIt(it.state == Working or it.state == NeverTested) -func 
hasProperlyConfiguredConnection*(m: ELManager): bool = - for connection in m.elConnections: - if connection.etcStatus == EtcStatus.match: - return true - - false - -proc startExchangeTransitionConfigurationLoop( +proc startCheckChainIdLoop( m: ELManager ) {.async: (raises: [CancelledError]).} = - debug "Starting exchange transition configuration loop" + debug "Starting chain ID checking loop" while true: - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#specification-3 - await m.exchangeTransitionConfiguration() + await m.checkChainId() await sleepAsync(60.seconds) -proc syncEth1Chain( - m: ELManager, - connection: ELConnection -) {.async: (raises: [CatchableError]).} = - let rpcClient = - try: - await connection.connectedRpcClient().wait(1.seconds) - except AsyncTimeoutError: - raise newException(DataProviderTimeout, "Connection timed out") - - let - # BEWARE - # `connectedRpcClient` guarantees that connection.web3 will not be - # `none` here, but it's not safe to initialize this later (e.g closer - # to where it's used) because `connection.web3` may be set to `none` - # at any time after a failed request. Luckily, the `contractSender` - # object is very cheap to create. - depositContract = connection.web3.get.contractSender( - DepositContract, m.depositContractAddress) - - shouldProcessDeposits = not ( - m.depositContractAddress.isZeroMemory or - m.eth1Chain.finalizedBlockHash.data.isZeroMemory) - - trace "Starting syncEth1Chain", shouldProcessDeposits - - logScope: - url = connection.engineUrl.url - - # We might need to reset the chain if the new provider disagrees - # with the previous one regarding the history of the chain or if - # we have detected a conensus violation - our view disagreeing with - # the majority of the validators in the network. - # - # Consensus violations happen in practice because the web3 providers - # sometimes return incomplete or incorrect deposit log events even - # when they don't indicate any errors in the response. When this - # happens, we are usually able to download the data successfully - # on the second attempt. - # - # TODO - # Perhaps the above problem was manifesting only with the obsolete - # JSON-RPC data providers, which can no longer be used with Nimbus. 
- if m.eth1Chain.blocks.len > 0: - let needsReset = m.eth1Chain.hasConsensusViolation or (block: - let - lastKnownBlock = m.eth1Chain.blocks.peekLast - matchingBlockAtNewEl = - try: - raiseIfNil await connection.engineApiRequest( - rpcClient.getBlockByNumber(lastKnownBlock.number), - "getBlockByNumber", Moment.now(), web3RequestsTimeout) - except CancelledError as exc: - debug "getBlockByNumber request has been interrupted", - last_known_block_number = lastKnownBlock.number - raise exc - except CatchableError as exc: - debug "getBlockByNumber request failed", - last_known_block_number = lastKnownBlock.number, - reason = exc.msg - raise exc - - lastKnownBlock.hash.asBlockHash != matchingBlockAtNewEl.hash) - - if needsReset: - trace "Resetting the Eth1 chain", - hasConsensusViolation = m.eth1Chain.hasConsensusViolation - m.eth1Chain.clear() - - var eth1SyncedTo: Eth1BlockNumber - if shouldProcessDeposits: - if m.eth1Chain.blocks.len == 0: - let finalizedBlockHash = m.eth1Chain.finalizedBlockHash.asBlockHash - let startBlock = - try: - raiseIfNil await connection.engineApiRequest( - rpcClient.getBlockByHash(finalizedBlockHash), - "getBlockByHash", Moment.now(), web3RequestsTimeout) - except CancelledError as exc: - debug "getBlockByHash() request has been interrupted", - finalized_block_hash = finalizedBlockHash - raise exc - except CatchableError as exc: - debug "getBlockByHash() request has failed", - finalized_block_hash = finalizedBlockHash, - reason = exc.msg - raise exc - - m.eth1Chain.addBlock Eth1Block( - hash: m.eth1Chain.finalizedBlockHash, - number: Eth1BlockNumber startBlock.number, - timestamp: Eth1BlockTimestamp startBlock.timestamp) - - eth1SyncedTo = m.eth1Chain.blocks[^1].number - - eth1_synced_head.set eth1SyncedTo.toGaugeValue - eth1_finalized_head.set eth1SyncedTo.toGaugeValue - eth1_finalized_deposits.set( - m.eth1Chain.finalizedDepositsMerkleizer.getChunkCount.toGaugeValue) - - debug "Starting Eth1 syncing", `from` = shortLog(m.eth1Chain.blocks[^1]) - - var latestBlockNumber: Eth1BlockNumber - while true: - debug "syncEth1Chain tick", - shouldProcessDeposits, latestBlockNumber, eth1SyncedTo - - # TODO (cheatfate): This should be removed - if bnStatus == BeaconNodeStatus.Stopping: - await noCancel m.stop() - return - - if m.eth1Chain.hasConsensusViolation: - raise newException(CorruptDataProvider, - "Eth1 chain contradicts Eth2 consensus") - - let latestBlock = - try: - raiseIfNil await connection.engineApiRequest( - rpcClient.eth_getBlockByNumber(blockId("latest"), false), - "getBlockByNumber", Moment.now(), web3RequestsTimeout) - except CancelledError as exc: - debug "Latest block request has been interrupted" - raise exc - except CatchableError as exc: - warn "Failed to obtain the latest block from the EL", reason = exc.msg - raise exc - - latestBlockNumber = latestBlock.number - - m.syncTargetBlock = Opt.some( - if latestBlock.number > m.cfg.ETH1_FOLLOW_DISTANCE.Eth1BlockNumber: - latestBlock.number - m.cfg.ETH1_FOLLOW_DISTANCE - else: - 0.Eth1BlockNumber) - if m.syncTargetBlock.get <= eth1SyncedTo: - # The chain reorged to a lower height. - # It's relatively safe to ignore that. 
- await sleepAsync(m.cfg.SECONDS_PER_ETH1_BLOCK.int.seconds) - continue - - eth1_latest_head.set latestBlock.number.toGaugeValue - - if shouldProcessDeposits and - latestBlock.number.uint64 > m.cfg.ETH1_FOLLOW_DISTANCE: - try: - await m.syncBlockRange(connection, - rpcClient, - depositContract, - eth1SyncedTo + 1, - m.syncTargetBlock.get, - m.earliestBlockOfInterest(latestBlock.number)) - except CancelledError as exc: - debug "Syncing block range process has been interrupted" - raise exc - except CatchableError as exc: - debug "Syncing block range process has been failed", reason = exc.msg - raise exc - - eth1SyncedTo = m.syncTargetBlock.get - eth1_synced_head.set eth1SyncedTo.toGaugeValue - -proc startChainSyncingLoop( - m: ELManager -) {.async: (raises: []).} = - info "Starting execution layer deposit syncing", - contract = $m.depositContractAddress - - var syncedConnectionFut = m.selectConnectionForChainSyncing() - info "Connection attempt started" - - var runLoop = true - while runLoop: - try: - let connection = await syncedConnectionFut.wait(60.seconds) - await syncEth1Chain(m, connection) - except AsyncTimeoutError: - notice "No synced EL nodes available for deposit syncing" - try: - await sleepAsync(chronos.seconds(30)) - except CancelledError: - runLoop = false - except CancelledError: - runLoop = false - except CatchableError: - try: - await sleepAsync(10.seconds) - except CancelledError: - runLoop = false - break - debug "Restarting the deposit syncing loop" - # A more detailed error is already logged by trackEngineApiRequest - # To be extra safe, we will make a fresh connection attempt - await syncedConnectionFut.cancelAndWait() - syncedConnectionFut = m.selectConnectionForChainSyncing() - - debug "EL chain syncing process has been stopped" - proc start*(m: ELManager, syncChain = true) {.gcsafe.} = if m.elConnections.len == 0: return - ## Calling `ELManager.start()` on an already started ELManager is a noop - if syncChain and m.chainSyncingLoopFut.isNil: - m.chainSyncingLoopFut = - m.startChainSyncingLoop() - - if m.hasJwtSecret and m.exchangeTransitionConfigurationLoopFut.isNil: - m.exchangeTransitionConfigurationLoopFut = - m.startExchangeTransitionConfigurationLoop() + if m.hasJwtSecret and m.checkChainIdLoopFut.isNil: + m.checkChainIdLoopFut = m.startCheckChainIdLoop() func `$`(x: Quantity): string = $(x.uint64) @@ -1989,9 +1240,7 @@ func `$`(x: BlockObject): string = $(x.number) & " [" & $(x.hash) & "]" proc testWeb3Provider*( - web3Url: Uri, - depositContractAddress: Eth1Address, - jwtSecret: Opt[seq[byte]] + web3Url: Uri, jwtSecret: Opt[seq[byte]] ) {.async: (raises: [CatchableError]).} = stdout.write "Establishing web3 connection..." 
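
The hunks above replace the old transition-configuration exchange with a plain chain-ID health check: every configured connection is probed concurrently, the whole batch is bounded by a short deadline, and the outcomes are tallied as completed, failed and timed-out before the loop sleeps and tries again. The following chronos sketch shows that fan-out-and-tally pattern in isolation; it is illustrative only and not part of the patch, and `probe`/`checkAll` are made-up stand-ins for the per-connection eth_chainId requests.

import chronos

proc probe(delay: Duration): Future[void] {.async.} =
  # Stand-in for one per-connection request.
  await sleepAsync(delay)

proc checkAll() {.async.} =
  let requests = @[probe(10.milliseconds), probe(50.milliseconds), probe(5.seconds)]
  try:
    # Bound the whole batch, like the 3-second window used by checkChainId,
    # instead of waiting for the slowest endpoint.
    await allFutures(requests).wait(100.milliseconds)
  except AsyncTimeoutError:
    discard                     # stragglers are tallied and cancelled below

  var completed, failed, pending = 0
  for fut in requests:
    if not fut.finished():
      inc pending
    elif fut.failed():
      inc failed
    else:
      inc completed
  echo "completed=", completed, " failed=", failed, " timed_out=", pending

  for fut in requests:
    if not fut.finished():
      await fut.cancelAndWait()

waitFor checkAll()
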
@@ -2026,12 +1275,3 @@ proc testWeb3Provider*( discard request "Sync status": web3.provider.eth_syncing() - - let - latestBlock = request "Latest block": - web3.provider.eth_getBlockByNumber(blockId("latest"), false) - - ns = web3.contractSender(DepositContract, depositContractAddress) - - discard request "Deposit root": - ns.get_deposit_root.call(blockNumber = latestBlock.number) diff --git a/beacon_chain/el/engine_api_conversions.nim b/beacon_chain/el/engine_api_conversions.nim index 39d5bc51f9..d8712fbdda 100644 --- a/beacon_chain/el/engine_api_conversions.nim +++ b/beacon_chain/el/engine_api_conversions.nim @@ -1,15 +1,16 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import kzg4844/[kzg_abi, kzg], ../spec/datatypes/[bellatrix, capella, deneb, electra, fulu], + ../spec/[eth2_ssz_serialization, state_transition_block], web3/[engine_api, engine_api_types] from std/sequtils import mapIt @@ -29,14 +30,14 @@ func asConsensusWithdrawal*(w: WithdrawalV1): capella.Withdrawal = capella.Withdrawal( index: w.index.uint64, validator_index: w.validatorIndex.uint64, - address: ExecutionAddress(data: w.address.distinctBase), + address: w.address, amount: Gwei w.amount) func asEngineWithdrawal(w: capella.Withdrawal): WithdrawalV1 = WithdrawalV1( index: Quantity(w.index), validatorIndex: Quantity(w.validator_index), - address: Address(w.address.data), + address: w.address, amount: Quantity(w.amount)) func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1): @@ -46,8 +47,7 @@ func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1): bellatrix.ExecutionPayload( parent_hash: rpcExecutionPayload.parentHash.asEth2Digest, - feeRecipient: - ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase), + feeRecipient: rpcExecutionPayload.feeRecipient, state_root: rpcExecutionPayload.stateRoot.asEth2Digest, receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest, logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase), @@ -78,8 +78,7 @@ func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1OrV2|ExecutionPaylo capella.ExecutionPayload( parent_hash: rpcExecutionPayload.parentHash.asEth2Digest, - feeRecipient: - ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase), + feeRecipient: rpcExecutionPayload.feeRecipient, state_root: rpcExecutionPayload.stateRoot.asEth2Digest, receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest, logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase), @@ -109,62 +108,7 @@ func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV3): deneb.ExecutionPayload( parent_hash: rpcExecutionPayload.parentHash.asEth2Digest, - feeRecipient: - ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase), - state_root: rpcExecutionPayload.stateRoot.asEth2Digest, - receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest, - logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase), - prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest, - block_number: rpcExecutionPayload.blockNumber.uint64, - 
gas_limit: rpcExecutionPayload.gasLimit.uint64, - gas_used: rpcExecutionPayload.gasUsed.uint64, - timestamp: rpcExecutionPayload.timestamp.uint64, - extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.data), - base_fee_per_gas: rpcExecutionPayload.baseFeePerGas, - block_hash: rpcExecutionPayload.blockHash.asEth2Digest, - transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init( - mapIt(rpcExecutionPayload.transactions, it.getTransaction)), - withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init( - mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)), - blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64, - excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64) - -func asElectraConsensusPayload(rpcExecutionPayload: ExecutionPayloadV3): - electra.ExecutionPayload = - template getTransaction(tt: TypedTransaction): bellatrix.Transaction = - bellatrix.Transaction.init(tt.distinctBase) - - electra.ExecutionPayload( - parent_hash: rpcExecutionPayload.parentHash.asEth2Digest, - feeRecipient: - ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase), - state_root: rpcExecutionPayload.stateRoot.asEth2Digest, - receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest, - logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase), - prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest, - block_number: rpcExecutionPayload.blockNumber.uint64, - gas_limit: rpcExecutionPayload.gasLimit.uint64, - gas_used: rpcExecutionPayload.gasUsed.uint64, - timestamp: rpcExecutionPayload.timestamp.uint64, - extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.data), - base_fee_per_gas: rpcExecutionPayload.baseFeePerGas, - block_hash: rpcExecutionPayload.blockHash.asEth2Digest, - transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init( - mapIt(rpcExecutionPayload.transactions, it.getTransaction)), - withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init( - mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)), - blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64, - excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64) - -func asFuluConsensusPayload(rpcExecutionPayload: ExecutionPayloadV3): - fulu.ExecutionPayload = - template getTransaction(tt: TypedTransaction): bellatrix.Transaction = - bellatrix.Transaction.init(tt.distinctBase) - - fulu.ExecutionPayload( - parent_hash: rpcExecutionPayload.parentHash.asEth2Digest, - feeRecipient: - ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase), + feeRecipient: rpcExecutionPayload.feeRecipient, state_root: rpcExecutionPayload.stateRoot.asEth2Digest, receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest, logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase), @@ -196,7 +140,7 @@ func asConsensusType*(payload: engine_api.GetPayloadV3Response): commitments: KzgCommitments.init( payload.blobsBundle.commitments.mapIt( kzg_abi.KzgCommitment(bytes: it.data))), - proofs: KzgProofs.init( + proofs: deneb.KzgProofs.init( payload.blobsBundle.proofs.mapIt( kzg_abi.KzgProof(bytes: it.data))), blobs: Blobs.init( @@ -206,7 +150,7 @@ func asConsensusType*( payload: engine_api.GetPayloadV4Response): electra.ExecutionPayloadForSigning = electra.ExecutionPayloadForSigning( - executionPayload: payload.executionPayload.asElectraConsensusPayload, + executionPayload: payload.executionPayload.asConsensusType(), blockValue: payload.blockValue, # TODO # The `mapIt` 
calls below are necessary only because we use different distinct @@ -216,44 +160,41 @@ func asConsensusType*( commitments: KzgCommitments.init( payload.blobsBundle.commitments.mapIt( kzg_abi.KzgCommitment(bytes: it.data))), - proofs: KzgProofs.init( + proofs: deneb.KzgProofs.init( payload.blobsBundle.proofs.mapIt( kzg_abi.KzgProof(bytes: it.data))), blobs: Blobs.init( payload.blobsBundle.blobs.mapIt(it.data))), executionRequests: payload.executionRequests) -func asConsensusTypeFulu*( - payload: GetPayloadV4Response): - fulu.ExecutionPayloadForSigning = +func asConsensusType*( + payload: GetPayloadV5Response): fulu.ExecutionPayloadForSigning = fulu.ExecutionPayloadForSigning( - executionPayload: payload.executionPayload.asFuluConsensusPayload, + executionPayload: payload.executionPayload.asConsensusType, blockValue: payload.blockValue, # TODO # The `mapIt` calls below are necessary only because we use different distinct # types for KZG commitments and Blobs in the `web3` and the `deneb` spec types. # Both are defined as `array[N, byte]` under the hood. - blobsBundle: deneb.BlobsBundle( + blobsBundle: fulu.BlobsBundle( commitments: KzgCommitments.init( payload.blobsBundle.commitments.mapIt( kzg_abi.KzgCommitment(bytes: it.data))), - proofs: KzgProofs.init( + proofs: fulu.KzgProofs.init( payload.blobsBundle.proofs.mapIt( kzg_abi.KzgProof(bytes: it.data))), blobs: Blobs.init( payload.blobsBundle.blobs.mapIt(it.data))), executionRequests: payload.executionRequests) -func asEngineExecutionPayload*(blockBody: bellatrix.BeaconBlockBody): +func asEngineExecutionPayload*(executionPayload: bellatrix.ExecutionPayload): ExecutionPayloadV1 = - template executionPayload(): untyped = blockBody.execution_payload - template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction = TypedTransaction(tt.distinctBase) engine_api.ExecutionPayloadV1( parentHash: executionPayload.parent_hash.asBlockHash, - feeRecipient: Address(executionPayload.fee_recipient.data), + feeRecipient: executionPayload.fee_recipient, stateRoot: executionPayload.state_root.asBlockHash, receiptsRoot: executionPayload.receipts_root.asBlockHash, logsBloom: @@ -272,18 +213,16 @@ template toEngineWithdrawal*(w: capella.Withdrawal): WithdrawalV1 = WithdrawalV1( index: Quantity(w.index), validatorIndex: Quantity(w.validator_index), - address: Address(w.address.data), + address: w.address, amount: Quantity(w.amount)) -func asEngineExecutionPayload*(blockBody: capella.BeaconBlockBody): +func asEngineExecutionPayload*(executionPayload: capella.ExecutionPayload): ExecutionPayloadV2 = - template executionPayload(): untyped = blockBody.execution_payload - template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction = TypedTransaction(tt.distinctBase) engine_api.ExecutionPayloadV2( parentHash: executionPayload.parent_hash.asBlockHash, - feeRecipient: Address(executionPayload.fee_recipient.data), + feeRecipient: executionPayload.fee_recipient, stateRoot: executionPayload.state_root.asBlockHash, receiptsRoot: executionPayload.receipts_root.asBlockHash, logsBloom: @@ -299,18 +238,14 @@ func asEngineExecutionPayload*(blockBody: capella.BeaconBlockBody): transactions: mapIt(executionPayload.transactions, it.getTypedTransaction), withdrawals: mapIt(executionPayload.withdrawals, it.toEngineWithdrawal)) -func asEngineExecutionPayload*( - blockBody: deneb.BeaconBlockBody | electra.BeaconBlockBody | - fulu.BeaconBlockBody): +func asEngineExecutionPayload*(executionPayload: deneb.ExecutionPayload): ExecutionPayloadV3 = - template 
executionPayload(): untyped = blockBody.execution_payload - template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction = TypedTransaction(tt.distinctBase) engine_api.ExecutionPayloadV3( parentHash: executionPayload.parent_hash.asBlockHash, - feeRecipient: Address(executionPayload.fee_recipient.data), + feeRecipient: executionPayload.fee_recipient, stateRoot: executionPayload.state_root.asBlockHash, receiptsRoot: executionPayload.receipts_root.asBlockHash, logsBloom: @@ -327,3 +262,31 @@ func asEngineExecutionPayload*( withdrawals: mapIt(executionPayload.withdrawals, it.asEngineWithdrawal), blobGasUsed: Quantity(executionPayload.blob_gas_used), excessBlobGas: Quantity(executionPayload.excess_blob_gas)) + +proc asEngineVersionedHashes*( + blob_kzg_commitments: KzgCommitments +): seq[VersionedHash] = + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.1/specs/deneb/beacon-chain.md#process_execution_payload + + mapIt(blob_kzg_commitments, kzg_commitment_to_versioned_hash(it)) + +proc asEngineExecutionRequests*( + execution_requests: electra.ExecutionRequests +): seq[seq[byte]] = + # https://github.com/ethereum/execution-apis/blob/7c9772f95c2472ccfc6f6128dc2e1b568284a2da/src/engine/prague.md#request + # "Each list element is a `requests` byte array as defined by + # EIP-7685. The first byte of each element is the `request_type` + # and the remaining bytes are the `request_data`. Elements of + # the list MUST be ordered by `request_type` in ascending order. + # Elements with empty `request_data` MUST be excluded from the + # list." + + var requests: seq[seq[byte]] + for request_type, request_data in [ + SSZ.encode(execution_requests.deposits), + SSZ.encode(execution_requests.withdrawals), + SSZ.encode(execution_requests.consolidations), + ]: + if request_data.len > 0: + requests.add @[request_type.byte] & request_data + requests diff --git a/beacon_chain/el/eth1_chain.nim b/beacon_chain/el/eth1_chain.nim deleted file mode 100644 index 5383bafe1f..0000000000 --- a/beacon_chain/el/eth1_chain.nim +++ /dev/null @@ -1,405 +0,0 @@ -# beacon_chain -# Copyright (c) 2018-2025 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. 
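
The `asEngineVersionedHashes` and `asEngineExecutionRequests` helpers added to `engine_api_conversions.nim` above implement the EIP-7685 rule quoted in the comment: each per-type payload is prefixed with its request-type byte, types appear in ascending order, and empty payloads are excluded. A self-contained sketch of that packing step, with made-up byte payloads; illustrative only, not part of the patch.

proc packRequests(perType: openArray[seq[byte]]): seq[seq[byte]] =
  for requestType, requestData in perType:
    if requestData.len > 0:                  # empty request_data MUST be excluded
      result.add(@[requestType.byte] & requestData)

when isMainModule:
  let
    deposits       = @[0xAA'u8, 0xBB]        # request type 0
    withdrawals    = newSeq[byte]()          # request type 1, empty, so dropped
    consolidations = @[0xCC'u8]              # request type 2
    packed = packRequests([deposits, withdrawals, consolidations])
  doAssert packed == @[@[0x00'u8, 0xAA, 0xBB], @[0x02'u8, 0xCC]]
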
- -{.push raises: [].} - -import - std/[deques, tables, strformat], - chronicles, metrics, - ../beacon_chain_db, - ../spec/[deposit_snapshots, digest, eth2_merkleization, forks, network], - ../spec/datatypes/base, - web3/[conversions, eth_api_types], - ./merkle_minimal - -from ./engine_api_conversions import asBlockHash, asEth2Digest - -export beacon_chain_db, deques, digest, base, forks - -logScope: - topics = "elchain" - -declarePublicGauge eth1_finalized_head, - "Block number of the highest Eth1 block finalized by Eth2 consensus" - -declarePublicGauge eth1_finalized_deposits, - "Number of deposits that were finalized by the Eth2 consensus" - -declareGauge eth1_chain_len, - "The length of the in-memory chain of Eth1 blocks" - -template toGaugeValue*(x: Quantity): int64 = - toGaugeValue(distinctBase x) - -type - Eth1BlockNumber* = Quantity - Eth1BlockTimestamp* = uint64 - - Eth1BlockObj* = object - hash*: Eth2Digest - number*: Eth1BlockNumber - timestamp*: Eth1BlockTimestamp - ## Basic properties of the block - ## These must be initialized in the constructor - - deposits*: seq[DepositData] - ## Deposits inside this particular block - - depositRoot*: Eth2Digest - depositCount*: uint64 - ## Global deposits count and hash tree root of the entire sequence - ## These are computed when the block is added to the chain (see `addBlock`) - - Eth1Block* = ref Eth1BlockObj - - Eth1Chain* = object - db: BeaconChainDB - cfg*: RuntimeConfig - finalizedBlockHash*: Eth2Digest - finalizedDepositsMerkleizer*: DepositsMerkleizer - ## The latest block that reached a 50% majority vote from - ## the Eth2 validators according to the follow distance and - ## the ETH1_VOTING_PERIOD - - blocks*: Deque[Eth1Block] - ## A non-forkable chain of blocks ending at the block with - ## ETH1_FOLLOW_DISTANCE offset from the head. 
- - blocksByHash: Table[Hash32, Eth1Block] - - headMerkleizer: DepositsMerkleizer - ## Merkleizer state after applying all `blocks` - - hasConsensusViolation*: bool - ## The local chain contradicts the observed consensus on the network - - BlockProposalEth1Data* = object - vote*: Eth1Data - deposits*: seq[Deposit] - hasMissingDeposits*: bool - -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#get_eth1_data -func compute_time_at_slot(genesis_time: uint64, slot: Slot): uint64 = - genesis_time + slot * SECONDS_PER_SLOT - -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#get_eth1_data -func voting_period_start_time(state: ForkedHashedBeaconState): uint64 = - let eth1_voting_period_start_slot = - getStateField(state, slot) - getStateField(state, slot) mod - SLOTS_PER_ETH1_VOTING_PERIOD.uint64 - compute_time_at_slot( - getStateField(state, genesis_time), eth1_voting_period_start_slot) - -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#get_eth1_data -func is_candidate_block(cfg: RuntimeConfig, - blk: Eth1Block, - period_start: uint64): bool = - (blk.timestamp + cfg.SECONDS_PER_ETH1_BLOCK * cfg.ETH1_FOLLOW_DISTANCE <= period_start) and - (blk.timestamp + cfg.SECONDS_PER_ETH1_BLOCK * cfg.ETH1_FOLLOW_DISTANCE * 2 >= period_start) - -func shortLog*(b: Eth1Block): string = - try: - &"{b.number}:{shortLog b.hash}(deposits = {b.depositCount})" - except ValueError as exc: raiseAssert exc.msg - -template findBlock(chain: Eth1Chain, eth1Data: Eth1Data): Eth1Block = - getOrDefault(chain.blocksByHash, asBlockHash(eth1Data.block_hash), nil) - -func makeSuccessorWithoutDeposits*(existingBlock: Eth1Block, - successor: BlockObject): Eth1Block = - Eth1Block( - hash: successor.hash.asEth2Digest, - number: Eth1BlockNumber successor.number, - timestamp: Eth1BlockTimestamp successor.timestamp) - -func latestCandidateBlock(chain: Eth1Chain, periodStart: uint64): Eth1Block = - for i in countdown(chain.blocks.len - 1, 0): - let blk = chain.blocks[i] - if is_candidate_block(chain.cfg, blk, periodStart): - return blk - -proc popFirst(chain: var Eth1Chain) = - let removed = chain.blocks.popFirst - chain.blocksByHash.del removed.hash.asBlockHash - eth1_chain_len.set chain.blocks.len.int64 - -proc addBlock*(chain: var Eth1Chain, newBlock: Eth1Block) = - for deposit in newBlock.deposits: - chain.headMerkleizer.addChunk hash_tree_root(deposit).data - - newBlock.depositCount = chain.headMerkleizer.getChunkCount - newBlock.depositRoot = chain.headMerkleizer.getDepositsRoot - - chain.blocks.addLast newBlock - chain.blocksByHash[newBlock.hash.asBlockHash] = newBlock - - eth1_chain_len.set chain.blocks.len.int64 - -func toVoteData(blk: Eth1Block): Eth1Data = - Eth1Data( - deposit_root: blk.depositRoot, - deposit_count: blk.depositCount, - block_hash: blk.hash) - -func hash*(x: Eth1Data): Hash = - hash(x.block_hash) - -proc pruneOldBlocks(chain: var Eth1Chain, depositIndex: uint64) = - ## Called on block finalization to delete old and now redundant data. 
- let initialChunks = chain.finalizedDepositsMerkleizer.getChunkCount - var lastBlock: Eth1Block - - while chain.blocks.len > 0: - let blk = chain.blocks.peekFirst - if blk.depositCount >= depositIndex: - break - else: - for deposit in blk.deposits: - chain.finalizedDepositsMerkleizer.addChunk hash_tree_root(deposit).data - chain.popFirst() - lastBlock = blk - - if chain.finalizedDepositsMerkleizer.getChunkCount > initialChunks: - chain.finalizedBlockHash = lastBlock.hash - chain.db.putDepositContractSnapshot DepositContractSnapshot( - eth1Block: lastBlock.hash, - depositContractState: chain.finalizedDepositsMerkleizer.toDepositContractState, - blockHeight: distinctBase(lastBlock.number)) - - eth1_finalized_head.set lastBlock.number.toGaugeValue - eth1_finalized_deposits.set lastBlock.depositCount.toGaugeValue - - debug "Eth1 blocks pruned", - newTailBlock = lastBlock.hash, - depositsCount = lastBlock.depositCount - -func advanceMerkleizer(chain: Eth1Chain, - merkleizer: var DepositsMerkleizer, - depositIndex: uint64): bool = - if chain.blocks.len == 0: - return depositIndex == merkleizer.getChunkCount - - if chain.blocks.peekLast.depositCount < depositIndex: - return false - - let - firstBlock = chain.blocks[0] - depositsInLastPrunedBlock = firstBlock.depositCount - - firstBlock.deposits.lenu64 - - # advanceMerkleizer should always be called shortly after prunning the chain - doAssert depositsInLastPrunedBlock == merkleizer.getChunkCount - - for blk in chain.blocks: - for deposit in blk.deposits: - if merkleizer.getChunkCount < depositIndex: - merkleizer.addChunk hash_tree_root(deposit).data - else: - return true - - return merkleizer.getChunkCount == depositIndex - -iterator getDepositsRange*(chain: Eth1Chain, first, last: uint64): DepositData = - # TODO It's possible to make this faster by performing binary search that - # will locate the blocks holding the `first` and `last` indices. - # TODO There is an assumption here that the requested range will be present - # in the Eth1Chain. This should hold true at the call sites right now, - # but we need to guard the pre-conditions better. - for blk in chain.blocks: - if blk.depositCount <= first: - continue - - let firstDepositIdxInBlk = blk.depositCount - blk.deposits.lenu64 - if firstDepositIdxInBlk >= last: - break - - for i in 0 ..< blk.deposits.lenu64: - let globalIdx = firstDepositIdxInBlk + i - if globalIdx >= first and globalIdx < last: - yield blk.deposits[i] - -func lowerBound(chain: Eth1Chain, depositCount: uint64): Eth1Block = - # TODO: This can be replaced with a proper binary search in the - # future, but the `algorithm` module currently requires an - # `openArray`, which the `deques` module can't provide yet. - for eth1Block in chain.blocks: - if eth1Block.depositCount > depositCount: - return - result = eth1Block - -proc trackFinalizedState*(chain: var Eth1Chain, - finalizedEth1Data: Eth1Data, - finalizedStateDepositIndex: uint64, - blockProposalExpected = false): bool = - ## This function will return true if the ELManager is synced - ## to the finalization point. 
- - if chain.blocks.len == 0: - debug "Eth1 chain not initialized" - return false - - let latest = chain.blocks.peekLast - if latest.depositCount < finalizedEth1Data.deposit_count: - if blockProposalExpected: - error "The Eth1 chain is not synced", - ourDepositsCount = latest.depositCount, - targetDepositsCount = finalizedEth1Data.deposit_count - return false - - let matchingBlock = chain.lowerBound(finalizedEth1Data.deposit_count) - result = if matchingBlock != nil: - if matchingBlock.depositRoot == finalizedEth1Data.deposit_root: - true - else: - error "Corrupted deposits history detected", - ourDepositsCount = matchingBlock.depositCount, - targetDepositsCount = finalizedEth1Data.deposit_count, - ourDepositsRoot = matchingBlock.depositRoot, - targetDepositsRoot = finalizedEth1Data.deposit_root - chain.hasConsensusViolation = true - false - else: - error "The Eth1 chain is in inconsistent state", - checkpointHash = finalizedEth1Data.block_hash, - checkpointDeposits = finalizedEth1Data.deposit_count, - localChainStart = shortLog(chain.blocks.peekFirst), - localChainEnd = shortLog(chain.blocks.peekLast) - chain.hasConsensusViolation = true - false - - if result: - chain.pruneOldBlocks(finalizedStateDepositIndex) - -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#get_eth1_data -proc getBlockProposalData*(chain: var Eth1Chain, - state: ForkedHashedBeaconState, - finalizedEth1Data: Eth1Data, - finalizedStateDepositIndex: uint64): BlockProposalEth1Data = - let - periodStart = voting_period_start_time(state) - hasLatestDeposits = chain.trackFinalizedState(finalizedEth1Data, - finalizedStateDepositIndex, - blockProposalExpected = true) - - var otherVotesCountTable = initCountTable[Eth1Data]() - for vote in getStateField(state, eth1_data_votes): - let eth1Block = chain.findBlock(vote) - if eth1Block != nil and - eth1Block.depositRoot == vote.deposit_root and - vote.deposit_count >= getStateField(state, eth1_data).deposit_count and - is_candidate_block(chain.cfg, eth1Block, periodStart): - otherVotesCountTable.inc vote - else: - debug "Ignoring eth1 vote", - root = vote.block_hash, - deposits = vote.deposit_count, - depositsRoot = vote.deposit_root, - localDeposits = getStateField(state, eth1_data).deposit_count - - let - stateDepositIdx = getStateField(state, eth1_deposit_index) - stateDepositsCount = getStateField(state, eth1_data).deposit_count - - # A valid state should never have this condition, but it doesn't hurt - # to be extra defensive here because we are working with uint types - var pendingDepositsCount = if stateDepositsCount > stateDepositIdx: - stateDepositsCount - stateDepositIdx - else: - 0 - - if otherVotesCountTable.len > 0: - let (winningVote, votes) = otherVotesCountTable.largest - debug "Voting on eth1 head with majority", votes - result.vote = winningVote - if uint64((votes + 1) * 2) > SLOTS_PER_ETH1_VOTING_PERIOD: - pendingDepositsCount = winningVote.deposit_count - stateDepositIdx - - else: - let latestBlock = chain.latestCandidateBlock(periodStart) - if latestBlock == nil: - debug "No acceptable eth1 votes and no recent candidates. Voting no change" - result.vote = getStateField(state, eth1_data) - else: - debug "No acceptable eth1 votes. 
Voting for latest candidate" - result.vote = latestBlock.toVoteData - - if pendingDepositsCount > 0: - if hasLatestDeposits: - let - totalDepositsInNewBlock = - withState(state): - when consensusFork >= ConsensusFork.Electra: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/validator.md#deposits - let eth1_deposit_index_limit = min( - forkyState.data.eth1_data.deposit_count, - forkyState.data.deposit_requests_start_index) - if forkyState.data.eth1_deposit_index < eth1_deposit_index_limit: - min(MAX_DEPOSITS, pendingDepositsCount) - else: - 0 - else: - min(MAX_DEPOSITS, pendingDepositsCount) - postStateDepositIdx = stateDepositIdx + pendingDepositsCount - var - deposits = newSeqOfCap[DepositData](totalDepositsInNewBlock) - depositRoots = newSeqOfCap[Eth2Digest](pendingDepositsCount) - for data in chain.getDepositsRange(stateDepositIdx, postStateDepositIdx): - if deposits.lenu64 < totalDepositsInNewBlock: - deposits.add data - depositRoots.add hash_tree_root(data) - - var scratchMerkleizer = chain.finalizedDepositsMerkleizer - if chain.advanceMerkleizer(scratchMerkleizer, stateDepositIdx): - let proofs = scratchMerkleizer.addChunksAndGenMerkleProofs(depositRoots) - for i in 0 ..< totalDepositsInNewBlock: - var proof: array[33, Eth2Digest] - proof[0..31] = proofs.getProof(i.int) - proof[32] = default(Eth2Digest) - proof[32].data[0..7] = toBytesLE uint64(postStateDepositIdx) - result.deposits.add Deposit(data: deposits[i], proof: proof) - else: - error "The Eth1 chain is in inconsistent state" # This should not really happen - result.hasMissingDeposits = true - else: - result.hasMissingDeposits = true - -func clear*(chain: var Eth1Chain) = - chain.blocks.clear() - chain.blocksByHash.clear() - chain.headMerkleizer = chain.finalizedDepositsMerkleizer - chain.hasConsensusViolation = false - -proc init*( - T: type Eth1Chain, - cfg: RuntimeConfig, - db: BeaconChainDB, - depositContractBlockNumber: uint64, - depositContractBlockHash: Eth2Digest): T = - let - (finalizedBlockHash, depositContractState) = - if db != nil: - let snapshot = db.getDepositContractSnapshot() - if snapshot.isSome: - (snapshot.get.eth1Block, snapshot.get.depositContractState) - else: - let oldSnapshot = db.getUpgradableDepositSnapshot() - if oldSnapshot.isSome: - (oldSnapshot.get.eth1Block, oldSnapshot.get.depositContractState) - else: - db.putDepositContractSnapshot DepositContractSnapshot( - eth1Block: depositContractBlockHash, - blockHeight: depositContractBlockNumber) - (depositContractBlockHash, default(DepositContractState)) - else: - (depositContractBlockHash, default(DepositContractState)) - m = DepositsMerkleizer.init(depositContractState) - - T(db: db, - cfg: cfg, - finalizedBlockHash: finalizedBlockHash, - finalizedDepositsMerkleizer: m, - headMerkleizer: m) diff --git a/beacon_chain/el/merkle_minimal.nim b/beacon_chain/el/merkle_minimal.nim index e7c90d3345..587714f15f 100644 --- a/beacon_chain/el/merkle_minimal.nim +++ b/beacon_chain/el/merkle_minimal.nim @@ -13,13 +13,12 @@ # --------------------------------------------------------------- import - std/sequtils, - stew/endians2, - # Specs ../spec/[eth2_merkleization, digest], ../spec/datatypes/base -template getProof*( +from std/sequtils import mapIt + +template getProof( proofs: seq[Eth2Digest], idxParam: int): openArray[Eth2Digest] = let idx = idxParam diff --git a/beacon_chain/era_db.nim b/beacon_chain/era_db.nim index 51828fbc84..27279ceea2 100644 --- a/beacon_chain/era_db.nim +++ b/beacon_chain/era_db.nim @@ -5,7 +5,7 @@ # * 
Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import std/os, @@ -126,7 +126,7 @@ proc getBlockSSZ*( if len > int.high.uint64: return err("Invalid uncompressed size") - bytes = newSeqUninitialized[byte](len) + bytes = newSeqUninit[byte](len) # Where it matters, we will integrity-check the data with SSZ - no # need to waste cycles on crc32 @@ -171,7 +171,7 @@ proc getStateSSZ*( min(len, partial.get().uint64 + maxUncompressedFrameDataLen - 1) else: len - bytes = newSeqUninitialized[byte](wanted) + bytes = newSeqUninit[byte](wanted) # Where it matters, we will integrity-check the data with SSZ - no # need to waste cycles on crc32 @@ -421,8 +421,8 @@ proc getPartialState( try: readSszBytes(tmp.toOpenArray(0, partialBytes - 1), output) true - except CatchableError: - # TODO log? + except CatchableError as exc: + error "Failed to parse partial beacon state", slot = slot, msg = exc.msg false iterator getBlockIds*( @@ -441,7 +441,7 @@ iterator getBlockIds*( # `case` ensures we're on a fork for which the `PartialBeaconState` # definition is consistent case db.cfg.consensusForkAtEpoch(slot.epoch) - of ConsensusFork.Phase0 .. ConsensusFork.Fulu: + of ConsensusFork.Phase0 .. ConsensusFork.Gloas: let stateSlot = (slot.era() + 1).start_slot() if not getPartialState( db, historical_roots, historical_summaries, stateSlot, state[]): diff --git a/beacon_chain/fork_choice/fork_choice.nim b/beacon_chain/fork_choice/fork_choice.nim index 338e6fdcbc..c74bdc126a 100644 --- a/beacon_chain/fork_choice/fork_choice.nim +++ b/beacon_chain/fork_choice/fork_choice.nim @@ -9,7 +9,7 @@ import # Standard library - std/[sequtils, tables], + std/tables, # Status libraries results, chronicles, # Internal @@ -19,6 +19,7 @@ import ./fork_choice_types, ./proto_array, ../consensus_object_pools/[spec_cache, blockchain_dag] +from std/sequtils import keepItIf export results, fork_choice_types export proto_array.len @@ -292,7 +293,7 @@ proc process_block*(self: var ForkChoice, # Add proposer score boost if the block is timely let slot = self.checkpoints.time.slotOrZero if slot == blck.slot and - self.checkpoints.time < slot.attestation_deadline and + self.checkpoints.time < slot.attestation_deadline(dag.cfg.time) and self.checkpoints.proposer_boost_root == ZERO_HASH: self.checkpoints.proposer_boost_root = blckRef.root @@ -357,7 +358,7 @@ func find_head( return ok(new_head) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/fork-choice.md#get_head +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/fork-choice.md#get_head proc get_head*(self: var ForkChoice, dag: ChainDAGRef, wallTime: BeaconTime): FcResult[Eth2Digest] = @@ -487,7 +488,7 @@ when isMainModule: echo " fork_choice compute_deltas - test zero votes" const validator_count = 16 - var deltas = newSeqUninitialized[Delta](validator_count) + var deltas = newSeqUninit[Delta](validator_count) var indices: Table[Eth2Digest, Index] var votes: seq[VoteTracker] @@ -518,7 +519,7 @@ when isMainModule: const Balance = Gwei(42) validator_count = 16 - var deltas = newSeqUninitialized[Delta](validator_count) + var deltas = newSeqUninit[Delta](validator_count) var indices: Table[Eth2Digest, Index] var votes: seq[VoteTracker] @@ -557,7 +558,7 @@ when isMainModule: const Balance = Gwei(42) validator_count = 16 - var deltas = 
newSeqUninitialized[Delta](validator_count) + var deltas = newSeqUninit[Delta](validator_count) var indices: Table[Eth2Digest, Index] var votes: seq[VoteTracker] @@ -594,7 +595,7 @@ when isMainModule: Balance = Gwei(42) validator_count = 16 TotalDeltas = Delta(Balance * validator_count) - var deltas = newSeqUninitialized[Delta](validator_count) + var deltas = newSeqUninit[Delta](validator_count) var indices: Table[Eth2Digest, Index] var votes: seq[VoteTracker] @@ -642,7 +643,7 @@ when isMainModule: indices.add fakeHash(1), 0 # 2 validators - var deltas = newSeqUninitialized[Delta](2) + var deltas = newSeqUninit[Delta](2) let old_balances = @[Balance, Balance] let new_balances = @[Balance, Balance] @@ -681,7 +682,7 @@ when isMainModule: validator_count = 16 TotalOldDeltas = Delta(OldBalance * validator_count) TotalNewDeltas = Delta(NewBalance * validator_count) - var deltas = newSeqUninitialized[Delta](validator_count) + var deltas = newSeqUninit[Delta](validator_count) var indices: Table[Eth2Digest, Index] var votes: seq[VoteTracker] @@ -730,7 +731,7 @@ when isMainModule: indices.add fakeHash(2), 1 # 1 validator at the start, 2 at the end - var deltas = newSeqUninitialized[Delta](2) + var deltas = newSeqUninit[Delta](2) let old_balances = @[Balance] let new_balances = @[Balance, Balance] @@ -769,7 +770,7 @@ when isMainModule: indices.add fakeHash(2), 1 # 2 validator at the start, 1 at the end - var deltas = newSeqUninitialized[Delta](2) + var deltas = newSeqUninit[Delta](2) let old_balances = @[Balance, Balance] let new_balances = @[Balance] diff --git a/beacon_chain/gossip_processing/README.md b/beacon_chain/gossip_processing/README.md index 1cf74f4ed2..ce49d2eff0 100644 --- a/beacon_chain/gossip_processing/README.md +++ b/beacon_chain/gossip_processing/README.md @@ -13,8 +13,8 @@ Gossip validation is different from consensus verification in particular for blo - Attestations (aggregated): https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof - Attestations (unaggregated): https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnets - Voluntary exits: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#voluntary_exit -- Proposer slashings: https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/p2p-interface.md#proposer_slashing -- Attester slashing: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#attester_slashing +- Proposer slashings: https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/p2p-interface.md#proposer_slashing +- Attester slashing: https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/p2p-interface.md#attester_slashing There are multiple consumers of validated consensus objects: - a `ValidationResult.Accept` output triggers rebroadcasting in libp2p diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim index aab58d0df1..58a2f305ca 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -5,34 +5,34 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
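
In the `fork_choice.nim` hunk above, the proposer score boost is granted only when a block arrives within its own slot and before that slot's attestation deadline, which is now derived from the runtime time configuration. A toy version of the timeliness test, assuming a 12-second slot with the deadline one third of the way in (these constants are assumptions for the example, not values taken from the patch):

const
  secondsPerSlot = 12
  intervalsPerSlot = 3               # deadline assumed at 1/3 of the slot

proc isTimely(slotStart, arrival: float): bool =
  # Boost only if the block arrives before its slot's attestation deadline.
  arrival < slotStart + secondsPerSlot / intervalsPerSlot

doAssert isTimely(0.0, 3.9)          # within the first four seconds
doAssert not isTimely(0.0, 4.1)      # too late for the proposer boost
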
-{.push raises: [].} +{.push raises: [], gcsafe.} import chronicles, chronos, metrics, - ../spec/[forks, helpers_el, signatures, signatures_batch], + ../spec/[forks, helpers_el, signatures, signatures_batch, peerdas_helpers], ../sszdump from std/deques import Deque, addLast, contains, initDeque, items, len, shrink from std/sequtils import anyIt, mapIt from ../consensus_object_pools/consensus_manager import - ConsensusManager, checkNextProposer, optimisticExecutionBlockHash, - runProposalForkchoiceUpdated, shouldSyncOptimistically, updateHead, - updateHeadWithExecution + ConsensusManager, to, updateHead, updateExecutionHead from ../consensus_object_pools/blockchain_dag import getBlockRef, getForkedBlock, getProposer, forkAtEpoch, loadExecutionBlockHash, - markBlockVerified, validatorKey, is_optimistic + markExecutionValid, validatorKey, is_optimistic from ../beacon_clock import GetBeaconTimeFn, toFloatSeconds -from ../consensus_object_pools/block_dag import BlockRef, root, shortLog, slot +from ../consensus_object_pools/block_dag import + BlockRef, OptimisticStatus, executionValid, root, shortLog, slot from ../consensus_object_pools/block_pools_types import - EpochRef, VerifierError + ChainDAGRef, EpochRef, OnBlockAdded, VerifierError from ../consensus_object_pools/block_quarantine import - addBlobless, addOrphan, addUnviable, pop, removeOrphan + addSidecarless, addOrphan, addUnviable, pop, removeOrphan, removeSidecarless from ../consensus_object_pools/blob_quarantine import - BlobQuarantine, hasBlobs, popBlobs, put + BlobQuarantine, ColumnQuarantine, popSidecars, put from ../validators/validator_monitor import MsgSource, ValidatorMonitor, registerAttestationInBlock, registerBeaconBlock, registerSyncAggregateInBlock -from ../beacon_chain_db import getBlobSidecar, putBlobSidecar +from ../beacon_chain_db import getBlobSidecar, putBlobSidecar, + getDataColumnSidecar, putDataColumnSidecar from ../spec/state_transition_block import validate_blobs export sszdump, signatures_batch @@ -54,16 +54,6 @@ const ## Number of slots from wall time that we start processing every payload type - BlockEntry = object - blck*: ForkedSignedBeaconBlock - blobs*: Opt[BlobSidecars] - maybeFinalized*: bool - ## The block source claims the block has been finalized already - resfut*: Future[Result[void, VerifierError]].Raising([CancelledError]) - queueTick*: Moment # Moment when block was enqueued - validationDur*: Duration # Time it took to perform gossip validation - src*: MsgSource - BlockProcessor* = object ## This manages the processing of blocks from different sources ## Blocks and attestations are enqueued in a gossip-validated state @@ -87,35 +77,35 @@ type dumpEnabled: bool dumpDirInvalid: string dumpDirIncoming: string + invalidBlockRoots: seq[Eth2Digest] # Producers # ---------------------------------------------------------------- - blockQueue: AsyncQueue[BlockEntry] + storeLock: AsyncLock + ## storeLock ensures that storeBlock is only called by one async task at + ## a time, queueing the others for processing in order + pendingStores: int # Consumer # ---------------------------------------------------------------- - consensusManager: ref ConsensusManager + consensusManager*: ref ConsensusManager ## Blockchain DAG, AttestationPool and Quarantine ## Blockchain DAG, AttestationPool, Quarantine, and ELManager validatorMonitor: ref ValidatorMonitor getBeaconTime: GetBeaconTimeFn blobQuarantine: ref BlobQuarantine + dataColumnQuarantine*: ref ColumnQuarantine verifier: BatchVerifier lastPayload: Slot ## The slot 
at which we sent a payload to the execution client the last ## time - NewPayloadStatus* {.pure.} = enum - valid - notValid - invalid - noResponse + NoSidecars* = typeof(()) + SomeOptSidecars = NoSidecars | Opt[BlobSidecars] | Opt[DataColumnSidecars] - ProcessingStatus {.pure.} = enum - completed - notCompleted +const noSidecars* = default(NoSidecars) # Initialization # ------------------------------------------------------------------------------ @@ -127,15 +117,23 @@ proc new*(T: type BlockProcessor, consensusManager: ref ConsensusManager, validatorMonitor: ref ValidatorMonitor, blobQuarantine: ref BlobQuarantine, - getBeaconTime: GetBeaconTimeFn): ref BlockProcessor = + dataColumnQuarantine: ref ColumnQuarantine, + getBeaconTime: GetBeaconTimeFn, + invalidBlockRoots: seq[Eth2Digest] = @[]): ref BlockProcessor = + if invalidBlockRoots.len > 0: + warn "Config requests blocks to be treated as invalid", + debugInvalidateBlockRoot = invalidBlockRoots + (ref BlockProcessor)( dumpEnabled: dumpEnabled, dumpDirInvalid: dumpDirInvalid, dumpDirIncoming: dumpDirIncoming, - blockQueue: newAsyncQueue[BlockEntry](), + invalidBlockRoots: invalidBlockRoots, + storeLock: newAsyncLock(), consensusManager: consensusManager, validatorMonitor: validatorMonitor, blobQuarantine: blobQuarantine, + dataColumnQuarantine: dataColumnQuarantine, getBeaconTime: getBeaconTime, verifier: batchVerifier[] ) @@ -144,7 +142,7 @@ proc new*(T: type BlockProcessor, # ------------------------------------------------------------------------------ func hasBlocks*(self: BlockProcessor): bool = - self.blockQueue.len() > 0 + self.pendingStores > 0 # Storage # ------------------------------------------------------------------------------ @@ -154,10 +152,10 @@ proc dumpInvalidBlock*( if self.dumpEnabled: dump(self.dumpDirInvalid, signedBlock) -proc dumpBlock[T]( +proc dumpBlock( self: BlockProcessor, signedBlock: ForkySignedBeaconBlock, - res: Result[T, VerifierError]) = + res: Result[void, VerifierError]) = if self.dumpEnabled and res.isErr: case res.error of VerifierError.Invalid: @@ -170,36 +168,80 @@ proc dumpBlock[T]( from ../consensus_object_pools/block_clearance import addBackfillBlock, addHeadBlockWithParent, checkHeadBlock -proc storeBackfillBlock( - self: var BlockProcessor, +proc verifySidecars( signedBlock: ForkySignedBeaconBlock, - blobsOpt: Opt[BlobSidecars]): Result[void, VerifierError] = - - # The block is certainly not missing any more - self.consensusManager.quarantine[].missing.del(signedBlock.root) - - # Establish blob viability before calling addbackfillBlock to avoid - # writing the block in case of blob error. 
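
The reworked `BlockProcessor` above drops the `AsyncQueue`-based `blockQueue` in favour of a `storeLock`/`pendingStores` pair: concurrent callers contend on an `AsyncLock`, so blocks are stored one at a time, while the counter preserves a `hasBlocks`-style view of outstanding work. A minimal chronos sketch of that idea; `Store` and `storeOne` are hypothetical stand-ins, and this is illustrative only, not part of the patch.

import chronos

type Store = ref object
  lock: AsyncLock
  pending: int

proc storeOne(s: Store, id: int) {.async.} =
  inc s.pending
  await s.lock.acquire()                 # queue up behind earlier callers
  try:
    await sleepAsync(10.milliseconds)    # stand-in for the actual block store
    echo "stored block ", id
  finally:
    s.lock.release()
    dec s.pending

proc main() {.async.} =
  let s = Store(lock: newAsyncLock())
  # The three calls run concurrently but are applied one at a time.
  await allFutures(s.storeOne(1), s.storeOne(2), s.storeOne(3))
  doAssert s.pending == 0

waitFor main()
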
- var blobsOk = true - when typeof(signedBlock).kind >= ConsensusFork.Deneb: - if blobsOpt.isSome: - let blobs = blobsOpt.get() + sidecarsOpt: SomeOptSidecars, +): Result[void, VerifierError] = + const consensusFork = typeof(signedBlock).kind + + when consensusFork == ConsensusFork.Gloas: + # For Gloas, we still need to store the columns if they're provided + # but skip validation since we don't have kzg_commitments in the block + if sidecarsOpt.isSome: + debugGloasComment "potentially validate against payload envelope" + let columns = sidecarsOpt.get() + discard + elif consensusFork == ConsensusFork.Fulu: + if sidecarsOpt.isSome: + let columns = sidecarsOpt.get() + let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq + if columns.len > 0 and kzgCommits.len > 0: + for i in 0 ..< columns.len: + let r = verify_data_column_sidecar_kzg_proofs(columns[i][]) + if r.isErr(): + debug "data column validation failed", + blockRoot = shortLog(signedBlock.root), + column_sidecar = shortLog(columns[i][]), + blck = shortLog(signedBlock.message), + signature = shortLog(signedBlock.signature), + msg = r.error() + return err(VerifierError.Invalid) + elif consensusFork in ConsensusFork.Deneb .. ConsensusFork.Electra: + if sidecarsOpt.isSome: + let blobs = sidecarsOpt.get() let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq if blobs.len > 0 or kzgCommits.len > 0: - let r = validate_blobs(kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)), - blobs.mapIt(it.kzg_proof)) + let r = validate_blobs( + kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)), blobs.mapIt(it.kzg_proof) + ) if r.isErr(): - debug "backfill blob validation failed", + debug "blob validation failed", blockRoot = shortLog(signedBlock.root), blobs = shortLog(blobs), blck = shortLog(signedBlock.message), kzgCommits = mapIt(kzgCommits, shortLog(it)), signature = shortLog(signedBlock.signature), msg = r.error() - blobsOk = r.isOk() + return err(VerifierError.Invalid) + elif consensusFork in ConsensusFork.Phase0 .. ConsensusFork.Capella: + static: doAssert sidecarsOpt is NoSidecars + else: + {.error: "Unknown consensus fork " & $consensusFork.} - if not blobsOk: - return err(VerifierError.Invalid) + ok() + +proc storeSidecars(self: BlockProcessor, sidecarsOpt: Opt[BlobSidecars]) = + if sidecarsOpt.isSome(): + for b in sidecarsOpt[]: + self.consensusManager.dag.db.putBlobSidecar(b[]) + +proc storeSidecars(self: BlockProcessor, sidecarsOpt: Opt[DataColumnSidecars]) = + if sidecarsOpt.isSome(): + for c in sidecarsOpt[]: + self.consensusManager.dag.db.putDataColumnSidecar(c[]) + +proc storeSidecars(self: BlockProcessor, sidecarsOpt: NoSidecars) = + discard + +proc storeBackfillBlock( + self: var BlockProcessor, + signedBlock: ForkySignedBeaconBlock, + sidecarsOpt: SomeOptSidecars, +): Result[void, VerifierError] = + # The block is certainly not missing any more + self.consensusManager.quarantine[].missing.del(signedBlock.root) + + ?verifySidecars(signedBlock, sidecarsOpt) let res = self.consensusManager.dag.addBackfillBlock(signedBlock) @@ -211,7 +253,6 @@ proc storeBackfillBlock( # DAG doesn't know about unviable ancestor blocks - we do! 
Translate # this to the appropriate error so that sync etc doesn't retry the block self.consensusManager.quarantine[].addUnviable(signedBlock.root) - return err(VerifierError.UnviableFork) of VerifierError.UnviableFork: # Track unviables so that descendants can be discarded properly @@ -219,107 +260,36 @@ proc storeBackfillBlock( else: discard return res - # Only store blobs after successfully establishing block viability. - let blobs = blobsOpt.valueOr: BlobSidecars @[] - for b in blobs: - self.consensusManager.dag.db.putBlobSidecar(b[]) + # Only store side cars after successfully establishing block viability. + self.storeSidecars(sidecarsOpt) res -from web3/engine_api_types import - PayloadAttributesV1, PayloadAttributesV2, PayloadAttributesV3, - PayloadExecutionStatus, PayloadStatusV1 -from ../el/el_manager import - ELManager, DeadlineObject, forkchoiceUpdated, hasConnection, - hasProperlyConfiguredConnection, sendNewPayload, init - -proc expectValidForkchoiceUpdated( - elManager: ELManager, headBlockPayloadAttributesType: typedesc, - headBlockHash, safeBlockHash, finalizedBlockHash: Eth2Digest, - receivedBlock: ForkySignedBeaconBlock, - deadlineObj: DeadlineObject, - maxRetriesCount: int -): Future[void] {.async: (raises: [CancelledError]).} = - let - (payloadExecutionStatus, _) = await elManager.forkchoiceUpdated( - headBlockHash = headBlockHash, - safeBlockHash = safeBlockHash, - finalizedBlockHash = finalizedBlockHash, - payloadAttributes = Opt.none headBlockPayloadAttributesType, - deadlineObj = deadlineObj, - maxRetriesCount = maxRetriesCount) - receivedExecutionBlockHash = - when typeof(receivedBlock).kind >= ConsensusFork.Bellatrix: - receivedBlock.message.body.execution_payload.block_hash - else: - # https://github.com/nim-lang/Nim/issues/19802 - (static(default(Eth2Digest))) - - # Only called when expecting this to be valid because `newPayload` or some - # previous `forkchoiceUpdated` had already marked it as valid. However, if - # it's not the block that was received, don't info/warn either way given a - # relative lack of immediate evidence. 
- if receivedExecutionBlockHash != headBlockHash: - return - - case payloadExecutionStatus - of PayloadExecutionStatus.valid: - # situation nominal - discard - of PayloadExecutionStatus.accepted, PayloadExecutionStatus.syncing: - info "execution payload forkChoiceUpdated status ACCEPTED/SYNCING, but was previously VALID", - payloadExecutionStatus = $payloadExecutionStatus, headBlockHash, - safeBlockHash, finalizedBlockHash, - receivedBlock = shortLog(receivedBlock) - of PayloadExecutionStatus.invalid, PayloadExecutionStatus.invalid_block_hash: - warn "execution payload forkChoiceUpdated status INVALID, but was previously VALID", - payloadExecutionStatus = $payloadExecutionStatus, headBlockHash, - safeBlockHash, finalizedBlockHash, - receivedBlock = shortLog(receivedBlock) - -from ../consensus_object_pools/attestation_pool import - addForkChoice, selectOptimisticHead, BeaconHead +from web3/engine_api_types import PayloadExecutionStatus +from ../el/el_manager import ELManager, DeadlineFuture, sendNewPayload +from ../consensus_object_pools/attestation_pool import AttestationPool, addForkChoice from ../consensus_object_pools/spec_cache import get_attesting_indices proc newExecutionPayload*( elManager: ELManager, blck: SomeForkyBeaconBlock, - deadlineObj: DeadlineObject, - maxRetriesCount: int + deadline: DeadlineFuture, + retry: bool, ): Future[Opt[PayloadExecutionStatus]] {.async: (raises: [CancelledError]).} = - template executionPayload: untyped = blck.body.execution_payload - if not elManager.hasProperlyConfiguredConnection: - if elManager.hasConnection: - info "No execution client connected; cannot process block payloads", - executionPayload = shortLog(executionPayload) - else: - debug "No execution client connected; cannot process block payloads", - executionPayload = shortLog(executionPayload) - return Opt.none PayloadExecutionStatus - debug "newPayload: inserting block into execution engine", executionPayload = shortLog(executionPayload) - try: - let payloadStatus = - await elManager.sendNewPayload(blck, deadlineObj, maxRetriesCount) - - debug "newPayload: succeeded", - parentHash = executionPayload.parent_hash, - blockHash = executionPayload.block_hash, - blockNumber = executionPayload.block_number, - payloadStatus = $payloadStatus - - return Opt.some payloadStatus - except CatchableError as err: - warn "newPayload failed - check execution client", - msg = err.msg, - parentHash = shortLog(executionPayload.parent_hash), - blockHash = shortLog(executionPayload.block_hash), - blockNumber = executionPayload.block_number - return Opt.none PayloadExecutionStatus + let payloadStatus = ?await elManager.sendNewPayload(blck, deadline, retry) + + debug "newPayload: succeeded", + parentHash = executionPayload.parent_hash, + blockHash = executionPayload.block_hash, + blockNumber = executionPayload.block_number, + payloadStatus = payloadStatus + + Opt.some payloadStatus proc newExecutionPayload*( elManager: ELManager, @@ -327,62 +297,49 @@ proc newExecutionPayload*( ): Future[Opt[PayloadExecutionStatus]] {. 
async: (raises: [CancelledError], raw: true).} = newExecutionPayload( - elManager, blck, DeadlineObject.init(FORKCHOICEUPDATED_TIMEOUT), - high(int)) + elManager, blck, sleepAsync(FORKCHOICEUPDATED_TIMEOUT), true) proc getExecutionValidity( elManager: ELManager, blck: bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock | deneb.SignedBeaconBlock | electra.SignedBeaconBlock | fulu.SignedBeaconBlock, - deadlineObj: DeadlineObject, - maxRetriesCount: int -): Future[NewPayloadStatus] {.async: (raises: [CancelledError]).} = + deadline: DeadlineFuture, + retry: bool, +): Future[Opt[OptimisticStatus]] {.async: (raises: [CancelledError]).} = if not blck.message.is_execution_block: - return NewPayloadStatus.valid # vacuously - - try: - let executionPayloadStatus = await elManager.newExecutionPayload( - blck.message, deadlineObj, maxRetriesCount) - if executionPayloadStatus.isNone: - return NewPayloadStatus.noResponse - - case executionPayloadStatus.get - of PayloadExecutionStatus.invalid, - PayloadExecutionStatus.invalid_block_hash: - # Blocks come either from gossip or request manager requests. In the - # former case, they've passed libp2p gossip validation which implies - # correct signature for correct proposer,which makes spam expensive, - # while for the latter, spam is limited by the request manager. - info "execution payload invalid from EL client newPayload", - executionPayloadStatus = $executionPayloadStatus.get, - executionPayload = shortLog(blck.message.body.execution_payload), - blck = shortLog(blck) - return NewPayloadStatus.invalid - of PayloadExecutionStatus.syncing, PayloadExecutionStatus.accepted: - return NewPayloadStatus.notValid - of PayloadExecutionStatus.valid: - return NewPayloadStatus.valid - except CatchableError as err: - error "newPayload failed and leaked exception", - err = err.msg, + return Opt.some(OptimisticStatus.valid) # vacuously + + let status = (await elManager.newExecutionPayload(blck.message, deadline, retry)).valueOr: + return Opt.none(OptimisticStatus) + + let optimisticStatus = status.to(OptimisticStatus) + + if optimisticStatus == OptimisticStatus.invalidated: + # Blocks come either from gossip or request manager requests. In the + # former case, they've passed libp2p gossip validation which implies + # correct signature for correct proposer,which makes spam expensive, + # while for the latter, spam is limited by the request manager. 
+ info "execution payload invalid from EL client newPayload", + executionPayloadStatus = status, executionPayload = shortLog(blck.message.body.execution_payload), blck = shortLog(blck) - return NewPayloadStatus.noResponse -proc checkBloblessSignature( + Opt.some(optimisticStatus) + +proc checkBlobOrColumnlessSignature( self: BlockProcessor, signed_beacon_block: deneb.SignedBeaconBlock | electra.SignedBeaconBlock | fulu.SignedBeaconBlock): Result[void, cstring] = let dag = self.consensusManager.dag let parent = dag.getBlockRef(signed_beacon_block.message.parent_root).valueOr: - return err("checkBloblessSignature called with orphan block") + return err("checkBlobOrColumnlessSignature called with orphan block") let proposer = getProposer( dag, parent, signed_beacon_block.message.slot).valueOr: - return err("checkBloblessSignature: Cannot compute proposer") + return err("checkBlobOrColumnlessSignature: Cannot compute proposer") if distinctBase(proposer) != signed_beacon_block.message.proposer_index: - return err("checkBloblessSignature: Incorrect proposer") + return err("checkBlobOrColumnlessSignature: Incorrect proposer") if not verify_block_signature( dag.forkAtEpoch(signed_beacon_block.message.slot.epoch), getStateField(dag.headState, genesis_validators_root), @@ -390,64 +347,232 @@ proc checkBloblessSignature( signed_beacon_block.root, dag.validatorKey(proposer).get(), signed_beacon_block.signature): - return err("checkBloblessSignature: Invalid proposer signature") + return err("checkBlobOrColumnlessSignature: Invalid proposer signature") ok() +proc addBlock*( + self: ref BlockProcessor, + src: MsgSource, + blck: ForkySignedBeaconBlock, + sidecarsOpt: SomeOptSidecars, + maybeFinalized = false, + validationDur = Duration(), +): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} + proc enqueueBlock*( - self: var BlockProcessor, src: MsgSource, blck: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], - resfut: Future[Result[void, VerifierError]].Raising([CancelledError]) = nil, + self: ref BlockProcessor, + src: MsgSource, + blck: ForkySignedBeaconBlock, + sidecarsOpt: SomeOptSidecars, maybeFinalized = false, - validationDur = Duration()) = - withBlck(blck): - if forkyBlck.message.slot <= self.consensusManager.dag.finalizedHead.slot: - # let backfill blocks skip the queue - these are always "fast" to process - # because there are no state rewinds to deal with - let res = self.storeBackfillBlock(forkyBlck, blobs) - resfut.complete(res) - return + validationDur = Duration(), +) = + if blck.message.slot <= self.consensusManager.dag.finalizedHead.slot: + # let backfill blocks skip the queue - these are always "fast" to process + # because there are no state rewinds to deal with + discard self[].storeBackfillBlock(blck, sidecarsOpt) + return - try: - self.blockQueue.addLastNoWait(BlockEntry( - blck: blck, - blobs: blobs, - maybeFinalized: maybeFinalized, - resfut: resfut, queueTick: Moment.now(), - validationDur: validationDur, - src: src)) - except AsyncQueueFullError: - raiseAssert "unbounded queue" - -proc updateHead*( - consensusManager: ref ConsensusManager, - getBeaconTimeFn: GetBeaconTimeFn, -): Result[void, string] = - let - attestationPool = consensusManager.attestationPool - wallTime = getBeaconTimeFn() - wallSlot = wallTime.slotOrZero() - newHead = - attestationPool[].selectOptimisticHead(wallSlot.start_beacon_time) - if newHead.isOk(): - consensusManager[].updateHead(newHead.get.blck) - ok() + # `discard` here means that the `async` task will continue running 
even though + this function returns, similar to `asyncSpawn` (which we cannot use because + of the return type) - therefore, processing of the block cannot be cancelled + and its result is lost - this is fine however: callers of `enqueueBlock` + don't care. However, because this acts as an unbounded queue, they have to + be careful not to enqueue too many blocks or we'll run out of memory - + `addBlock` should be used where managing backpressure is appropriate. + discard self.addBlock(src, blck, sidecarsOpt, maybeFinalized, validationDur) + +proc enqueueQuarantine(self: ref BlockProcessor, root: Eth2Digest) = + ## Enqueue blocks whose parent is `root` - ie when `root` has been added to + ## the blockchain dag, its direct descendants are now candidates for + ## processing + for quarantined in self.consensusManager.quarantine[].pop(root): + # Process the blocks that had the newly accepted block as parent + debug "Block from quarantine", + blockRoot = shortLog(root), quarantined = shortLog(quarantined.root) + + withBlck(quarantined): + when consensusFork == ConsensusFork.Gloas: + debugGloasComment "" + self.enqueueBlock(MsgSource.gossip, forkyBlck, Opt.none(DataColumnSidecars)) + elif consensusFork == ConsensusFork.Fulu: + if len(forkyBlck.message.body.blob_kzg_commitments) == 0: + self.enqueueBlock( + MsgSource.gossip, forkyBlck, Opt.some(DataColumnSidecars @[]) + ) + else: + if (let res = checkBlobOrColumnlessSignature(self[], forkyBlck); res.isErr): + warn "Failed to verify signature of unorphaned columnless block", + blck = shortLog(forkyBlck), error = res.error() + continue + let cres = self.dataColumnQuarantine[].popSidecars(forkyBlck.root, forkyBlck) + if cres.isSome: + self.enqueueBlock(MsgSource.gossip, forkyBlck, cres) + else: + discard self.consensusManager.quarantine[].addSidecarless( + self.consensusManager[].dag.finalizedHead.slot, forkyBlck + ) + elif consensusFork in ConsensusFork.Deneb .. ConsensusFork.Electra: + if len(forkyBlck.message.body.blob_kzg_commitments) == 0: + self.enqueueBlock(MsgSource.gossip, forkyBlck, Opt.some(BlobSidecars @[])) + else: + if (let res = checkBlobOrColumnlessSignature(self[], forkyBlck); res.isErr): + warn "Failed to verify signature of unorphaned blobless block", + blck = shortLog(forkyBlck), error = res.error() + continue + let bres = self.blobQuarantine[].popSidecars(forkyBlck.root, forkyBlck) + if bres.isSome(): + self.enqueueBlock(MsgSource.gossip, forkyBlck, bres) + else: + self.consensusManager.quarantine[].addSidecarless(forkyBlck) + elif consensusFork in ConsensusFork.Phase0 .. 
ConsensusFork.Capella: + self.enqueueBlock(MsgSource.gossip, forkyBlck, noSidecars) + else: + {.error: "Unknown consensus fork " & $consensusFork.} + +proc onBlockAdded*( + dag: ChainDAGRef, + consensusFork: static ConsensusFork, + src: MsgSource, + wallTime: BeaconTime, + attestationPool: ref AttestationPool, + validatorMonitor: ref ValidatorMonitor, +): OnBlockAdded[consensusFork] = + # Actions to perform when a block is successfully added to the DAG, while + # still having access to the clearance state data + + return proc( + blckRef: BlockRef, + blck: consensusFork.TrustedSignedBeaconBlock, + state: consensusFork.BeaconState, + epochRef: EpochRef, + unrealized: FinalityCheckpoints, + ) = + attestationPool[].addForkChoice( + epochRef, blckRef, unrealized, blck.message, wallTime + ) + + validatorMonitor[].registerBeaconBlock(src, wallTime, blck.message) + + for attestation in blck.message.body.attestations: + for vidx in dag.get_attesting_indices(attestation, true): + validatorMonitor[].registerAttestationInBlock( + attestation.data, vidx, blck.message.slot + ) + + when consensusFork >= ConsensusFork.Altair: + for i in blck.message.body.sync_aggregate.sync_committee_bits.oneIndices(): + validatorMonitor[].registerSyncAggregateInBlock( + blck.message.slot, blck.root, state.current_sync_committee.pubkeys.data[i] + ) + +proc verifyPayload( + self: ref BlockProcessor, signedBlock: ForkySignedBeaconBlock +): Result[OptimisticStatus, VerifierError] = + const consensusFork = typeof(signedBlock).kind + # When the execution layer is not available to verify the payload, we do the + # required checks on the CL instead and proceed as if the EL was syncing + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/bellatrix/beacon-chain.md#verify_and_notify_new_payload + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/deneb/beacon-chain.md#modified-verify_and_notify_new_payload + when consensusFork == ConsensusFork.Gloas: + debugGloasComment "no execution payload field for gloas" + ok OptimisticStatus.valid + elif consensusFork >= ConsensusFork.Bellatrix: + if signedBlock.message.is_execution_block: + template payload(): auto = + signedBlock.message.body.execution_payload + + template returnWithError(msg: string, extraMsg = ""): untyped = + if extraMsg != "": + debug msg, reason = extraMsg, executionPayload = shortLog(payload) + else: + debug msg, executionPayload = shortLog(payload) + return err(VerifierError.Invalid) + + if payload.transactions.anyIt(it.len == 0): + returnWithError "Execution block contains zero length transactions" + + if payload.block_hash != signedBlock.message.compute_execution_block_hash(): + returnWithError "Execution block hash validation failed" + + # [New in Deneb:EIP4844] + when consensusFork >= ConsensusFork.Deneb: + let blobsRes = signedBlock.message.is_valid_versioned_hashes + if blobsRes.isErr: + returnWithError "Blob versioned hashes invalid", blobsRes.error + else: + # If there are EIP-4844 (type 3) transactions in the payload with + # versioned hashes, the transactions would be rejected by the EL + # based on payload timestamp (only allowed post Deneb); + # There are no `blob_kzg_commitments` before Deneb to compare against + discard + + if signedBlock.root in self.invalidBlockRoots: + returnWithError "Block root treated as invalid via config", $signedBlock.root + + ok OptimisticStatus.notValidated + else: + ok OptimisticStatus.valid else: - err("Head selection failed, using previous head") + ok OptimisticStatus.valid + +proc 
enqueueFromDb(self: ref BlockProcessor, root: Eth2Digest) = + # TODO This logic can be removed if the database schema is extended + # to store non-canonical heads on top of the canonical head and learns to keep + # track of non-canonical forks - it was added during a time when there were + # many forks and the client needed frequent restarting leading to a database + # that contained semi-downloaded branches that couldn't be added via BlockRef. + let + dag = self.consensusManager.dag + blck = dag.getForkedBlock(root).valueOr: + return + + withBlck(blck): + var sidecarsOk = true + + let sidecarsOpt = + when consensusFork >= ConsensusFork.Fulu: + var data_column_sidecars: fulu.DataColumnSidecars + for i in self.dataColumnQuarantine[].custodyColumns: + let data_column = fulu.DataColumnSidecar.new() + if not dag.db.getDataColumnSidecar(root, i, data_column[]): + sidecarsOk = false # Pruned, or inconsistent DB + break + data_column_sidecars.add data_column + Opt.some data_column_sidecars + elif consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + var blob_sidecars: BlobSidecars + for i in 0 ..< forkyBlck.message.body.blob_kzg_commitments.len: + let blob = BlobSidecar.new() + if not dag.db.getBlobSidecar(root, i.BlobIndex, blob[]): + sidecarsOk = false # Pruned, or inconsistent DB + break + blob_sidecars.add blob + Opt.some blob_sidecars + else: + noSidecars + + if sidecarsOk: + debug "Loaded block from storage", root + self.enqueueBlock(MsgSource.gossip, forkyBlck.asSigned(), sidecarsOpt) proc storeBlock( - self: ref BlockProcessor, src: MsgSource, wallTime: BeaconTime, + self: ref BlockProcessor, + src: MsgSource, + wallTime: BeaconTime, signedBlock: ForkySignedBeaconBlock, - blobsOpt: Opt[BlobSidecars], - maybeFinalized = false, - queueTick: Moment = Moment.now(), validationDur = Duration()): - Future[Result[BlockRef, (VerifierError, ProcessingStatus)]] {.async: (raises: [CancelledError]).} = + sidecarsOpt: SomeOptSidecars, + maybeFinalized: bool, + queueTick: Moment, + validationDur: Duration, +): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = ## storeBlock is the main entry point for unvalidated blocks - all untrusted ## blocks, regardless of origin, pass through here. When storing a block, ## we will add it to the dag and pass it to all block consumers that need ## to know about it, such as the fork choice and the monitoring let - attestationPool = self.consensusManager.attestationPool + ap = self.consensusManager.attestationPool startTick = Moment.now() vm = self.validatorMonitor dag = self.consensusManager.dag @@ -459,68 +584,25 @@ proc storeBlock( 0.seconds else: chronos.nanoseconds((slotTime - wallTime).nanoseconds) - deadlineObj = DeadlineObject.init(deadlineTime) - - func getRetriesCount(): int = - if dag.is_optimistic(dag.head.bid): - 1 - else: - high(int) + deadline = sleepAsync(deadlineTime) # If the block is missing its parent, it will be re-orphaned below self.consensusManager.quarantine[].removeOrphan(signedBlock) + self.consensusManager.quarantine[].removeSidecarless(signedBlock) # The block is certainly not missing any more self.consensusManager.quarantine[].missing.del(signedBlock.root) if signedBlock.message.parent_root in self.consensusManager.quarantine[].unviable: # DAG doesn't know about unviable ancestor blocks - we do however! 
- self.consensusManager.quarantine[].addUnviable(signedBlock.root) - - return err((VerifierError.UnviableFork, ProcessingStatus.completed)) - - template handleVerifierError(errorParam: VerifierError): auto = - let error = errorParam - case error - of VerifierError.MissingParent: - if (let r = self.consensusManager.quarantine[].addOrphan( - dag.finalizedHead.slot, ForkedSignedBeaconBlock.init(signedBlock)); - r.isErr()): - debug "could not add orphan", - blockRoot = shortLog(signedBlock.root), - blck = shortLog(signedBlock.message), - signature = shortLog(signedBlock.signature), - err = r.error() - else: - if blobsOpt.isSome: - for blobSidecar in blobsOpt.get: - self.blobQuarantine[].put(blobSidecar) - debug "Block quarantined", - blockRoot = shortLog(signedBlock.root), - blck = shortLog(signedBlock.message), - signature = shortLog(signedBlock.signature) - - of VerifierError.UnviableFork: - # Track unviables so that descendants can be discarded promptly - self.consensusManager.quarantine[].addUnviable(signedBlock.root) - else: - discard - - err((error, ProcessingStatus.completed)) - - let - # We have to be careful that there exists only one in-flight entry point - # for adding blocks or the checks performed in `checkHeadBlock` might - # be invalidated (ie a block could be added while we wait for EL response - # here) - parent = dag.checkHeadBlock(signedBlock) - - if parent.isErr(): - # TODO This logic can be removed if the database schema is extended - # to store non-canonical heads on top of the canonical head! - # If that is done, the database no longer contains extra blocks - # that have not yet been assigned a `BlockRef` - if parent.error() == VerifierError.MissingParent: + return err(VerifierError.UnviableFork) + + # We have to be careful that there exists only one in-flight entry point + # for adding blocks or the checks performed in `checkHeadBlock` might + # be invalidated (ie a block could be added while we wait for EL response + # here) + let parent = dag.checkHeadBlock(signedBlock).valueOr: + if error == VerifierError.MissingParent: # This indicates that no `BlockRef` is available for the `parent_root`. # However, the block may still be available in local storage. On startup, # only the canonical branch is imported into `blockchain_dag`, while @@ -531,33 +613,13 @@ proc storeBlock( # lot of time, especially when a non-canonical branch has non-trivial # depth. Note that if it turns out that a non-canonical branch eventually # becomes canonical, it is vital to import it as quickly as possible. 
- let - parent_root = signedBlock.message.parent_root - parentBlck = dag.getForkedBlock(parent_root) - if parentBlck.isSome(): - var blobsOk = true - let blobs = - withBlck(parentBlck.get()): - when consensusFork >= ConsensusFork.Deneb: - var blob_sidecars: BlobSidecars - for i in 0 ..< forkyBlck.message.body.blob_kzg_commitments.len: - let blob = BlobSidecar.new() - if not dag.db.getBlobSidecar(parent_root, i.BlobIndex, blob[]): - blobsOk = false # Pruned, or inconsistent DB - break - blob_sidecars.add blob - Opt.some blob_sidecars - else: - Opt.none BlobSidecars - if blobsOk: - debug "Loaded parent block from storage", parent_root - self[].enqueueBlock( - MsgSource.gossip, parentBlck.unsafeGet().asSigned(), blobs) - - return handleVerifierError(parent.error()) + self.enqueueFromDb(signedBlock.message.parent_root) + return err(error) + + const consensusFork = typeof(signedBlock).kind let - payloadStatus = + optimisticStatusRes = if maybeFinalized and (self.lastPayload + SLOTS_PER_PAYLOAD) > signedBlock.message.slot and (signedBlock.message.slot + PAYLOAD_PRE_WALL_SLOTS) < wallSlot and @@ -568,114 +630,36 @@ proc storeBlock( # from an honest source (or when we're close to head). # Occasionally we also send a payload to the EL so that it can # progress in its own sync. - NewPayloadStatus.noResponse + Opt.none(OptimisticStatus) else: - when typeof(signedBlock).kind >= ConsensusFork.Bellatrix: + when consensusFork == ConsensusFork.Gloas: + debugGloasComment "need getExecutionValidity on gloas blocks" + Opt.some OptimisticStatus.valid + elif consensusFork >= ConsensusFork.Bellatrix: + func shouldRetry(): bool = + not dag.is_optimistic(dag.head.bid) await self.consensusManager.elManager.getExecutionValidity( - signedBlock, deadlineObj, getRetriesCount()) + signedBlock, deadline, shouldRetry()) else: - NewPayloadStatus.valid # vacuously - payloadValid = payloadStatus == NewPayloadStatus.valid - - if NewPayloadStatus.invalid == payloadStatus: - self.consensusManager.quarantine[].addUnviable(signedBlock.root) - self[].dumpInvalidBlock(signedBlock) - return err((VerifierError.UnviableFork, ProcessingStatus.completed)) - - if NewPayloadStatus.noResponse == payloadStatus: - # When the execution layer is not available to verify the payload, we do the - # required checks on the CL instead and proceed as if the EL was syncing - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#verify_and_notify_new_payload - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#modified-verify_and_notify_new_payload - when typeof(signedBlock).kind >= ConsensusFork.Bellatrix: - if signedBlock.message.is_execution_block: - template payload(): auto = signedBlock.message.body.execution_payload - - template returnWithError(msg: string, extraMsg = ""): untyped = - if extraMsg != "": - debug msg, reason = extraMsg, executionPayload = shortLog(payload) - else: - debug msg, executionPayload = shortLog(payload) - self[].dumpInvalidBlock(signedBlock) - doAssert strictVerification notin dag.updateFlags - self.consensusManager.quarantine[].addUnviable(signedBlock.root) - return err((VerifierError.Invalid, ProcessingStatus.completed)) - - if payload.transactions.anyIt(it.len == 0): - returnWithError "Execution block contains zero length transactions" - - if payload.block_hash != - signedBlock.message.compute_execution_block_hash(): - returnWithError "Execution block hash validation failed" - - # [New in Deneb:EIP4844] - when 
typeof(signedBlock).kind >= ConsensusFork.Deneb: - let blobsRes = signedBlock.message.is_valid_versioned_hashes - if blobsRes.isErr: - returnWithError "Blob versioned hashes invalid", blobsRes.error - else: - # If there are EIP-4844 (type 3) transactions in the payload with - # versioned hashes, the transactions would be rejected by the EL - # based on payload timestamp (only allowed post Deneb); - # There are no `blob_kzg_commitments` before Deneb to compare against - discard + Opt.some(OptimisticStatus.valid) # vacuously - let newPayloadTick = Moment.now() + let optimisticStatus = ?(optimisticStatusRes or verifyPayload(self, signedBlock)) - # TODO with v1.4.0, not sure this is still relevant - # Establish blob viability before calling addHeadBlock to avoid - # writing the block in case of blob error. - when typeof(signedBlock).kind >= ConsensusFork.Deneb: - if blobsOpt.isSome: - let blobs = blobsOpt.get() - let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq - if blobs.len > 0 or kzgCommits.len > 0: - let r = validate_blobs(kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)), - blobs.mapIt(it.kzg_proof)) - if r.isErr(): - debug "blob validation failed", - blockRoot = shortLog(signedBlock.root), - blobs = shortLog(blobs), - blck = shortLog(signedBlock.message), - kzgCommits = mapIt(kzgCommits, shortLog(it)), - signature = shortLog(signedBlock.signature), - msg = r.error() - return err((VerifierError.Invalid, ProcessingStatus.completed)) + if OptimisticStatus.invalidated == optimisticStatus: + return err(VerifierError.Invalid) - type Trusted = typeof signedBlock.asTrusted() + let newPayloadTick = Moment.now() - let - blck = dag.addHeadBlockWithParent( - self.verifier, signedBlock, parent.value(), payloadValid) do ( - blckRef: BlockRef, trustedBlock: Trusted, - epochRef: EpochRef, unrealized: FinalityCheckpoints): - # Callback add to fork choice if valid - attestationPool[].addForkChoice( - epochRef, blckRef, unrealized, trustedBlock.message, wallTime) - - vm[].registerBeaconBlock( - src, wallTime, trustedBlock.message) - - for attestation in trustedBlock.message.body.attestations: - for validator_index in dag.get_attesting_indices(attestation, true): - vm[].registerAttestationInBlock(attestation.data, validator_index, - trustedBlock.message.slot) - - withState(dag[].clearanceState): - when consensusFork >= ConsensusFork.Altair and - Trusted isnot phase0.TrustedSignedBeaconBlock: # altair+ - for i in trustedBlock.message.body.sync_aggregate.sync_committee_bits.oneIndices(): - vm[].registerSyncAggregateInBlock( - trustedBlock.message.slot, trustedBlock.root, - forkyState.data.current_sync_committee.pubkeys.data[i]) - - self[].dumpBlock(signedBlock, blck) - - # There can be a scenario where we receive a block we already received. - # However this block was before the last finalized epoch and so its parent - # was pruned from the ForkChoice. - if blck.isErr(): - return handleVerifierError(blck.error()) + ?verifySidecars(signedBlock, sidecarsOpt) + + let blck = + ?dag.addHeadBlockWithParent( + self.verifier, + signedBlock, + parent, + optimisticStatus, + onBlockAdded(dag, consensusFork, src, wallTime, ap, vm), + ) # Even if the EL is not responding, we'll only try once every now and then # to give it a block - this avoids a pathological slowdown where a busy EL @@ -684,124 +668,37 @@ proc storeBlock( self[].lastPayload = signedBlock.message.slot # write blobs now that block has been written. 
- let blobs = blobsOpt.valueOr: BlobSidecars @[] - for b in blobs: - self.consensusManager.dag.db.putBlobSidecar(b[]) + self[].storeSidecars(sidecarsOpt) let addHeadBlockTick = Moment.now() - # Eagerly update head: the incoming block "should" get selected. - # - # storeBlock gets called from validator_duties, which depends on its not - # blocking progress any longer than necessary, and processBlock here, in - # which case it's fine to await for a while on engine API results. + # Update consensus head - in the happy case where we are in sync and the + # execution client has validated the block, this will allow validators to + # start attesting via the `checkExpectedBlock` mechanism - notably, this can + # be done without waiting for `forkchoiceUpdated` as the execution client + # already validated the payload. # - # Three general scenarios: (1) pre-merge; (2) merge, already `VALID` by way - # of `newPayload`; (3) optimistically imported, need to call fcU before DAG - # updateHead. Because in a non-finalizing network, completing sync isn't as - # useful because regular reorgs likely still occur, and when finalizing the - # EL is only called every SLOTS_PER_PAYLOAD slots regardless, await, rather - # than asyncSpawn forkchoiceUpdated calls. + # In the unhappy case, the head update kickstarts any pruning and cleanup work + # which helps conserve resources, ie also a good thing to be doing just after + # having added a block. + let previousExecutionValid = dag.head.executionValid + self.consensusManager[].updateHead(wallSlot) + + # After producing attestations, we might be asked to produce a block for the + # next slot, for which there is a hard requirement on the execution client + # fork choice being up to date - there's also a soft requirement that the + # execution head follows the consensus head as closely as possible as this + # keeps the execution client healthy and up to date with finality, so as soon + # as we've updated the consensus head we'll do the same for execution. # - # This reduces in-flight fcU spam, which both reduces EL load and decreases - # otherwise somewhat unpredictable CL head movement. - - # Grab the new head according to our latest attestation data; determines how - # async this needs to be. - let newHead = attestationPool[].selectOptimisticHead( - wallSlot.start_beacon_time) - - if newHead.isOk: - template elManager(): auto = self.consensusManager.elManager - if self.consensusManager[].shouldSyncOptimistically(wallSlot): - # Optimistic head is far in the future; report it as head block to EL. - - # Note that the specification allows an EL client to skip fcU processing - # if an update to an ancestor is requested. - # > Client software MAY skip an update of the forkchoice state and MUST - # NOT begin a payload build process if `forkchoiceState.headBlockHash` - # references an ancestor of the head of canonical chain. - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#specification-1 - # - # However, in practice, an EL client may not have completed importing all - # block headers, so may be unaware of a block's ancestor status. - # Therefore, hopping back and forth between the optimistic head and the - # chain DAG head does not work well in practice, e.g., Geth: - # - "Beacon chain gapped" from DAG head to optimistic head, - # - followed by "Beacon chain reorged" from optimistic head back to DAG. 
- self.consensusManager[].updateHead(newHead.get.blck) - - template callForkchoiceUpdated(attributes: untyped) = - if NewPayloadStatus.noResponse != payloadStatus and - not self.consensusManager[].optimisticExecutionBlockHash.isZero: - discard await elManager.forkchoiceUpdated( - headBlockHash = - self.consensusManager[].optimisticExecutionBlockHash, - safeBlockHash = newHead.get.safeExecutionBlockHash, - finalizedBlockHash = newHead.get.finalizedExecutionBlockHash, - payloadAttributes = Opt.none attributes, - deadlineObj = deadlineObj, - maxRetriesCount = getRetriesCount()) - - let consensusFork = self.consensusManager.dag.cfg.consensusForkAtEpoch( - newHead.get.blck.bid.slot.epoch) - withConsensusFork(consensusFork): - when consensusFork >= ConsensusFork.Bellatrix: - callForkchoiceUpdated(consensusFork.PayloadAttributes) - else: - let - headExecutionBlockHash = - dag.loadExecutionBlockHash(newHead.get.blck).get(ZERO_HASH) - wallSlot = self.getBeaconTime().slotOrZero - if headExecutionBlockHash.isZero or - NewPayloadStatus.noResponse == payloadStatus: - # Blocks without execution payloads can't be optimistic, and don't try - # to fcU to a block the EL hasn't seen - self.consensusManager[].updateHead(newHead.get.blck) - elif newHead.get.blck.executionValid: - # `forkchoiceUpdated` necessary for EL client only. - self.consensusManager[].updateHead(newHead.get.blck) - - template callExpectValidFCU(payloadAttributeType: untyped): auto = - await elManager.expectValidForkchoiceUpdated( - headBlockPayloadAttributesType = payloadAttributeType, - headBlockHash = headExecutionBlockHash, - safeBlockHash = newHead.get.safeExecutionBlockHash, - finalizedBlockHash = newHead.get.finalizedExecutionBlockHash, - receivedBlock = signedBlock, - deadlineObj = deadlineObj, - maxRetriesCount = getRetriesCount()) - - debugFuluComment "We don't know yet if there'd be new PayloadAttributes version in Fulu." - template callForkChoiceUpdated: auto = - case self.consensusManager.dag.cfg.consensusForkAtEpoch( - newHead.get.blck.bid.slot.epoch) - of ConsensusFork.Deneb, ConsensusFork.Electra, ConsensusFork.Fulu: - # https://github.com/ethereum/execution-apis/blob/90a46e9137c89d58e818e62fa33a0347bba50085/src/engine/prague.md - # does not define any new forkchoiceUpdated, so reuse V3 from Dencun - callExpectValidFCU(payloadAttributeType = PayloadAttributesV3) - of ConsensusFork.Capella: - callExpectValidFCU(payloadAttributeType = PayloadAttributesV2) - of ConsensusFork.Phase0, ConsensusFork.Altair, - ConsensusFork.Bellatrix: - callExpectValidFCU(payloadAttributeType = PayloadAttributesV1) - - if self.consensusManager.checkNextProposer(wallSlot).isNone: - # No attached validator is next proposer, so use non-proposal fcU - callForkChoiceUpdated() - else: - # Some attached validator is next proposer, so prepare payload. As - # updateHead() updated the DAG head, runProposalForkchoiceUpdated, - # which needs the state corresponding to that head block, can run. - if (await self.consensusManager.runProposalForkchoiceUpdated( - wallSlot)).isNone: - callForkChoiceUpdated() - else: - await self.consensusManager.updateHeadWithExecution( - newHead.get, self.getBeaconTime) - else: - warn "Head selection failed, using previous head", - head = shortLog(dag.head), wallSlot + # In the case that the execution client is not responding to payloads or + # that we skipped sending the payload altogether per the above + # `SLOTS_PER_PAYLOAD` logic, we will skip the execution client update. 
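# Reviewer sketch, not part of the patch: the `SLOTS_PER_PAYLOAD` gating
# referenced above, in isolation. While importing blocks that are claimed to
# be finalized and sit far behind the wall clock, `newPayload` is only sent to
# the execution client once every SLOTS_PER_PAYLOAD slots so that a busy EL
# cannot stall sync; close to the wall slot every payload is sent again. This
# is a simplification - the full condition above also involves `maybeFinalized`
# and further checks.
func payloadCanBeSkipped(lastPayload, blockSlot, wallSlot: Slot): bool =
  (lastPayload + SLOTS_PER_PAYLOAD) > blockSlot and
    (blockSlot + PAYLOAD_PRE_WALL_SLOTS) < wallSlot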
+ if optimisticStatusRes.isSome(): + # retry fcU until the deadline expires, in case the previous payload was + # valid to increase chances of leaving this function with a still-valid head + await self.consensusManager.updateExecutionHead( + deadline, retry = previousExecutionValid, self.getBeaconTime) let updateHeadTick = Moment.now() @@ -818,46 +715,19 @@ proc storeBlock( debug "Block processed", head = shortLog(dag.head), - blck = shortLog(blck.get()), + blck = shortLog(blck), validationDur, queueDur, newPayloadDur, addHeadBlockDur, updateHeadDur - for quarantined in self.consensusManager.quarantine[].pop(blck.get().root): - # Process the blocks that had the newly accepted block as parent - debug "Block from quarantine", - blockRoot = shortLog(signedBlock.root), - quarantined = shortLog(quarantined.root) - - withBlck(quarantined): - when typeof(forkyBlck).kind < ConsensusFork.Deneb: - self[].enqueueBlock( - MsgSource.gossip, quarantined, Opt.none(BlobSidecars)) - else: - if len(forkyBlck.message.body.blob_kzg_commitments) == 0: - self[].enqueueBlock( - MsgSource.gossip, quarantined, Opt.some(BlobSidecars @[])) - else: - if (let res = checkBloblessSignature(self[], forkyBlck); res.isErr): - warn "Failed to verify signature of unorphaned blobless block", - blck = shortLog(forkyBlck), - error = res.error() - continue - if self.blobQuarantine[].hasBlobs(forkyBlck): - let blobs = self.blobQuarantine[].popBlobs( - forkyBlck.root, forkyBlck) - self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.some(blobs)) - else: - discard self.consensusManager.quarantine[].addBlobless( - dag.finalizedHead.slot, forkyBlck) - - ok blck.value() - -# Enqueue -# ------------------------------------------------------------------------------ + ok() proc addBlock*( - self: var BlockProcessor, src: MsgSource, blck: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], maybeFinalized = false, - validationDur = Duration()): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} = + self: ref BlockProcessor, + src: MsgSource, + blck: ForkySignedBeaconBlock, + sidecarsOpt: SomeOptSidecars, + maybeFinalized = false, + validationDur = Duration(), +): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = ## Enqueue a Gossip-validated block for consensus verification # Backpressure: # There is no backpressure here - producers must wait for `resfut` to @@ -867,63 +737,98 @@ proc addBlock*( # - SyncManager (during sync) # - RequestManager (missing ancestor blocks) # - API - let resfut = newFuture[Result[void, VerifierError]]("BlockProcessor.addBlock") - enqueueBlock(self, src, blck, blobs, resfut, maybeFinalized, validationDur) - resfut - -# Event Loop -# ------------------------------------------------------------------------------ + let blockRoot = blck.root -proc processBlock( - self: ref BlockProcessor, entry: BlockEntry) {.async: (raises: [CancelledError]).} = logScope: - blockRoot = shortLog(entry.blck.root) + blockRoot = shortLog(blockRoot) + + if blck.message.slot <= self.consensusManager.dag.finalizedHead.slot: + # let backfill blocks skip the queue - these are always "fast" to process + # because there are no state rewinds to deal with + return self[].storeBackfillBlock(blck, sidecarsOpt) + + let queueTick = Moment.now() + let res = + try: + # If the lock is acquired already, the current block will be put on hold + # meaning that we'll form an unbounded queue of blocks to be processed + # waiting for the lock - this is similar to using an `AsyncQueue` but + # 
without the copying and transition to/from `Forked`. + # The lock is important to ensure that we don't process blocks out-of-order + # which both would upset the `storeBlock` logic and cause unnecessary + # quarantine traffic. + self.pendingStores += 1 + await self.storeLock.acquire() + + # Cooperative concurrency: one block per loop iteration - because + # we run both networking and CPU-heavy things like block processing + # on the same thread, we need to make sure that there is steady progress + # on the networking side or we get long lockups that lead to timeouts. + const + # We cap waiting for an idle slot in case there's a lot of network traffic + # taking up all CPU - we don't want to _completely_ stop processing blocks + # in this case - doing so also allows us to benefit from more batching / + # larger network reads when under load. + idleTimeout = 10.milliseconds + + discard await idleAsync().withTimeout(idleTimeout) - let - wallTime = self.getBeaconTime() - (afterGenesis, _) = wallTime.toSlot() - - if not afterGenesis: - error "Processing block before genesis, clock turned back?" - quit 1 - - let res = withBlck(entry.blck): - await self.storeBlock( - entry.src, wallTime, forkyBlck, entry.blobs, entry.maybeFinalized, - entry.queueTick, entry.validationDur) - - if res.isErr and res.error[1] == ProcessingStatus.notCompleted: - # When an execution engine returns an error or fails to respond to a - # payload validity request for some block, a consensus engine: - # - MUST NOT optimistically import the block. - # - MUST NOT apply the block to the fork choice store. - # - MAY queue the block for later processing. - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/sync/optimistic.md#execution-engine-errors - await sleepAsync(chronos.seconds(1)) - self[].enqueueBlock( - entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized, - entry.validationDur) - # To ensure backpressure on the sync manager, do not complete these futures. - return + let + wallTime = self.getBeaconTime() + (afterGenesis, _) = wallTime.toSlot() + + if not afterGenesis: + fatal "Processing block before genesis, clock turned back?" 
+ quit 1 + + await self.storeBlock( + src, wallTime, blck, sidecarsOpt, maybeFinalized, queueTick, validationDur + ) + finally: + try: + self.storeLock.release() + self.pendingStores -= 1 + except AsyncLockError: + raiseAssert "release matched with acquire, shouldn't happen" + + self[].dumpBlock(blck, res) + + if res.isOk(): + # Once a block is successfully stored, enqueue the direct descendants + self.enqueueQuarantine(blockRoot) + else: + case res.error() + of VerifierError.MissingParent: + let finalizedSlot = self.consensusManager.dag.finalizedHead.slot + if ( + let r = self.consensusManager.quarantine[].addOrphan( + finalizedSlot, ForkedSignedBeaconBlock.init(blck) + ) + r.isErr() + ): + debug "Could not add orphan", + blck = shortLog(blck), signature = shortLog(blck.signature), err = r.error() + else: + when sidecarsOpt is Opt[BlobSidecars]: + if sidecarsOpt.isSome: + self.blobQuarantine[].put(blockRoot, sidecarsOpt.get) + elif sidecarsOpt is Opt[DataColumnSidecars]: + if sidecarsOpt.isSome: + self.dataColumnQuarantine[].put(blockRoot, sidecarsOpt.get) + elif sidecarsOpt is NoSidecars: + discard + else: + {.error.} - if entry.resfut != nil: - entry.resfut.complete( - if res.isOk(): Result[void, VerifierError].ok() - else: Result[void, VerifierError].err(res.error()[0])) - -proc runQueueProcessingLoop*(self: ref BlockProcessor) {.async.} = - while true: - # Cooperative concurrency: one block per loop iteration - because - # we run both networking and CPU-heavy things like block processing - # on the same thread, we need to make sure that there is steady progress - # on the networking side or we get long lockups that lead to timeouts. - const - # We cap waiting for an idle slot in case there's a lot of network traffic - # taking up all CPU - we don't want to _completely_ stop processing blocks - # in this case - doing so also allows us to benefit from more batching / - # larger network reads when under load. - idleTimeout = 10.milliseconds - - discard await idleAsync().withTimeout(idleTimeout) - - await self.processBlock(await self[].blockQueue.popFirst()) + debug "Block quarantined", + blck = shortLog(blck), signature = shortLog(blck.signature) + of VerifierError.UnviableFork: + # Track unviables so that descendants can be discarded promptly + # TODO Invalid and unviable should be treated separately, to correctly + # respond when a descendant of an invalid block is validated + # TODO re-add VeriferError.Invalid handling + self.consensusManager.quarantine[].addUnviable(blockRoot) + else: + discard + + res diff --git a/beacon_chain/gossip_processing/eth2_processor.nim b/beacon_chain/gossip_processing/eth2_processor.nim index 3d1382c4de..5f1d1d1b5e 100644 --- a/beacon_chain/gossip_processing/eth2_processor.nim +++ b/beacon_chain/gossip_processing/eth2_processor.nim @@ -5,17 +5,20 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
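# Reviewer sketch, not part of the patch: the `storeLock`/`pendingStores`
# pattern that replaces the old block queue in block_processor.nim above,
# reduced to plain chronos primitives. Concurrent callers line up on the
# AsyncLock in arrival order while the counter tracks queue depth, and the
# release sits in `finally` so that a failure or cancellation cannot leak the
# lock. All names below are illustrative.
import chronos

type ToyStore = ref object
  lock: AsyncLock
  pending: int

proc store(s: ToyStore, id: int) {.async.} =
  s.pending += 1
  await s.lock.acquire()
  try:
    # Yield briefly so that networking keeps making progress under load.
    discard await idleAsync().withTimeout(10.milliseconds)
    echo "stored block ", id
  finally:
    s.lock.release()
    s.pending -= 1

when isMainModule:
  let s = ToyStore(lock: newAsyncLock())
  waitFor allFutures(s.store(1), s.store(2), s.store(3))
  doAssert s.pending == 0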
-{.push raises: [].} +{.push raises: [], gcsafe.} import - std/tables, + std/[tables], chronicles, chronos, metrics, taskpools, + kzg4844/kzg, + ssz_serialization/types, + ../el/el_manager, ../spec/[helpers, forks], ../consensus_object_pools/[ blob_quarantine, block_clearance, block_quarantine, blockchain_dag, - attestation_pool, light_client_pool, sync_committee_msg_pool, - validator_change_pool], + attestation_pool, light_client_pool, + sync_committee_msg_pool, validator_change_pool], ../validators/validator_pool, ../beacon_clock, "."/[gossip_validation, block_processor, batch_validation], @@ -46,6 +49,10 @@ declareCounter blob_sidecars_received, "Number of valid blobs processed by this node" declareCounter blob_sidecars_dropped, "Number of invalid blobs dropped by this node", labels = ["reason"] +declareCounter data_column_sidecars_received, + "Number of valid data columns processed by this node" +declareCounter data_column_sidecars_dropped, + "Number of invalid data columns dropped by this node", labels = ["reason"] declareCounter beacon_attester_slashings_received, "Number of valid attester slashings processed by this node" declareCounter beacon_attester_slashings_dropped, @@ -89,6 +96,10 @@ declareHistogram beacon_block_delay, declareHistogram blob_sidecar_delay, "Time(s) between slot start and blob sidecar reception", buckets = delayBuckets +declareHistogram data_column_sidecar_delay, + "Time(s) between slot start and data column sidecar reception", + buckets = delayBuckets + type DoppelgangerProtection = object broadcastStartEpoch*: Epoch ##\ @@ -144,6 +155,8 @@ type blobQuarantine*: ref BlobQuarantine + dataColumnQuarantine*: ref ColumnQuarantine + # Application-provided current time provider (to facilitate testing) getCurrentBeaconTime*: GetBeaconTimeFn @@ -167,6 +180,7 @@ proc new*(T: type Eth2Processor, lightClientPool: ref LightClientPool, quarantine: ref Quarantine, blobQuarantine: ref BlobQuarantine, + dataColumnQuarantine: ref ColumnQuarantine, rng: ref HmacDrbgContext, getBeaconTime: GetBeaconTimeFn, taskpool: Taskpool @@ -185,6 +199,7 @@ proc new*(T: type Eth2Processor, lightClientPool: lightClientPool, quarantine: quarantine, blobQuarantine: blobQuarantine, + dataColumnQuarantine: dataColumnQuarantine, getCurrentBeaconTime: getBeaconTime, batchCrypto: BatchCrypto.new( rng = rng, @@ -204,6 +219,8 @@ proc processSignedBeaconBlock*( self: var Eth2Processor, src: MsgSource, signedBlock: ForkySignedBeaconBlock, maybeFinalized: bool = false): ValidationRes = + const consensusFork = typeof(signedBlock).kind + let wallTime = self.getCurrentBeaconTime() (afterGenesis, wallSlot) = wallTime.toSlot() @@ -225,49 +242,50 @@ proc processSignedBeaconBlock*( # decoding at this stage, which may be significant debug "Block received", delay - let v = - self.dag.validateBeaconBlock(self.quarantine, signedBlock, wallTime, {}) + self.dag.validateBeaconBlock(self.quarantine, signedBlock, wallTime, {}).isOkOr: + debug "Dropping block", err = error - if v.isOk(): - # Block passed validation - enqueue it for processing. 
The block processing - # queue is effectively unbounded as we use a freestanding task to enqueue - # the block - this is done so that when blocks arrive concurrently with - # sync, we don't lose the gossip blocks, but also don't block the gossip - # propagation of seemingly good blocks - trace "Block validated" - - if not(isNil(self.dag.onBlockGossipAdded)): - self.dag.onBlockGossipAdded(ForkedSignedBeaconBlock.init(signedBlock)) - - let blobs = - when typeof(signedBlock).kind >= ConsensusFork.Deneb: - if self.blobQuarantine[].hasBlobs(signedBlock): - Opt.some(self.blobQuarantine[].popBlobs(signedBlock.root, signedBlock)) - else: - discard self.quarantine[].addBlobless(self.dag.finalizedHead.slot, - signedBlock) - return v - else: - Opt.none(BlobSidecars) - - self.blockProcessor[].enqueueBlock( - src, ForkedSignedBeaconBlock.init(signedBlock), - blobs, - maybeFinalized = maybeFinalized, - validationDur = nanoseconds( - (self.getCurrentBeaconTime() - wallTime).nanoseconds)) - - # Validator monitor registration for blocks is done by the processor - beacon_blocks_received.inc() - beacon_block_delay.observe(delay.toFloatSeconds()) + self.blockProcessor[].dumpInvalidBlock(signedBlock) + + beacon_blocks_dropped.inc(1, [$error[0]]) + return err(error) + + # Block passed validation - enqueue it for processing. The block processing + # queue is effectively unbounded as we use a freestanding task to enqueue + # the block - this is done so that when blocks arrive concurrently with + # sync, we don't lose the gossip blocks, but also don't block the gossip + # propagation of seemingly good blocks + trace "Block validated" + + if not (isNil(self.dag.onBlockGossipAdded)): + self.dag.onBlockGossipAdded(ForkedSignedBeaconBlock.init(signedBlock)) + + when consensusFork in ConsensusFork.Fulu .. ConsensusFork.Gloas: + let sidecarsOpt = + self.dataColumnQuarantine[].popSidecars(signedBlock.root, signedBlock) + if sidecarsOpt.isNone(): + discard self.quarantine[].addSidecarless(self.dag.finalizedHead.slot, signedBlock) + return ok() + elif consensusFork in ConsensusFork.Deneb .. ConsensusFork.Electra: + let sidecarsOpt = self.blobQuarantine[].popSidecars(signedBlock.root, signedBlock) + if sidecarsOpt.isNone(): + self.quarantine[].addSidecarless(signedBlock) + return ok() + elif consensusFork in ConsensusFork.Phase0 .. 
ConsensusFork.Capella: + const sidecarsOpt = noSidecars else: - debug "Dropping block", error = v.error() + {.error: "Unknown fork " & $consensusFork.} - self.blockProcessor[].dumpInvalidBlock(signedBlock) + let validationDur = nanoseconds((self.getCurrentBeaconTime() - wallTime).nanoseconds) + self.blockProcessor.enqueueBlock( + src, signedBlock, sidecarsOpt, maybeFinalized, validationDur + ) - beacon_blocks_dropped.inc(1, [$v.error[0]]) + # Validator monitor registration for blocks is done by the processor + beacon_blocks_received.inc() + beacon_block_delay.observe(delay.toFloatSeconds()) - v + ok() proc processBlobSidecar*( self: var Eth2Processor, src: MsgSource, @@ -295,29 +313,102 @@ proc processBlobSidecar*( blob_sidecars_dropped.inc(1, [$v.error[0]]) return v - debug "Blob validated, putting in blob quarantine" - self.blobQuarantine[].put(newClone(blobSidecar)) - let block_root = hash_tree_root(block_header) - if (let o = self.quarantine[].popBlobless(block_root); o.isSome): - let blobless = o.unsafeGet() - withBlck(blobless): - when consensusFork >= ConsensusFork.Deneb: - if self.blobQuarantine[].hasBlobs(forkyBlck): - self.blockProcessor[].enqueueBlock( - MsgSource.gossip, blobless, - Opt.some(self.blobQuarantine[].popBlobs(block_root, forkyBlck))) + debug "Blob validated, putting in blob quarantine" + self.blobQuarantine[].put(block_root, newClone(blobSidecar)) + + if (let o = self.quarantine[].popSidecarless(block_root); o.isSome): + withBlck(o[]): + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + let bres = self.blobQuarantine[].popSidecars(block_root, forkyBlck) + if bres.isSome(): + self.blockProcessor.enqueueBlock(MsgSource.gossip, forkyBlck, bres) else: - discard self.quarantine[].addBlobless( - self.dag.finalizedHead.slot, forkyBlck) + self.quarantine[].addSidecarless(forkyBlck) else: - raiseAssert "Could not have been added as blobless" + raiseAssert "Could not be added as blobless" blob_sidecars_received.inc() blob_sidecar_delay.observe(delay.toFloatSeconds()) v +proc processDataColumnSidecar*( + self: var Eth2Processor, src: MsgSource, + dataColumnSidecar: fulu.DataColumnSidecar, + subnet_id: uint64): ValidationRes = + template block_header: untyped = dataColumnSidecar.signed_block_header.message + let + block_root = hash_tree_root(block_header) + wallTime = self.getCurrentBeaconTime() + (_, wallSlot) = wallTime.toSlot() + logScope: + dcs = shortLog(dataColumnSidecar) + wallSlot + # Potential under/overflows are fine; would just create odd metrics and logs + let delay = wallTime - block_header.slot.start_beacon_time + debug "Data column received", delay + + let v = + self.dag.validateDataColumnSidecar(self.quarantine, self.dataColumnQuarantine, + dataColumnSidecar, wallTime, subnet_id) + if v.isErr(): + debug "Dropping data column", error = v.error() + data_column_sidecars_dropped.inc(1, [$v.error[0]]) + return v + debug "Data column validated, putting data column in quarantine" + self.dataColumnQuarantine[].put(block_root, newClone(dataColumnSidecar)) + if (let o = self.quarantine[].popSidecarless(block_root); o.isSome): + withBlck(o[]): + when consensusFork >= ConsensusFork.Fulu and + consensusFork < ConsensusFork.Gloas: + let cres = + self.dataColumnQuarantine[].popSidecars(block_root, forkyBlck) + if cres.isSome(): + self.blockProcessor.enqueueBlock(MsgSource.gossip, forkyBlck, cres) + else: + discard self.quarantine[].addSidecarless( + self.dag.finalizedHead.slot, forkyBlck) + else: + raiseAssert "Could not be added as columnless" + + 
data_column_sidecars_received.inc() + data_column_sidecar_delay.observe(delay.toFloatSeconds()) + + v + +proc processDataColumnSidecar*( + self: var Eth2Processor, src: MsgSource, + dataColumnSidecar: gloas.DataColumnSidecar, + subnet_id: uint64): ValidationRes = + let + block_root = dataColumnSidecar.beacon_block_root + wallTime = self.getCurrentBeaconTime() + (_, wallSlot) = wallTime.toSlot() + + logScope: + dcs = shortLog(dataColumnSidecar) + wallSlot + + debug "Data column received (Gloas - quarantine not implemented)" + + let v = self.dag.validateDataColumnSidecar( + self.quarantine, self.dataColumnQuarantine, + dataColumnSidecar, wallTime, subnet_id) + + if v.isErr(): + debug "Dropping data column", error = v.error() + data_column_sidecars_dropped.inc(1, [$v.error[0]]) + return v + + debugGloasComment "" + # TODO: Implement quarantine logic for Gloas + # For now, just validate and drop + debug "Data column validated (not stored - quarantine TODO)" + + data_column_sidecars_received.inc() + v + proc setupDoppelgangerDetection*(self: var Eth2Processor, slot: Slot) = # When another client's already running, this is very likely to detect # potential duplicate validators, which can trigger slashing. @@ -364,8 +455,9 @@ proc checkForPotentialDoppelganger( proc processAttestation*( self: ref Eth2Processor, src: MsgSource, attestation: phase0.Attestation | SingleAttestation, - subnet_id: SubnetId, checkSignature, checkValidator: bool -): Future[ValidationRes] {.async: (raises: [CancelledError]).} = + subnet_id: SubnetId, checkSignature, checkValidator: bool, + fork: ConsensusFork): + Future[ValidationRes] {.async: (raises: [CancelledError]).} = var wallTime = self.getCurrentBeaconTime() let (afterGenesis, wallSlot) = wallTime.toSlot() @@ -379,12 +471,18 @@ proc processAttestation*( return errIgnore("Attestation before genesis") # Potential under/overflows are fine; would just create odd metrics and logs - let delay = wallTime - attestation.data.slot.attestation_deadline + let + timeConfig = self.dag.cfg.time + delay = wallTime - attestation.data.slot.attestation_deadline(timeConfig) debug "Attestation received", delay - # Now proceed to validation - let v = await self.attestationPool.validateAttestation( - self.batchCrypto, attestation, wallTime, subnet_id, checkSignature) + let v = when attestation is phase0.Attestation: + await self.attestationPool.validateAttestation( + self.batchCrypto, attestation, wallTime, subnet_id, checkSignature) + else: + await self.attestationPool.validateAttestation( + self.batchCrypto, attestation, wallTime, subnet_id, checkSignature, fork) + return if v.isOk(): # Due to async validation the wallTime here might have changed wallTime = self.getCurrentBeaconTime() @@ -422,7 +520,8 @@ proc processSignedAggregateAndProof*( self: ref Eth2Processor, src: MsgSource, signedAggregateAndProof: phase0.SignedAggregateAndProof | electra.SignedAggregateAndProof, - checkSignature = true, checkCover = true): Future[ValidationRes] + checkSignature = true, checkCover = true, + fork: ConsensusFork): Future[ValidationRes] {.async: (raises: [CancelledError]).} = var wallTime = self.getCurrentBeaconTime() let (afterGenesis, wallSlot) = wallTime.toSlot() @@ -440,14 +539,14 @@ proc processSignedAggregateAndProof*( # Potential under/overflows are fine; would just create odd logs let + timeConfig = self.dag.cfg.time slot = signedAggregateAndProof.message.aggregate.data.slot - delay = wallTime - slot.aggregate_deadline + delay = wallTime - slot.aggregate_deadline(timeConfig) debug 
"Aggregate received", delay - let v = - await self.attestationPool.validateAggregate( - self.batchCrypto, signedAggregateAndProof, wallTime, - checkSignature = checkSignature, checkCover = checkCover) + let v = await self.attestationPool.validateAggregate( + self.batchCrypto, signedAggregateAndProof, wallTime, + checkSignature = checkSignature, checkCover = checkCover, fork) return if v.isOk(): # Due to async validation the wallTime here might have changed @@ -504,6 +603,15 @@ proc processBlsToExecutionChange*( return v +proc checkKnownValidatorSlashing( + self: var Eth2Processor, + msg: ProposerSlashing | phase0.AttesterSlashing | electra.AttesterSlashing) = + for idx in getValidatorIndices(msg): + let i = ValidatorIndex.init(idx).valueOr: + continue + if self.blockProcessor[].consensusManager[].actionTracker.knownValidators.hasKey(i): + quitSlashing() + proc processAttesterSlashing*( self: var Eth2Processor, src: MsgSource, attesterSlashing: phase0.AttesterSlashing | electra.AttesterSlashing): @@ -518,6 +626,8 @@ proc processAttesterSlashing*( if v.isOk(): trace "Attester slashing validated" + self.checkKnownValidatorSlashing(attesterSlashing) + self.validatorChangePool[].addMessage(attesterSlashing) self.validatorMonitor[].registerAttesterSlashing(src, attesterSlashing) @@ -541,6 +651,8 @@ proc processProposerSlashing*( if v.isOk(): trace "Proposer slashing validated" + self.checkKnownValidatorSlashing(proposerSlashing) + self.validatorChangePool[].addMessage(proposerSlashing) self.validatorMonitor[].registerProposerSlashing(src, proposerSlashing) @@ -591,7 +703,10 @@ proc processSyncCommitteeMessage*( wallSlot # Potential under/overflows are fine; would just create odd metrics and logs - let delay = wallTime - syncCommitteeMsg.slot.sync_committee_message_deadline + let + timeConfig = self.dag.cfg.time + slot = syncCommitteeMsg.slot + delay = wallTime - slot.sync_committee_message_deadline(timeConfig) debug "Sync committee message received", delay # Now proceed to validation @@ -639,8 +754,9 @@ proc processSignedContributionAndProof*( # Potential under/overflows are fine; would just create odd metrics and logs let + timeConfig = self.dag.cfg.time slot = contributionAndProof.message.contribution.slot - delay = wallTime - slot.sync_contribution_deadline + delay = wallTime - slot.sync_contribution_deadline(timeConfig) debug "Contribution received", delay # Now proceed to validation diff --git a/beacon_chain/gossip_processing/gossip_validation.nim b/beacon_chain/gossip_processing/gossip_validation.nim index e0f3e7e851..42c936da9b 100644 --- a/beacon_chain/gossip_processing/gossip_validation.nim +++ b/beacon_chain/gossip_processing/gossip_validation.nim @@ -5,21 +5,20 @@ # * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} import # Status chronicles, chronos, metrics, results, kzg4844/[kzg, kzg_abi], - stew/byteutils, # Internals ../spec/[ beaconstate, state_transition_block, forks, helpers, network, signatures, peerdas_helpers], ../consensus_object_pools/[ attestation_pool, blockchain_dag, blob_quarantine, block_quarantine, - data_column_quarantine, spec_cache, light_client_pool, sync_committee_msg_pool, + spec_cache, light_client_pool, sync_committee_msg_pool, validator_change_pool], ".."/[beacon_clock], ./batch_validation @@ -52,7 +51,7 @@ type template errIgnore*(msg: cstring): untyped = err((ValidationResult.Ignore, cstring msg)) template errReject*(msg: cstring): untyped = - err((ValidationResult.Reject, cstring msg)) + err((ValidationResult.Reject, msg)) # Internal checks # ---------------------------------------------------------------- @@ -105,7 +104,7 @@ func check_propagation_slot_range( pastSlot.slot: return errIgnore("Attestation slot in the past") else: - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#beacon_attestation_subnet_id + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/deneb/p2p-interface.md#beacon_attestation_subnet_id # "[IGNORE] the epoch of attestation.data.slot is either the current or # previous epoch (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. # compute_epoch_at_slot(attestation.data.slot) in @@ -135,7 +134,7 @@ func check_slot_exact(msgSlot: Slot, wallTime: BeaconTime): ok(msgSlot) -func check_beacon_and_target_block( +proc check_beacon_and_target_block( pool: var AttestationPool, data: AttestationData): Result[BlockSlot, ValidationError] = # The block being voted for (data.beacon_block_root) passes validation - by @@ -206,12 +205,12 @@ func check_blob_sidecar_inclusion_proof( blob_sidecar: deneb.BlobSidecar): Result[void, ValidationError] = let res = blob_sidecar.verify_blob_sidecar_inclusion_proof() if res.isErr: - return errReject(res.error) + return errReject(cstring res.error) ok() func check_data_column_sidecar_inclusion_proof( - data_column_sidecar: DataColumnSidecar): Result[void, ValidationError] = + data_column_sidecar: fulu.DataColumnSidecar): Result[void, ValidationError] = let res = data_column_sidecar.verify_data_column_sidecar_inclusion_proof() if res.isErr: return errReject(res.error) @@ -219,7 +218,8 @@ func check_data_column_sidecar_inclusion_proof( ok() proc check_data_column_sidecar_kzg_proofs( - data_column_sidecar: DataColumnSidecar): Result[void, ValidationError] = + data_column_sidecar: fulu.DataColumnSidecar | gloas.DataColumnSidecar): + Result[void, ValidationError] = let res = data_column_sidecar.verify_data_column_sidecar_kzg_proofs() if res.isErr: return errReject(res.error) @@ -294,17 +294,21 @@ template checkedReject( pool.dag.checkedReject(error) func getMaxBlobsPerBlock(cfg: RuntimeConfig, slot: Slot): uint64 = - if slot >= cfg.ELECTRA_FORK_EPOCH.start_slot: + let epoch = slot.epoch + if epoch >= cfg.FULU_FORK_EPOCH: + get_blob_parameters(cfg, epoch).MAX_BLOBS_PER_BLOCK + elif epoch >= cfg.ELECTRA_FORK_EPOCH: cfg.MAX_BLOBS_PER_BLOCK_ELECTRA else: cfg.MAX_BLOBS_PER_BLOCK +debugGloasComment "" template validateBeaconBlockBellatrix( - _: phase0.SignedBeaconBlock | altair.SignedBeaconBlock, + _: phase0.SignedBeaconBlock | altair.SignedBeaconBlock | gloas.SignedBeaconBlock, _: BlockRef): untyped = discard -# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/p2p-interface.md#beacon_block +# 
https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/bellatrix/p2p-interface.md#beacon_block template validateBeaconBlockBellatrix( signed_beacon_block: bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock | @@ -358,11 +362,12 @@ template validateBeaconBlockBellatrix( # cannot occur here, because Nimbus's optimistic sync waits for either # `ACCEPTED` or `SYNCING` from the EL to get this far. +debugGloasComment "" template validateBeaconBlockDeneb( _: ChainDAGRef, _: phase0.SignedBeaconBlock | altair.SignedBeaconBlock | - bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock, + bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock | gloas.SignedBeaconBlock, _: BeaconTime): untyped = discard @@ -377,8 +382,10 @@ template validateBeaconBlockDeneb( # [REJECT] The length of KZG commitments is less than or equal to the # limitation defined in Consensus Layer -- i.e. validate that # len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK + let blob_params = + dag.cfg.get_blob_parameters(signed_beacon_block.message.slot.epoch()) if not (lenu64(signed_beacon_block.message.body.blob_kzg_commitments) <= - dag.cfg.getMaxBlobsPerBlock(signed_beacon_block.message.slot)): + blob_params.MAX_BLOBS_PER_BLOCK): return dag.checkedReject("validateBeaconBlockDeneb: too many blob commitments") # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id @@ -424,7 +431,7 @@ proc validateBlobSidecar*( if dag.getBlockRef(block_root).isSome(): return errIgnore("BlobSidecar: already have block") - # This adds KZG commitment matching to the spec gossip validation. It's an + # This adds block root matching to the spec gossip validation. It's an # IGNORE condition, so it shouldn't affect Nimbus's scoring, and when some # (slashable) double proposals happen with blobs present, without this one # or the other block, or potentially both, won't get its full set of blobs @@ -434,9 +441,51 @@ proc validateBlobSidecar*( # # It would be good to fix this more properly, but this has come up often on # Pectra devnet-6. - if blobQuarantine[].hasBlob( - block_header.slot, block_header.proposer_index, blob_sidecar.index, - blob_sidecar.kzg_commitment): + # + # Detailed explanation: + # + # There were regular double-proposer, slashable events (some of which got + # slashed, but that takes at least a couple of slots typically to be noticed, + # it's not instant). What would happen is, Nimbus would be going fine, + # following the chain, until one of these double proposals came up. + # Each had, independently, some set of blobs: + # + # * separately valid block 1, with a set of valid blobs; and + # * separately valid block 2, with a set of valid blobs (different than the + # first set, created by a different node). + # + # Both of these proposals shared a slot and proposer index, because they were + # the same proposer. Indeed, the signatures were all valid too, because, well, + # they were both legitimately running that private key. + # + # But what would happen is, + # * if block 1's blobs came in, and block 1 came in, and block 1 turned out + # to be the one the chain followed, then, great, the IGNORE condition here + # worked fine (WLOG extend to block 2); but + # * if the blobs came in interleaved, this wasn't always true, and, + # crucially, this gossip condition as spec-written prevented Nimbus's + # gossip from being able to collect all the blobs from block 1. 
+ # + # Maybe other clients did/do this by having a very efficient + # request manager-equivalent, I'm not sure. But without something, either + # receiving via gossip or req/resp, Nimbus just got stuck until a suitable + # reorg happened, typically dozens of slots later, because this gossip + # condition prevented it from seeing all the blobs corresponding to either + # block. + # + # Also, it would be basically random chance which, if asked by req/resp, + # of the two different (or more, but the devnet-6 case was two slashable + # blocks at a time) sets of blobs would be returned, so it seemed to + # sometimes have to retry this. All of this took enough time Nimbus lost the + # chain basically deterministically every time this slashable double-proposal + # situation came up. + # + # I don't see anything obviously corresponding to this in the tests, either, + # to show this is otherwise addressed. + + if blobQuarantine[].hasSidecar(block_root, block_header.slot, + block_header.proposer_index, + blob_sidecar.index): return errIgnore("BlobSidecar: already have valid blob from same proposer") # [REJECT] The sidecar's inclusion proof is valid as verified by @@ -522,9 +571,10 @@ proc validateBlobSidecar*( return dag.checkedReject("BlobSidecar: blob invalid") # Send notification about new blob sidecar via callback - if not(isNil(blobQuarantine.onBlobSidecarCallback)): - blobQuarantine.onBlobSidecarCallback BlobSidecarInfoObject( - block_root: hash_tree_root(blob_sidecar.signed_block_header.message), + let onBlobSidecarCallback = blobQuarantine[].onBlobSidecarCallback() + if not(isNil(onBlobSidecarCallback)): + onBlobSidecarCallback BlobSidecarInfoObject( + block_root: block_root, index: blob_sidecar.index, slot: blob_sidecar.signed_block_header.message.slot, kzg_commitment: blob_sidecar.kzg_commitment, @@ -533,20 +583,20 @@ proc validateBlobSidecar*( ok() -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#data_column_sidecar_subnet_id +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/p2p-interface.md#data_column_sidecar_subnet_id proc validateDataColumnSidecar*( dag: ChainDAGRef, quarantine: ref Quarantine, - dataColumnQuarantine: ref DataColumnQuarantine, - data_column_sidecar: DataColumnSidecar, + dataColumnQuarantine: ref ColumnQuarantine, + data_column_sidecar: fulu.DataColumnSidecar, wallTime: BeaconTime, subnet_id: uint64): Result[void, ValidationError] = template block_header: untyped = data_column_sidecar.signed_block_header.message - - # [REJECT] The sidecar's index is consistent with `NUMBER_OF_COLUMNS` - # -- i.e. `data_column_sidecar.index < NUMBER_OF_COLUMNS` - if not (data_column_sidecar.index < NUMBER_OF_COLUMNS): - return dag.checkedReject("DataColumnSidecar: The sidecar's index should be consistent with NUMBER_OF_COLUMNS") + # [REJECT] The sidecar is valid as verified by verify_data_column_sidecar(sidecar) + block: + let v = verify_data_column_sidecar(data_column_sidecar) + if v.isErr: + return dag.checkedReject(v.error) # [REJECT] The sidecar is for the correct subnet # -- i.e. `compute_subnet_for_data_column_sidecar(blob_sidecar.index) == subnet_id`. @@ -571,10 +621,8 @@ proc validateDataColumnSidecar*( # (block_header.slot, block_header.proposer_index, data_column_sidecar.index) # with valid header signature, sidecar inclusion proof, and kzg proof. 
let block_root = hash_tree_root(block_header) - if dag.getBlockRef(block_root).isSome(): - return errIgnore("DataColumnSidecar: already have block") - if dataColumnQuarantine[].hasDataColumn( - block_header.slot, block_header.proposer_index, data_column_sidecar.index): + if dataColumnQuarantine[].hasSidecar( + block_root, block_header.slot, block_header.proposer_index, data_column_sidecar.index): return errIgnore("DataColumnSidecar: already have valid data column from same proposer") # [REJECT] The sidecar's `kzg_commitments` inclusion proof is valid as verified by @@ -656,8 +704,70 @@ proc validateDataColumnSidecar*( return dag.checkedReject(r.error) # Send notification about new data column sidecar via callback - if not(isNil(dataColumnQuarantine.onDataColumnSidecarCallback)): - dataColumnQuarantine.onDataColumnSidecarCallback(data_column_sidecar) + let onDataColumnSidecarCallback = + dataColumnQuarantine[].onDataColumnSidecarCallback() + + if not(isNil(onDataColumnSidecarCallback)): + onDataColumnSidecarCallback DataColumnSidecarInfoObject( + block_root: block_root, + index: data_column_sidecar.index, + slot: data_column_sidecar.signed_block_header.message.slot, + kzg_commitments: data_column_sidecar.kzg_commitments) + + ok() + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/p2p-interface.md#data_column_sidecar_subnet_id +proc validateDataColumnSidecar*( + dag: ChainDAGRef, quarantine: ref Quarantine, + dataColumnQuarantine: ref ColumnQuarantine, + data_column_sidecar: gloas.DataColumnSidecar, + wallTime: BeaconTime, subnet_id: uint64): + Result[void, ValidationError] = + + # [REJECT] The sidecar is valid as verified by verify_data_column_sidecar + block: + let v = verify_data_column_sidecar(data_column_sidecar) + if v.isErr: + return dag.checkedReject(v.error) + + # [REJECT] The sidecar is for the correct subnet + if not (compute_subnet_for_data_column_sidecar(data_column_sidecar.index) == + subnet_id): + return dag.checkedReject("DataColumnSidecar: not for correct subnet") + + # [IGNORE] Modified from Fulu: The sidecar is the first sidecar for the tuple + # (sidecar.beacon_block_root, sidecar.index) with valid kzg proof. + let block_root = data_column_sidecar.beacon_block_root + if dataColumnQuarantine[].hasSidecar(block_root, data_column_sidecar.index): + return errIgnore("DataColumnSidecar: already have valid data column") + + debugGloasComment "" + # [IGNORE] The sidecar's beacon_block_root has been seen via a valid signed + # execution payload header (builder's bid). + # + # [REJECT] The hash of the sidecar's kzg_commitments matches the + # blob_kzg_commitments_root in the corresponding builder's bid for + # sidecar.beacon_block_root. + # + # TODO: Implement getExecutionPayloadBid(block_root) + # This requires storing bids received via execution_payload_bid gossip topic, + # indexed by the beacon block root they commit to. 
+ + # [REJECT] The sidecar's column data is valid + block: + let r = check_data_column_sidecar_kzg_proofs(data_column_sidecar) + if r.isErr: + return dag.checkedReject(r.error) + + # Send notification about new data column sidecar via callback + let onDataColumnSidecarCallback = + dataColumnQuarantine[].onDataColumnSidecarCallback() + + if not(isNil(onDataColumnSidecarCallback)): + onDataColumnSidecarCallback DataColumnSidecarInfoObject( + block_root: block_root, + index: data_column_sidecar.index, + kzg_commitments: data_column_sidecar.kzg_commitments) ok() @@ -1035,7 +1145,8 @@ proc validateAttestation*( batchCrypto: ref BatchCrypto, attestation: SingleAttestation, wallTime: BeaconTime, - subnet_id: SubnetId, checkSignature: bool): + subnet_id: SubnetId, checkSignature: bool, + consensusFork: ConsensusFork): Future[Result[ tuple[attesting_index: ValidatorIndex, beacon_committee_len: int, index_in_committee: int, sig: CookedSig], @@ -1063,15 +1174,14 @@ proc validateAttestation*( # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#beacon_attestation_subnet_id # modifies this for Deneb and newer forks. block: - let v = check_propagation_slot_range( - pool.dag.cfg.consensusForkAtEpoch(wallTime.slotOrZero.epoch), slot, - wallTime) + let v = check_propagation_slot_range(consensusFork, slot, wallTime) if v.isErr(): # [IGNORE] return err(v.error()) # [REJECT] attestation.data.index == 0 - if not (attestation.data.index == 0): - return pool.checkedReject("SingleAttestation: attestation.data.index != 0") + if consensusFork < ConsensusFork.Gloas: + if not (attestation.data.index == 0): + return pool.checkedReject("SingleAttestation: attestation.data.index != 0") # The block being voted for (attestation.data.beacon_block_root) has been seen # (via both gossip and non-gossip sources) (a client MAY queue attestations @@ -1085,6 +1195,18 @@ proc validateAttestation*( return pool.checkedResult(v.error) v.get() + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/p2p-interface.md#beacon_attestation_subnet_id + if consensusFork >= ConsensusFork.Gloas: + # [REJECT] attestation.data.index < 2 + if not (attestation.data.index < 2): + return pool.checkedReject("SingleAttestation: index must be < 2 in Gloas") + + # [REJECT] attestation.data.index == 0 if block.slot == attestation.data.slot + if target.blck.bid.slot == attestation.data.slot: + if not (attestation.data.index == 0): + return pool.checkedReject( + "SingleAttestation: same-slot attestation must have index 0") + if attestation.attester_index > high(ValidatorIndex).uint64: return errReject("SingleAttestation: attester index too high") let validator_index = attestation.attester_index.ValidatorIndex @@ -1194,11 +1316,13 @@ proc validateAttestation*( # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#beacon_aggregate_and_proof # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/p2p-interface.md#beacon_aggregate_and_proof +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/p2p-interface.md#beacon_aggregate_and_proof proc validateAggregate*( pool: ref AttestationPool, batchCrypto: ref BatchCrypto, signedAggregateAndProof: phase0.SignedAggregateAndProof | electra.SignedAggregateAndProof, - wallTime: BeaconTime, checkSignature = true, checkCover = true): + wallTime: 
BeaconTime, checkSignature = true, checkCover = true, + consensusFork: ConsensusFork): Future[Result[ tuple[attestingIndices: seq[ValidatorIndex], sig: CookedSig], ValidationError]] {.async: (raises: [CancelledError]).} = @@ -1231,9 +1355,7 @@ proc validateAggregate*( # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#beacon_aggregate_and_proof # modifies this for Deneb and newer forks. block: - let v = check_propagation_slot_range( - pool.dag.cfg.consensusForkAtEpoch(wallTime.slotOrZero.epoch), slot, - wallTime) + let v = check_propagation_slot_range(consensusFork, slot, wallTime) if v.isErr(): # [IGNORE] return err(v.error()) @@ -1276,6 +1398,18 @@ proc validateAggregate*( return pool.checkedResult(v.error) v.get() + when signedAggregateAndProof is electra.SignedAggregateAndProof: + if consensusFork >= ConsensusFork.Gloas: + # [REJECT] aggregate.data.index < 2 + if not (aggregate.data.index < 2): + return pool.checkedReject("Aggregate: index must be < 2 in Gloas") + + # [REJECT] aggregate.data.index == 0 if block.slot == aggregate.data.slot + if target.blck.bid.slot == aggregate.data.slot: + if not (aggregate.data.index == 0): + return pool.checkedReject( + "Aggregate: same-slot aggregate must have index 0") + let shufflingRef = pool.dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr: @@ -1843,7 +1977,8 @@ proc validateLightClientFinalityUpdate*( else: GENESIS_SLOT currentTime = wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY - forwardTime = signature_slot.light_client_finality_update_time + forwardTime = signature_slot + .light_client_finality_update_time(dag.cfg.time) if currentTime < forwardTime: # [IGNORE] The `finality_update` is received after the block at # `signature_slot` was given enough time to propagate through the network. @@ -1880,7 +2015,8 @@ proc validateLightClientOptimisticUpdate*( else: GENESIS_SLOT currentTime = wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY - forwardTime = signature_slot.light_client_optimistic_update_time + forwardTime = signature_slot + .light_client_optimistic_update_time(dag.cfg.time) if currentTime < forwardTime: # [IGNORE] The `optimistic_update` is received after the block at # `signature_slot` was given enough time to propagate through the network. 
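Reviewer note on the `validateAttestation` / `validateAggregate` changes above: the new explicit `consensusFork` parameter replaces the previous lookup via `consensusForkAtEpoch(wallTime.slotOrZero.epoch)`, and from Gloas onward the committee-index rule changes — `attestation.data.index` must be `< 2`, and must still be `0` when the attestation votes for a block from its own slot, whereas before Gloas it must always be `0`. A minimal sketch of that predicate, using simplified stand-in types (a toy `SketchFork` enum and plain integers rather than the real `AttestationData`/`BlockRef`), not the actual Nimbus signatures:

# Sketch only: illustrates the index rule added for Gloas above,
# with hypothetical stand-in types.
type
  SketchFork = enum
    Electra, Gloas

func attestationIndexOk(fork: SketchFork;
                        dataIndex, dataSlot, blockSlot: uint64): bool =
  if fork < Gloas:
    # Pre-Gloas: [REJECT] attestation.data.index == 0
    return dataIndex == 0
  # Gloas: [REJECT] attestation.data.index < 2
  if dataIndex >= 2:
    return false
  # Gloas: [REJECT] index == 0 when the vote is for a block from the same slot
  if dataSlot == blockSlot:
    return dataIndex == 0
  true

when isMainModule:
  doAssert attestationIndexOk(Electra, 0, 10, 10)
  doAssert not attestationIndexOk(Electra, 1, 10, 10)
  doAssert attestationIndexOk(Gloas, 1, 11, 10)      # later-slot vote may use index 1
  doAssert not attestationIndexOk(Gloas, 1, 10, 10)  # same-slot vote must keep index 0
  doAssert not attestationIndexOk(Gloas, 2, 11, 10)  # index must be < 2

Keeping the rule as a pure function of fork, index and the two slots makes the gossip condition easy to unit-test independently of pool and DAG state; the real validators additionally route the result through `checkedReject` so scoring behaves as in the rest of this file.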
diff --git a/beacon_chain/gossip_processing/light_client_processor.nim b/beacon_chain/gossip_processing/light_client_processor.nim index 666c3bc1e4..a5f0f7d756 100644 --- a/beacon_chain/gossip_processing/light_client_processor.nim +++ b/beacon_chain/gossip_processing/light_client_processor.nim @@ -178,12 +178,12 @@ proc dumpInvalidObject( proc dumpObject[T]( self: LightClientProcessor, obj: SomeForkyLightClientObject, - res: Result[T, VerifierError]) = + res: Result[T, LightClientVerifierError]) = if self.dumpEnabled and res.isErr: case res.error - of VerifierError.Invalid: + of LightClientVerifierError.Invalid: self.dumpInvalidObject(obj) - of VerifierError.MissingParent: + of LightClientVerifierError.MissingParent: dump(self.dumpDirIncoming, obj) else: discard @@ -212,15 +212,15 @@ proc tryForceUpdate( proc doProcessObject( self: var LightClientProcessor, bootstrap: ForkedLightClientBootstrap, - wallTime: BeaconTime): Result[void, VerifierError] = + wallTime: BeaconTime): Result[void, LightClientVerifierError] = if bootstrap.kind == LightClientDataFork.None: - err(VerifierError.Invalid) + err(LightClientVerifierError.Invalid) elif self.store[].kind > LightClientDataFork.None: - err(VerifierError.Duplicate) + err(LightClientVerifierError.Duplicate) else: let trustedBlockRoot = self.getTrustedBlockRoot() if trustedBlockRoot.isNone: - err(VerifierError.MissingParent) + err(LightClientVerifierError.MissingParent) else: withForkyBootstrap(bootstrap): when lcDataFork > LightClientDataFork.None: @@ -237,11 +237,11 @@ proc doProcessObject( proc doProcessObject( self: var LightClientProcessor, update: SomeForkedLightClientUpdate, - wallTime: BeaconTime): Result[void, VerifierError] = + wallTime: BeaconTime): Result[void, LightClientVerifierError] = if update.kind == LightClientDataFork.None: - err(VerifierError.Invalid) + err(LightClientVerifierError.Invalid) elif self.store[].kind == LightClientDataFork.None: - err(VerifierError.MissingParent) + err(LightClientVerifierError.MissingParent) else: withForkyObject(update): when lcDataFork > LightClientDataFork.None: @@ -263,7 +263,7 @@ proc doProcessObject( proc processObject( self: var LightClientProcessor, obj: SomeForkedLightClientObject, - wallTime: BeaconTime): Result[void, VerifierError] = + wallTime: BeaconTime): Result[void, LightClientVerifierError] = let res = self.doProcessObject(obj, wallTime) withForkyObject(obj): @@ -282,7 +282,7 @@ proc processObject( # If none is made available within reasonable time, light client # is force-updated with best known data to ensure sync progress. case res.error - of VerifierError.Duplicate: + of LightClientVerifierError.Duplicate: if wallTime >= self.lastDuplicateTick + duplicateRateLimit: if self.numDupsSinceProgress < minForceUpdateDuplicates: let upgradedObj = obj.migratingToDataFork(lcDataFork) @@ -391,7 +391,7 @@ template withReportedProgress(body: untyped): bool = proc storeObject*( self: var LightClientProcessor, src: MsgSource, wallTime: BeaconTime, - obj: SomeForkedLightClientObject): Result[bool, VerifierError] = + obj: SomeForkedLightClientObject): Result[bool, LightClientVerifierError] = ## storeObject is the main entry point for unvalidated light client objects - ## all untrusted objects pass through here. 
When storing an object, we will ## update the `LightClientStore` accordingly @@ -454,7 +454,7 @@ proc addObject*( self: var LightClientProcessor, src: MsgSource, obj: SomeForkedLightClientObject, - resfut: Future[Result[void, VerifierError]].Raising([CancelledError]) = nil) = + resfut: Future[Result[void, LightClientVerifierError]].Raising([CancelledError]) = nil) = ## Enqueue a Gossip-validated light client object for verification # Backpressure: # Only one object is validated at any time - @@ -492,16 +492,16 @@ proc addObject*( if resfut != nil: if res.isOk: - resfut.complete(Result[void, VerifierError].ok()) + resfut.complete(Result[void, LightClientVerifierError].ok()) else: - resfut.complete(Result[void, VerifierError].err(res.error)) + resfut.complete(Result[void, LightClientVerifierError].err(res.error)) # Message validators # ------------------------------------------------------------------------------ func toValidationError( self: var LightClientProcessor, - r: Result[bool, VerifierError], + r: Result[bool, LightClientVerifierError], wallTime: BeaconTime, obj: SomeForkedLightClientObject): Result[void, ValidationError] = if r.isOk: @@ -514,7 +514,8 @@ func toValidationError( else: GENESIS_SLOT currentTime = wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY - forwardTime = signature_slot.light_client_finality_update_time + forwardTime = signature_slot + .light_client_finality_update_time(self.cfg.time) if currentTime < forwardTime: # [IGNORE] The `finality_update` is received after the block # at `signature_slot` was given enough time to propagate through @@ -536,18 +537,18 @@ func toValidationError( errIgnore(typeof(obj).name & ": no significant progress") else: case r.error - of VerifierError.Invalid: + of LightClientVerifierError.Invalid: # [REJECT] The `finality_update` is valid. # [REJECT] The `optimistic_update` is valid. 
- errReject($r.error) - of VerifierError.MissingParent, - VerifierError.UnviableFork, - VerifierError.Duplicate: + errReject(typeof(obj).name & ": invalid") + of LightClientVerifierError.MissingParent, + LightClientVerifierError.UnviableFork, + LightClientVerifierError.Duplicate: # [IGNORE] The `finalized_header.beacon.slot` is greater than that of # all previously forwarded `finality_update`s # [IGNORE] The `attested_header.beacon.slot` is greater than that of all # previously forwarded `optimistic_update`s - errIgnore($r.error) + errIgnore(typeof(obj).name & ": duplicate") # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update proc processLightClientFinalityUpdate*( @@ -564,7 +565,7 @@ proc processLightClientFinalityUpdate*( self.latestFinalityUpdate = finality_update.toOptimistic v -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/light-client/sync-protocol.md#process_light_client_optimistic_update proc processLightClientOptimisticUpdate*( self: var LightClientProcessor, src: MsgSource, optimistic_update: ForkedLightClientOptimisticUpdate diff --git a/beacon_chain/gossip_processing/optimistic_processor.nim b/beacon_chain/gossip_processing/optimistic_processor.nim index 9721a10bb1..972efc293c 100644 --- a/beacon_chain/gossip_processing/optimistic_processor.nim +++ b/beacon_chain/gossip_processing/optimistic_processor.nim @@ -26,14 +26,17 @@ type ): Future[void] {.async: (raises: [CancelledError]).} OptimisticProcessor* = ref object + timeConfig: TimeConfig getBeaconTime: GetBeaconTimeFn optimisticVerifier: OptimisticBlockVerifier processFut: Future[void].Raising([CancelledError]) proc initOptimisticProcessor*( + timeConfig: TimeConfig, getBeaconTime: GetBeaconTimeFn, optimisticVerifier: OptimisticBlockVerifier): OptimisticProcessor = OptimisticProcessor( + timeConfig: timeConfig, getBeaconTime: getBeaconTime, optimisticVerifier: optimisticVerifier) diff --git a/beacon_chain/libnimbus_lc/libnimbus_lc.h b/beacon_chain/libnimbus_lc/libnimbus_lc.h index a1f8e009b1..3866899d8a 100644 --- a/beacon_chain/libnimbus_lc/libnimbus_lc.h +++ b/beacon_chain/libnimbus_lc/libnimbus_lc.h @@ -94,7 +94,7 @@ typedef struct ETHConsensusConfig ETHConsensusConfig; * based on the given `config.yaml` file content - If successful. * @return `NULL` - If the given `config.yaml` is malformed or incompatible. * - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/configs/README.md + * @see https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/configs/README.md */ ETH_RESULT_USE_CHECK ETHConsensusConfig *_Nullable ETHConsensusConfigCreateFromYaml(const char *configFileContent); @@ -150,7 +150,7 @@ typedef struct ETHBeaconState ETHBeaconState; * @return `NULL` - If the given `sszBytes` is malformed. 
* * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#beaconstate - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#beaconstate + * @see https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/capella/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/configs/README.md @@ -326,7 +326,7 @@ typedef struct ETHLightClientStore ETHLightClientStore; * @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap * @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/light-client.md - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/weak-subjectivity.md#weak-subjectivity-period + * @see https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/weak-subjectivity.md#weak-subjectivity-period */ ETH_RESULT_USE_CHECK ETHLightClientStore *_Nullable ETHLightClientStoreCreateFromBootstrap( @@ -634,7 +634,7 @@ const ETHLightClientHeader *ETHLightClientStoreGetOptimisticHeader( * * @return Light client store safety threshold. * - * @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#get_safety_threshold + * @see https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/light-client/sync-protocol.md#get_safety_threshold */ ETH_RESULT_USE_CHECK int ETHLightClientStoreGetSafetyThreshold(const ETHLightClientStore *store); @@ -775,7 +775,7 @@ const ETHRoot *ETHBeaconBlockHeaderGetBodyRoot(const ETHBeaconBlockHeader *beaco * * @return Pointer to a copy of the given header's execution block hash. 
* - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/deneb/beacon-chain.md#executionpayloadheader + * @see https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/deneb/beacon-chain.md#executionpayloadheader */ ETH_RESULT_USE_CHECK ETHRoot *ETHLightClientHeaderCopyExecutionHash( diff --git a/beacon_chain/libnimbus_lc/libnimbus_lc.nim b/beacon_chain/libnimbus_lc/libnimbus_lc.nim index b18b1a32a6..51d3dbdd05 100644 --- a/beacon_chain/libnimbus_lc/libnimbus_lc.nim +++ b/beacon_chain/libnimbus_lc/libnimbus_lc.nim @@ -17,7 +17,7 @@ import json_rpc/jsonmarshal, secp256k1, web3/[engine_api_types, eth_api_types, conversions], - ../el/[engine_api_conversions, eth1_chain], + ../el/engine_api_conversions, ../spec/eth2_apis/[eth2_rest_serialization, rest_light_client_calls], ../spec/[helpers, light_client_sync], ../sync/light_client_sync_helpers, @@ -142,10 +142,10 @@ proc ETHBeaconStateCreateFromSsz( ## ## See: ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#beaconstate + ## * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/configs/README.md + ## * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/configs/README.md let consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr: return nil @@ -243,10 +243,11 @@ proc ETHBeaconClockCreateFromState( ## Returns: ## * Pointer to an initialized beacon clock based on the beacon state or ## NULL if the state contained an invalid time. - let beaconClock = BeaconClock.new() - beaconClock[] = - BeaconClock.init(getStateField(state[], genesis_time)).valueOr: - return nil + let + genesisTime = getStateField(state[], genesis_time) + beaconClock = BeaconClock.new() + beaconClock[] = BeaconClock.init(cfg[].time, genesisTime).valueOr: + return nil beaconClock.toUnmanagedPtr() proc ETHBeaconClockDestroy(beaconClock: ptr BeaconClock) {.exported.} = @@ -269,7 +270,7 @@ proc ETHBeaconClockGetSlot(beaconClock: ptr BeaconClock): cint {.exported.} = ## * `0` - If genesis is still pending. ## ## See: - ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#custom-types + ## * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#custom-types beaconClock[].now().slotOrZero().cint const lcDataFork = LightClientDataFork.high @@ -466,6 +467,7 @@ proc ETHLightClientStoreGetMillisecondsToNextSyncTask( ## * Number of milliseconds until `ETHLightClientStoreGetNextSyncTask` ## should be called again to obtain the next light client sync task. 
asRef(rng).nextLcSyncTaskDelay( + beaconClock[].timeConfig, wallTime = beaconClock[].now(), finalized = store[].finalized_header.beacon.slot.sync_committee_period, optimistic = store[].optimistic_header.beacon.slot.sync_committee_period, @@ -537,13 +539,13 @@ proc ETHLightClientStoreProcessUpdatesByRange( didProgress = true else: case res.error - of VerifierError.MissingParent: + of LightClientVerifierError.MissingParent: break - of VerifierError.Duplicate: + of LightClientVerifierError.Duplicate: discard - of VerifierError.UnviableFork: + of LightClientVerifierError.UnviableFork: break - of VerifierError.Invalid: + of LightClientVerifierError.Invalid: return 1 if not didProgress: return 2 @@ -625,13 +627,13 @@ proc ETHLightClientStoreProcessFinalityUpdate( 0 else: case res.error - of VerifierError.MissingParent: + of LightClientVerifierError.MissingParent: 2 - of VerifierError.Duplicate: + of LightClientVerifierError.Duplicate: 2 - of VerifierError.UnviableFork: + of LightClientVerifierError.UnviableFork: 2 - of VerifierError.Invalid: + of LightClientVerifierError.Invalid: 1 proc ETHLightClientStoreProcessOptimisticUpdate( @@ -710,13 +712,13 @@ proc ETHLightClientStoreProcessOptimisticUpdate( 0 else: case res.error - of VerifierError.MissingParent: + of LightClientVerifierError.MissingParent: 2 - of VerifierError.Duplicate: + of LightClientVerifierError.Duplicate: 2 - of VerifierError.UnviableFork: + of LightClientVerifierError.UnviableFork: 2 - of VerifierError.Invalid: + of LightClientVerifierError.Invalid: 1 func ETHLightClientStoreGetFinalizedHeader( @@ -735,7 +737,7 @@ func ETHLightClientStoreGetFinalizedHeader( ## * Latest finalized header. ## ## See: - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/light-client/sync-protocol.md#modified-lightclientheader + ## * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/capella/light-client/sync-protocol.md#modified-lightclientheader addr store[].finalized_header func ETHLightClientStoreIsNextSyncCommitteeKnown( @@ -755,7 +757,7 @@ func ETHLightClientStoreIsNextSyncCommitteeKnown( ## ## See: ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/light-client.md + ## * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/altair/light-client/light-client.md store[].is_next_sync_committee_known func ETHLightClientStoreGetOptimisticHeader( @@ -1296,7 +1298,7 @@ proc ETHExecutionBlockHeaderCreateFromJson( Opt.some data.requestsHash.get.asEth2Digest.to(Hash32) else: Opt.none(Hash32)) - if rlpHash(blockHeader) != executionHash[]: + if blockHeader.computeRlpHash().asEth2Digest() != executionHash[]: return nil # Construct withdrawals @@ -1321,20 +1323,20 @@ proc ETHExecutionBlockHeaderCreateFromJson( wds.add ETHWithdrawal( index: wd.index, validatorIndex: wd.validatorIndex, - address: ExecutionAddress(data: wd.address.data), + address: wd.address, amount: wd.amount, bytes: rlpBytes) let tr = orderedTrieRoot(wds) - if tr != data.withdrawalsRoot.get.asEth2Digest: + if tr != data.withdrawalsRoot.get: return nil let executionBlockHeader = ETHExecutionBlockHeader.new() executionBlockHeader[] = ETHExecutionBlockHeader( - transactionsRoot: blockHeader.txRoot, - withdrawalsRoot: blockHeader.withdrawalsRoot.get(zeroHash32), + transactionsRoot: blockHeader.txRoot.asEth2Digest(), + withdrawalsRoot: 
blockHeader.withdrawalsRoot.get(zeroHash32).asEth2Digest(), withdrawals: wds, - requestsHash: blockHeader.requestsHash.get(zeroHash32)) + requestsHash: blockHeader.requestsHash.get(zeroHash32).asEth2Digest()) executionBlockHeader.toUnmanagedPtr() proc ETHExecutionBlockHeaderDestroy( @@ -1552,10 +1554,7 @@ proc ETHTransactionsCreateFromJson( return nil if yParity != data.v: return nil - if data.authorizationList.isSome: - for authorization in data.authorizationList.get: - if authorization.v > uint8.high: - return nil + let tx = eth_types.EthTransaction( txType: txType, @@ -1600,7 +1599,7 @@ proc ETHTransactionsCreateFromJson( except RlpError: raiseAssert "Unreachable" hash = keccak256(rlpBytes) - if data.hash.asEth2Digest != hash: + if data.hash != hash: return nil func packSignature(r, s: UInt256, yParity: uint8): array[65, byte] = @@ -1612,15 +1611,12 @@ proc ETHTransactionsCreateFromJson( func recoverSignerAddress( rawSig: array[65, byte], - hashForSigning: Hash32): Opt[array[20, byte]] = + hashForSigning: Hash32): SkResult[ExecutionAddress] = let - sig = SkRecoverableSignature.fromRaw(rawSig).valueOr: - return Opt.none(array[20, byte]) - sigHash = SkMessage.fromBytes(hashForSigning.data).valueOr: - return Opt.none(array[20, byte]) - pubkey = sig.recover(sigHash).valueOr: - return Opt.none(array[20, byte]) - Opt.some keys.PublicKey(pubkey).toCanonicalAddress().data + signature = ?Signature.fromRaw(rawSig) + pubkey = ?signature.recover(SkMessage(hashForSigning.data())) + + ok pubkey.toCanonicalAddress() # Compute from execution address let @@ -1633,7 +1629,7 @@ proc ETHTransactionsCreateFromJson( sigHash = tx.rlpHashForSigning(tx.isEip155()) fromAddress = recoverSignerAddress(rawSig, sigHash).valueOr: return nil - if distinctBase(data.`from`) != fromAddress: + if data.`from` != fromAddress: return nil # Compute to execution address @@ -1656,30 +1652,30 @@ proc ETHTransactionsCreateFromJson( tx.authorizationList.len) for auth in tx.authorizationList: let - sig = packSignature(auth.r, auth.s, auth.v.uint8) + sig = packSignature(auth.r, auth.s, auth.yParity) authority = recoverSignerAddress(sig, auth.rlpHashForSigning).valueOr: return nil authorizationList.add ETHAuthorization( chainId: auth.chainId, - address: ExecutionAddress(data: auth.address.data), + address: auth.address, nonce: auth.nonce, - authority: ExecutionAddress(data: authority), + authority: authority, signature: @sig) txs.add ETHTransaction( - hash: keccak256(rlpBytes), + hash: keccak256(rlpBytes).asEth2Digest, chainId: tx.chainId, - `from`: ExecutionAddress(data: fromAddress), + `from`: fromAddress, nonce: tx.nonce, maxPriorityFeePerGas: tx.maxPriorityFeePerGas.uint64, maxFeePerGas: tx.maxFeePerGas.uint64, gas: tx.gasLimit.uint64, destinationType: destinationType, - to: ExecutionAddress(data: toAddress.data), + to: toAddress, value: tx.value, input: tx.payload, accessList: tx.accessList.mapIt(ETHAccessTuple( - address: ExecutionAddress(data: it.address.data), + address: it.address, storageKeys: it.storageKeys.mapIt(Eth2Digest(data: it.data)))), maxFeePerBlobGas: tx.maxFeePerBlobGas, blobVersionedHashes: tx.versionedHashes.mapIt(Eth2Digest(data: it.data)), @@ -1688,7 +1684,7 @@ proc ETHTransactionsCreateFromJson( signature: @rawSig, bytes: rlpBytes.TypedTransaction) - if orderedTrieRoot(txs) != transactionsRoot[]: + if orderedTrieRoot(txs).asEth2Digest() != transactionsRoot[]: return nil let transactions = seq[ETHTransaction].new() @@ -2396,17 +2392,17 @@ proc ETHReceiptsCreateFromJson( ReceiptStatusType.Root else: 
ReceiptStatusType.Status, - root: rec.hash, + root: rec.hash.asEth2Digest(), status: rec.status, gasUsed: distinctBase(data.gasUsed), # Validated during sanity checks. logsBloom: BloomLogs(data: rec.logsBloom.data), logs: rec.logs.mapIt(ETHLog( - address: ExecutionAddress(data: it.address.data), + address: it.address, topics: it.topics.mapIt(Eth2Digest(data: it.data)), data: it.data)), bytes: rlpBytes) - if orderedTrieRoot(recs) != receiptsRoot[]: + if orderedTrieRoot(recs).asEth2Digest() != receiptsRoot[]: return nil let receipts = seq[ETHReceipt].new() diff --git a/beacon_chain/libnimbus_lc/test_files/transactions.json b/beacon_chain/libnimbus_lc/test_files/transactions.json index 6fbc74b7b8..f8aa4b77d2 100644 --- a/beacon_chain/libnimbus_lc/test_files/transactions.json +++ b/beacon_chain/libnimbus_lc/test_files/transactions.json @@ -318,7 +318,7 @@ "chainId": "0x0", "address": "0x3031323334353637383940414243444546474849", "nonce": "0x0", - "v": "0x1", + "yParity": "0x1", "r": "0xa4be86c16c6d3a2b907660b24187d0b30b69f6db3e6e8e7a7bb1183a4706d454", "s": "0x28aba84cdee6059dde41620422959d01da4f6cfff21a9b97036db018f1d815f6" }, @@ -326,7 +326,7 @@ "chainId": "0x1", "address": "0x5051525354555657585960616263646566676869", "nonce": "0x309", - "v": "0x1", + "yParity": "0x1", "r": "0xa4be86c16c6d3a2b907660b24187d0b30b69f6db3e6e8e7a7bb1183a4706d454", "s": "0x28aba84cdee6059dde41620422959d01da4f6cfff21a9b97036db018f1d815f6" } diff --git a/beacon_chain/light_client.nim b/beacon_chain/light_client.nim index 3270e115a8..40291ae908 100644 --- a/beacon_chain/light_client.nim +++ b/beacon_chain/light_client.nim @@ -137,8 +137,8 @@ proc createLightClient( strictVerification) proc lightClientVerifier(obj: SomeForkedLightClientObject): - Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} = - let resfut = Future[Result[void, VerifierError]].Raising([CancelledError]).init("lightClientVerifier") + Future[Result[void, LightClientVerifierError]] {.async: (raises: [CancelledError], raw: true).} = + let resfut = Future[Result[void, LightClientVerifierError]].Raising([CancelledError]).init("lightClientVerifier") lightClient.processor[].addObject(MsgSource.gossip, obj, resfut) resfut proc bootstrapVerifier(obj: ForkedLightClientBootstrap): auto = @@ -175,13 +175,13 @@ proc createLightClient( GENESIS_SLOT.sync_committee_period lightClient.manager = LightClientManager.init( - lightClient.network, rng, getTrustedBlockRoot, + lightClient.network, rng, lightClient.cfg.time, getTrustedBlockRoot, bootstrapVerifier, updateVerifier, finalityVerifier, optimisticVerifier, isLightClientStoreInitialized, isNextSyncCommitteeKnown, getFinalizedPeriod, getOptimisticPeriod, getBeaconTime, shouldInhibitSync = shouldInhibitSync) - lightClient.gossipState = {} + reset(lightClient.gossipState) lightClient @@ -287,11 +287,13 @@ proc installMessageValidators*( template validate[T: SomeForkyLightClientObject]( msg: T, - contextFork: ConsensusFork, + expectedContextBytes: ForkDigest, validatorProcName: untyped): ValidationResult = msg.logReceived() - if contextFork != lightClient.cfg.consensusForkAtEpoch(msg.contextEpoch): + let contextBytes = + lightClient.forkDigests[].atEpoch(msg.contextEpoch, lightClient.cfg) + if contextBytes != expectedContextBytes: msg.logDropped( (ValidationResult.Reject, cstring "Invalid context fork")) return ValidationResult.Reject @@ -346,28 +348,29 @@ proc installMessageValidators*( let forkDigests = lightClient.forkDigests for consensusFork in ConsensusFork: - 
withLcDataFork(lcDataForkAtConsensusFork(consensusFork)): - when lcDataFork > LightClientDataFork.None: - closureScope: - let - contextFork = consensusFork - digest = forkDigests[].atConsensusFork(contextFork) - - # light_client_optimistic_update - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_finality_update - lightClient.network.addValidator( - getLightClientFinalityUpdateTopic(digest), proc ( - msg: lcDataFork.LightClientFinalityUpdate - ): ValidationResult = - validate(msg, contextFork, processLightClientFinalityUpdate)) - - # light_client_optimistic_update - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update - lightClient.network.addValidator( - getLightClientOptimisticUpdateTopic(digest), proc ( - msg: lcDataFork.LightClientOptimisticUpdate - ): ValidationResult = - validate(msg, contextFork, processLightClientOptimisticUpdate)) + for forkDigest in consensusFork.forkDigests(forkDigests[]): + withLcDataFork(lcDataForkAtConsensusFork(consensusFork)): + when lcDataFork > LightClientDataFork.None: + closureScope: + let contextBytes = forkDigest + + # light_client_optimistic_update + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_finality_update + lightClient.network.addValidator( + getLightClientFinalityUpdateTopic(forkDigest), proc ( + msg: lcDataFork.LightClientFinalityUpdate, + src: PeerId + ): ValidationResult = + validate(msg, contextBytes, processLightClientFinalityUpdate)) + + # light_client_optimistic_update + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update + lightClient.network.addValidator( + getLightClientOptimisticUpdateTopic(forkDigest), proc ( + msg: lcDataFork.LightClientOptimisticUpdate, + src: PeerId + ): ValidationResult = + validate(msg, contextBytes, processLightClientOptimisticUpdate)) proc updateGossipStatus*( lightClient: LightClient, slot: Slot, dagIsBehind = default(Option[bool])) = @@ -387,10 +390,7 @@ proc updateGossipStatus*( dagIsBehind.get(true) isBehind = lcBehind and dagBehind - currentEpochTargetGossipState = getTargetGossipState( - epoch, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, - cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, - cfg.FULU_FORK_EPOCH, isBehind) + currentEpochTargetGossipState = getTargetGossipState(epoch, cfg, isBehind) targetGossipState = if lcBehind or epoch < 1: currentEpochTargetGossipState @@ -399,9 +399,7 @@ proc updateGossipStatus*( # which is in the past relative to the signature slot (current slot). # Therefore, LC topic subscriptions are kept for 1 extra epoch. 
let previousEpochTargetGossipState = getTargetGossipState( - epoch - 1, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, - cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, - cfg.FULU_FORK_EPOCH, isBehind) + epoch - 1, cfg, isBehind) currentEpochTargetGossipState + previousEpochTargetGossipState template currentGossipState(): auto = lightClient.gossipState @@ -419,25 +417,23 @@ proc updateGossipStatus*( discard let - newGossipForks = targetGossipState - currentGossipState - oldGossipForks = currentGossipState - targetGossipState + newGossipEpochs = targetGossipState - currentGossipState + oldGossipEpochs = currentGossipState - targetGossipState - for gossipFork in oldGossipForks: - if gossipFork >= ConsensusFork.Altair: - let forkDigest = lightClient.forkDigests[].atConsensusFork(gossipFork) + for gossipEpoch in oldGossipEpochs: + if gossipEpoch >= cfg.ALTAIR_FORK_EPOCH: + let forkDigest = lightClient.forkDigests[].atEpoch(gossipEpoch, cfg) lightClient.network.unsubscribe( getLightClientFinalityUpdateTopic(forkDigest)) lightClient.network.unsubscribe( getLightClientOptimisticUpdateTopic(forkDigest)) - for gossipFork in newGossipForks: - if gossipFork >= ConsensusFork.Altair: - let forkDigest = lightClient.forkDigests[].atConsensusFork(gossipFork) + for gossipEpoch in newGossipEpochs: + if gossipEpoch >= cfg.ALTAIR_FORK_EPOCH: + let forkDigest = lightClient.forkDigests[].atEpoch(gossipEpoch, cfg) lightClient.network.subscribe( - getLightClientFinalityUpdateTopic(forkDigest), - basicParams) + getLightClientFinalityUpdateTopic(forkDigest), basicParams()) lightClient.network.subscribe( - getLightClientOptimisticUpdateTopic(forkDigest), - basicParams) + getLightClientOptimisticUpdateTopic(forkDigest), basicParams()) - lightClient.gossipState = targetGossipState \ No newline at end of file + lightClient.gossipState = targetGossipState diff --git a/beacon_chain/light_client_db.nim b/beacon_chain/light_client_db.nim index 20b21f62e8..702ec1b300 100644 --- a/beacon_chain/light_client_db.nim +++ b/beacon_chain/light_client_db.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
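Aside on the `updateGossipStatus` rewrite in light_client.nim above: gossip topics are now derived per epoch (`forkDigests[].atEpoch(gossipEpoch, cfg)`) rather than per `ConsensusFork`, and the subscribe/unsubscribe work falls out of two set differences between the target and current gossip state. A minimal sketch of that diffing pattern, with hypothetical stand-ins (plain `uint64` epochs and string topics instead of real fork digests and the libp2p subscription calls):

# Sketch only: the subscribe/unsubscribe set-difference pattern used by
# updateGossipStatus above, with hypothetical stand-in types.
import std/sets

type Epoch = uint64

func finalityTopic(epoch: Epoch): string =
  # stand-in for getLightClientFinalityUpdateTopic(forkDigest at that epoch)
  "/eth2/lc_finality_update/epoch_" & $epoch

proc updateSubscriptions(current: var HashSet[Epoch], target: HashSet[Epoch]) =
  let
    newEpochs = target - current   # topics to subscribe to
    oldEpochs = current - target   # topics to unsubscribe from
  for epoch in oldEpochs:
    echo "unsubscribe ", finalityTopic(epoch)  # network.unsubscribe(...) in the real code
  for epoch in newEpochs:
    echo "subscribe ", finalityTopic(epoch)    # network.subscribe(..., basicParams()) in the real code
  current = target

when isMainModule:
  var gossipState = toHashSet([Epoch(10)])
  updateSubscriptions(gossipState, toHashSet([Epoch(10), Epoch(11)]))
  doAssert gossipState == toHashSet([Epoch(10), Epoch(11)])

The real code applies the same difference to both the finality-update and optimistic-update topics, skipping epochs before `ALTAIR_FORK_EPOCH`.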
@@ -15,7 +15,7 @@ import # Beacon chain internals spec/datatypes/altair, spec/[eth2_ssz_serialization, helpers], - ./db_limits + ./db_utils logScope: topics = "lcdb" diff --git a/beacon_chain/networking/eth2_discovery.nim b/beacon_chain/networking/eth2_discovery.nim index 359513c29f..baf14d7f1e 100644 --- a/beacon_chain/networking/eth2_discovery.nim +++ b/beacon_chain/networking/eth2_discovery.nim @@ -10,7 +10,7 @@ import std/[algorithm, sequtils], chronos, chronicles, - eth/p2p/discoveryv5/[enr, protocol, node, random2], + eth/p2p/discoveryv5/[protocol, node, random2], ../spec/datatypes/[altair, fulu], ../spec/eth2_ssz_serialization, ".."/[conf, conf_light_client] diff --git a/beacon_chain/networking/eth2_network.nim b/beacon_chain/networking/eth2_network.nim index 66b03dd651..8b4bfde8f4 100644 --- a/beacon_chain/networking/eth2_network.nim +++ b/beacon_chain/networking/eth2_network.nim @@ -25,7 +25,7 @@ import libp2p/stream/connection, libp2p/services/wildcardresolverservice, eth/[common/keys, async_utils], - eth/net/nat, eth/p2p/discoveryv5/[enr, node, random2], + eth/net/nat, eth/p2p/discoveryv5/[node, random2], ".."/[version, conf, beacon_clock, conf_light_client], ../spec/[eth2_ssz_serialization, network, helpers, forks], ../validators/keystore_management, @@ -54,6 +54,13 @@ type # warning about unused import (rpc/messages). GossipMsg = messages.Message + ValidationSyncProc*[T] = + proc(msg: T, src: PeerId): ValidationResult {.gcsafe, raises: [].} + + ValidationAsyncProc*[T] = + proc(msg: T, src: PeerId): Future[ValidationResult] {. + async: (raises: [CancelledError]).} + SeenItem* = object peerId*: PeerId stamp*: chronos.Moment @@ -79,6 +86,7 @@ type forkId*: ENRForkID discoveryForkId*: ENRForkID forkDigests*: ref ForkDigests + nextForkDigest: ForkDigest rng*: ref HmacDrbgContext peers*: Table[PeerId, Peer] directPeers*: DirectPeers @@ -86,8 +94,8 @@ type validTopics: HashSet[string] peerPingerHeartbeatFut: Future[void].Raising([CancelledError]) peerTrimmerHeartbeatFut: Future[void].Raising([CancelledError]) - cfg: RuntimeConfig - getBeaconTime: GetBeaconTimeFn + cfg*: RuntimeConfig + getBeaconTime*: GetBeaconTimeFn quota: TokenBucket ## Global quota mainly for high-bandwidth stuff @@ -436,7 +444,7 @@ proc peerFromStream(network: Eth2Node, conn: Connection): Peer = func getKey*(peer: Peer): PeerId {.inline.} = peer.peerId -proc getFuture(peer: Peer): Future[void] {.inline.} = +proc getFuture*(peer: Peer): Future[void] {.inline.} = if isNil(peer.disconnectedFut): peer.disconnectedFut = newFuture[void]("Peer.disconnectedFut") peer.disconnectedFut @@ -619,7 +627,7 @@ proc writeChunkSZ( uncompressedLenBytes = toBytes(uncompressedLen, Leb128) var - data = newSeqUninitialized[byte]( + data = newSeqUninit[byte]( ord(responseCode.isSome) + contextBytes.len + uncompressedLenBytes.len + payloadSZ.len) pos = 0 @@ -638,7 +646,7 @@ proc writeChunk(conn: Connection, let uncompressedLenBytes = toBytes(payload.lenu64, Leb128) var - data = newSeqUninitialized[byte]( + data = newSeqUninit[byte]( ord(responseCode.isSome) + contextBytes.len + uncompressedLenBytes.len + snappy.maxCompressedLenFramed(payload.len).int) pos = 0 @@ -752,8 +760,8 @@ proc uncompressFramedStream(conn: Connection, doAssert maxCompressedFrameDataLen >= maxUncompressedFrameDataLen.uint64 var - frameData = newSeqUninitialized[byte](maxCompressedFrameDataLen + 4) - output = newSeqUninitialized[byte](expectedSize) + frameData = newSeqUninit[byte](maxCompressedFrameDataLen + 4) + output = newSeqUninit[byte](expectedSize) written = 0 
while written < expectedSize: @@ -850,7 +858,8 @@ template gossipMaxSize(T: untyped): uint32 = fixedPortionSize(T).uint32 elif T is bellatrix.SignedBeaconBlock or T is capella.SignedBeaconBlock or T is deneb.SignedBeaconBlock or T is electra.SignedBeaconBlock or - T is fulu.SignedBeaconBlock: + T is fulu.SignedBeaconBlock or T is fulu.DataColumnSidecar or + T is gloas.DataColumnSidecar: MAX_PAYLOAD_SIZE # TODO https://github.com/status-im/nim-ssz-serialization/issues/20 for # Attestation, AttesterSlashing, and SignedAggregateAndProof, which all @@ -922,7 +931,15 @@ proc readResponseChunk( var responseCodeByte: byte try: await conn.readExactly(addr responseCodeByte, 1) - except LPStreamEOFError, LPStreamIncompleteError: + except LPStreamIncompleteError: + # `LPStreamIncompleteError` is raised by `nim-libp2p` when remote peer + # dropped connection and stream, so it can't be used anymore. + return neterr UnexpectedEOF + except LPStreamEOFError: + # `LPStreamEOFError` is raised by `nim-libp2p` when remote peer sent + # EOF frame, which indicates that remote peer wants to gracefully finish + # the stream. It also means that our connection with remote peer is not + # broken and new streams could be initiated. return neterr PotentiallyExpectedEOF except CancelledError as exc: raise exc @@ -1691,12 +1708,9 @@ proc runDiscoveryLoop(node: Eth2Node) {.async: (raises: [CancelledError]).} = proc fetchNodeIdFromPeerId*(peer: Peer): NodeId= # Convert peer id to node id by extracting the peer's public key - let nodeId = - block: - var key: PublicKey - discard peer.peerId.extractPublicKey(key) - keys.PublicKey.fromRaw(key.skkey.getBytes()).get().toNodeId() - nodeId + var key: PublicKey + discard peer.peerId.extractPublicKey(key) + keys.PublicKey.fromRaw(key.skkey.getBytes()).get().toNodeId() proc resolvePeer(peer: Peer) = # Resolve task which performs searching of peer's public key and recovery of @@ -1845,7 +1859,7 @@ proc new(T: type Eth2Node, ip: Opt[IpAddress], tcpPort, udpPort: Opt[Port], privKey: keys.PrivateKey, discovery: bool, directPeers: DirectPeers, announcedAddresses: openArray[MultiAddress], - rng: ref HmacDrbgContext): T {.raises: [CatchableError].} = + rng: ref HmacDrbgContext): T = when not defined(local_testnet): let connectTimeout = chronos.minutes(1) @@ -2495,7 +2509,21 @@ proc lookupCgcFromPeer*(peer: Peer): uint64 = let metadata = peer.metadata if metadata.isOk: - return metadata.get.custody_group_count + let cgc = if metadata.get.custody_group_count <= NUMBER_OF_COLUMNS: + metadata.get.custody_group_count + else: + 0 + + # If a peer's metadata hasn't been updated since a Fulu transition, the + # metadata is present but has no initialized cgc. + # + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/fulu/p2p-interface.md#custody-group-count + # guarantees that the ENR will have this information though: + # "A new field is added to the ENR under the key cgc to facilitate custody + # data column discovery. This new field MUST be added once + # `FULU_FORK_EPOCH` is assigned any value other than `FAR_FUTURE_EPOCH`." + if cgc >= CUSTODY_REQUIREMENT: + return cgc # Try getting the custody count from ENR if metadata fetch fails. 
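# Illustrative sketch (not part of this diff) of the fallback order that
# `lookupCgcFromPeer` implements here: prefer the peer's METADATA
# custody_group_count when it is in a sane range, otherwise fall back to the
# `cgc` ENR field, and finally to CUSTODY_REQUIREMENT. `resolveCgc` and the
# two constants are illustrative stand-ins, not the project's API.
import std/options

const
  NumberOfColumns = 128'u64   # stand-in for NUMBER_OF_COLUMNS
  CustodyRequirement = 4'u64  # stand-in for CUSTODY_REQUIREMENT

func resolveCgc(metadataCgc, enrCgc: Option[uint64]): uint64 =
  if metadataCgc.isSome:
    let cgc = metadataCgc.get
    # Values above NUMBER_OF_COLUMNS indicate uninitialized metadata, e.g.
    # metadata that predates the Fulu transition.
    if cgc <= NumberOfColumns and cgc >= CustodyRequirement:
      return cgc
  if enrCgc.isSome:
    # The ENR advertises the count as soon as FULU_FORK_EPOCH is scheduled.
    return if enrCgc.get <= NumberOfColumns: enrCgc.get else: 0'u64
  CustodyRequirement

when isMainModule:
  doAssert resolveCgc(some 8'u64, none uint64) == 8'u64
  doAssert resolveCgc(none uint64, some 16'u64) == 16'u64
  doAssert resolveCgc(some 0'u64, none uint64) == CustodyRequirement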
debug "Could not get cgc from metadata, trying from ENR", @@ -2507,12 +2535,18 @@ proc lookupCgcFromPeer*(peer: Peer): uint64 = if enrFieldOpt.isOk: try: let cgc = SSZ.decode(enrFieldOpt.get, uint8) + if cgc > NUMBER_OF_COLUMNS: + return 0 + + if peer.metadata.isOk: + peer.metadata.get.custody_group_count = cgc + return cgc.uint64 except SszError, SerializationError: discard # Ignore decoding errors and fallback to default # Return default value if no valid custody subnet count is found. - return CUSTODY_REQUIREMENT.uint64 + CUSTODY_REQUIREMENT func shortForm*(id: NetKeyPair): string = $PeerId.init(id.pubkey) @@ -2534,10 +2568,11 @@ proc newValidationResultFuture(v: ValidationResult): Future[ValidationResult] res.complete(v) res -func addValidator*[MsgType](node: Eth2Node, - topic: string, - msgValidator: proc(msg: MsgType): - ValidationResult {.gcsafe, raises: [].} ) = +func addValidator*[MsgType]( + node: Eth2Node, + topic: string, + msgValidator: ValidationSyncProc[MsgType] +) = # Message validators run when subscriptions are enabled - they validate the # data and return an indication of whether the message should be broadcast # or not - validation is `async` but implemented without the macro because @@ -2552,7 +2587,7 @@ func addValidator*[MsgType](node: Eth2Node, try: let decoded = SSZ.decode(decompressed, MsgType) decompressed = newSeq[byte](0) # release memory before validating - msgValidator(decoded) # doesn't raise! + msgValidator(decoded, message.fromPeer) # doesn't raise! except SerializationError as e: inc nbc_gossip_failed_ssz debug "Error decoding gossip", @@ -2569,12 +2604,15 @@ func addValidator*[MsgType](node: Eth2Node, node.validTopics.incl topic # Only allow subscription to validated topics node.pubsub.addValidator(topic, execValidator) -proc addAsyncValidator*[MsgType](node: Eth2Node, - topic: string, - msgValidator: proc(msg: MsgType): - Future[ValidationResult] {.async: (raises: [CancelledError]).} ) = - proc execValidator(topic: string, message: GossipMsg): - Future[ValidationResult] {.async: (raw: true).} = +proc addAsyncValidator*[MsgType]( + node: Eth2Node, + topic: string, + msgValidator: ValidationAsyncProc[MsgType] +) = + proc execValidator( + topic: string, + message: GossipMsg + ): Future[ValidationResult] {.async: (raw: true).} = inc nbc_gossip_messages_received trace "Validating incoming gossip message", len = message.data.len, topic @@ -2583,7 +2621,7 @@ proc addAsyncValidator*[MsgType](node: Eth2Node, try: let decoded = SSZ.decode(decompressed, MsgType) decompressed = newSeq[byte](0) # release memory before validating - msgValidator(decoded) # doesn't raise! + msgValidator(decoded, message.fromPeer) # doesn't raise! 
except SerializationError as e: inc nbc_gossip_failed_ssz debug "Error decoding gossip", @@ -2635,15 +2673,13 @@ proc broadcast(node: Eth2Node, topic: string, msg: auto): broadcast(node, topic, gossipEncode(msg)) proc subscribeAttestationSubnets*( - node: Eth2Node, subnets: AttnetBits, forkDigest: ForkDigest) = - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/p2p-interface.md#attestations-and-aggregation - # Nimbus won't score attestation subnets for now, we just rely on block and - # aggregate which are more stable and reliable - + node: Eth2Node, subnets: AttnetBits, forkDigest: ForkDigest, + topicParams: TopicParams) = + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/p2p-interface.md#attestations-and-aggregation for subnet_id, enabled in subnets: if enabled: node.subscribe(getAttestationTopic( - forkDigest, SubnetId(subnet_id)), TopicParams.init()) # don't score attestation subnets for now + forkDigest, SubnetId(subnet_id)), topicParams) proc unsubscribeAttestationSubnets*( node: Eth2Node, subnets: AttnetBits, forkDigest: ForkDigest) = @@ -2673,7 +2709,10 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) = debug "Stability subnets changed; updated ENR attnets", attnets proc loadCgcnetMetadataAndEnr*(node: Eth2Node, cgcnets: CgcCount) = + node.metadata.seq_number += 1 node.metadata.custody_group_count = cgcnets.uint64 + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/fulu/p2p-interface.md#custody-group-count let res = node.discovery.updateRecord({ enrCustodySubnetCountField: SSZ.encode(cgcnets) @@ -2704,6 +2743,24 @@ proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) = else: debug "Sync committees changed; updated ENR syncnets", syncnets +proc updateNextForkDigest*(node: Eth2Node, next_fork_digest: ForkDigest) = + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/p2p-interface.md#next-fork-digest + if node.nextForkDigest == next_fork_digest: + return + + node.metadata.seq_number += 1 + node.nextForkDigest = next_fork_digest + + let res = node.discovery.updateRecord({ + enrNextForkDigestField: SSZ.encode(next_fork_digest) + }) + if res.isErr(): + # This should not occur in this scenario as the private key would always + # be the correct one and the ENR will not increase in size. 
+ warn "Failed to update the ENR nfd field", error = res.error + else: + debug "Next fork digest changed; updated ENR nfd", next_fork_digest + proc updateForkId(node: Eth2Node, value: ENRForkID) = node.forkId = value let res = node.discovery.updateRecord({enrForkIdField: SSZ.encode value}) @@ -2776,45 +2833,10 @@ proc broadcastAggregateAndProof*( node.broadcast(topic, proof) proc broadcastBeaconBlock*( - node: Eth2Node, blck: phase0.SignedBeaconBlock): - Future[SendResult] {.async: (raises: [CancelledError], raw: true).} = - let topic = getBeaconBlocksTopic(node.forkDigests.phase0) - node.broadcast(topic, blck) - -proc broadcastBeaconBlock*( - node: Eth2Node, blck: altair.SignedBeaconBlock): - Future[SendResult] {.async: (raises: [CancelledError], raw: true).} = - let topic = getBeaconBlocksTopic(node.forkDigests.altair) - node.broadcast(topic, blck) - -proc broadcastBeaconBlock*( - node: Eth2Node, blck: bellatrix.SignedBeaconBlock): - Future[SendResult] {.async: (raises: [CancelledError], raw: true).} = - let topic = getBeaconBlocksTopic(node.forkDigests.bellatrix) - node.broadcast(topic, blck) - -proc broadcastBeaconBlock*( - node: Eth2Node, blck: capella.SignedBeaconBlock): - Future[SendResult] {.async: (raises: [CancelledError], raw: true).} = - let topic = getBeaconBlocksTopic(node.forkDigests.capella) - node.broadcast(topic, blck) - -proc broadcastBeaconBlock*( - node: Eth2Node, blck: deneb.SignedBeaconBlock): - Future[SendResult] {.async: (raises: [CancelledError], raw: true).} = - let topic = getBeaconBlocksTopic(node.forkDigests.deneb) - node.broadcast(topic, blck) - -proc broadcastBeaconBlock*( - node: Eth2Node, blck: electra.SignedBeaconBlock): + node: Eth2Node, blck: SomeForkySignedBeaconBlock): Future[SendResult] {.async: (raises: [CancelledError], raw: true).} = - let topic = getBeaconBlocksTopic(node.forkDigests.electra) - node.broadcast(topic, blck) - -proc broadcastBeaconBlock*( - node: Eth2Node, blck: fulu.SignedBeaconBlock): - Future[SendResult] {.async: (raises: [CancelledError], raw: true).} = - let topic = getBeaconBlocksTopic(node.forkDigests.fulu) + let topic = getBeaconBlocksTopic( + node.forkDigestAtEpoch(blck.message.slot.epoch)) node.broadcast(topic, blck) proc broadcastBlobSidecar*( @@ -2826,6 +2848,15 @@ proc broadcastBlobSidecar*( node.forkDigestAtEpoch(contextEpoch), subnet_id) node.broadcast(topic, blob) +proc broadcastDataColumnSidecar*( + node: Eth2Node, subnet_id: uint64, data_column: fulu.DataColumnSidecar): + Future[SendResult] {.async: (raises: [CancelledError], raw: true).} = + let + contextEpoch = data_column.signed_block_header.message.slot.epoch + topic = getDataColumnSidecarTopic( + node.forkDigestAtEpoch(contextEpoch), subnet_id) + node.broadcast(topic, data_column) + proc broadcastSyncCommitteeMessage*( node: Eth2Node, msg: SyncCommitteeMessage, subcommitteeIdx: SyncSubcommitteeIndex): diff --git a/beacon_chain/networking/network_metadata.nim b/beacon_chain/networking/network_metadata.nim index f3731c6269..a0c8418020 100644 --- a/beacon_chain/networking/network_metadata.nim +++ b/beacon_chain/networking/network_metadata.nim @@ -5,13 +5,11 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} import std/os, - stew/[byteutils, objects], stew/shims/macros, nimcrypto/hash, - web3/[conversions], - web3/primitives as web3types, + stew/byteutils, stew/shims/macros, chronicles, eth/common/eth_types_json_serialization, ../spec/[eth2_ssz_serialization, forks] @@ -34,8 +32,7 @@ from std/strutils import # compilation, so a host OS specific separator can be used when deriving paths # from `currentSourcePath`. -export - web3types, conversions, RuntimeConfig +export RuntimeConfig const vendorDir = currentSourcePath.parentDir.replace('\\', '/') & "/../../vendor" @@ -43,8 +40,6 @@ const incbinEnabled* = sizeof(pointer) == 8 type - Eth1BlockHash* = web3types.Hash32 - Eth1Network* = enum mainnet sepolia @@ -84,9 +79,6 @@ type # Parsing `enr.Records` is still not possible at compile-time bootstrapNodes*: seq[string] - depositContractBlock*: uint64 - depositContractBlockHash*: Eth2Digest - genesis*: GenesisMetadata func hasGenesis*(metadata: Eth2NetworkMetadata): bool = @@ -142,9 +134,6 @@ proc loadEth2NetworkMetadata*( let genesisPath = path & "/genesis.ssz" configPath = path & "/config.yaml" - deployBlockPath = path & "/deploy_block.txt" - depositContractBlockPath = path & "/deposit_contract_block.txt" - depositContractBlockHashPath = path & "/deposit_contract_block_hash.txt" bootstrapNodesLegacyPath = path & "/bootstrap_nodes.txt" # <= Dec 2024 bootstrapNodesPath = path & "/bootstrap_nodes.yaml" bootEnrPath = path & "/boot_enr.yaml" @@ -160,43 +149,6 @@ proc loadEth2NetworkMetadata*( else: defaultRuntimeConfig - depositContractBlockStr = if fileExists(depositContractBlockPath): - readFile(depositContractBlockPath).strip - else: - "" - - depositContractBlockHashStr = if fileExists(depositContractBlockHashPath): - readFile(depositContractBlockHashPath).strip - else: - "" - - deployBlockStr = if fileExists(deployBlockPath): - readFile(deployBlockPath).strip - else: - "" - - depositContractBlock = if depositContractBlockStr.len > 0: - parseBiggestUInt depositContractBlockStr - elif deployBlockStr.len > 0: - parseBiggestUInt deployBlockStr - elif not runtimeConfig.DEPOSIT_CONTRACT_ADDRESS.isDefaultValue: - raise newException(ValueError, - "A network with deposit contract should specify the " & - "deposit contract deployment block in a file named " & - "deposit_contract_block.txt or deploy_block.txt") - else: - 1'u64 - - depositContractBlockHash = if depositContractBlockHashStr.len > 0: - Eth2Digest.strictParse(depositContractBlockHashStr) - elif not runtimeConfig.DEPOSIT_CONTRACT_ADDRESS.isDefaultValue: - raise newException(ValueError, - "A network with deposit contract should specify the " & - "deposit contract deployment block hash in a file " & - "name deposit_contract_block_hash.txt") - else: - default(Eth2Digest) - bootstrapNodes = deduplicate( readBootstrapNodes(bootstrapNodesLegacyPath) & readBootEnr(bootstrapNodesPath) & @@ -206,8 +158,6 @@ proc loadEth2NetworkMetadata*( eth1Network: eth1Network, cfg: runtimeConfig, bootstrapNodes: bootstrapNodes, - depositContractBlock: depositContractBlock, - depositContractBlockHash: depositContractBlockHash, genesis: if downloadGenesisFrom.isSome: GenesisMetadata(kind: BakedInUrl, @@ -251,14 +201,20 @@ proc loadCompileTimeNetworkMetadata( else: macros.error "config.yaml not found for network '" & path -when const_preset == "gnosis": +when IsGnosisSupported: when incbinEnabled: let - gnosisGenesis* {.importc: "gnosis_mainnet_genesis".}: ptr UncheckedArray[byte] - gnosisGenesisSize* {.importc: 
"gnosis_mainnet_genesis_size".}: int + gnosisGenesisVar {.importc: "gnosis_mainnet_genesis".}: ptr UncheckedArray[byte] + gnosisGenesisSizeVar {.importc: "gnosis_mainnet_genesis_size".}: int + + chiadoGenesisVar {.importc: "gnosis_chiado_genesis".}: ptr UncheckedArray[byte] + chiadoGenesisSizeVar {.importc: "gnosis_chiado_genesis_size".}: int + + template gnosisGenesis*(): ptr UncheckedArray[byte] = {.noSideEffect.}: gnosisGenesisVar + template gnosisGenesisSize*(): int = {.noSideEffect.}: gnosisGenesisSizeVar - chiadoGenesis* {.importc: "gnosis_chiado_genesis".}: ptr UncheckedArray[byte] - chiadoGenesisSize* {.importc: "gnosis_chiado_genesis_size".}: int + template chiadoGenesis*(): ptr UncheckedArray[byte] = {.noSideEffect.}: chiadoGenesisVar + template chiadoGenesisSize*(): int = {.noSideEffect.}: chiadoGenesisSizeVar # let `.incbin` in assembly file find the binary file through search path {.passc: "-I" & escape(vendorDir).} @@ -288,21 +244,28 @@ when const_preset == "gnosis": checkForkConsistency(network.cfg) doAssert network.cfg.ELECTRA_FORK_EPOCH < FAR_FUTURE_EPOCH doAssert network.cfg.FULU_FORK_EPOCH == FAR_FUTURE_EPOCH - doAssert ConsensusFork.high == ConsensusFork.Fulu + doAssert network.cfg.GLOAS_FORK_EPOCH == FAR_FUTURE_EPOCH + doAssert ConsensusFork.high == ConsensusFork.Gloas -elif const_preset == "mainnet": +elif IsMainnetSupported: when incbinEnabled: # Nim is very inefficent at loading large constants from binary files so we # use this trick instead which saves significant amounts of compile time {.push hint[GlobalVar]:off.} let - mainnetGenesis* {.importc: "eth2_mainnet_genesis".}: ptr UncheckedArray[byte] - mainnetGenesisSize* {.importc: "eth2_mainnet_genesis_size".}: int + mainnetGenesisVar {.importc: "eth2_mainnet_genesis".}: ptr UncheckedArray[byte] + mainnetGenesisSizeVar {.importc: "eth2_mainnet_genesis_size".}: int - sepoliaGenesis* {.importc: "eth2_sepolia_genesis".}: ptr UncheckedArray[byte] - sepoliaGenesisSize* {.importc: "eth2_sepolia_genesis_size".}: int + sepoliaGenesisVar {.importc: "eth2_sepolia_genesis".}: ptr UncheckedArray[byte] + sepoliaGenesisSizeVar {.importc: "eth2_sepolia_genesis_size".}: int {.pop.} + template mainnetGenesis*(): ptr UncheckedArray[byte] = {.noSideEffect.}: mainnetGenesisVar + template mainnetGenesisSize*: int = {.noSideEffect.}: mainnetGenesisSizeVar + + template sepoliaGenesis*(): ptr UncheckedArray[byte] = {.noSideEffect.}: sepoliaGenesisVar + template sepoliaGenesisSize*(): int = {.noSideEffect.}: sepoliaGenesisSizeVar + # let `.incbin` in assembly file find the binary file through search path {.passc: "-I" & escape(vendorDir).} {.compile: "network_metadata_mainnet.S".} @@ -364,9 +327,15 @@ elif const_preset == "mainnet": for network in [ mainnetMetadata, sepoliaMetadata, holeskyMetadata, hoodiMetadata]: checkForkConsistency(network.cfg) - doAssert network.cfg.ELECTRA_FORK_EPOCH < FAR_FUTURE_EPOCH - doAssert network.cfg.FULU_FORK_EPOCH == FAR_FUTURE_EPOCH - doAssert ConsensusFork.high == ConsensusFork.Fulu + doAssert network.cfg.GLOAS_FORK_EPOCH == FAR_FUTURE_EPOCH + doAssert ConsensusFork.high == ConsensusFork.Gloas + + doAssert mainnetMetadata.cfg.FULU_FORK_EPOCH == FAR_FUTURE_EPOCH + doAssert mainnetMetadata.cfg.BLOB_SCHEDULE.len == 0 + + for network in [sepoliaMetadata, holeskyMetadata, hoodiMetadata]: + doAssert network.cfg.FULU_FORK_EPOCH < FAR_FUTURE_EPOCH + doAssert network.cfg.BLOB_SCHEDULE.len == 2 proc getMetadataForNetwork*(networkName: string): Eth2NetworkMetadata = template loadRuntimeMetadata(): auto = @@ -387,11 
+356,11 @@ proc getMetadataForNetwork*(networkName: string): Eth2NetworkMetadata = fatal "config.yaml not found for network", networkName quit 1 - if networkName in ["goerli", "prater"]: - warn "Goerli is deprecated and unsupported; https://blog.ethereum.org/2023/11/30/goerli-lts-update suggests migrating to Holesky or Sepolia" + if networkName == "holesky": + warn "https://blog.ethereum.org/2025/09/01/holesky-shutdown-announcement suggests migrating to Hoodi or Sepolia" let metadata = - when const_preset == "gnosis": + when IsGnosisSupported: case toLowerAscii(networkName) of "gnosis": gnosisMetadata @@ -404,7 +373,7 @@ proc getMetadataForNetwork*(networkName: string): Eth2NetworkMetadata = else: loadRuntimeMetadata() - elif const_preset == "mainnet": + elif IsMainnetSupported: case toLowerAscii(networkName) of "mainnet": mainnetMetadata @@ -435,9 +404,9 @@ proc getRuntimeConfig*(eth2Network: Option[string]): RuntimeConfig = if eth2Network.isSome: getMetadataForNetwork(eth2Network.get) else: - when const_preset == "mainnet": + when IsMainnetSupported: mainnetMetadata - elif const_preset == "gnosis": + elif IsGnosisSupported: gnosisMetadata else: # This is a non-standard build (i.e. minimal), and the function was @@ -447,7 +416,7 @@ proc getRuntimeConfig*(eth2Network: Option[string]): RuntimeConfig = metadata.cfg -when const_preset in ["mainnet", "gnosis"]: +when IsMainnetSupported or IsGnosisSupported: template bakedInGenesisStateAsBytes(networkName: untyped): untyped = when incbinEnabled: `networkName Genesis`.toOpenArray(0, `networkName GenesisSize` - 1) @@ -466,22 +435,22 @@ when const_preset in ["mainnet", "gnosis"]: template bakedBytes*(metadata: GenesisMetadata): auto = case metadata.networkName of "mainnet": - when const_preset == "mainnet": + when IsMainnetSupported: bakedInGenesisStateAsBytes mainnet else: raiseAssert availableOnlyInMainnetBuild of "sepolia": - when const_preset == "mainnet": + when IsMainnetSupported: bakedInGenesisStateAsBytes sepolia else: raiseAssert availableOnlyInMainnetBuild of "gnosis": - when const_preset == "gnosis": + when IsGnosisSupported: bakedInGenesisStateAsBytes gnosis else: raiseAssert availableOnlyInGnosisBuild of "chiado": - when const_preset == "gnosis": + when IsGnosisSupported: bakedInGenesisStateAsBytes chiado else: raiseAssert availableOnlyInGnosisBuild @@ -505,4 +474,4 @@ else: raiseAssert "Baked genesis states are not available in the current build mode" func bakedGenesisValidatorsRoot*(metadata: Eth2NetworkMetadata): Opt[Eth2Digest] = - Opt.none Eth2Digest \ No newline at end of file + Opt.none Eth2Digest diff --git a/beacon_chain/networking/peer_protocol.nim b/beacon_chain/networking/peer_protocol.nim index f81ad3b68d..a9fc879983 100644 --- a/beacon_chain/networking/peer_protocol.nim +++ b/beacon_chain/networking/peer_protocol.nim @@ -26,6 +26,15 @@ type headRoot*: Eth2Digest headSlot*: Slot + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/fulu/p2p-interface.md#status-v2 + StatusMsgV2* = object + forkDigest*: ForkDigest + finalizedRoot*: Eth2Digest + finalizedEpoch*: Epoch + headRoot*: Eth2Digest + headSlot*: Slot + earliestAvailableSlot*: Slot + PeerSyncNetworkState* {.final.} = ref object of RootObj dag: ChainDAGRef cfg: RuntimeConfig @@ -36,6 +45,7 @@ type PeerSyncPeerState* {.final.} = ref object of RootObj statusLastTime: chronos.Moment statusMsg: StatusMsg + statusMsgV2: Opt[StatusMsgV2] declareCounter nbc_disconnects_count, "Number disconnected peers", labels = ["agent", "reason"] @@ -50,12 +60,23 @@ 
func shortLog*(s: StatusMsg): auto = ) chronicles.formatIt(StatusMsg): shortLog(it) +func shortLog*(s: StatusMsgV2): auto = + ( + forkDigest: s.forkDigest, + finalizedRoot: shortLog(s.finalizedRoot), + finalizedEpoch: shortLog(s.finalizedEpoch), + headRoot: shortLog(s.headRoot), + headSlot: shortLog(s.headSlot), + earliestAvailableSlot: shortLog(s.earliestAvailableSlot) + ) +chronicles.formatIt(StatusMsgV2): shortLog(it) + func forkDigestAtEpoch(state: PeerSyncNetworkState, epoch: Epoch): ForkDigest = state.forkDigests[].atEpoch(epoch, state.cfg) # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/p2p-interface.md#status -proc getCurrentStatus(state: PeerSyncNetworkState): StatusMsg = +proc getCurrentStatusV1(state: PeerSyncNetworkState): StatusMsg = let dag = state.dag wallSlot = state.getBeaconTime().slotOrZero @@ -83,7 +104,38 @@ proc getCurrentStatus(state: PeerSyncNetworkState): StatusMsg = headRoot: state.genesisBlockRoot, headSlot: GENESIS_SLOT) -proc checkStatusMsg(state: PeerSyncNetworkState, status: StatusMsg): +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/fulu/p2p-interface.md#status-v2 +proc getCurrentStatusV2(state: PeerSyncNetworkState): StatusMsgV2 = + let + dag = state.dag + wallSlot = state.getBeaconTime().slotOrZero + + if dag != nil: + StatusMsgV2( + forkDigest: state.forkDigestAtEpoch(wallSlot.epoch), + finalizedRoot: + (if dag.finalizedHead.slot.epoch != GENESIS_EPOCH: + dag.finalizedHead.blck.root + else: + # this defaults to `Root(b'\x00' * 32)` for the genesis finalized + # checkpoint + ZERO_HASH), + finalizedEpoch: dag.finalizedHead.slot.epoch, + headRoot: dag.head.root, + headSlot: dag.head.slot, + earliestAvailableSlot: dag.earliestAvailableSlot()) + else: + StatusMsgV2( + forkDigest: state.forkDigestAtEpoch(wallSlot.epoch), + # this defaults to `Root(b'\x00' * 32)` for the genesis finalized + # checkpoint + finalizedRoot: ZERO_HASH, + finalizedEpoch: GENESIS_EPOCH, + headRoot: state.genesisBlockRoot, + headSlot: GENESIS_SLOT, + earliestAvailableSlot: GENESIS_SLOT) + +proc checkStatusMsg(state: PeerSyncNetworkState, status: StatusMsg | StatusMsgV2): Result[void, cstring] = let dag = state.dag @@ -114,12 +166,20 @@ proc checkStatusMsg(state: PeerSyncNetworkState, status: StatusMsg): # apparently don't use spec ZERO_HASH as of this writing if not (status.finalizedRoot in [state.genesisBlockRoot, ZERO_HASH]): return err("peer following different finality") - ok() -proc handleStatus(peer: Peer, - state: PeerSyncNetworkState, - theirStatus: StatusMsg): Future[bool] {.async: (raises: [CancelledError]).} +proc handleStatusV1(peer: Peer, + state: PeerSyncNetworkState, + theirStatus: StatusMsg): Future[bool] {.async: (raises: [CancelledError]).} + +proc handleStatusV2(peer: Peer, + state: PeerSyncNetworkState, + theirStatus: StatusMsgV2): Future[bool] {.async: (raises: [CancelledError]).} + +proc setStatusV2Msg(state: PeerSyncPeerState, + statusMsg: Opt[StatusMsgV2]) = + state.statusMsgV2 = statusMsg + state.statusLastTime = Moment.now() {.pop.} # TODO fix p2p macro for raises @@ -142,26 +202,57 @@ p2pProtocol PeerSync(version = 1, # need a dedicated flow in libp2p that resolves the race conditions - # this needs more thinking around the ordering of events and the # given incoming flag + let - ourStatus = peer.networkState.getCurrentStatus() - theirStatus = await peer.status(ourStatus, timeout = RESP_TIMEOUT_DUR) + remoteFork = peer.networkState.getBeaconTime().slotOrZero.epoch() + + if remoteFork >= 
peer.networkState.cfg.FULU_FORK_EPOCH: + let + ourStatus = peer.networkState.getCurrentStatusV2() + theirStatus = + await peer.statusV2(ourStatus, timeout = RESP_TIMEOUT_DUR) + + if theirStatus.isOk: + discard await peer.handleStatusV2(peer.networkState, theirStatus.get()) + peer.updateAgent() + else: + # Mark status v2 of remote peer as None. + peer.state(PeerSync).setStatusV2Msg(Opt.none(StatusMsgV2)) + debug "Status response not received in time", + peer, errorKind = theirStatus.error.kind + await peer.disconnect(FaultOrError) - if theirStatus.isOk: - discard await peer.handleStatus(peer.networkState, theirStatus.get()) - peer.updateAgent() else: - debug "Status response not received in time", - peer, errorKind = theirStatus.error.kind - await peer.disconnect(FaultOrError) - - proc status(peer: Peer, - theirStatus: StatusMsg, - response: SingleChunkResponse[StatusMsg]) - {.async, libp2pProtocol("status", 1).} = - let ourStatus = peer.networkState.getCurrentStatus() - trace "Sending status message", peer = peer, status = ourStatus + let + ourStatus = peer.networkState.getCurrentStatusV1() + theirStatus = + await peer.statusV1(ourStatus, timeout = RESP_TIMEOUT_DUR) + + if theirStatus.isOk: + discard await peer.handleStatusV1(peer.networkState, theirStatus.get()) + peer.updateAgent() + else: + debug "Status response not received in time", + peer, errorKind = theirStatus.error.kind + await peer.disconnect(FaultOrError) + + proc statusV1(peer: Peer, + theirStatus: StatusMsg, + response: SingleChunkResponse[StatusMsg]) + {.async, libp2pProtocol("status", 1).} = + let ourStatus = peer.networkState.getCurrentStatusV1() + trace "Sending status (v1)", peer = peer, status = ourStatus await response.send(ourStatus) - discard await peer.handleStatus(peer.networkState, theirStatus) + discard await peer.handleStatusV1(peer.networkState, theirStatus) + + proc statusV2(peer: Peer, + theirStatus: StatusMsgV2, + response: SingleChunkResponse[StatusMsgV2]) + {.async, libp2pProtocol("status", 2).} = + let ourStatus = peer.networkState.getCurrentStatusV2() + trace "Sending status (v2)", peer = peer, status = ourStatus + await response.send(ourStatus) + discard await peer.handleStatusV2(peer.networkState, theirStatus) proc ping(peer: Peer, value: uint64): uint64 {.libp2pProtocol("ping", 1).} = @@ -176,7 +267,7 @@ p2pProtocol PeerSync(version = 1, altair_metadata proc getMetadata_v3(peer: Peer): fulu.MetaData - {. libp2pProtocol("metadata", 3).} = + {.libp2pProtocol("metadata", 3).} = peer.network.metadata proc goodbye(peer: Peer, reason: uint64) {. 
@@ -192,10 +283,15 @@ proc setStatusMsg(peer: Peer, statusMsg: StatusMsg) = peer.state(PeerSync).statusMsg = statusMsg peer.state(PeerSync).statusLastTime = Moment.now() -proc handleStatus(peer: Peer, - state: PeerSyncNetworkState, - theirStatus: StatusMsg): Future[bool] - {.async: (raises: [CancelledError]).} = +proc setStatusV2Msg(peer: Peer, statusMsg: Opt[StatusMsgV2]) = + debug "Peer statusV2", peer, statusMsg + peer.state(PeerSync).statusMsgV2 = statusMsg + peer.state(PeerSync).statusLastTime = Moment.now() + +proc handleStatusV1(peer: Peer, + state: PeerSyncNetworkState, + theirStatus: StatusMsg): Future[bool] + {.async: (raises: [CancelledError]).} = let res = checkStatusMsg(state, theirStatus) @@ -212,28 +308,126 @@ proc handleStatus(peer: Peer, await peer.handlePeer() true +proc handleStatusV2(peer: Peer, + state: PeerSyncNetworkState, + theirStatus: StatusMsgV2): Future[bool] + {.async: (raises: [CancelledError]).} = + let + res = checkStatusMsg(state, theirStatus) + + return if res.isErr(): + debug "Irrelevant peer", peer, theirStatus, err = res.error() + await peer.disconnect(IrrelevantNetwork) + false + else: + peer.setStatusV2Msg(Opt.some(theirStatus)) + + if peer.connectionState == Connecting: + # As soon as we get here it means that we passed handshake succesfully. So + # we can add this peer to PeerPool. + await peer.handlePeer() + true + proc updateStatus*(peer: Peer): Future[bool] {.async: (raises: [CancelledError]).} = ## Request `status` of remote peer ``peer``. let nstate = peer.networkState(PeerSync) - ourStatus = getCurrentStatus(nstate) - theirStatus = - (await peer.status(ourStatus, timeout = RESP_TIMEOUT_DUR)).valueOr: - return false - await peer.handleStatus(nstate, theirStatus) + if nstate.getBeaconTime().slotOrZero.epoch() >= nstate.cfg.FULU_FORK_EPOCH: + let + ourStatus = getCurrentStatusV2(nstate) + theirStatus = + (await peer.statusV2(ourStatus, timeout = RESP_TIMEOUT_DUR)) + if theirStatus.isOk(): + await peer.handleStatusV2(nstate, theirStatus.get()) + else: + # Mark status v2 of remote peer as None + peer.setStatusV2Msg(Opt.none(StatusMsgV2)) + return false + + else: + let + ourStatus = getCurrentStatusV1(nstate) + theirStatus = + (await peer.statusV1(ourStatus, timeout = RESP_TIMEOUT_DUR)).valueOr: + return false + + await peer.handleStatusV1(nstate, theirStatus) proc getHeadRoot*(peer: Peer): Eth2Digest = - ## Returns head root for specific peer ``peer``. - peer.state(PeerSync).statusMsg.headRoot + let + state = peer.networkState(PeerSync) + pstate = peer.state(PeerSync) + if pstate.statusMsgV2.isSome(): + pstate.statusMsgV2.get.headRoot + else: + pstate.statusMsg.headRoot proc getHeadSlot*(peer: Peer): Slot = - ## Returns head slot for specific peer ``peer``. - peer.state(PeerSync).statusMsg.headSlot + let + state = peer.networkState(PeerSync) + pstate = peer.state(PeerSync) + if pstate.statusMsgV2.isSome(): + pstate.statusMsgV2.get.headSlot + else: + pstate.statusMsg.headSlot proc getFinalizedEpoch*(peer: Peer): Epoch = - ## Returns head slot for specific peer ``peer``. - peer.state(PeerSync).statusMsg.finalizedEpoch + let + state = peer.networkState(PeerSync) + pstate = peer.state(PeerSync) + if pstate.statusMsgV2.isSome(): + pstate.statusMsgV2.get.finalizedEpoch + else: + pstate.statusMsg.finalizedEpoch + +proc getFinalizedRoot*(peer: Peer): Eth2Digest = + ## Returns finalized checkpoint's root for specific peer ``peer``. 
+ let pstate = peer.state(PeerSync) + if pstate.statusMsgV2.isSome(): + pstate.statusMsgV2.get.finalizedRoot + else: + pstate.statusMsg.finalizedRoot + +proc getForkDigest*(peer: Peer): ForkDigest = + ## Returns fork for specific peer ``peer``. + let pstate = peer.state(PeerSync) + if pstate.statusMsgV2.isSome(): + pstate.statusMsgV2.get.forkDigest + else: + pstate.statusMsg.forkDigest + +proc getFinalizedCheckpoint*(peer: Peer): Checkpoint = + ## Returns finalized checkpoint's root for specific peer ``peer``. + let pstate = peer.state(PeerSync) + if pstate.statusMsgV2.isSome(): + Checkpoint( + root: pstate.statusMsgV2.get.finalizedRoot, + epoch: pstate.statusMsgV2.get.finalizedEpoch) + else: + Checkpoint( + root: pstate.statusMsg.finalizedRoot, + epoch: pstate.statusMsg.finalizedEpoch) + +proc getHeadBlockId*(peer: Peer): BlockId = + ## Returns head BlockId for specific peer ``peer``. + let pstate = peer.state(PeerSync) + if pstate.statusMsgV2.isSome(): + BlockId( + root: pstate.statusMsgV2.get.headRoot, + slot: pstate.statusMsgV2.get.headSlot) + else: + BlockId( + root: pstate.statusMsg.headRoot, + slot: pstate.statusMsg.headSlot) + +proc getEarliestAvailableSlot*(peer: Peer): Opt[Slot] = + ## Returns earliest available slot for specific peer ``peer``. + let + pstate = peer.state(PeerSync) + msg = pstate.statusMsgV2.valueOr: + return Opt.none(Slot) + Opt.some(msg.earliestAvailableSlot) proc getStatusLastTime*(peer: Peer): chronos.Moment = ## Returns head slot for specific peer ``peer``. diff --git a/beacon_chain/networking/peer_scores.nim b/beacon_chain/networking/peer_scores.nim index c6f2c10bd4..1ad9d1b035 100644 --- a/beacon_chain/networking/peer_scores.nim +++ b/beacon_chain/networking/peer_scores.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -35,6 +35,14 @@ const ## Peer's answer to our request is fine. PeerScoreBadValues* = -1000 ## Peer's response contains incorrect data. + PeerScoreBadColumnIntersection* = -200 + ## Peer custodies irrelevant custody columns + PeerScoreScantyColumnIntersection* = -3 + ## Peer custody overlaps less than 50% of our custody. + PeerScoreDecentColumnIntersection* = 5 + ## Peer custody overlaps more than 50% of our custody. + PeerScoreSupernode* = 8 + ## Peer can provide all columns, as they custody all columns. PeerScoreBadResponse* = -1000 ## Peer's response is not in requested range. PeerScoreMissingValues* = -25 diff --git a/beacon_chain/networking/topic_params.nim b/beacon_chain/networking/topic_params.nim index 18f7bb2dc9..1a540ea69a 100644 --- a/beacon_chain/networking/topic_params.nim +++ b/beacon_chain/networking/topic_params.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
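# Illustrative sketch (not part of this diff) of one way the column-custody
# scores added to peer_scores.nim above could be applied: rate a peer by how
# much of our custody column set it can actually serve. The thresholds mirror
# the constants above, but `columnIntersectionScore` and `NumberOfColumns`
# are hypothetical stand-ins rather than the project's API.
import std/sets

const
  PeerScoreBadColumnIntersection = -200
  PeerScoreScantyColumnIntersection = -3
  PeerScoreDecentColumnIntersection = 5
  PeerScoreSupernode = 8
  NumberOfColumns = 128  # stand-in for NUMBER_OF_COLUMNS

func columnIntersectionScore(ours, theirs: HashSet[int]): int =
  let overlap = (ours * theirs).len  # `*` is set intersection
  if theirs.len >= NumberOfColumns:
    PeerScoreSupernode                 # supernode: custodies every column
  elif overlap == 0:
    PeerScoreBadColumnIntersection     # custodies nothing we need
  elif overlap * 2 < ours.len:
    PeerScoreScantyColumnIntersection  # covers less than half of our custody
  else:
    PeerScoreDecentColumnIntersection  # covers at least half of our custody

when isMainModule:
  let ours = toHashSet([1, 2, 3, 4])
  doAssert columnIntersectionScore(ours, toHashSet([2, 3, 4, 5])) ==
    PeerScoreDecentColumnIntersection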
@@ -7,58 +7,386 @@ {.push raises: [].} -import chronos +# Inspired by Lighthouse research here: +# https://gist.github.com/blacktemplar/5c1862cb3f0e32a1a7fb0b25e79e6e2c#file-generate-scoring-params-py +# by Lighthouse actual implementation here: +# https://github.com/sigp/lighthouse/blob/stable/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +# by Prysm actual implementation here: +# https://github.com/prysmaticlabs/prysm/blob/develop/beacon-chain/p2p/gossip_scoring_params.go + +import std/[math, strutils] +import results, chronos +import ".."/spec/[presets, network, validator] -from - libp2p/protocols/pubsub/gossipsub -import +from libp2p/protocols/pubsub/gossipsub import TopicParams, validateParameters, init -# inspired by lighthouse research here -# https://gist.github.com/blacktemplar/5c1862cb3f0e32a1a7fb0b25e79e6e2c#file-generate-scoring-params-py +type + MeshMessageInfo = object + meshMessageDecayTime: chronos.Duration + meshMessageCapFactor: float64 + meshMessageActivation: chronos.Duration + dampeningFactor: float64 + +func slotsDuration(number: int): chronos.Duration = + chronos.seconds(int64(SECONDS_PER_SLOT) * number) + +func epochsDuration(number: int): chronos.Duration = + chronos.seconds(int64(SECONDS_PER_SLOT * SLOTS_PER_EPOCH) * number) + const - blocksTopicParams* = TopicParams( - topicWeight: 0.5, - timeInMeshWeight: 0.03333333333333333, - timeInMeshQuantum: chronos.seconds(12), - timeInMeshCap: 300, - firstMessageDeliveriesWeight: 1.1471603557060206, - firstMessageDeliveriesDecay: 0.9928302477768374, - firstMessageDeliveriesCap: 34.86870846001471, - meshMessageDeliveriesWeight: -458.31054878249114, - meshMessageDeliveriesDecay: 0.9716279515771061, - meshMessageDeliveriesThreshold: 0.6849191409056553, - meshMessageDeliveriesCap: 2.054757422716966, - meshMessageDeliveriesActivation: chronos.seconds(384), - meshMessageDeliveriesWindow: chronos.seconds(2), - meshFailurePenaltyWeight: -458.31054878249114 , - meshFailurePenaltyDecay: 0.9716279515771061, - invalidMessageDeliveriesWeight: -214.99999999999994, - invalidMessageDeliveriesDecay: 0.9971259067705325 - ) - aggregateTopicParams* = TopicParams( - topicWeight: 0.5, - timeInMeshWeight: 0.03333333333333333, - timeInMeshQuantum: chronos.seconds(12), - timeInMeshCap: 300, - firstMessageDeliveriesWeight: 0.10764904539552399, - firstMessageDeliveriesDecay: 0.8659643233600653, - firstMessageDeliveriesCap: 371.5778421725158, - meshMessageDeliveriesWeight: -0.07538533073670682, - meshMessageDeliveriesDecay: 0.930572040929699, - meshMessageDeliveriesThreshold: 53.404248450179836, - meshMessageDeliveriesCap: 213.61699380071934, - meshMessageDeliveriesActivation: chronos.seconds(384), - meshMessageDeliveriesWindow: chronos.seconds(2), - meshFailurePenaltyWeight: -0.07538533073670682 , - meshFailurePenaltyDecay: 0.930572040929699, - invalidMessageDeliveriesWeight: -214.99999999999994, - invalidMessageDeliveriesDecay: 0.9971259067705325 + GossipD = 8 + ## `BeaconBlockWeight` specifies the scoring weight that we apply to + ## our beacon block topic. + ## blacktemplar's code uses 0.5 + BeaconBlockWeight = 0.5'f64 + ## `AggregateWeight` specifies the scoring weight that we apply to + ## our aggregate topic. + AggregateWeight = 0.5'f64 + ## `SyncContributionWeight` specifies the scoring weight that we apply to + ## our sync contribution topic. + SyncContributionWeight = 0.2'f64 + ## `AttestationTotalWeight` specifies the scoring weight that we apply to + ## our attestation subnet topic. 
+ AttestationTotalWeight = 1'f64 + ## `SyncCommitteesTotalWeight` specifies the scoring weight that we apply to + ## our sync subnet topic. + SyncCommitteesTotalWeight = 0.4'f64 + ## `AttesterSlashingWeight` specifies the scoring weight that we apply to + ## our attester slashing topic. + AttesterSlashingWeight = 0.05'f64 + ## `ProposerSlashingWeight` specifies the scoring weight that we apply to + ## our proposer slashing topic. + ProposerSlashingWeight = 0.05'f64 + ## `VoluntaryExitWeight` specifies the scoring weight that we apply to + ## our voluntary exit topic. + VoluntaryExitWeight = 0.05'f64 + ## `BlsToExecutionChangeWeight` specifies the scoring weight that we apply to + ## our bls to execution topic. + BlsToExecutionChangeWeight = 0.05'f64 + ## `MaxInMeshScore` describes the max score a peer can attain from being in + ## the mesh. + MaxInMeshScore = 10'f64 + ## `MaxFirstDeliveryScore` describes the max score a peer can obtain from + ## first deliveries. + MaxFirstDeliveryScore = 40'f64 + ## `DecayToZero` specifies the terminal value that we will use when decaying + ## a value. + DecayToZero = 0.01'f64 + DecayInterval = chronos.seconds(int64(SECONDS_PER_SLOT)) + ## `DampeningFactor` reduces the amount by which the various thresholds and + ## caps are created. The Python code and Lighthouse use 50.0, while Prysm + ## uses 90.0. + DampeningFactor = 50'f64 + ## The time window (seconds) that we expect messages to be forwarded to us + ## in the mesh. + MeshMessageDeliveriesWindow = chronos.seconds(2) + ## `MaxScore` is the maximum score a peer can get. + MaxScore = + (MaxInMeshScore + MaxFirstDeliveryScore) * + (BeaconBlockWeight + AggregateWeight + + AttestationTotalWeight + + AttesterSlashingWeight + ProposerSlashingWeight + VoluntaryExitWeight + + SyncCommitteesTotalWeight + SyncContributionWeight + + BlsToExecutionChangeWeight) + InvalidMessageDecayPeriod = epochsDuration(50) + +func init( + t: typedesc[MeshMessageInfo], + meshMessageDecayTime: chronos.Duration, + meshMessageCapFactor: float64, + meshMessageActivation: chronos.Duration, + dampeningFactor = DampeningFactor +): MeshMessageInfo = + MeshMessageInfo( + meshMessageDecayTime: meshMessageDecayTime, + meshMessageCapFactor: meshMessageCapFactor, + meshMessageActivation: meshMessageActivation, + dampeningFactor: dampeningFactor ) - basicParams* = TopicParams.init() -static: - # compile time validation - blocksTopicParams.validateParameters().tryGet() - aggregateTopicParams.validateParameters().tryGet() - basicParams.validateParameters.tryGet() +func scoreParameterDecay(decayDuration: chronos.Duration): float64 = + ## Computes the decay to use such that a value of 1 decays to 0 (using the + ## DecayToZero parameter) within the specified `decayDuration`. + let ticks = decayDuration.seconds div DecayInterval.seconds + math.pow(DecayToZero, 1'f64 / float64(ticks)) + +func decayConvergence(decay, rate: float64): float64 = + ## Computes the limit to which a decay process will converge if it has the + ## given issuance rate per decay interval and the given decay factor. + rate / (1 - decay) + +func threshold(decay, requiredRate: float64): float64 = + ## Computes a threshold value if we require at least the given rate with the + ## given decay (In fact we require strictly more than the given rate, since + ## the rate will reach the threshold only at infinity).
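# Worked example (illustrative, assuming mainnet presets with
# SECONDS_PER_SLOT = 12 and SLOTS_PER_EPOCH = 32): for the beacon block topic,
# getBlockTopicParams below uses a mesh-message decay time of 5 epochs, i.e.
# 160 one-slot decay intervals, and an expected message rate of 1.0, so
#   decay     = DecayToZero ^ (1 / 160) = 0.01 ^ (1 / 160) ≈ 0.97163
#   required  = expectedMessageRate / DampeningFactor = 1.0 / 50 = 0.02
#   threshold = (required / (1 - decay)) * decay ≈ 0.685
# which reproduces the previously hard-coded meshMessageDeliveriesDecay
# (0.9716...) and meshMessageDeliveriesThreshold (0.6849...) removed from the
# old blocksTopicParams above.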
+ decayConvergence(decay, requiredRate) * decay + +func expectedAggregatorCountPerSloot(validators: uint64): float64 = + let + committees = + get_committee_count_per_slot(validators) * SLOTS_PER_EPOCH + (smallerCommitteeSize, numLargerCommittees) = divmod(validators, committees) + moduloSmaller = + max(1'u64, + smallerCommitteeSize div TARGET_AGGREGATORS_PER_COMMITTEE) + moduleLarger = + max(1'u64, + (smallerCommitteeSize + 1) div TARGET_AGGREGATORS_PER_COMMITTEE) + + (float64((committees - numLargerCommittees) * smallerCommitteeSize) / + float64(moduloSmaller) + + float64(numLargerCommittees * (smallerCommitteeSize + 1)) / + float64(moduleLarger)) / float64(SLOTS_PER_EPOCH) + +func topicParams( + topicWeight: float64, + expectedMessageRate: float64, + firstMessageDecayTime: chronos.Duration, + meshMessageInfo: Opt[MeshMessageInfo] = Opt.none(MeshMessageInfo) +): TopicParams = + let + timeInMeshCap = float64(3600) / float64(SECONDS_PER_SLOT) + firstMessageDeliveriesDecay = + scoreParameterDecay(firstMessageDecayTime) + firstMessageDeliveriesCap = + decayConvergence(firstMessageDeliveriesDecay, + 2'f64 * expectedMessageRate / float64(GossipD)) + if meshMessageInfo.isNone(): + TopicParams( + topicWeight: + topicWeight, + timeInMeshWeight: + MaxInMeshScore / timeInMeshCap, + timeInMeshQuantum: + chronos.seconds(int64(SECONDS_PER_SLOT)), + timeInMeshCap: + timeInMeshCap, + firstMessageDeliveriesDecay: + firstMessageDeliveriesDecay, + firstMessageDeliveriesCap: + firstMessageDeliveriesCap, + firstMessageDeliveriesWeight: + MaxFirstDeliveryScore / firstMessageDeliveriesCap, + meshMessageDeliveriesWeight: 0.0'f64, + meshMessageDeliveriesThreshold: 0.0'f64, + meshMessageDeliveriesDecay: 0.0'f64, + meshMessageDeliveriesCap: 0.0'f64, + meshMessageDeliveriesActivation: ZeroDuration, + meshMessageDeliveriesWindow: ZeroDuration, + meshFailurePenaltyDecay: 0.0'f64, + meshFailurePenaltyWeight: 0.0'f64, + invalidMessageDeliveriesWeight: + -MaxScore / topicWeight, + invalidMessageDeliveriesDecay: + scoreParameterDecay(InvalidMessageDecayPeriod) + ) + else: + let + info = meshMessageInfo.get() + meshMessageDeliveriesDecay = + if info.meshMessageDecayTime.isZero(): + 0.0'f64 + else: + scoreParameterDecay(info.meshMessageDecayTime) + meshMessageDeliveriesThreshold = + if info.meshMessageDecayTime.isZero(): + 0.0'f64 + else: + threshold(meshMessageDeliveriesDecay, + expectedMessageRate / info.dampeningFactor) + meshMessageDeliveriesWeight = + if info.meshMessageDecayTime.isZero(): + 0.0'f64 + else: + -MaxScore / (topicWeight * meshMessageDeliveriesThreshold * + meshMessageDeliveriesThreshold) + meshMessageDeliveriesCap = + info.meshMessageCapFactor * meshMessageDeliveriesThreshold + meshMessageDeliveriesActivation = + if info.meshMessageDecayTime.isZero(): + ZeroDuration + else: + info.meshMessageActivation + meshMessageDeliveriesWindow = + if info.meshMessageDecayTime.isZero(): + ZeroDuration + else: + MeshMessageDeliveriesWindow + meshFailurePenaltyWeight = meshMessageDeliveriesWeight + meshFailurePenaltyDecay = meshMessageDeliveriesDecay + + TopicParams( + topicWeight: + topicWeight, + timeInMeshWeight: + MaxInMeshScore / timeInMeshCap, + timeInMeshQuantum: + chronos.seconds(int64(SECONDS_PER_SLOT)), + timeInMeshCap: + timeInMeshCap, + firstMessageDeliveriesDecay: + firstMessageDeliveriesDecay, + firstMessageDeliveriesCap: + firstMessageDeliveriesCap, + firstMessageDeliveriesWeight: + MaxFirstDeliveryScore / firstMessageDeliveriesCap, + meshMessageDeliveriesDecay: + meshMessageDeliveriesDecay, + 
meshMessageDeliveriesThreshold: + meshMessageDeliveriesThreshold, + meshMessageDeliveriesWeight: + meshMessageDeliveriesWeight, + meshMessageDeliveriesCap: + meshMessageDeliveriesCap, + meshMessageDeliveriesActivation: + meshMessageDeliveriesActivation, + meshMessageDeliveriesWindow: + meshMessageDeliveriesWindow, + meshFailurePenaltyWeight: + meshFailurePenaltyWeight, + meshFailurePenaltyDecay: + meshFailurePenaltyDecay, + invalidMessageDeliveriesWeight: + -MaxScore / topicWeight, + invalidMessageDeliveriesDecay: + scoreParameterDecay(InvalidMessageDecayPeriod), + ) + +func getBlockTopicParams*(): TopicParams = + let meshInfo = + MeshMessageInfo.init(epochsDuration(5), 3.0'f64, + epochsDuration(1)) + topicParams(BeaconBlockWeight, 1.0'f64, epochsDuration(20), + Opt.some(meshInfo)) + +func getAttestationSubnetTopicParams*(validatorsCount: uint64): TopicParams = + let + committeesPerSlot = get_committee_count_per_slot(validatorsCount) + multipleBurstsPerSubnetPerEpoch = + committeesPerSlot >= 2 * ATTESTATION_SUBNET_COUNT div SLOTS_PER_EPOCH + topicWeight = 1.0'f64 / float64(ATTESTATION_SUBNET_COUNT) + messageRate = + float64(validatorsCount) / float64(ATTESTATION_SUBNET_COUNT) / + float64(SLOTS_PER_EPOCH) + firstMessageDecayTime = + if multipleBurstsPerSubnetPerEpoch: + epochsDuration(1) + else: + epochsDuration(4) + meshMessageDecayTime = + if multipleBurstsPerSubnetPerEpoch: + epochsDuration(4) + else: + epochsDuration(16) + meshMessageCapFactor = 16.0'f64 + meshMessageActivation = + if multipleBurstsPerSubnetPerEpoch: + slotsDuration(int(SLOTS_PER_EPOCH) div 2 + 1) + else: + epochsDuration(3) + meshInfo = MeshMessageInfo.init(meshMessageDecayTime, meshMessageCapFactor, + meshMessageActivation) + topicParams(topicWeight, messageRate, firstMessageDecayTime, + Opt.some(meshInfo)) + +func getSyncCommitteeSubnetTopicParams*(validatorsCount: uint64): TopicParams = + let + topicWeight = + SyncCommitteesTotalWeight / float64(SYNC_COMMITTEE_SUBNET_COUNT) + activeValidators = + if validatorsCount > SYNC_COMMITTEE_SIZE: + uint64(SYNC_COMMITTEE_SIZE) + else: + validatorsCount + messageRate = + float64(activeValidators) / float64(SYNC_COMMITTEE_SUBNET_COUNT) + firstMessageDecayTime = epochsDuration(1) + meshMessageDecayTime = epochsDuration(4) + meshMessageCapFactor = 4.0'f64 + meshMessageActivation = epochsDuration(1) + meshInfo = MeshMessageInfo.init(meshMessageDecayTime, meshMessageCapFactor, + meshMessageActivation) + topicParams(topicWeight, messageRate, firstMessageDecayTime, + Opt.some(meshInfo)) + +func getAggregateProofTopicParams*(validatorsCount: uint64): TopicParams = + let + messageRate = expectedAggregatorCountPerSloot(validatorsCount) + meshInfo = MeshMessageInfo.init(epochsDuration(2), 4.0'f64, + epochsDuration(1)) + topicParams(AggregateWeight, messageRate, epochsDuration(1), + Opt.some(meshInfo)) + +func getSyncContributionTopicParams*(): TopicParams = + let + messageRate = float64( + SYNC_COMMITTEE_SUBNET_COUNT * TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE) + meshInfo = MeshMessageInfo.init(epochsDuration(1), 4.0'f64, + epochsDuration(1)) + topicParams(SyncContributionWeight, messageRate, epochsDuration(1), + Opt.some(meshInfo)) + +func getVoluntaryExitTopicParams*(): TopicParams = + let messageRate = 4.0'f64 / float(SLOTS_PER_EPOCH) + topicParams(VoluntaryExitWeight, messageRate, epochsDuration(100), + Opt.none(MeshMessageInfo)) + +func getProposerSlashingTopicParams*(): TopicParams = + let messageRate = 1.0'f64 / 5.0'f64 / float64(SLOTS_PER_EPOCH) + 
topicParams(ProposerSlashingWeight, messageRate, epochsDuration(100), + Opt.none(MeshMessageInfo)) + +func getAttesterSlashingTopicParams*(): TopicParams = + let messageRate = 1.0'f64 / 5.0'f64 / float64(SLOTS_PER_EPOCH) + topicParams(AttesterSlashingWeight, messageRate, epochsDuration(100), + Opt.none(MeshMessageInfo)) + +func getBlsToExecutionChangeTopicParams*(): TopicParams = + let messageRate = 1.0'f64 / 5.0'f64 / float64(SLOTS_PER_EPOCH) + topicParams(BlsToExecutionChangeWeight, messageRate, epochsDuration(100), + Opt.none(MeshMessageInfo)) + +func basicParams*(): TopicParams = TopicParams.init() + +proc `$`*(params: TopicParams): string = + const FormatString = + "TopicWeight: $1\p" & + "TimeInMeshWeight: $2\p" & + "TimeInMeshQuantum: $3\p" & + "TimeInMeshCap: $4\p" & + "FirstMessageDeliveriesWeight: $5\p" & + "FirstMessageDeliveriesDecay: $6\p" & + "FirstMessageDeliveriesCap:: $7\p" & + "MeshMessageDeliveriesWeight: $8\p" & + "MeshMessageDeliveriesDecay: $9\p" & + "MeshMessageDeliveriesCap: $10\p" & + "MeshMessageDeliveriesThreshold: $11\p" & + "MeshMessageDeliveriesWindow: $12\p" & + "MeshMessageDeliveriesActivation: $13\p" & + "MeshFailurePenaltyWeight: $14\p" & + "MeshFailurePenaltyDecay: $15\p" & + "InvalidMessageDeliveriesWeight: $16\p" & + "InvalidMessageDeliveriesDecay: $17\p" + try: + FormatString % [ + $params.topicWeight, + $params.timeInMeshWeight, + $params.timeInMeshQuantum, + $params.timeInMeshCap, + $params.firstMessageDeliveriesWeight, + $params.firstMessageDeliveriesDecay, + $params.firstMessageDeliveriesCap, + $params.meshMessageDeliveriesWeight, + $params.meshMessageDeliveriesDecay, + $params.meshMessageDeliveriesCap, + $params.meshMessageDeliveriesThreshold, + $params.meshMessageDeliveriesWindow, + $params.meshMessageDeliveriesActivation, + $params.meshFailurePenaltyWeight, + $params.meshFailurePenaltyDecay, + $params.invalidMessageDeliveriesWeight, + $params.invalidMessageDeliveriesDecay + ] + except ValueError: + raiseAssert "Should not happen" diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 782a76244e..ca01ff1354 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -8,35 +8,33 @@ {.push raises: [].} import - std/[os, random, terminal, times, exitprocs], + system/ansi_c, + std/[os, random, strutils, terminal, times], chronos, chronicles, metrics, metrics/chronos_httpserver, stew/[byteutils, io2], - eth/p2p/discoveryv5/[enr, random2], + kzg4844/kzg, + eth/enr/enr, + eth/p2p/discoveryv5/random2, ./consensus_object_pools/[ - blob_quarantine, data_column_quarantine, blockchain_list], + blob_quarantine, blockchain_list], ./consensus_object_pools/vanity_logs/vanity_logs, ./networking/[topic_params, network_metadata_downloads], ./rpc/[rest_api, state_ttl_cache], ./spec/datatypes/[altair, bellatrix, phase0], ./spec/[ - deposit_snapshots, engine_authentication, weak_subjectivity, - peerdas_helpers], - ./sync/[sync_protocol, light_client_protocol, sync_overseer], + engine_authentication, weak_subjectivity, peerdas_helpers], + ./sync/[sync_protocol, light_client_protocol, sync_overseer, validator_custody], ./validators/[keystore_management, beacon_validators], - "."/[ - beacon_node, beacon_node_light_client, deposits, - nimbus_binary_common, statusbar, trusted_node_sync, wallets] + ./[ + beacon_node, beacon_node_light_client, buildinfo, deposits, + nimbus_binary_common, process_state, statusbar, trusted_node_sync, wallets] -when defined(posix): - import system/ansi_c - -from 
./spec/datatypes/deneb import SignedBeaconBlock - -from - libp2p/protocols/pubsub/gossipsub -import +from std/algorithm import sort +from std/sequtils import filterIt, mapIt, toSeq +from libp2p/protocols/pubsub/gossipsub import TopicParams, validateParameters, init +from ./spec/datatypes/deneb import SignedBeaconBlock logScope: topics = "beacnde" @@ -111,7 +109,6 @@ proc doRunTrustedNodeSync( trustedBlockRoot: Option[Eth2Digest], backfill: bool, reindex: bool, - downloadDepositSnapshot: bool, genesisState: ref ForkedHashedBeaconState) {.async.} = let syncTarget = if stateId.isSome: @@ -138,7 +135,6 @@ proc doRunTrustedNodeSync( syncTarget, backfill, reindex, - downloadDepositSnapshot, genesisState) func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = @@ -146,22 +142,22 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = of StdoutLogKind.Auto: raiseAssert "inadmissable here" of StdoutLogKind.Colors: VanityLogs( - onUpgradeToCapella: capellaColor, onKnownBlsToExecutionChange: capellaBlink, onUpgradeToDeneb: denebColor, onUpgradeToElectra: electraColor, - onKnownCompoundingChange: electraBlink) + onKnownCompoundingChange: electraBlink, + onUpgradeToFulu: fuluColor, + onBlobParametersUpdate: fuluColor) of StdoutLogKind.NoColors: VanityLogs( - onUpgradeToCapella: capellaMono, onKnownBlsToExecutionChange: capellaMono, onUpgradeToDeneb: denebMono, onUpgradeToElectra: electraMono, - onKnownCompoundingChange: electraMono) + onKnownCompoundingChange: electraMono, + onUpgradeToFulu: fuluMono, + onBlobParametersUpdate: fuluMono) of StdoutLogKind.Json, StdoutLogKind.None: VanityLogs( - onUpgradeToCapella: - (proc() = notice "🦉 Withdrowls now available 🦉"), onKnownBlsToExecutionChange: (proc() = notice "🦉 BLS to execution changed 🦉"), onUpgradeToDeneb: @@ -169,12 +165,19 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = onUpgradeToElectra: (proc() = notice "🦒 Compounding is available 🦒"), onKnownCompoundingChange: - (proc() = notice "🦒 Compounding is activated 🦒")) + (proc() = notice "🦒 Compounding is activated 🦒"), + onUpgradeToFulu: + (proc() = notice "🐅 Blobs columnized 🐅"), + onBlobParametersUpdate: + (proc() = notice "🐅 Blob parameters updated 🐅")) func getVanityMascot(consensusFork: ConsensusFork): string = + debugGloasComment "don't know vanity mascot yet" case consensusFork + of ConsensusFork.Gloas: + "?" 
of ConsensusFork.Fulu: - "❓" + "🐅" of ConsensusFork.Electra: "🦒" of ConsensusFork.Deneb: @@ -202,24 +205,24 @@ proc loadChainDag( if dag == nil: return withForkyFinalityUpdate(data): when lcDataFork > LightClientDataFork.None: - let contextFork = - dag.cfg.consensusForkAtEpoch(forkyFinalityUpdate.contextEpoch) + let + contextEpoch = forkyFinalityUpdate.contextEpoch + contextFork = dag.cfg.consensusForkAtEpoch(contextEpoch) + contextBytes = dag.forkDigestAtEpoch(contextEpoch) eventBus.finUpdateQueue.emit( RestVersioned[ForkedLightClientFinalityUpdate]( - data: data, - jsonVersion: contextFork, - sszContext: dag.forkDigests[].atConsensusFork(contextFork))) + data: data, jsonVersion: contextFork, sszContext: contextBytes)) proc onLightClientOptimisticUpdate(data: ForkedLightClientOptimisticUpdate) = if dag == nil: return withForkyOptimisticUpdate(data): when lcDataFork > LightClientDataFork.None: - let contextFork = - dag.cfg.consensusForkAtEpoch(forkyOptimisticUpdate.contextEpoch) + let + contextEpoch = forkyOptimisticUpdate.contextEpoch + contextFork = dag.cfg.consensusForkAtEpoch(contextEpoch) + contextBytes = dag.forkDigestAtEpoch(contextEpoch) eventBus.optUpdateQueue.emit( RestVersioned[ForkedLightClientOptimisticUpdate]( - data: data, - jsonVersion: contextFork, - sszContext: dag.forkDigests[].atConsensusFork(contextFork))) + data: data, jsonVersion: contextFork, sszContext: contextBytes)) let chainDagFlags = @@ -309,12 +312,14 @@ proc initFullNode( node.eventBus.electraAttSlashQueue.emit(data) proc onBlobSidecarAdded(data: BlobSidecarInfoObject) = node.eventBus.blobSidecarQueue.emit(data) + proc onColumnSidecarAdded(data: DataColumnSidecarInfoObject) = + node.eventBus.columnSidecarQueue.emit(data) proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) = let optimistic = if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: - some node.dag.is_optimistic(data.toBlockId()) + Opt.some node.dag.is_optimistic(data.toBlockId()) else: - none[bool]() + Opt.none(bool) node.eventBus.blocksQueue.emit( EventBeaconBlockObject.init(data, optimistic)) proc onBlockGossipAdded(data: ForkedSignedBeaconBlock) = @@ -324,7 +329,7 @@ proc initFullNode( let eventData = if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: var res = data - res.optimistic = some node.dag.is_optimistic( + res.optimistic = Opt.some node.dag.is_optimistic( BlockId(slot: data.slot, root: data.block_root)) res else: @@ -334,7 +339,7 @@ proc initFullNode( let eventData = if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: var res = data - res.optimistic = some node.dag.is_optimistic( + res.optimistic = Opt.some node.dag.is_optimistic( BlockId(slot: data.slot, root: data.new_head_block)) res else: @@ -347,18 +352,13 @@ proc initFullNode( elManager: ELManager): OnFinalizedCallback {.nimcall.} = static: doAssert (elManager is ref) return proc(dag: ChainDAGRef, data: FinalizationInfoObject) = - if elManager != nil: - let finalizedEpochRef = dag.getFinalizedEpochRef() - discard trackFinalizedState(elManager, - finalizedEpochRef.eth1_data, - finalizedEpochRef.eth1_deposit_index) node.updateLightClientFromDag() let eventData = if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: var res = data # `slot` in this `BlockId` may be higher than block's actual slot, # this is alright for the purpose of calling `is_optimistic`. 
- res.optimistic = some node.dag.is_optimistic( + res.optimistic = Opt.some node.dag.is_optimistic( BlockId(slot: data.epoch.start_slot, root: data.block_root)) res else: @@ -393,15 +393,18 @@ proc initFullNode( isSlotWithinWeakSubjectivityPeriod(node.dag, node.beaconClock.now().slotOrZero()) + proc forkAtEpoch(epoch: Epoch): ConsensusFork = + consensusForkAtEpoch(dag.cfg, epoch) + proc eventWaiter(): Future[void] {.async: (raises: [CancelledError]).} = await node.shutdownEvent.wait() - bnStatus = BeaconNodeStatus.Stopping + ProcessState.scheduleStop("shutdownEvent") asyncSpawn eventWaiter() let quarantine = newClone( - Quarantine.init()) + Quarantine.init(dag.cfg)) attestationPool = newClone(AttestationPool.init( dag, quarantine, onPhase0AttestationReceived, onSingleAttestationReceived)) @@ -414,18 +417,24 @@ proc initFullNode( onProposerSlashingAdded, onPhase0AttesterSlashingAdded, onElectraAttesterSlashingAdded)) blobQuarantine = newClone(BlobQuarantine.init( - dag.cfg, onBlobSidecarAdded)) - dataColumnQuarantine = newClone(DataColumnQuarantine.init()) + dag.cfg, dag.db.getQuarantineDB(), 10, onBlobSidecarAdded)) supernode = node.config.peerdasSupernode localCustodyGroups = if supernode: - NUMBER_OF_CUSTODY_GROUPS.uint64 + dag.cfg.NUMBER_OF_CUSTODY_GROUPS else: - CUSTODY_REQUIREMENT.uint64 - custody_columns_set = - node.network.nodeId.resolve_column_sets_from_custody_groups( - max(SAMPLES_PER_SLOT.uint64, - localCustodyGroups)) + dag.cfg.CUSTODY_REQUIREMENT + custodyColumns = + dag.cfg.resolve_columns_from_custody_groups( + node.network.nodeId, localCustodyGroups) + + var sortedColumns = custodyColumns.toSeq() + sort(sortedColumns) + + let + dataColumnQuarantine = newClone(ColumnQuarantine.init( + dag.cfg, sortedColumns, dag.db.getQuarantineDB(), 10, + onColumnSidecarAdded)) consensusManager = ConsensusManager.new( dag, attestationPool, quarantine, node.elManager, ActionTracker.init(node.network.nodeId, config.subscribeAllSubnets), @@ -435,17 +444,25 @@ proc initFullNode( blockProcessor = BlockProcessor.new( config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming, batchVerifier, consensusManager, node.validatorMonitor, - blobQuarantine, getBeaconTime) - + blobQuarantine, dataColumnQuarantine, getBeaconTime, + config.invalidBlockRoots) blockVerifier = proc(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} = - # The design with a callback for block verification is unusual compared - # to the rest of the application, but fits with the general approach - # taken in the sync/request managers - this is an architectural compromise - # that should probably be reimagined more holistically in the future. - blockProcessor[].addBlock( - MsgSource.gossip, signedBlock, blobs, maybeFinalized = maybeFinalized) + withBlck(signedBlock): + when consensusFork in ConsensusFork.Fulu .. ConsensusFork.Gloas: + # TODO document why there are no columns here + let sidecarsOpt = Opt.none(DataColumnSidecars) + elif consensusFork in ConsensusFork.Deneb .. ConsensusFork.Electra: + template sidecarsOpt: untyped = blobs + elif consensusFork in ConsensusFork.Phase0 .. 
ConsensusFork.Capella: + const sidecarsOpt = noSidecars + else: + {.error: "Unknown fork: " & $consensusFork.} + + blockProcessor.addBlock( + MsgSource.gossip, forkyBlck, sidecarsOpt, maybeFinalized) + untrustedBlockVerifier = proc(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], maybeFinalized: bool): Future[Result[void, VerifierError]] {. @@ -455,23 +472,38 @@ proc initFullNode( maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = withBlck(signedBlock): - when consensusFork >= ConsensusFork.Deneb: - if not blobQuarantine[].hasBlobs(forkyBlck): - # We don't have all the blobs for this block, so we have - # to put it in blobless quarantine. - if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck): - err(VerifierError.UnviableFork) - else: - err(VerifierError.MissingParent) - else: - let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck) - await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, - Opt.some(blobs), - maybeFinalized = maybeFinalized) + when consensusFork == ConsensusFork.Gloas: + debugGloasComment "no blob_kzg_commitments field for gloas" + let sidecarsOpt = Opt.none(DataColumnSidecars) + elif consensusFork == ConsensusFork.Fulu: + let sidecarsOpt = + dataColumnQuarantine[].popSidecars(forkyBlck.root, forkyBlck) + if sidecarsOpt.isNone(): + # We don't have all the columns for this block, so we have + # to put it in columnless quarantine. + return + if not quarantine[].addSidecarless(dag.finalizedHead.slot, forkyBlck): + err(VerifierError.UnviableFork) + else: + err(VerifierError.MissingParent) + elif consensusFork in ConsensusFork.Deneb .. ConsensusFork.Electra: + let sidecarsOpt = blobQuarantine[].popSidecars(forkyBlck.root, forkyBlck) + if sidecarsOpt.isNone(): + # We don't have all the sidecars for this block, so we have + # to put it in the quarantine. + return + if not quarantine[].addSidecarless(dag.finalizedHead.slot, forkyBlck): + err(VerifierError.UnviableFork) + else: + err(VerifierError.MissingParent) + elif consensusFork in ConsensusFork.Phase0 ..
ConsensusFork.Capella: + const sidecarsOpt = noSidecars else: - await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, - Opt.none(BlobSidecars), - maybeFinalized = maybeFinalized) + {.error: "Unknown fork: " & $consensusFork.} + + await blockProcessor.addBlock( + MsgSource.gossip, forkyBlck, sidecarsOpt, maybeFinalized + ) rmanBlockLoader = proc( blockRoot: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] = dag.getForkedBlock(blockRoot) @@ -483,18 +515,19 @@ proc initFullNode( else: Opt.none(ref BlobSidecar) rmanDataColumnLoader = proc( - columnId: DataColumnIdentifier): Opt[ref DataColumnSidecar] = - var data_column_sidecar = DataColumnSidecar.new() + columnId: DataColumnIdentifier): Opt[ref fulu.DataColumnSidecar] = + var data_column_sidecar = fulu.DataColumnSidecar.new() if dag.db.getDataColumnSidecar(columnId.block_root, columnId.index, data_column_sidecar[]): Opt.some data_column_sidecar else: - Opt.none(ref DataColumnSidecar) + Opt.none(ref fulu.DataColumnSidecar) processor = Eth2Processor.new( config.doppelgangerDetection, blockProcessor, node.validatorMonitor, dag, attestationPool, validatorChangePool, node.attachedValidators, syncCommitteeMsgPool, - lightClientPool, quarantine, blobQuarantine, rng, getBeaconTime, taskpool) + lightClientPool, quarantine, blobQuarantine, dataColumnQuarantine, + rng, getBeaconTime, taskpool) syncManagerFlags = if node.config.longRangeSync != LongRangeSyncMode.Lenient: {SyncManagerFlag.NoGenesisSync} @@ -509,7 +542,7 @@ proc initFullNode( SyncQueueKind.Forward, getLocalHeadSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, getFrontfillSlot, isWithinWeakSubjectivityPeriod, - dag.tail.slot, blockVerifier, + dag.tail.slot, blockVerifier, forkAtEpoch, shutdownEvent = node.shutdownEvent, flags = syncManagerFlags) backfiller = newSyncManager[Peer, PeerId]( @@ -521,7 +554,7 @@ proc initFullNode( SyncQueueKind.Backward, getLocalHeadSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, getFrontfillSlot, isWithinWeakSubjectivityPeriod, - dag.backfill.slot, blockVerifier, maxHeadAge = 0, + dag.backfill.slot, blockVerifier, forkAtEpoch, maxHeadAge = 0, shutdownEvent = node.shutdownEvent, flags = syncManagerFlags) clistPivotSlot = @@ -529,6 +562,7 @@ proc initFullNode( clist.tail.get().blck.slot() else: getLocalWallSlot() + eaSlot = dag.head.slot untrustedManager = newSyncManager[Peer, PeerId]( node.network.peerPool, dag.cfg.DENEB_FORK_EPOCH, @@ -538,17 +572,19 @@ proc initFullNode( SyncQueueKind.Backward, getLocalHeadSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getUntrustedBackfillSlot, getFrontfillSlot, isWithinWeakSubjectivityPeriod, - clistPivotSlot, untrustedBlockVerifier, maxHeadAge = 0, + clistPivotSlot, untrustedBlockVerifier, forkAtEpoch, maxHeadAge = 0, shutdownEvent = node.shutdownEvent, flags = syncManagerFlags) router = (ref MessageRouter)( processor: processor, network: node.network) requestManager = RequestManager.init( - node.network, supernode, custody_columns_set, dag.cfg.DENEB_FORK_EPOCH, - getBeaconTime, (proc(): bool = syncManager.inProgress), + node.network, supernode, custodyColumns, + dag.cfg.DENEB_FORK_EPOCH, getBeaconTime, (proc(): bool = syncManager.inProgress), quarantine, blobQuarantine, dataColumnQuarantine, rmanBlockVerifier, rmanBlockLoader, rmanBlobLoader, rmanDataColumnLoader) + validatorCustody = ValidatorCustodyRef.init(node.network, dag, custodyColumns, + dataColumnQuarantine) # As per EIP 7594, the BN is now categorised into a # `Fullnode` and a `Supernode`, the fullnodes
custodies a @@ -569,16 +605,10 @@ proc initFullNode( # during peer selection, sync with columns, and so on. That is why, # the rationale of populating it at boot and using it gloabally. - dataColumnQuarantine[].supernode = supernode - dataColumnQuarantine[].custody_columns = - node.network.nodeId.resolve_columns_from_custody_groups( - max(SAMPLES_PER_SLOT.uint64, - localCustodyGroups)) - if node.config.peerdasSupernode: - node.network.loadCgcnetMetadataAndEnr(NUMBER_OF_CUSTODY_GROUPS.uint8) + node.network.loadCgcnetMetadataAndEnr(dag.cfg.NUMBER_OF_CUSTODY_GROUPS.uint8) else: - node.network.loadCgcnetMetadataAndEnr(CUSTODY_REQUIREMENT.uint8) + node.network.loadCgcnetMetadataAndEnr(dag.cfg.CUSTODY_REQUIREMENT.uint8) if node.config.lightClientDataServe: proc scheduleSendingLightClientUpdates(slot: Slot) = @@ -602,8 +632,10 @@ proc initFullNode( dag.setReorgCb(onChainReorg) node.dag = dag + node.dag.eaSlot = eaSlot node.list = clist node.blobQuarantine = blobQuarantine + node.dataColumnQuarantine = dataColumnQuarantine node.quarantine = quarantine node.attestationPool = attestationPool node.syncCommitteeMsgPool = syncCommitteeMsgPool @@ -614,6 +646,7 @@ proc initFullNode( node.blockProcessor = blockProcessor node.consensusManager = consensusManager node.requestManager = requestManager + node.validatorCustody = validatorCustody node.syncManager = syncManager node.backfiller = backfiller node.untrustedManager = untrustedManager @@ -676,16 +709,17 @@ const SlashingDbName = "slashing_protection" # changing this requires physical file rename as well or history is lost. -proc init*(T: type BeaconNode, - rng: ref HmacDrbgContext, - config: BeaconNodeConf, - metadata: Eth2NetworkMetadata): Future[BeaconNode] - {.async.} = +proc init*( + T: type BeaconNode, + rng: ref HmacDrbgContext, + config: BeaconNodeConf, + metadata: Eth2NetworkMetadata, + taskpool: Taskpool, +): Future[BeaconNode] {.async.} = var genesisState: ref ForkedHashedBeaconState = nil template cfg: auto = metadata.cfg - template eth1Network: auto = metadata.eth1Network if not(isDir(config.databaseDir)): # If database directory missing, we going to use genesis state to check @@ -695,7 +729,7 @@ proc init*(T: type BeaconNode, metadata, config.genesisState, config.genesisStateUrl) let genesisTime = getStateField(genesisState[], genesis_time) - beaconClock = BeaconClock.init(genesisTime).valueOr: + beaconClock = BeaconClock.init(metadata.cfg.time, genesisTime).valueOr: fatal "Invalid genesis time in genesis state", genesisTime quit 1 currentSlot = beaconClock.now().slotOrZero() @@ -717,21 +751,6 @@ proc init*(T: type BeaconNode, altair_fork_epoch = metadata.cfg.ALTAIR_FORK_EPOCH quit 1 - let taskpool = - try: - if config.numThreads < 0: - fatal "The number of threads --num-threads cannot be negative." 
- quit 1 - elif config.numThreads == 0: - Taskpool.new(numThreads = min(countProcessors(), 16)) - else: - Taskpool.new(numThreads = config.numThreads) - except CatchableError as e: - fatal "Cannot start taskpool", err = e.msg - quit 1 - - info "Threadpool started", numThreads = taskpool.numThreads - if metadata.genesis.kind == BakedIn: if config.genesisState.isSome: warn "The --genesis-state option has no effect on networks with built-in genesis state" @@ -752,6 +771,7 @@ proc init*(T: type BeaconNode, phase0AttSlashQueue: newAsyncEventQueue[phase0.AttesterSlashing](), electraAttSlashQueue: newAsyncEventQueue[electra.AttesterSlashing](), blobSidecarQueue: newAsyncEventQueue[BlobSidecarInfoObject](), + columnSidecarQueue: newAsyncEventQueue[DataColumnSidecarInfoObject](), finalQueue: newAsyncEventQueue[FinalizationInfoObject](), reorgQueue: newAsyncEventQueue[ReorgInfoObject](), contribQueue: newAsyncEventQueue[SignedContributionAndProof](), @@ -801,16 +821,12 @@ proc init*(T: type BeaconNode, config.eraDir, config.externalBeaconApiUrl.get, config.trustedStateRoot.map do (x: Eth2Digest) -> string: - "0x" & x.data.toHex, + x.data.to0xHex(), trustedBlockRoot, backfill = false, reindex = false, - downloadDepositSnapshot = false, genesisState) - if config.finalizedCheckpointBlock.isSome: - warn "--finalized-checkpoint-block has been deprecated, ignoring" - let checkpointState = if config.finalizedCheckpointState.isSome: let checkpointStatePath = config.finalizedCheckpointState.get.string let tmp = try: @@ -832,24 +848,6 @@ proc init*(T: type BeaconNode, else: nil - if config.finalizedDepositTreeSnapshot.isSome: - let - depositTreeSnapshotPath = config.finalizedDepositTreeSnapshot.get.string - snapshot = - try: - SSZ.loadFile(depositTreeSnapshotPath, DepositTreeSnapshot) - except SszError as err: - fatal "Deposit tree snapshot loading failed", - err = formatMsg(err, depositTreeSnapshotPath) - quit 1 - except CatchableError as err: - fatal "Failed to read deposit tree snapshot file", err = err.msg - quit 1 - depositContractSnapshot = DepositContractSnapshot.init(snapshot).valueOr: - fatal "Invalid deposit tree snapshot file" - quit 1 - db.putDepositContractSnapshot(depositContractSnapshot) - let engineApiUrls = config.engineApiUrls if engineApiUrls.len == 0: @@ -915,6 +913,7 @@ proc init*(T: type BeaconNode, # break existing setups let validatorMonitor = newClone(ValidatorMonitor.init( + cfg.time, config.validatorMonitorAuto, config.validatorMonitorTotals.get( not config.validatorMonitorDetails))) @@ -927,7 +926,7 @@ proc init*(T: type BeaconNode, config, cfg, db, eventBus, validatorMonitor, networkGenesisValidatorsRoot) genesisTime = getStateField(dag.headState, genesis_time) - beaconClock = BeaconClock.init(genesisTime).valueOr: + beaconClock = BeaconClock.init(cfg.time, genesisTime).valueOr: fatal "Invalid genesis time in state", genesisTime quit 1 @@ -959,16 +958,7 @@ proc init*(T: type BeaconNode, dag.checkWeakSubjectivityCheckpoint( config.weakSubjectivityCheckpoint.get, beaconClock) - let elManager = ELManager.new( - cfg, - metadata.depositContractBlock, - metadata.depositContractBlockHash, - db, - engineApiUrls, - eth1Network) - - if config.rpcEnabled.isSome: - warn "Nimbus's JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it." 
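# A minimal sketch of the wall-clock-to-slot mapping behind the BeaconClock
# initialised in this hunk, assuming only the phase0 formula
# slot = (now - genesis_time) div SECONDS_PER_SLOT; the names below are local
# stand-ins and ignore the configurable time parameters the patch threads
# through via `metadata.cfg.time`.
const SECONDS_PER_SLOT = 12'u64            # mainnet preset value
func slotAt(genesisTime, wallTime: uint64): uint64 =
  if wallTime < genesisTime: 0'u64         # clamp pre-genesis times to slot 0
  else: (wallTime - genesisTime) div SECONDS_PER_SLOT
when isMainModule:
  doAssert slotAt(1000'u64, 1025'u64) == 2 # 25 s after genesis falls in slot 2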
+ let elManager = ELManager.new(engineApiUrls, metadata.eth1Network) let restServer = if config.restEnabled: RestServerRef.init(config.restAddress, config.restPort, @@ -1072,8 +1062,6 @@ proc init*(T: type BeaconNode, keymanagerServer: keymanagerInitResult.server, keystoreCache: keystoreCache, eventBus: eventBus, - gossipState: {}, - blocksGossipState: {}, beaconClock: beaconClock, validatorMonitor: validatorMonitor, stateTtlCache: stateTtlCache, @@ -1107,22 +1095,9 @@ func verifyFinalization(node: BeaconNode, slot: Slot) = # finalization occurs every slot, to 4 slots vs scheduledSlot. doAssert finalizedEpoch + 4 >= epoch -from std/sequtils import toSeq - func subnetLog(v: BitArray): string = $toSeq(v.oneIndices()) -func forkDigests(node: BeaconNode): auto = - let forkDigestsArray: array[ConsensusFork, auto] = [ - node.dag.forkDigests.phase0, - node.dag.forkDigests.altair, - node.dag.forkDigests.bellatrix, - node.dag.forkDigests.capella, - node.dag.forkDigests.deneb, - node.dag.forkDigests.electra, - node.dag.forkDigests.fulu] - forkDigestsArray - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) = if node.gossipState.card == 0: @@ -1137,6 +1112,9 @@ proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) = stabilitySubnets = node.consensusManager[].actionTracker.stabilitySubnets(slot) subnets = aggregateSubnets + stabilitySubnets + validatorsCount = + withState(node.dag.headState): + forkyState.data.validators.lenu64 node.network.updateStabilitySubnetMetadata(stabilitySubnets) @@ -1149,12 +1127,12 @@ proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) = # Remember what we subscribed to, so we can unsubscribe later node.consensusManager[].actionTracker.subscribedSubnets = subnets - let forkDigests = node.forkDigests() - - for gossipFork in node.gossipState: - let forkDigest = forkDigests[gossipFork] + for gossipEpoch in node.gossipState: + let forkDigest = node.dag.forkDigests[].atEpoch(gossipEpoch, node.dag.cfg) node.network.unsubscribeAttestationSubnets(unsubscribeSubnets, forkDigest) - node.network.subscribeAttestationSubnets(subscribeSubnets, forkDigest) + node.network.subscribeAttestationSubnets( + subscribeSubnets, forkDigest, + getAttestationSubnetTopicParams(validatorsCount)) debug "Attestation subnets", slot, epoch = slot.epoch, gossipState = node.gossipState, @@ -1178,10 +1156,7 @@ proc updateBlocksGossipStatus*( # Use DAG status to determine whether to subscribe for blocks gossip dagIsBehind - targetGossipState = getTargetGossipState( - slot.epoch, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, - cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, - cfg.FULU_FORK_EPOCH, isBehind) + targetGossipState = getTargetGossipState(slot.epoch, cfg, isBehind) template currentGossipState(): auto = node.blocksGossipState if currentGossipState == targetGossipState: @@ -1198,29 +1173,35 @@ proc updateBlocksGossipStatus*( discard let - newGossipForks = targetGossipState - currentGossipState - oldGossipForks = currentGossipState - targetGossipState + newGossipEpochs = targetGossipState - currentGossipState + oldGossipEpochs = currentGossipState - targetGossipState - for gossipFork in oldGossipForks: - let forkDigest = node.dag.forkDigests[].atConsensusFork(gossipFork) + for gossipEpoch in oldGossipEpochs: + let forkDigest = node.dag.forkDigests[].atEpoch(gossipEpoch, cfg) 
node.network.unsubscribe(getBeaconBlocksTopic(forkDigest)) - for gossipFork in newGossipForks: - let forkDigest = node.dag.forkDigests[].atConsensusFork(gossipFork) + for gossipEpoch in newGossipEpochs: + let forkDigest = node.dag.forkDigests[].atEpoch(gossipEpoch, cfg) node.network.subscribe( - getBeaconBlocksTopic(forkDigest), blocksTopicParams, + getBeaconBlocksTopic(forkDigest), getBlockTopicParams(), enableTopicMetrics = true) node.blocksGossipState = targetGossipState proc addPhase0MessageHandlers( node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.network.subscribe(getAttesterSlashingsTopic(forkDigest), basicParams) - node.network.subscribe(getProposerSlashingsTopic(forkDigest), basicParams) - node.network.subscribe(getVoluntaryExitsTopic(forkDigest), basicParams) + let validatorsCount = + withState(node.dag.headState): + forkyState.data.validators.lenu64 + node.network.subscribe( + getAttesterSlashingsTopic(forkDigest), getAttesterSlashingTopicParams()) + node.network.subscribe( + getProposerSlashingsTopic(forkDigest), getProposerSlashingTopicParams()) + node.network.subscribe( + getVoluntaryExitsTopic(forkDigest), getVoluntaryExitTopicParams()) node.network.subscribe( - getAggregateAndProofsTopic(forkDigest), aggregateTopicParams, - enableTopicMetrics = true) + getAggregateAndProofsTopic(forkDigest), + getAggregateProofTopicParams(validatorsCount), enableTopicMetrics = true) # updateAttestationSubnetHandlers subscribes attestation subnets @@ -1233,7 +1214,7 @@ proc removePhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = for subnet_id in SubnetId: node.network.unsubscribe(getAttestationTopic(forkDigest, subnet_id)) - node.consensusManager[].actionTracker.subscribedSubnets = default(AttnetBits) + node.consensusManager[].actionTracker.subscribedSubnets.reset() func hasSyncPubKey(node: BeaconNode, epoch: Epoch): auto = # Only used to determine which gossip topics to which to subscribe @@ -1279,35 +1260,62 @@ func getSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = subnets + node.getNextSyncCommitteeSubnets(epoch) +func readCustodyGroupSubnets(node: BeaconNode): uint64 = + let vcus_count = node.dataColumnQuarantine.custodyColumns.lenu64 + if node.config.peerdasSupernode: + node.dag.cfg.NUMBER_OF_CUSTODY_GROUPS + elif vcus_count > node.dag.cfg.CUSTODY_REQUIREMENT: + vcus_count + else: + node.dag.cfg.CUSTODY_REQUIREMENT + +proc updateDataColumnSidecarHandlers(node: BeaconNode, gossipEpoch: Epoch) = + let + forkDigest = node.dag.forkDigests[].atEpoch(gossipEpoch, node.dag.cfg) + targetSubnets = node.readCustodyGroupSubnets() + custody = node.dag.cfg.get_custody_groups( + node.network.nodeId, targetSubnets.uint64) + + for i in custody: + let topic = getDataColumnSidecarTopic(forkDigest, i) + node.network.subscribe(topic, basicParams()) + proc addAltairMessageHandlers( node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = node.addPhase0MessageHandlers(forkDigest, slot) # If this comes online near sync committee period, it'll immediately get # replaced as usual by trackSyncCommitteeTopics, which runs at slot end. 
- let syncnets = node.getSyncCommitteeSubnets(slot.epoch) + let + syncnets = node.getSyncCommitteeSubnets(slot.epoch) + validatorsCount = + withState(node.dag.headState): + forkyState.data.validators.lenu64 for subcommitteeIdx in SyncSubcommitteeIndex: if syncnets[subcommitteeIdx]: node.network.subscribe( - getSyncCommitteeTopic(forkDigest, subcommitteeIdx), basicParams) + getSyncCommitteeTopic(forkDigest, subcommitteeIdx), + getSyncCommitteeSubnetTopicParams(validatorsCount)) node.network.subscribe( - getSyncCommitteeContributionAndProofTopic(forkDigest), basicParams) + getSyncCommitteeContributionAndProofTopic(forkDigest), + getSyncContributionTopicParams()) node.network.updateSyncnetsMetadata(syncnets) proc addCapellaMessageHandlers( node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = node.addAltairMessageHandlers(forkDigest, slot) - node.network.subscribe(getBlsToExecutionChangeTopic(forkDigest), basicParams) + node.network.subscribe(getBlsToExecutionChangeTopic(forkDigest), + getBlsToExecutionChangeTopicParams()) proc doAddDenebMessageHandlers( node: BeaconNode, forkDigest: ForkDigest, slot: Slot, blobSidecarSubnetCount: uint64) = node.addCapellaMessageHandlers(forkDigest, slot) for topic in blobSidecarTopics(forkDigest, blobSidecarSubnetCount): - node.network.subscribe(topic, basicParams) + node.network.subscribe(topic, basicParams()) proc addDenebMessageHandlers( node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = @@ -1319,9 +1327,16 @@ proc addElectraMessageHandlers( node.doAddDenebMessageHandlers( forkDigest, slot, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA) -proc addFuluMessageHandlers( +proc addGloasMessageHandlers( node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.addElectraMessageHandlers(forkDigest, slot) + node.addCapellaMessageHandlers(forkDigest, slot) + debugGloasComment "default gossipsub config" + node.network.subscribe( + getExecutionPayloadBidTopic(forkDigest), basicParams()) + node.network.subscribe( + getExecutionPayloadTopic(forkDigest), basicParams()) + node.network.subscribe( + getPayloadAttestationMessageTopic(forkDigest), basicParams()) proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = node.removePhase0MessageHandlers(forkDigest) @@ -1353,7 +1368,24 @@ proc removeElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = forkDigest, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA) proc removeFuluMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.removeElectraMessageHandlers(forkDigest) + # Deliberately don't handle blobs, which Deneb and Electra contain, in lieu + # of columns. Last common ancestor fork for gossip environment is Capella.
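# A minimal sketch of the custody-group-to-column mapping that the column
# topic subscribe/unsubscribe handlers above and below rely on, assuming the
# consensus-specs helper compute_columns_for_custody_group and the mainnet
# values NUMBER_OF_COLUMNS = 128, NUMBER_OF_CUSTODY_GROUPS = 128; the names
# here are local stand-ins rather than the Nimbus-internal API.
const
  NUMBER_OF_COLUMNS = 128'u64
  NUMBER_OF_CUSTODY_GROUPS = 128'u64

func columnsForCustodyGroup(group: uint64): seq[uint64] =
  # Each custody group owns NUMBER_OF_COLUMNS div NUMBER_OF_CUSTODY_GROUPS
  # column indices, interleaved across groups.
  let columnsPerGroup = NUMBER_OF_COLUMNS div NUMBER_OF_CUSTODY_GROUPS
  for i in 0'u64 ..< columnsPerGroup:
    result.add NUMBER_OF_CUSTODY_GROUPS * i + group

when isMainModule:
  doAssert columnsForCustodyGroup(3'u64) == @[3'u64]  # 128/128 split: one column per group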
+ node.removeCapellaMessageHandlers(forkDigest) + + let + targetSubnets = node.readCustodyGroupSubnets() + custody = node.dag.cfg.get_custody_groups( + node.network.nodeId, targetSubnets.uint64) + + for i in custody: + let topic = getDataColumnSidecarTopic(forkDigest, i) + node.network.unsubscribe(topic) + +proc removeGloasMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.removeFuluMessageHandlers(forkDigest) + node.network.unsubscribe(getExecutionPayloadBidTopic(forkDigest)) + node.network.unsubscribe(getExecutionPayloadTopic(forkDigest)) + node.network.unsubscribe(getPayloadAttestationMessageTopic(forkDigest)) proc updateSyncCommitteeTopics(node: BeaconNode, slot: Slot) = template lastSyncUpdate: untyped = @@ -1386,18 +1418,21 @@ proc updateSyncCommitteeTopics(node: BeaconNode, slot: Slot) = syncnets - node.network.metadata.syncnets oldSyncnets = node.network.metadata.syncnets - syncnets - forkDigests = node.forkDigests() + validatorsCount = + withState(node.dag.headState): + forkyState.data.validators.lenu64 for subcommitteeIdx in SyncSubcommitteeIndex: doAssert not (newSyncnets[subcommitteeIdx] and oldSyncnets[subcommitteeIdx]) - for gossipFork in node.gossipState: - template topic(): auto = - getSyncCommitteeTopic(forkDigests[gossipFork], subcommitteeIdx) + for gossipEpoch in node.gossipState: + template topic(): auto = getSyncCommitteeTopic( + node.dag.forkDigests[].atEpoch(gossipEpoch, node.dag.cfg), subcommitteeIdx) if oldSyncnets[subcommitteeIdx]: node.network.unsubscribe(topic) elif newSyncnets[subcommitteeIdx]: - node.network.subscribe(topic, basicParams) + node.network.subscribe(topic, + getSyncCommitteeSubnetTopicParams(validatorsCount)) node.network.updateSyncnetsMetadata(syncnets) @@ -1413,7 +1448,8 @@ proc doppelgangerChecked(node: BeaconNode, epoch: Epoch) = validator.doppelgangerChecked(epoch - 1) proc maybeUpdateActionTrackerNextEpoch( - node: BeaconNode, forkyState: ForkyHashedBeaconState, nextEpoch: Epoch) = + node: BeaconNode, forkyState: ForkyHashedBeaconState, currentSlot: Slot) = + let nextEpoch = currentSlot.epoch + 1 if node.consensusManager[].actionTracker.needsUpdate( forkyState, nextEpoch): template epochRefFallback() = @@ -1431,6 +1467,7 @@ proc maybeUpdateActionTrackerNextEpoch( shufflingRef = node.dag.getShufflingRef(node.dag.head, nextEpoch, false).valueOr: # epochRefFallback() won't work in this case either return + # using the separate method of proposer indices calculation in Fulu nextEpochProposers = get_beacon_proposer_indices( forkyState.data, shufflingRef.shuffled_active_validator_indices, nextEpoch) @@ -1474,7 +1511,12 @@ proc maybeUpdateActionTrackerNextEpoch( effective_balance = forkyState.data.validators.item( nextEpochFirstProposer).effective_balance - if participation_flags.has_flag(TIMELY_SOURCE_FLAG_INDEX) and + # Maximal potential accuracy primarily useful during the last slot of + # each epoch to prepare for a possible proposal the first slot of the + # next epoch. Otherwise, epochRefFallback is potentially very slow as + # it can induce a lengthy state replay. 
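# A small sketch of the `(currentSlot + 1).is_epoch` check used just below,
# assuming only that a slot "is an epoch" when it is divisible by
# SLOTS_PER_EPOCH (32 in the mainnet preset): the test is true exactly for
# the last slot of an epoch, which is when paying for the slower
# epochRefFallback path is worthwhile ahead of a possible proposal.
const SLOTS_PER_EPOCH = 32'u64
func isEpoch(slot: uint64): bool = slot mod SLOTS_PER_EPOCH == 0
when isMainModule:
  doAssert not isEpoch(45'u64 + 1)  # slot 45 is mid-epoch: keep the cheap update path
  doAssert isEpoch(63'u64 + 1)      # slot 63 is the last slot of epoch 1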
+ if (not (currentSlot + 1).is_epoch) or + (participation_flags.has_flag(TIMELY_SOURCE_FLAG_INDEX) and participation_flags.has_flag(TIMELY_TARGET_FLAG_INDEX) and effective_balance == MAX_EFFECTIVE_BALANCE.Gwei and forkyState.data.slot.epoch != GENESIS_EPOCH and @@ -1482,7 +1524,7 @@ proc maybeUpdateActionTrackerNextEpoch( nextEpochFirstProposer) == 0 and not effective_balance_might_update( forkyState.data.balances.item(nextEpochFirstProposer), - effective_balance): + effective_balance)): node.consensusManager[].actionTracker.updateActions( shufflingRef, nextEpochProposers) else: @@ -1505,7 +1547,7 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = TOPIC_SUBSCRIBE_THRESHOLD_SLOTS = 64 HYSTERESIS_BUFFER = 16 - static: doAssert high(ConsensusFork) == ConsensusFork.Fulu + static: doAssert high(ConsensusFork) == ConsensusFork.Gloas let head = node.dag.head @@ -1515,38 +1557,31 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = isBehind = headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER targetGossipState = - getTargetGossipState( - slot.epoch, - node.dag.cfg.ALTAIR_FORK_EPOCH, - node.dag.cfg.BELLATRIX_FORK_EPOCH, - node.dag.cfg.CAPELLA_FORK_EPOCH, - node.dag.cfg.DENEB_FORK_EPOCH, - node.dag.cfg.ELECTRA_FORK_EPOCH, - node.dag.cfg.FULU_FORK_EPOCH, - isBehind) - - doAssert targetGossipState.card <= 2 + getTargetGossipState(slot.epoch, node.dag.cfg, isBehind) + + doAssert targetGossipState.len <= 2 let - newGossipForks = targetGossipState - node.gossipState - oldGossipForks = node.gossipState - targetGossipState + newGossipEpochs = targetGossipState - node.gossipState + oldGossipEpochs = node.gossipState - targetGossipState - doAssert newGossipForks.card <= 2 - doAssert oldGossipForks.card <= 2 + doAssert newGossipEpochs.len <= 2 + doAssert oldGossipEpochs.len <= 2 - func maxGossipFork(gossipState: GossipState): int = - var res = -1 - for gossipFork in gossipState: - res = max(res, gossipFork.int) + # TODO properly or reconsider, should become sort of trivial now + func maxGossipEpoch(gossipState: GossipState): uint64 = + var res = 0'u64 + for gossipEpoch in gossipState: + res = max(res, distinctBase(gossipEpoch)) res - if maxGossipFork(targetGossipState) < maxGossipFork(node.gossipState) and - targetGossipState != {}: + if maxGossipEpoch(targetGossipState) < maxGossipEpoch(node.gossipState) and + targetGossipState.len > 0: warn "Unexpected clock regression during transition", targetGossipState, gossipState = node.gossipState - if node.gossipState.card == 0 and targetGossipState.card > 0: + if node.gossipState.len == 0 and targetGossipState.len > 0: # We are synced, so we will connect debug "Enabling topic subscriptions", wallSlot = slot, @@ -1567,9 +1602,9 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = node.consensusManager[].actionTracker.updateActions( epochRef.shufflingRef, epochRef.beacon_proposers) - node.maybeUpdateActionTrackerNextEpoch(forkyState, slot.epoch + 1) + node.maybeUpdateActionTrackerNextEpoch(forkyState, slot) - if node.gossipState.card > 0 and targetGossipState.card == 0: + if node.gossipState.len > 0 and targetGossipState.len == 0: debug "Disabling topic subscriptions", wallSlot = slot, headSlot = head.slot, @@ -1577,8 +1612,6 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = node.processor[].clearDoppelgangerProtection() - let forkDigests = node.forkDigests() - const removeMessageHandlers: array[ConsensusFork, auto] = [ removePhase0MessageHandlers, removeAltairMessageHandlers, @@ 
-1586,11 +1619,14 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = removeCapellaMessageHandlers, removeDenebMessageHandlers, removeElectraMessageHandlers, - removeFuluMessageHandlers + removeFuluMessageHandlers, + removeGloasMessageHandlers ] - for gossipFork in oldGossipForks: - removeMessageHandlers[gossipFork](node, forkDigests[gossipFork]) + for gossipEpoch in oldGossipEpochs: + let gossipFork = node.dag.cfg.consensusForkAtEpoch(gossipEpoch) + removeMessageHandlers[gossipFork]( + node, node.dag.forkDigests[].atEpoch(gossipEpoch, node.dag.cfg)) const addMessageHandlers: array[ConsensusFork, auto] = [ addPhase0MessageHandlers, @@ -1599,13 +1635,27 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = addCapellaMessageHandlers, addDenebMessageHandlers, addElectraMessageHandlers, - addFuluMessageHandlers + addCapellaMessageHandlers, # no blobs; updateDataColumnSidecarHandlers for rest + addGloasMessageHandlers ] - for gossipFork in newGossipForks: - addMessageHandlers[gossipFork](node, forkDigests[gossipFork], slot) + for gossipEpoch in newGossipEpochs: + let gossipFork = node.dag.cfg.consensusForkAtEpoch(gossipEpoch) + addMessageHandlers[gossipFork]( + node, node.dag.forkDigests[].atEpoch(gossipEpoch, node.dag.cfg), slot) node.gossipState = targetGossipState + + # Validator custody can change in the middle of a fork/BPO interval; need to + # subscribe to potentially new column topics. Do this after node.gossipState + # is updated to avoid adding immediately unsubscribed subscriptions. Custody + # can only grow in a node's lifetime, so only address additive case. It can, + # therefore, overlap existing subscriptions, rather than separately tracking + # them. + for gossipEpoch in node.gossipState: + if node.dag.cfg.consensusForkAtEpoch(gossipEpoch) >= ConsensusFork.Fulu: + node.updateDataColumnSidecarHandlers(gossipEpoch) + node.doppelgangerChecked(slot.epoch) node.updateAttestationSubnetHandlers(slot) node.updateBlocksGossipStatus(slot, isBehind) @@ -1622,13 +1672,110 @@ proc pruneBlobs(node: BeaconNode, slot: Slot) = for i in startIndex..= node.dag.cfg.FULU_FORK_EPOCH: + var blocks: array[SLOTS_PER_EPOCH.int, BlockId] + var count = 0 + let startIndex = node.dag.getBlockRange( + dataColumnPruneEpoch.start_slot, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1)) + for i in startIndex..= ConsensusFork.Fulu: + let maxColCount = node.dag.cfg.NUMBER_OF_COLUMNS + var + columns: seq[ref fulu.DataColumnSidecar] + indices: HashSet[uint64] + + # Get columns from database + for i in 0 ..< maxColCount: + var colData: fulu.DataColumnSidecar + if node.dag.db.getDataColumnSidecar(forkyBlck.root, i, colData): + columns.add(newClone(colData)) + indices.incl(i) + debug "PeerDAS: Data columns before reconstruction", columns = indices.len + + # Make sure the node has obtained 50%+ of all the columns + if columns.lenu64 < (maxColCount div 2): + warn "The node did not obtain 50%+ of all the columns" + return + # Ignore if the node has already obtained all the columns + elif columns.lenu64 == maxColCount: + debug "The node has already obtained all the columns" + return + + let startTime = Moment.now() + + # Reconstruct columns + let recovered = recover_cells_and_proofs_parallel( + node.batchVerifier[].taskpool, columns).valueOr: + error "Data column reconstruction incomplete" + return + let rowCount = recovered.len + var reconCounter = 0 + + let recoveredTime = Moment.now() + + for i in 0 ..< maxColCount: + if i in indices: + continue + var + cells = newSeq[Cell](rowCount) + proofs = 
newSeq[kzg.KzgProof](rowCount) + for j in 0 ..< rowCount: + cells[j] = recovered[j].cells[i] + proofs[j] = recovered[j].proofs[i] + let dataColumn = fulu.DataColumnSidecar( + index: ColumnIndex(i), + column: DataColumn.init(cells), + kzg_commitments: columns[0].kzg_commitments, + kzg_proofs: deneb.KzgProofs.init(proofs), + signed_block_header: forkyBlck.asSigned().toSignedBeaconBlockHeader(), + kzg_commitments_inclusion_proof: + columns[0].kzg_commitments_inclusion_proof) + node.dag.db.putDataColumnSidecar(dataColumn) # TODO might already have + inc reconCounter + + let reconstructedTime = Moment.now() + + debug "Columns reconstructed", + columns = reconCounter, + recoveryTime = recoveredTime - startTime, + reconstructionTime = reconstructedTime - recoveredTime + proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = # Things we do when slot processing has ended and we're about to wait for the # next slot @@ -1642,6 +1789,9 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = debug "Waiting for slot end", slot, endCutoff = shortLog(endCutoff.offset) await sleepAsync(endCutoff.offset) + if node.dag.cfg.consensusForkAtEpoch(slot.epoch()) >= ConsensusFork.Fulu: + reconstructDataColumns(node, slot) + if node.dag.needStateCachesAndForkChoicePruning(): if node.attachedValidators[].validators.len > 0: node.attachedValidators[] @@ -1650,6 +1800,10 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = .pruneAfterFinalization( node.dag.finalizedHead.slot.epoch() ) + node.processor.blobQuarantine[].pruneAfterFinalization( + node.dag.finalizedHead.slot.epoch(), node.dag.needsBackfill()) + node.processor.quarantine[].pruneAfterFinalization( + node.dag.finalizedHead.slot.epoch(), node.dag.needsBackfill()) # Delay part of pruning until latency critical duties are done. # The other part of pruning, `pruneBlocksDAG`, is done eagerly. @@ -1663,6 +1817,7 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = # the pruning for later node.dag.pruneHistory() node.pruneBlobs(slot) + node.pruneDataColumns(slot) when declared(GC_fullCollect): # The slots in the beacon node work as frames in a game: we want to make @@ -1713,7 +1868,7 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = # missed sync committee participation via process_sync_aggregate(), but # attestation penalties for example, need, specific handling. # checked by maybeUpdateActionTrackerNextEpoch. 
- node.maybeUpdateActionTrackerNextEpoch(forkyState, slot.epoch + 1) + node.maybeUpdateActionTrackerNextEpoch(forkyState, slot) let nextAttestationSlot = @@ -1784,13 +1939,31 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = # logging slot end since the nextActionWaitTime can be short let advanceCutoff = node.beaconClock.fromNow( slot.start_beacon_time() + chronos.seconds(int(SECONDS_PER_SLOT - 1))) - if advanceCutoff.inFuture: - # We wait until there's only a second left before the next slot begins, then - # we advance the clearance state to the next slot - this gives us a high - # probability of being prepared for the block that will arrive and the - # epoch processing that follows - await sleepAsync(advanceCutoff.offset) - node.dag.advanceClearanceState() + + let proposalFcu = + if advanceCutoff.inFuture: + # We wait until there's only a second left before the next slot begins, then + # we advance the clearance state to the next slot - this gives us a high + # probability of being prepared for the block that will arrive and the + # epoch processing that follows + await sleepAsync(advanceCutoff.offset) + let + nextSlot = slot + 1 + nextSlotCutoff = node.beaconClock.fromNow(nextSlot.start_beacon_time) + head = node.dag.head # could be a new head compared to earlier + + if nextSlotCutoff.inFuture and node.isSynced(head) and head.executionValid: + # If there is a proposal, we want to let the execution client know a bit + # earlier - the risk is that fork choice changes again before the proposal + # but this risk should be small - this function also prepares the + # clearance state for the most likely block to be arriving next + node.consensusManager.prepareNextSlot( + nextSlot, sleepAsync(nextSlotCutoff.offset) + ) + else: + nil + else: + nil # Prepare action tracker for the next slot node.consensusManager[].actionTracker.updateSlot(slot + 1) @@ -1800,8 +1973,50 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = # above, this will be done just before the next slot starts node.updateSyncCommitteeTopics(slot + 1) + if (not node.config.peerdasSupernode) and + (slot.epoch() + 1).start_slot() - slot == 1 and + node.dataColumnQuarantine[].len == 0 and + node.attachedValidatorBalanceTotal > 0.Gwei: + # Detect new validator custody at the last slot of every epoch + node.validatorCustody.detectNewValidatorCustody(slot, + node.attachedValidatorBalanceTotal) + + if node.validatorCustody.diff_set.len > 0: + var custodyColumns = + node.validatorCustody.newer_column_set.toSeq() + sort(custodyColumns) + # update custody columns + node.dataColumnQuarantine.updateColumnQuarantine( + node.dag.cfg, custodyColumns) + + # Update CGC and metadata with respect to the new detected validator custody + let new_vcus = CgcCount node.validatorCustody.newer_column_set.lenu64 + + if new_vcus > node.dag.cfg.CUSTODY_REQUIREMENT.uint8: + node.network.loadCgcnetMetadataAndEnr(new_vcus) + else: + node.network.loadCgcnetMetadataAndEnr(node.dag.cfg.CUSTODY_REQUIREMENT.uint8) + + info "New validator custody count detected", + custody_columns = node.dataColumnQuarantine.custodyColumns.len + + # Update nfd field for BPOs + let + nextForkEpoch = node.dag.cfg.nextForkEpochAtEpoch(epoch) + nextForkDigest = if nextForkEpoch == FAR_FUTURE_EPOCH: + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/p2p-interface.md#next-fork-digest + # "If no next fork is scheduled, the nfd entry contains the default value + # for the type (i.e., the SSZ representation of a zero-filled array)." 
+ default(ForkDigest) + else: + node.dag.forkDigests[].atEpoch(nextForkEpoch, node.dag.cfg) + node.network.updateNextForkDigest(nextForkDigest) + await node.updateGossipStatus(slot + 1) + if proposalFcu != nil: + await proposalFcu + func formatNextConsensusFork( node: BeaconNode, withVanityArt = false): Opt[string] = let consensusFork = @@ -1817,34 +2032,49 @@ func formatNextConsensusFork( (if withVanityArt: nextConsensusFork.getVanityMascot & " " else: "") & $nextConsensusFork & ":" & $nextForkEpoch) -func syncStatus(node: BeaconNode, wallSlot: Slot): string = - node.syncOverseer.statusMsg.valueOr: - let optimisticHead = not node.dag.head.executionValid - if node.syncManager.inProgress: - let - optimisticSuffix = - if optimisticHead: - "/opt" - else: - "" - lightClientSuffix = - if node.consensusManager[].shouldSyncOptimistically(wallSlot): - " - lc: " & $shortLog(node.consensusManager[].optimisticHead) - else: - "" - node.syncManager.syncStatus & optimisticSuffix & lightClientSuffix - elif node.untrustedManager.inProgress: - "untrusted: " & node.untrustedManager.syncStatus - elif node.backfiller.inProgress: - "backfill: " & node.backfiller.syncStatus - elif optimisticHead: - "synced/opt" - else: - "synced" +proc syncStatus(node: BeaconNode, wallSlot: Slot): string = + node.syncOverseer.syncStatusMessage() when defined(windows): from winservice import establishWindowsService, reportServiceStatusSuccess +proc attemptGetBlobs(node: BeaconNode, + lastSlot: Slot) {.async.} = + let + block_id = node.quarantine[].last_block_slot.valueOr: + return + if block_id.slot != lastSlot + 1: + return + let + elManager = node.blockProcessor[].consensusManager.elManager + if (let o = node.quarantine[].getColumnless(block_id.root); o.isSome): + let columnless = o.unsafeGet() + withBlck(columnless): + when consensusFork >= ConsensusFork.Fulu and + consensusFork < ConsensusFork.Gloas: + let blobsFromElOpt = + await elManager.sendGetBlobsV2(forkyBlck) + if blobsFromElOpt.isSome(): + let blobsEl = blobsFromElOpt.get() + # check lengths of array[BlobAndProofV2] with blobs + # kzg commitments of the signed block + if blobsEl.len == forkyBlck.message.body.blob_kzg_commitments.len: + # we have received all columns from the EL + # hence we can safely remove the columnless block from quarantine + var flat_proof: seq[kzg.KzgProof] + for item in blobsEl: + for proof in item.proofs: + flat_proof.add(kzg.KzgProof(bytes: proof.data)) + let recovered_columns = assemble_data_column_sidecars( + forkyBlck, + blobsEl.mapIt(kzg.KzgBlob(bytes: it.blob.data)), + flat_proof) + # Send notification to event stream + # and add these columns to column quarantine + for col in recovered_columns: + if col.index in node.dataColumnQuarantine[].custodyColumns: + node.dataColumnQuarantine[].put(forkyBlck.root, newClone(col)) + proc onSlotStart(node: BeaconNode, wallTime: BeaconTime, lastSlot: Slot): Future[bool] {.async.} = ## Called at the beginning of a slot - usually every slot, but sometimes might @@ -1897,10 +2127,12 @@ proc onSlotStart(node: BeaconNode, wallTime: BeaconTime, if node.config.strictVerification: verifyFinalization(node, wallSlot) + await node.attemptGetBlobs(lastSlot) + node.consensusManager[].updateHead(wallSlot) await node.handleValidatorDuties(lastSlot, wallSlot) - + node.requestManager.switchToColumnLoop() await onSlotEnd(node, wallSlot) # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#registration-dissemination @@ -1912,6 +2144,76 @@ proc onSlotStart(node: BeaconNode, wallTime: 
BeaconTime, return false +proc runSlotLoop(node: BeaconNode, startTime: BeaconTime) {.async.} = + var + curSlot = startTime.slotOrZero() + nextSlot = curSlot + 1 # No earlier than GENESIS_SLOT + 1 + timeToNextSlot = nextSlot.start_beacon_time() - startTime + + info "Scheduling first slot action", + startTime = shortLog(startTime), + nextSlot = shortLog(nextSlot), + timeToNextSlot = shortLog(timeToNextSlot) + + while true: + # Start by waiting for the time when the slot starts. Sleeping relinquishes + # control to other tasks which may or may not finish within the allotted + # time, so below, we need to be wary that the ship might have sailed + # already. + await sleepAsync(timeToNextSlot) + + let + wallTime = node.beaconClock.now() + wallSlot = wallTime.slotOrZero() # Always > GENESIS! + + if wallSlot < nextSlot: + # While we were sleeping, the system clock changed and time moved + # backwards! + if wallSlot + 1 < nextSlot: + # This is a critical condition where it's hard to reason about what + # to do next - we'll call the attention of the user here by shutting + # down. + fatal "System time adjusted backwards significantly - clock may be inaccurate - shutting down", + nextSlot = shortLog(nextSlot), wallSlot = shortLog(wallSlot) + ProcessState.scheduleStop("clock skew") + return + + # Time moved back by a single slot - this could be a minor adjustment, + # for example when NTP does its thing after not working for a while + warn "System time adjusted backwards, rescheduling slot actions", + wallTime = shortLog(wallTime), + nextSlot = shortLog(nextSlot), + wallSlot = shortLog(wallSlot) + + # cur & next slot remain the same + timeToNextSlot = nextSlot.start_beacon_time() - wallTime + continue + + if wallSlot > nextSlot + SLOTS_PER_EPOCH: + # Time moved forwards by more than an epoch - either the clock was reset + # or we've been stuck in processing for a long time - either way, we will + # skip ahead so that we only process the events of the last + # SLOTS_PER_EPOCH slots + warn "Time moved forwards by more than an epoch, skipping ahead", + curSlot = shortLog(curSlot), + nextSlot = shortLog(nextSlot), + wallSlot = shortLog(wallSlot) + + curSlot = wallSlot - SLOTS_PER_EPOCH + elif wallSlot > nextSlot: + notice "Missed expected slot start, catching up", + delay = shortLog(wallTime - nextSlot.start_beacon_time()), + curSlot = shortLog(curSlot), + nextSlot = shortLog(curSlot) + + let breakLoop = await onSlotStart(node, wallTime, curSlot) + if breakLoop: + break + + curSlot = wallSlot + nextSlot = wallSlot + 1 + timeToNextSlot = nextSlot.start_beacon_time() - node.beaconClock.now() + proc onSecond(node: BeaconNode, time: Moment) = # Nim GC metrics (for the main thread) updateThreadMetrics() @@ -1919,7 +2221,7 @@ proc onSecond(node: BeaconNode, time: Moment) = if node.config.stopAtSyncedEpoch != 0 and node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch: notice "Shutting down after having reached the target synced epoch" - bnStatus = BeaconNodeStatus.Stopping + ProcessState.scheduleStop("stopAtSyncedEpoch") proc runOnSecondLoop(node: BeaconNode) {.async.} = const @@ -1961,169 +2263,217 @@ proc installMessageValidators(node: BeaconNode) = for fork in ConsensusFork: withConsensusFork(fork): - let digest = forkDigests[].atConsensusFork(consensusFork) - - # beacon_block - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_block - node.network.addValidator( - getBeaconBlocksTopic(digest), proc ( - signedBlock: consensusFork.SignedBeaconBlock - ): 
ValidationResult = - if node.shouldSyncOptimistically(node.currentSlot): - toValidationResult( - node.optimisticProcessor.processSignedBeaconBlock( - signedBlock)) - else: - toValidationResult( - node.processor[].processSignedBeaconBlock( - MsgSource.gossip, signedBlock))) - - # beacon_attestation_{subnet_id} - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id - when consensusFork >= ConsensusFork.Electra: - for it in SubnetId: - closureScope: # Needed for inner `proc`; don't lift it out of loop. - let subnet_id = it - node.network.addAsyncValidator( - getAttestationTopic(digest, subnet_id), proc ( - attestation: SingleAttestation - ): Future[ValidationResult] {. - async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processAttestation( - MsgSource.gossip, attestation, subnet_id, - checkSignature = true, checkValidator = false))) - else: - for it in SubnetId: - closureScope: # Needed for inner `proc`; don't lift it out of loop. - let subnet_id = it - node.network.addAsyncValidator( - getAttestationTopic(digest, subnet_id), proc ( - attestation: phase0.Attestation - ): Future[ValidationResult] {. - async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processAttestation( - MsgSource.gossip, attestation, subnet_id, - checkSignature = true, checkValidator = false))) - - # beacon_aggregate_and_proof - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof - when consensusFork >= ConsensusFork.Electra: - node.network.addAsyncValidator( - getAggregateAndProofsTopic(digest), proc ( - signedAggregateAndProof: electra.SignedAggregateAndProof - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processSignedAggregateAndProof( - MsgSource.gossip, signedAggregateAndProof))) - else: - node.network.addAsyncValidator( - getAggregateAndProofsTopic(digest), proc ( - signedAggregateAndProof: phase0.SignedAggregateAndProof - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processSignedAggregateAndProof( - MsgSource.gossip, signedAggregateAndProof))) - - # attester_slashing - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/p2p-interface.md#attester_slashing - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/electra/p2p-interface.md#modifications-in-electra - when consensusFork >= ConsensusFork.Electra: + for digest in @[forkDigests[].atConsensusFork(consensusFork)] & + forkDigests[].bpos.filterIt(it[1] == consensusFork).mapIt(it[2]): + let digest = digest # lent + # beacon_block + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_block + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/p2p-interface.md#beacon_block + when consensusFork >= ConsensusFork.Gloas: + debugGloasComment " " + else: + node.network.addValidator( + getBeaconBlocksTopic(digest), proc ( + signedBlock: consensusFork.SignedBeaconBlock, + src: PeerId, + ): ValidationResult = + if node.shouldSyncOptimistically(node.currentSlot): + toValidationResult( + node.optimisticProcessor.processSignedBeaconBlock( + signedBlock)) + else: + toValidationResult( + node.processor[].processSignedBeaconBlock( + MsgSource.gossip, signedBlock))) + + # beacon_attestation_{subnet_id} 
+ # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/p2p-interface.md#beacon_attestation_subnet_id + when consensusFork >= ConsensusFork.Electra: + for it in SubnetId: + closureScope: # Needed for inner `proc`; don't lift it out of loop. + let subnet_id = it + node.network.addAsyncValidator( + getAttestationTopic(digest, subnet_id), proc ( + attestation: SingleAttestation, src: PeerId + ): Future[ValidationResult] {. + async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processAttestation( + MsgSource.gossip, attestation, subnet_id, + checkSignature = true, checkValidator = false, + consensusFork))) + else: + for it in SubnetId: + closureScope: # Needed for inner `proc`; don't lift it out of loop. + let subnet_id = it + node.network.addAsyncValidator( + getAttestationTopic(digest, subnet_id), proc ( + attestation: phase0.Attestation, src: PeerId + ): Future[ValidationResult] {. + async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processAttestation( + MsgSource.gossip, attestation, subnet_id, + checkSignature = true, checkValidator = false, + consensusFork))) + + # beacon_aggregate_and_proof + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/p2p-interface.md#beacon_aggregate_and_proof + when consensusFork >= ConsensusFork.Electra: + node.network.addAsyncValidator( + getAggregateAndProofsTopic(digest), proc ( + signedAggregateAndProof: electra.SignedAggregateAndProof, + src: PeerId + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSignedAggregateAndProof( + MsgSource.gossip, signedAggregateAndProof, + fork = consensusFork))) + else: + node.network.addAsyncValidator( + getAggregateAndProofsTopic(digest), proc ( + signedAggregateAndProof: phase0.SignedAggregateAndProof, + src: PeerId + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSignedAggregateAndProof( + MsgSource.gossip, signedAggregateAndProof, + fork = consensusFork))) + + # attester_slashing + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/p2p-interface.md#attester_slashing + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/electra/p2p-interface.md#modifications-in-electra + when consensusFork >= ConsensusFork.Electra: + node.network.addValidator( + getAttesterSlashingsTopic(digest), proc ( + attesterSlashing: electra.AttesterSlashing, + src: PeerId + ): ValidationResult = + toValidationResult( + node.processor[].processAttesterSlashing( + MsgSource.gossip, attesterSlashing))) + else: + node.network.addValidator( + getAttesterSlashingsTopic(digest), proc ( + attesterSlashing: phase0.AttesterSlashing, + src: PeerId + ): ValidationResult = + toValidationResult( + node.processor[].processAttesterSlashing( + MsgSource.gossip, attesterSlashing))) + + # proposer_slashing + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#proposer_slashing node.network.addValidator( - getAttesterSlashingsTopic(digest), proc ( - attesterSlashing: electra.AttesterSlashing + getProposerSlashingsTopic(digest), proc ( + 
proposerSlashing: ProposerSlashing, + src: PeerId ): ValidationResult = toValidationResult( - node.processor[].processAttesterSlashing( - MsgSource.gossip, attesterSlashing))) - else: + node.processor[].processProposerSlashing( + MsgSource.gossip, proposerSlashing))) + + # voluntary_exit + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#voluntary_exit node.network.addValidator( - getAttesterSlashingsTopic(digest), proc ( - attesterSlashing: phase0.AttesterSlashing + getVoluntaryExitsTopic(digest), proc ( + signedVoluntaryExit: SignedVoluntaryExit, + src: PeerId ): ValidationResult = toValidationResult( - node.processor[].processAttesterSlashing( - MsgSource.gossip, attesterSlashing))) - - # proposer_slashing - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#proposer_slashing - node.network.addValidator( - getProposerSlashingsTopic(digest), proc ( - proposerSlashing: ProposerSlashing - ): ValidationResult = - toValidationResult( - node.processor[].processProposerSlashing( - MsgSource.gossip, proposerSlashing))) - - # voluntary_exit - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#voluntary_exit - node.network.addValidator( - getVoluntaryExitsTopic(digest), proc ( - signedVoluntaryExit: SignedVoluntaryExit - ): ValidationResult = - toValidationResult( - node.processor[].processSignedVoluntaryExit( - MsgSource.gossip, signedVoluntaryExit))) - - when consensusFork >= ConsensusFork.Altair: - # sync_committee_{subnet_id} - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_subnet_id - for subcommitteeIdx in SyncSubcommitteeIndex: - closureScope: # Needed for inner `proc`; don't lift it out of loop. 
- let idx = subcommitteeIdx - node.network.addAsyncValidator( - getSyncCommitteeTopic(digest, idx), proc ( - msg: SyncCommitteeMessage - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processSyncCommitteeMessage( - MsgSource.gossip, msg, idx))) - - # sync_committee_contribution_and_proof - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof - node.network.addAsyncValidator( - getSyncCommitteeContributionAndProofTopic(digest), proc ( - msg: SignedContributionAndProof - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processSignedContributionAndProof( - MsgSource.gossip, msg))) - - when consensusFork >= ConsensusFork.Capella: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/capella/p2p-interface.md#bls_to_execution_change - node.network.addAsyncValidator( - getBlsToExecutionChangeTopic(digest), proc ( - msg: SignedBLSToExecutionChange - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processBlsToExecutionChange( - MsgSource.gossip, msg))) - - when consensusFork >= ConsensusFork.Deneb: - # blob_sidecar_{subnet_id} - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id - let subnetCount = - when consensusFork >= ConsensusFork.Electra: - node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA - else: - node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT - for it in 0.BlobId ..< subnetCount.BlobId: - closureScope: # Needed for inner `proc`; don't lift it out of loop. - let subnet_id = it - node.network.addValidator( - getBlobSidecarTopic(digest, subnet_id), proc ( - blobSidecar: deneb.BlobSidecar - ): ValidationResult = - toValidationResult( - node.processor[].processBlobSidecar( - MsgSource.gossip, blobSidecar, subnet_id))) + node.processor[].processSignedVoluntaryExit( + MsgSource.gossip, signedVoluntaryExit))) + + when consensusFork >= ConsensusFork.Altair: + # sync_committee_{subnet_id} + # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_subnet_id + for subcommitteeIdx in SyncSubcommitteeIndex: + closureScope: # Needed for inner `proc`; don't lift it out of loop. 
+ let idx = subcommitteeIdx + node.network.addAsyncValidator( + getSyncCommitteeTopic(digest, idx), proc ( + msg: SyncCommitteeMessage, + src: PeerId + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSyncCommitteeMessage( + MsgSource.gossip, msg, idx))) + + # sync_committee_contribution_and_proof + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof + node.network.addAsyncValidator( + getSyncCommitteeContributionAndProofTopic(digest), proc ( + msg: SignedContributionAndProof, + src: PeerId + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSignedContributionAndProof( + MsgSource.gossip, msg))) + + when consensusFork >= ConsensusFork.Capella: + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/capella/p2p-interface.md#bls_to_execution_change + node.network.addAsyncValidator( + getBlsToExecutionChangeTopic(digest), proc ( + msg: SignedBLSToExecutionChange, + src: PeerId + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processBlsToExecutionChange( + MsgSource.gossip, msg))) + + # data_column_sidecar_{subnet_id} + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/p2p-interface.md#data_column_sidecar_subnet_id + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/p2p-interface.md#data_column_sidecar_subnet_id + when consensusFork >= ConsensusFork.Gloas: + for it in 0'u64..= ConsensusFork.Electra: + node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA + else: + node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT + for it in 0.BlobId ..< subnetCount.BlobId: + closureScope: # Needed for inner `proc`; don't lift it out of loop. + let subnet_id = it + node.network.addValidator( + getBlobSidecarTopic(digest, subnet_id), proc ( + blobSidecar: deneb.BlobSidecar, + src: PeerId + ): ValidationResult = + toValidationResult( + node.processor[].processBlobSidecar( + MsgSource.gossip, blobSidecar, subnet_id))) node.installLightClientMessageValidators() proc stop(node: BeaconNode) = - bnStatus = BeaconNodeStatus.Stopping - notice "Graceful shutdown" if not node.config.inProcessValidators: try: node.vcProcess.close() @@ -2141,58 +2491,6 @@ proc stop(node: BeaconNode) = node.db.close() notice "Databases closed" -proc run(node: BeaconNode) {.raises: [CatchableError].} = - bnStatus = BeaconNodeStatus.Running - - if not isNil(node.restServer): - node.restServer.installRestHandlers(node) - node.restServer.start() - - if not isNil(node.keymanagerServer): - doAssert not isNil(node.keymanagerHost) - node.keymanagerServer.router.installKeymanagerHandlers(node.keymanagerHost[]) - if node.keymanagerServer != node.restServer: - node.keymanagerServer.start() - - let - wallTime = node.beaconClock.now() - wallSlot = wallTime.slotOrZero() - - node.startLightClient() - node.requestManager.start() - node.syncOverseer.start() - - waitFor node.updateGossipStatus(wallSlot) - - for web3signerUrl in node.config.web3SignerUrls: - # TODO - # The current strategy polls all remote signers independently - # from each other which may lead to some race conditions of - # validators are migrated from one signer to another - # (because the updates to our validator pool are not atomic). - # Consider using different strategies that would detect such - # race conditions. 
- asyncSpawn node.pollForDynamicValidators( - web3signerUrl, node.config.web3signerUpdateInterval) - - asyncSpawn runSlotLoop(node, wallTime, onSlotStart) - asyncSpawn runOnSecondLoop(node) - asyncSpawn runQueueProcessingLoop(node.blockProcessor) - asyncSpawn runKeystoreCachePruningLoop(node.keystoreCache) - - # main event loop - while bnStatus == BeaconNodeStatus.Running: - poll() # if poll fails, the network is broken - - # time to say goodbye - node.stop() - -var gPidFile: string -proc createPidFile(filename: string) {.raises: [IOError].} = - writeFile filename, $os.getCurrentProcessId() - gPidFile = filename - addExitProc proc {.noconv.} = discard io2.removeFile(gPidFile) - proc initializeNetworking(node: BeaconNode) {.async.} = node.installMessageValidators() @@ -2204,7 +2502,9 @@ proc initializeNetworking(node: BeaconNode) {.async.} = await node.network.start() -proc start*(node: BeaconNode) {.raises: [CatchableError].} = +type StopFuture = Future[void].Raising([CancelledError]) + +proc run*(node: BeaconNode, stopper: StopFuture) {.raises: [CatchableError].} = let head = node.dag.head finalizedHead = node.dag.finalizedHead @@ -2235,7 +2535,56 @@ proc start*(node: BeaconNode) {.raises: [CatchableError].} = waitFor node.initializeNetworking() node.elManager.start() - node.run() + + ProcessState.notifyRunning() + + if not isNil(node.restServer): + node.restServer.installRestHandlers(node) + node.restServer.start() + + if not isNil(node.keymanagerServer): + doAssert not isNil(node.keymanagerHost) + node.keymanagerServer.router.installKeymanagerHandlers(node.keymanagerHost[]) + if node.keymanagerServer != node.restServer: + node.keymanagerServer.start() + + let + wallTime = node.beaconClock.now() + wallSlot = wallTime.slotOrZero() + + node.startLightClient() + node.requestManager.start() + node.syncOverseer.start() + + waitFor node.updateGossipStatus(wallSlot) + + for web3signerUrl in node.config.web3SignerUrls: + # TODO + # The current strategy polls all remote signers independently + # from each other which may lead to some race conditions of + # validators are migrated from one signer to another + # (because the updates to our validator pool are not atomic). + # Consider using different strategies that would detect such + # race conditions. + asyncSpawn node.pollForDynamicValidators( + web3signerUrl, node.config.web3signerUpdateInterval) + + asyncSpawn runSlotLoop(node, wallTime) + asyncSpawn runOnSecondLoop(node) + asyncSpawn runKeystoreCachePruningLoop(node.keystoreCache) + + while true: + if (let reason = ProcessState.stopping(); reason.isSome()): + notice "Shutting down", reason = reason[] + break + if stopper != nil and stopper.finished(): + break + + chronos.poll() + + # time to say goodbye + node.stop() + func formatGwei(amount: Gwei): string = # TODO This is implemented in a quite a silly way. 
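# Illustrative sketch: with the rewritten `run` above, shutdown no longer
# flows through the old global `bnStatus`. Any thread can request it through
# `ProcessState` (added in beacon_chain/process_state.nim further down in
# this change), or an embedding caller can complete the optional `stopper`
# future; the polling loop then logs the reason and calls `node.stop()`.
when false: # sketch only - names are as defined elsewhere in this change
  # e.g. from a maintenance task or another thread:
  ProcessState.scheduleStop("example: operator requested restart")
  # The next iteration of the `while true` loop in `run` observes the flag,
  # logs "Shutting down" with the given reason and breaks to `node.stop()`.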
@@ -2380,7 +2729,13 @@ when not defined(windows): asyncSpawn statusBarUpdatesPollingLoop() -proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.raises: [CatchableError].} = +proc doRunBeaconNode*( + config: var BeaconNodeConf, + rng: ref HmacDrbgContext, + taskpool: Taskpool, + stopper: StopFuture, +) {.raises: [CatchableError], gcsafe.} = + doAssert taskpool != nil info "Launching beacon node", version = fullVersionStr, bls_backend = $BLS_BACKEND, @@ -2388,19 +2743,6 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai cmdParams = commandLineParams(), config - template ignoreDeprecatedOption(option: untyped): untyped = - if config.option.isSome: - warn "Config option is deprecated", - option = config.option.get - ignoreDeprecatedOption requireEngineAPI - ignoreDeprecatedOption safeSlotsToImportOptimistically - ignoreDeprecatedOption terminalTotalDifficultyOverride - ignoreDeprecatedOption optimistic - ignoreDeprecatedOption validatorMonitorTotals - ignoreDeprecatedOption web3ForcePolling - - createPidFile(config.dataDir.string / "beacon_node.pid") - config.createDumpDirs() # There are no managed event loops in here, to do a graceful shutdown, but @@ -2413,27 +2755,6 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai for node in metadata.bootstrapNodes: config.bootstrapNodes.add node - ## Ctrl+C handling - proc controlCHandler() {.noconv.} = - when defined(windows): - # workaround for https://github.com/nim-lang/Nim/issues/4057 - try: - setupForeignThreadGc() - except Exception as exc: raiseAssert exc.msg # shouldn't happen - notice "Shutting down after having received SIGINT" - bnStatus = BeaconNodeStatus.Stopping - try: - setControlCHook(controlCHandler) - except Exception as exc: # TODO Exception - warn "Cannot set ctrl-c handler", msg = exc.msg - - # equivalent SIGTERM handler - when defined(posix): - proc SIGTERMHandler(signal: cint) {.noconv.} = - notice "Shutting down after having received SIGTERM" - bnStatus = BeaconNodeStatus.Stopping - c_signal(ansi_c.SIGTERM, SIGTERMHandler) - block: let res = if config.trustedSetupFile.isNone: @@ -2443,18 +2764,19 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai if res.isErr(): raiseAssert res.error() - let node = waitFor BeaconNode.init(rng, config, metadata) - - let metricsServer = (waitFor config.initMetricsServer()).valueOr: + if ProcessState.stopIt(notice("Shutting down", reason = it)): return + let node = waitFor BeaconNode.init(rng, config, metadata, taskpool) + # Nim GC metrics (for the main thread) will be collected in onSecond(), but # we disable piggy-backing on other metrics here. setSystemMetricsAutomaticUpdate(false) - node.metricsServer = metricsServer + node.metricsServer = (waitFor config.initMetricsServer()).valueOr: + return - if bnStatus == BeaconNodeStatus.Stopping: + if ProcessState.stopIt(notice("Shutting down", reason = it)): return when not defined(windows): @@ -2463,9 +2785,9 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai initStatusBar(node) if node.nickname != "": - dynamicLogScope(node = node.nickname): node.start() + dynamicLogScope(node = node.nickname): node.run(stopper) else: - node.start() + node.run(stopper) proc doRecord(config: BeaconNodeConf, rng: var HmacDrbgContext) {. 
raises: [CatchableError].} = @@ -2499,10 +2821,7 @@ proc doWeb3Cmd(config: BeaconNodeConf, rng: var HmacDrbgContext) {.raises: [CatchableError].} = case config.web3Cmd: of Web3Cmd.test: - let metadata = config.loadEth2Network() - waitFor testWeb3Provider(config.web3TestUrl, - metadata.cfg.DEPOSIT_CONTRACT_ADDRESS, rng.loadJwtSecret(config, allowCreate = true)) proc doSlashingExport(conf: BeaconNodeConf) {.raises: [IOError].}= @@ -2516,7 +2835,7 @@ proc doSlashingExport(conf: BeaconNodeConf) {.raises: [IOError].}= db.exportSlashingInterchange(interchange, conf.exportedValidators) echo "Export finished: '", dir/filetrunc & ".sqlite3" , "' into '", interchange, "'" -proc doSlashingImport(conf: BeaconNodeConf) {.raises: [SerializationError, IOError].} = +proc doSlashingImport(conf: BeaconNodeConf) {.raises: [IOError].} = let dir = conf.validatorsDir() filetrunc = SlashingDbName @@ -2564,7 +2883,29 @@ proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [CatchableError].} = let rng = HmacDrbgContext.new() case config.cmd - of BNStartUpCmd.noCommand: doRunBeaconNode(config, rng) + of BNStartUpCmd.noCommand: + createPidFile(config.dataDir.string / "beacon_node.pid") + ProcessState.setupStopHandlers() + + if config.rpcEnabled.isSome: + warn "Nimbus's JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it." + + template ignoreDeprecatedOption(option: untyped): untyped = + if config.option.isSome: + warn "Ignoring deprecated configuration option", + option = config.option.get + ignoreDeprecatedOption requireEngineAPI + ignoreDeprecatedOption safeSlotsToImportOptimistically + ignoreDeprecatedOption terminalTotalDifficultyOverride + ignoreDeprecatedOption optimistic + ignoreDeprecatedOption validatorMonitorTotals + ignoreDeprecatedOption web3ForcePolling + ignoreDeprecatedOption finalizedDepositTreeSnapshot + ignoreDeprecatedOption finalizedCheckpointBlock + + let taskpool = setupTaskpool(config.numThreads) + + doRunBeaconNode(config, rng, taskpool, nil) of BNStartUpCmd.deposits: doDeposits(config, rng[]) of BNStartUpCmd.wallets: doWallets(config, rng[]) of BNStartUpCmd.record: doRecord(config, rng[]) @@ -2588,57 +2929,56 @@ proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [CatchableError].} = config.lcTrustedBlockRoot, config.backfillBlocks, config.reindex, - config.downloadDepositSnapshot, genesisState) db.close() -{.pop.} # TODO moduletests exceptions +# noinline to keep it in stack traces +proc main*() {.noinline, raises: [CatchableError].} = + const copyright = + "Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH" -programMain: - var config = makeBannerAndConfig(clientId, copyrights, nimBanner, - SPEC_VERSION, [], BeaconNodeConf).valueOr: - stderr.write error + var config = BeaconNodeConf.loadWithBanners(clientId, copyright, [specBanner]).valueOr: + writePanicLine error # Logging not yet set up quit QuitFailure - if not(checkAndCreateDataDir(string(config.dataDir))): + setupLogging(config.logLevel, config.logStdout, config.logFile) + setupFileLimits() + + if not (checkAndCreateDataDir(string(config.dataDir))): # We are unable to access/create data folder or data folder's # permissions are insecure. quit QuitFailure - setupLogging(config.logLevel, config.logStdout, config.logFile) - setupFileLimits() - ## This Ctrl+C handler exits the program in non-graceful way. 
## It's responsible for handling Ctrl+C in sub-commands such ## as `wallets *` and `deposits *`. In a regular beacon node ## run, it will be overwritten later with a different handler ## performing a graceful exit. proc exitImmediatelyOnCtrlC() {.noconv.} = - when defined(windows): - # workaround for https://github.com/nim-lang/Nim/issues/4057 - setupForeignThreadGc() - # in case a password prompt disabled echoing - resetStdin() - echo "" # If we interrupt during an interactive prompt, this - # will move the cursor to the next line - notice "Shutting down after having received SIGINT" - quit 0 + # No allocations in signal handler + cstdout.rawWrite("Shutting down after having received SIGINT / ctrl-c") + quit QuitSuccess setControlCHook(exitImmediatelyOnCtrlC) + # equivalent SIGTERM handler - when defined(posix): + when declared(ansi_c.SIGTERM): proc exitImmediatelyOnSIGTERM(signal: cint) {.noconv.} = - notice "Shutting down after having received SIGTERM" - quit 0 + # No allocations in signal handler + cstdout.rawWrite("Shutting down after having received SIGTERM") + quit QuitSuccess c_signal(ansi_c.SIGTERM, exitImmediatelyOnSIGTERM) when defined(windows): if config.runAsService: proc exitService() = - bnStatus = BeaconNodeStatus.Stopping - establishWindowsService(clientId, copyrights, nimBanner, SPEC_VERSION, + ProcessState.scheduleStop("exitService") + establishWindowsService(clientId, copyright, [specBanner], "nimbus_beacon_node", BeaconNodeConf, handleStartUpCmd, exitService) else: handleStartUpCmd(config) else: - handleStartUpCmd(config) \ No newline at end of file + handleStartUpCmd(config) + +when isMainModule: + main() diff --git a/beacon_chain/nimbus_beacon_node.nim.cfg b/beacon_chain/nimbus_beacon_node.nim.cfg index 4c0d442473..30c8770614 100644 --- a/beacon_chain/nimbus_beacon_node.nim.cfg +++ b/beacon_chain/nimbus_beacon_node.nim.cfg @@ -1,6 +1,6 @@ -d:"chronicles_sinks=textlines[dynamic],json[dynamic]" -d:"chronicles_runtime_filtering=on" --d:"chronicles_disable_thread_id" +-d:"chronicles_thread_ids=no" @if release: -d:"chronicles_line_numbers:0" diff --git a/beacon_chain/nimbus_binary_common.nim b/beacon_chain/nimbus_binary_common.nim index 73d6839101..500c205f18 100644 --- a/beacon_chain/nimbus_binary_common.nim +++ b/beacon_chain/nimbus_binary_common.nim @@ -7,76 +7,45 @@ {.push raises: [].} -# Common routines for a BeaconNode and a ValidatorClient +# Utilities common across several nimbus binaries (BN/VC/EC/Portal/etc) import # Standard library - std/[tables, strutils, terminal, typetraits], + std/[cpuinfo, exitprocs, os, tables, terminal, typetraits], # Nimble packages chronos, confutils, presto, toml_serialization, metrics, chronicles, chronicles/helpers as chroniclesHelpers, chronicles/topics_registry, - stew/io2, metrics/chronos_httpserver, + stew/io2, metrics/chronos_httpserver, taskpools, # Local modules - ./spec/[helpers, keystore], - ./spec/datatypes/base, - "."/[beacon_clock, beacon_node_status, conf, conf_common, version] + ./spec/keystore, + ./buildinfo -when defined(posix): - import termios +from ./spec/datatypes/base import SPEC_VERSION + +const specBanner* = "Ethereum consensus spec v" & SPEC_VERSION -declareGauge versionGauge, "Nimbus version info (as metric labels)", ["version", "commit"], name = "version" -versionGauge.set(1, labelValues=[fullVersionStr, gitRevision]) +from system/ansi_c import c_malloc -declareGauge nimVersionGauge, "Nim version info", ["version", "nim_commit"], name = "nim_version" -nimVersionGauge.set(1, labelValues=[NimVersion, 
getNimGitHash()]) +when defaultChroniclesStream.outputs.type.arity == 2: + from ./filepath import secureCreatePath + + import stew/staticfor + +when defined(posix): + import termios export - confutils, toml_serialization, beacon_clock, beacon_node_status, conf, - conf_common + confutils, toml_serialization type - SlotStartProc*[T] = proc(node: T, wallTime: BeaconTime, - lastSlot: Slot): Future[bool] {.gcsafe, - raises: [].} - -# silly chronicles, colors is a compile-time property -when defaultChroniclesStream.outputs.type.arity == 2: - func stripAnsi(v: string): string = - var - res = newStringOfCap(v.len) - i: int - - while i < v.len: - let c = v[i] - if c == '\x1b': - var - x = i + 1 - found = false - - while x < v.len: # look for [..m - let c2 = v[x] - if x == i + 1: - if c2 != '[': - break - else: - if c2 in {'0'..'9'} + {';'}: - discard # keep looking - elif c2 == 'm': - i = x + 1 - found = true - break - else: - break - inc x - - if found: # skip adding c - continue - res.add c - inc i - - res + StdoutLogKind* {.pure.} = enum + Auto = "auto" + Colors = "colors" + NoColors = "nocolors" + Json = "json" + None = "none" proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} = # Updates log levels (without clearing old ones) @@ -93,7 +62,7 @@ proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} = proc detectTTY*(stdoutKind: StdoutLogKind): StdoutLogKind = if stdoutKind == StdoutLogKind.Auto: - if isatty(stdout): + if getEnv("NO_COLOR").len == 0 and isatty(stdout): # On a TTY, let's be fancy StdoutLogKind.Colors else: @@ -104,10 +73,6 @@ proc detectTTY*(stdoutKind: StdoutLogKind): StdoutLogKind = else: stdoutKind -when defaultChroniclesStream.outputs.type.arity == 2: - from std/os import splitFile - from "."/filepath import secureCreatePath - proc setupFileLimits*() = when not defined(windows): # In addition to databases and sockets, we need a file descriptor for every @@ -119,8 +84,18 @@ proc setupFileLimits*() = setMaxOpenFiles2(16384).isOkOr: warn "Cannot increase open file limit", err = osErrorMsg(error) +proc writePanicLine*(v: varargs[string, `$`]) = + ## Attempt writing text to stderr, ignoring errors if it fails - useful when + ## logging has not yet been set up + try: + for s in v: + stderr.write(s) + stderr.write("\p") + except IOError: + discard # Nothing to do.. + proc setupLogging*( - logLevel: string, stdoutKind: StdoutLogKind, logFile: Option[OutFile]) = + logLevel: string, stdoutKind: StdoutLogKind, logFile = none(OutFile)) = # In the cfg file for nimbus, we create two formats: textlines and json. # Here, we either write those logs to an output, or not, depending on the # given configuration. 
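# For reference, the sink indices used in `setupLogging` follow the
# compile-time chronicles configuration from the accompanying *.nim.cfg files
# (also touched in this change): the first declared sink (textlines) backs
# `outputs[0]` and the second (json) backs `outputs[1]`.
#
#   -d:"chronicles_sinks=textlines[dynamic],json[dynamic]"
#   -d:"chronicles_runtime_filtering=on"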
@@ -129,9 +104,6 @@ proc setupLogging*( when defaultChroniclesStream.outputs.type.arity != 2: warn "Logging configuration options not enabled in the current build" else: - # Naive approach where chronicles will form a string and we will discard - # it, even if it could have skipped the formatting phase - proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard proc writeAndFlush(f: File, msg: LogOutputStr) = try: @@ -143,9 +115,6 @@ proc setupLogging*( proc stdoutFlush(logLevel: LogLevel, msg: LogOutputStr) = writeAndFlush(stdout, msg) - proc noColorsFlush(logLevel: LogLevel, msg: LogOutputStr) = - writeAndFlush(stdout, stripAnsi(msg)) - let fileWriter = if logFile.isSome(): let @@ -171,14 +140,15 @@ proc setupLogging*( defaultChroniclesStream.outputs[1].writer = fileWriter - let tmp = detectTTY(stdoutKind) - - case tmp - of StdoutLogKind.Auto: raiseAssert "checked above" + case detectTTY(stdoutKind) + of StdoutLogKind.Auto: + raiseAssert "Auto-detection done in detectTTY" of StdoutLogKind.Colors: defaultChroniclesStream.outputs[0].writer = stdoutFlush + defaultChroniclesStream.outputs[0].colors = true of StdoutLogKind.NoColors: - defaultChroniclesStream.outputs[0].writer = noColorsFlush + defaultChroniclesStream.outputs[0].writer = stdoutFlush + defaultChroniclesStream.outputs[0].colors = false of StdoutLogKind.Json: defaultChroniclesStream.outputs[0].writer = noOutput @@ -190,56 +160,92 @@ proc setupLogging*( of StdoutLogKind.None: defaultChroniclesStream.outputs[0].writer = noOutput + staticFor i, 0.. GENESIS! - - if wallSlot < nextSlot: - # While we were sleeping, the system clock changed and time moved - # backwards! - if wallSlot + 1 < nextSlot: - # This is a critical condition where it's hard to reason about what - # to do next - we'll call the attention of the user here by shutting - # down. 
- fatal "System time adjusted backwards significantly - clock may be inaccurate - shutting down", - nextSlot = shortLog(nextSlot), - wallSlot = shortLog(wallSlot) - bnStatus = BeaconNodeStatus.Stopping - return - - # Time moved back by a single slot - this could be a minor adjustment, - # for example when NTP does its thing after not working for a while - warn "System time adjusted backwards, rescheduling slot actions", - wallTime = shortLog(wallTime), - nextSlot = shortLog(nextSlot), - wallSlot = shortLog(wallSlot) - - # cur & next slot remain the same - timeToNextSlot = nextSlot.start_beacon_time() - wallTime - continue - - if wallSlot > nextSlot + SLOTS_PER_EPOCH: - # Time moved forwards by more than an epoch - either the clock was reset - # or we've been stuck in processing for a long time - either way, we will - # skip ahead so that we only process the events of the last - # SLOTS_PER_EPOCH slots - warn "Time moved forwards by more than an epoch, skipping ahead", - curSlot = shortLog(curSlot), - nextSlot = shortLog(nextSlot), - wallSlot = shortLog(wallSlot) - - curSlot = wallSlot - SLOTS_PER_EPOCH - - elif wallSlot > nextSlot: - notice "Missed expected slot start, catching up", - delay = shortLog(wallTime - nextSlot.start_beacon_time()), - curSlot = shortLog(curSlot), - nextSlot = shortLog(curSlot) - - let breakLoop = await slotProc(node, wallTime, curSlot) - if breakLoop: - break - - curSlot = wallSlot - nextSlot = wallSlot + 1 - timeToNextSlot = nextSlot.start_beacon_time() - node.beaconClock.now() - proc init*(T: type RestServerRef, ip: IpAddress, port: Port, allowedOrigin: Option[string], validateFn: PatternCallback, ident: string, - config: AnyConf): T = + config: auto): T = let address = initTAddress(ip, port) serverFlags = {HttpServerFlags.QueryCommaSeparatedArray, @@ -401,7 +327,7 @@ type token*: string proc initKeymanagerServer*( - config: AnyConf, + config: auto, existingRestServer: RestServerRef = nil): KeymanagerInitResult {.raises: [].} = @@ -426,7 +352,7 @@ proc initKeymanagerServer*( fatal "The keymanager token should not be empty", tokenFilePath quit 1 - when config is BeaconNodeConf: + when compiles(config.restPort): if existingRestServer != nil and config.restAddress == config.keymanagerAddress and config.restPort == config.keymanagerPort: @@ -449,7 +375,7 @@ proc initKeymanagerServer*( KeymanagerInitResult(server: keymanagerServer, token: token) proc initMetricsServer*( - config: AnyConf + config: auto ): Future[Result[Opt[MetricsHttpServerRef], string]] {. async: (raises: [CancelledError]).} = if config.metricsEnabled: @@ -493,3 +419,57 @@ proc quitDoppelganger*() = const QuitDoppelganger = 129 quit QuitDoppelganger + +proc quitSlashing*() = + fatal "A known validator is slashed" + + const QuitSlashing = 198 + quit QuitSlashing + +proc defaultDataDir*(namespace, network: string): string = + ## Return the location to use by default for the given network - since + ## each network has its own blocks and configuration, separate the data + ## directories by chain to keep things simple. + ## + ## Namespace is for separating applications by namespace, ie when they don't + ## support sharing data directory - in particular, the validator client, + ## signing node and beacon node must use separate folders or they risk loading + ## the same keys! + ## + ## In theory, things like private keys could be shared between testnets and + ## mainnet, but this amounts to reusing the same keys for both environments + ## which seems dubious at best, security-wise. 
+ + let + base = + when defined(windows): + # Avoid roaming profile since DB is large + os.getEnv("LOCALAPPDATA", os.getEnv("APPDATA")) + elif defined(macos) or defined(macosx): + # Everything goes in here on mac + os.getHomeDir() / "Library/Application Support" + else: + # https://specifications.freedesktop.org/basedir-spec/0.8/#variables + os.getEnv("XDG_STATE_HOME", os.getEnv("HOME") / ".local/state") + + nimbus = when defined(linux): "nimbus" else: "Nimbus" + + var dir = base / nimbus + + if namespace.len > 0: + dir = dir / namespace + + if network.len > 0: + dir = dir / network + + dir + +proc createPidFile*(filename: string) {.raises: [IOError].} = + var pidFile {.global.}: cstring # avoid gc + doAssert pidFile.len == 0, "PID file must only be created once" + + writeFile filename, $os.getCurrentProcessId() + pidFile = cast[cstring](c_malloc(csize_t(filename.len + 1))) + copyMem(pidFile, cstring(filename), filename.len + 1) + + addExitProc proc {.noconv.} = discard io2.removeFile($pidFile) diff --git a/beacon_chain/nimbus_light_client.nim b/beacon_chain/nimbus_light_client.nim index bce64960b7..c4b191627d 100644 --- a/beacon_chain/nimbus_light_client.nim +++ b/beacon_chain/nimbus_light_client.nim @@ -16,31 +16,26 @@ import ./networking/[topic_params, network_metadata_downloads], ./spec/beaconstate, ./spec/datatypes/[phase0, altair, bellatrix, capella, deneb], - "."/[filepath, light_client, light_client_db, nimbus_binary_common, version] + ./[ + beacon_clock, buildinfo, filepath, light_client, light_client_db, + nimbus_binary_common, process_state, version, + ] from ./gossip_processing/block_processor import newExecutionPayload from ./gossip_processing/eth2_processor import toValidationResult -# this needs to be global, so it can be set in the Ctrl+C signal handler -var globalRunning = true +# noinline to keep it in stack traces +proc main() {.noinline, raises: [CatchableError].} = + ProcessState.setupStopHandlers() + const + banner = "Nimbus light client " & fullVersionStr + copyright = + "Copyright (c) 2022-" & compileYear & " Status Research & Development GmbH" + + var config = LightClientConf.loadWithBanners(banner, copyright, [specBanner]).valueOr: + writePanicLine error # Logging not yet set up + quit QuitFailure -programMain: - ## Ctrl+C handling - proc controlCHandler() {.noconv.} = - when defined(windows): - # workaround for https://github.com/nim-lang/Nim/issues/4057 - try: - setupForeignThreadGc() - except Exception as exc: raiseAssert exc.msg # shouldn't happen - notice "Shutting down after having received SIGINT" - globalRunning = false - try: - setControlCHook(controlCHandler) - except Exception as exc: # TODO Exception - warn "Cannot set ctrl-c handler", msg = exc.msg - - var config = makeBannerAndConfig( - "Nimbus light client " & fullVersionStr, LightClientConf) setupLogging(config.logLevel, config.logStdout, config.logFile) notice "Launching light client", @@ -78,7 +73,7 @@ programMain: raiseAssert "Invalid baked-in state: " & err.msg genesisTime = getStateField(genesisState[], genesis_time) - beaconClock = BeaconClock.init(genesisTime).valueOr: + beaconClock = BeaconClock.init(cfg.time, genesisTime).valueOr: error "Invalid genesis time in state", genesisTime quit 1 getBeaconTime = beaconClock.getBeaconTimeFn() @@ -97,13 +92,7 @@ programMain: engineApiUrls = config.engineApiUrls elManager = if engineApiUrls.len > 0: - ELManager.new( - cfg, - metadata.depositContractBlock, - metadata.depositContractBlockHash, - db = nil, - engineApiUrls, - metadata.eth1Network) + 
ELManager.new(engineApiUrls, metadata.eth1Network) else: nil @@ -111,14 +100,15 @@ programMain: signedBlock: ForkedSignedBeaconBlock ): Future[void] {.async: (raises: [CancelledError]).} = withBlck(signedBlock): - when consensusFork >= ConsensusFork.Bellatrix: + debugGloasComment "" + when consensusFork >= ConsensusFork.Bellatrix and consensusFork != ConsensusFork.Gloas: if forkyBlck.message.is_execution_block: template payload(): auto = forkyBlck.message.body.execution_payload if elManager != nil and not payload.block_hash.isZero: discard await elManager.newExecutionPayload(forkyBlck.message) else: discard optimisticProcessor = initOptimisticProcessor( - getBeaconTime, optimisticHandler) + cfg.time, getBeaconTime, optimisticHandler) lightClient = createLightClient( network, rng, config, cfg, forkDigests, getBeaconTime, @@ -133,14 +123,19 @@ programMain: PeerSync, PeerSync.NetworkState.init( cfg, forkDigests, genesisBlockRoot, getBeaconTime)) - withAll(ConsensusFork): - let forkDigest = forkDigests[].atConsensusFork(consensusFork) - network.addValidator( - getBeaconBlocksTopic(forkDigest), proc ( - signedBlock: consensusFork.SignedBeaconBlock - ): ValidationResult = - toValidationResult( - optimisticProcessor.processSignedBeaconBlock(signedBlock))) + for consensusFork in ConsensusFork: + for forkDigest in consensusFork.forkDigests(forkDigests[]): + withConsensusFork(consensusFork): + when consensusFork >= ConsensusFork.Gloas: + debugGloasComment "consensusFork.SignedBeaconBlock support missing" + else: + network.addValidator( + getBeaconBlocksTopic(forkDigest), proc ( + signedBlock: consensusFork.SignedBeaconBlock, + src: PeerId + ): ValidationResult = + toValidationResult( + optimisticProcessor.processSignedBeaconBlock(signedBlock))) lightClient.installMessageValidators() waitFor network.startListening() waitFor network.start() @@ -235,42 +230,34 @@ programMain: isSynced(wallSlot) - var blocksGossipState: GossipState = {} + var blocksGossipState: GossipState proc updateBlocksGossipStatus(slot: Slot) = let isBehind = not shouldSyncOptimistically(slot) - - targetGossipState = getTargetGossipState( - slot.epoch, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, - cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, - cfg.FULU_FORK_EPOCH, isBehind) + targetGossipState = getTargetGossipState(slot.epoch, cfg, isBehind) template currentGossipState(): auto = blocksGossipState if currentGossipState == targetGossipState: return - if currentGossipState.card == 0 and targetGossipState.card > 0: + if currentGossipState.len == 0 and targetGossipState.len > 0: debug "Enabling blocks topic subscriptions", wallSlot = slot, targetGossipState - elif currentGossipState.card > 0 and targetGossipState.card == 0: + elif currentGossipState.len > 0 and targetGossipState.len == 0: debug "Disabling blocks topic subscriptions", wallSlot = slot else: # Individual forks added / removed discard - let - newGossipForks = targetGossipState - currentGossipState - oldGossipForks = currentGossipState - targetGossipState - - for gossipFork in oldGossipForks: - let forkDigest = forkDigests[].atConsensusFork(gossipFork) + for gossipEpoch in currentGossipState - targetGossipState: + let forkDigest = forkDigests[].atEpoch(gossipEpoch, cfg) network.unsubscribe(getBeaconBlocksTopic(forkDigest)) - for gossipFork in newGossipForks: - let forkDigest = forkDigests[].atConsensusFork(gossipFork) + for gossipEpoch in targetGossipState - currentGossipState: + let forkDigest = forkDigests[].atEpoch(gossipEpoch, cfg) 
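+      # `blocksGossipState` now holds epochs rather than `ConsensusFork`
+      # values, so the topic digest for each epoch is looked up via
+      # `atEpoch` before subscribing.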
network.subscribe( - getBeaconBlocksTopic(forkDigest), blocksTopicParams, + getBeaconBlocksTopic(forkDigest), getBlockTopicParams(), enableTopicMetrics = true) blocksGossipState = targetGossipState @@ -355,7 +342,9 @@ programMain: asyncSpawn runOnSlotLoop() asyncSpawn runOnSecondLoop() - while globalRunning: + + while not ProcessState.stopIt(notice("Shutting down", reason = it)): poll() - notice "Exiting light client" +when isMainModule: + main() diff --git a/beacon_chain/nimbus_light_client.nim.cfg b/beacon_chain/nimbus_light_client.nim.cfg index 4c0d442473..30c8770614 100644 --- a/beacon_chain/nimbus_light_client.nim.cfg +++ b/beacon_chain/nimbus_light_client.nim.cfg @@ -1,6 +1,6 @@ -d:"chronicles_sinks=textlines[dynamic],json[dynamic]" -d:"chronicles_runtime_filtering=on" --d:"chronicles_disable_thread_id" +-d:"chronicles_thread_ids=no" @if release: -d:"chronicles_line_numbers:0" diff --git a/beacon_chain/nimbus_signing_node.nim b/beacon_chain/nimbus_signing_node.nim index 3d357fa91c..438f9542ce 100644 --- a/beacon_chain/nimbus_signing_node.nim +++ b/beacon_chain/nimbus_signing_node.nim @@ -1,5 +1,5 @@ # nimbus_signing_node -# Copyright (c) 2018-2025 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -16,7 +16,7 @@ import "."/spec/datatypes/[base, altair, phase0], "."/spec/[crypto, digest, network, signatures, forks], "."/spec/eth2_apis/[rest_types, eth2_rest_serialization], "."/rpc/rest_constants, - "."/[conf, version, nimbus_binary_common], + "."/[buildinfo, conf, version, nimbus_binary_common], "."/validators/[keystore_management, validator_pool] const @@ -42,6 +42,7 @@ type runKeystoreCachePruningLoopFut: Future[void] sigintHandleFut: Future[void] sigtermHandleFut: Future[void] + genesis_fork_version: Version SigningNodeRef* = ref SigningNode @@ -114,19 +115,31 @@ proc loadTLSKey(pathName: InputFile): Result[TLSPrivateKey, cstring] = ok(key) proc new(t: typedesc[SigningNodeRef], config: SigningNodeConf): SigningNodeRef = + let + genesis_fork_version = + # With `mainnet` compile-time preset, these are not available + if config.eth2Network == some("minimal"): + Version [byte 0x00, 0x00, 0x00, 0x01] + elif config.eth2Network == some("gnosis"): + Version [byte 0x00, 0x00, 0x00, 0x64] + else: + config.loadEth2Network().cfg.GENESIS_FORK_VERSION + when declared(waitSignal): SigningNodeRef( config: config, sigintHandleFut: waitSignal(SIGINT), sigtermHandleFut: waitSignal(SIGTERM), - keystoreCache: KeystoreCacheRef.init() + keystoreCache: KeystoreCacheRef.init(), + genesis_fork_version: genesis_fork_version, ) else: SigningNodeRef( config: config, sigintHandleFut: newFuture[void]("sigint_placeholder"), sigtermHandleFut: newFuture[void]("sigterm_placeholder"), - keystoreCache: KeystoreCacheRef.init() + keystoreCache: KeystoreCacheRef.init(), + genesis_fork_version: genesis_fork_version, ) template errorResponse(code: HttpCode, message: string): RestApiResponse = @@ -238,18 +251,17 @@ proc installApiHandlers*(node: SigningNodeRef) = let (feeRecipientIndex, blockHeader) = case request.beaconBlockHeader.kind - of ConsensusFork.Phase0 .. ConsensusFork.Bellatrix: - # `phase0` and `altair` blocks do not have `fee_recipient`, so - # we return an error. + of ConsensusFork.Phase0 .. 
ConsensusFork.Capella: return errorResponse(Http400, BlockIncorrectFork) - of ConsensusFork.Capella: - (GeneralizedIndex(401), request.beaconBlockHeader.data) of ConsensusFork.Deneb: (GeneralizedIndex(801), request.beaconBlockHeader.data) of ConsensusFork.Electra: (GeneralizedIndex(801), request.beaconBlockHeader.data) of ConsensusFork.Fulu: (GeneralizedIndex(801), request.beaconBlockHeader.data) + of ConsensusFork.Gloas: + debugGloasComment "do not this" + return errorResponse(Http400, BlockIncorrectFork) if request.proofs.isNone() or len(request.proofs.get()) == 0: return errorResponse(Http400, MissingMerkleProofError) @@ -328,13 +340,11 @@ proc installApiHandlers*(node: SigningNodeRef) = signatureResponse(Http200, signature) of Web3SignerRequestKind.ValidatorRegistration: let - forkInfo = request.forkInfo.get() - signature = get_builder_signature(forkInfo.fork, + signature = get_builder_signature( + node.genesis_fork_version, ValidatorRegistrationV1( - fee_recipient: - ExecutionAddress(data: distinctBase(Eth1Address.fromHex( - request.validatorRegistration.feeRecipient))), - gas_limit: request.validatorRegistration.gasLimit, + fee_recipient: request.validatorRegistration.fee_recipient, + gas_limit: request.validatorRegistration.gas_limit, timestamp: request.validatorRegistration.timestamp, pubkey: request.validatorRegistration.pubkey, ), @@ -345,7 +355,7 @@ proc asyncInit(sn: SigningNodeRef) {.async: (raises: [SigningNodeError]).} = notice "Launching signing node", version = fullVersionStr, cmdParams = commandLineParams(), config = sn.config - info "Initializaing validators", path = sn.config.validatorsDir() + info "Initializing validators", path = sn.config.validatorsDir() sn.loadKeystores() if sn.attachedValidators.count() == 0: @@ -483,9 +493,20 @@ proc runSigningNode(config: SigningNodeConf) {.async: (raises: []).} = if not sn.runWithSignals(asyncRun sn): return -programMain: - let config = - makeBannerAndConfig("Nimbus signing node " & fullVersionStr, - SigningNodeConf) +# noinline to keep it in stack traces +proc main() {.noinline, raises: [CatchableError].} = + const + banner = "Nimbus signing node " & fullVersionStr + copyright = + "Copyright (c) 2021-" & compileYear & " Status Research & Development GmbH" + + let config = SigningNodeConf.loadWithBanners(banner, copyright, [specBanner]).valueOr: + writePanicLine error # Logging not yet set up + quit QuitFailure + setupLogging(config.logLevel, config.logStdout, config.logFile) + waitFor runSigningNode(config) + +when isMainModule: + main() diff --git a/beacon_chain/nimbus_signing_node.nim.cfg b/beacon_chain/nimbus_signing_node.nim.cfg index 4c0d442473..30c8770614 100644 --- a/beacon_chain/nimbus_signing_node.nim.cfg +++ b/beacon_chain/nimbus_signing_node.nim.cfg @@ -1,6 +1,6 @@ -d:"chronicles_sinks=textlines[dynamic],json[dynamic]" -d:"chronicles_runtime_filtering=on" --d:"chronicles_disable_thread_id" +-d:"chronicles_thread_ids=no" @if release: -d:"chronicles_line_numbers:0" diff --git a/beacon_chain/nimbus_validator_client.nim b/beacon_chain/nimbus_validator_client.nim index 843a2fef4a..e1d7f92d47 100644 --- a/beacon_chain/nimbus_validator_client.nim +++ b/beacon_chain/nimbus_validator_client.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2025 Status Research & Development GmbH +# Copyright (c) 2020-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). 
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -12,7 +12,8 @@ import ./rpc/rest_key_management_api, ./validator_client/[ common, fallback_service, duties_service, fork_service, block_service, - doppelganger_service, attestation_service, sync_committee_service] + doppelganger_service, attestation_service, sync_committee_service], + ./buildinfo const PREGENESIS_EPOCHS_COUNT = 1 @@ -21,7 +22,9 @@ declareGauge validator_client_node_counts, "Number of connected beacon nodes and their status", labels = ["status"] -proc initGenesis(vc: ValidatorClientRef): Future[RestGenesis] {. +proc initGenesis( + vc: ValidatorClientRef +): Future[(seq[BeaconNodeServerRef], RestGenesis)] {. async: (raises: [CancelledError]).} = info "Initializing genesis", nodes_count = len(vc.beaconNodes) var nodes = vc.beaconNodes @@ -52,15 +55,14 @@ proc initGenesis(vc: ValidatorClientRef): Future[RestGenesis] {. for future in pendingRequests: if not(future.finished()): pending.add(future.cancelAndWait()) - await allFutures(pending) + await noCancel allFutures(pending) raise exc let (errorNodes, genesisList) = block: - var gres: seq[RestGenesis] + var gres: seq[(BeaconNodeServerRef, RestGenesis)] var bres: seq[BeaconNodeServerRef] - for i in 0 ..< len(pendingRequests): - let fut = pendingRequests[i] + for i, fut in pendingRequests: if fut.completed(): let resp = fut.value if resp.status == 200: @@ -68,9 +70,9 @@ proc initGenesis(vc: ValidatorClientRef): Future[RestGenesis] {. genesis_time = resp.data.data.genesis_time, genesis_fork_version = resp.data.data.genesis_fork_version, genesis_root = resp.data.data.genesis_validators_root - gres.add(resp.data.data) + gres.add((nodes[i], resp.data.data)) else: - debug "Received unsuccessful response code", endpoint = nodes[i], + debug "Received unexpected response code", endpoint = nodes[i], response_code = resp.status bres.add(nodes[i]) elif fut.failed(): @@ -95,7 +97,7 @@ proc initGenesis(vc: ValidatorClientRef): Future[RestGenesis] {. # Boyer-Moore majority vote algorithm var melem: RestGenesis var counter = 0 - for item in genesisList: + for (_, item) in genesisList: if counter == 0: melem = item inc(counter) @@ -104,7 +106,74 @@ proc initGenesis(vc: ValidatorClientRef): Future[RestGenesis] {. 
inc(counter) else: dec(counter) - return melem + var mnodes: seq[BeaconNodeServerRef] + for (node, item) in genesisList: + if item == melem: + mnodes.add node + return (mnodes, melem) + +proc initTimeConfig( + nodes: seq[BeaconNodeServerRef] +): Future[Opt[TimeConfig]] {.async: (raises: [CancelledError]).} = + var pendingRequests: seq[Future[RestResponse[GetSpecVCResponse]]] + for node in nodes: + debug "Requesting time configuration settings", node = node + pendingRequests.add(node.client.getSpecVC()) + + try: + await allFutures(pendingRequests) + except CancelledError as exc: + var pending: seq[Future[void]] + debug "Time configuration settings request was interrupted" + for future in pendingRequests: + if not(future.finished()): + pending.add(future.cancelAndWait()) + await noCancel allFutures(pending) + raise exc + + var + res: Opt[TimeConfig] + didEncounterDisagreement = false + for i, fut in pendingRequests: + if fut.completed(): + let resp = fut.value + if resp.status == 200: + if checkConfig(resp.data.data): + let timeConfig = resp.data.data.getTimeConfig() + if timeConfig.isSome: + debug "Received time configuration settings", endpoint = nodes[i], + seconds_per_slot = timeConfig.get.SECONDS_PER_SLOT + if res.isNone: + res = timeConfig + elif timeConfig.get == res.get: + discard # Duplicate + else: + warn "Received incompatible time configuration settings", + endpoint = nodes[i], + seconds_per_slot = timeConfig.get.SECONDS_PER_SLOT, + expected_seconds_per_slot = res.get.SECONDS_PER_SLOT + didEncounterDisagreement = true + else: + debug "Received invalid time configuration settings", + endpoint = nodes[i], config = resp.data.data + else: + debug "Received incompatible time configuration settings", + endpoint = nodes[i], config = resp.data.data + else: + debug "Received unexpected time configuration settings response code", + endpoint = nodes[i], response_code = resp.status + elif fut.failed(): + let error = fut.error + debug "Could not obtain time configuration settings from beacon node", + endpoint = nodes[i], error_name = error.name, + reason = error.msg + else: + debug "Interrupted while requesting time configuration from beacon node", + endpoint = nodes[i] + + if didEncounterDisagreement: + res.reset() + res proc addValidatorsFromWeb3Signer( vc: ValidatorClientRef, @@ -142,9 +211,11 @@ proc initClock( # This procedure performs initialization of BeaconClock using current genesis # information. It also performs waiting for genesis. 
let - res = BeaconClock.init(vc.beaconGenesis.genesis_time).valueOr: + res = BeaconClock.init( + vc.timeConfig, vc.beaconGenesis.genesis_time).valueOr: raise (ref ValidatorClientError)( - msg: "Invalid genesis time: " & $vc.beaconGenesis.genesis_time) + msg: "Invalid genesis time: " & $vc.beaconGenesis.genesis_time & + "; seconds_per_slot=" & $vc.timeConfig.SECONDS_PER_SLOT) currentTime = res.now() currentSlot = currentTime.slotOrZero() currentEpoch = currentSlot.epoch() @@ -154,11 +225,13 @@ proc initClock( info "Initializing beacon clock", genesis_time = vc.beaconGenesis.genesis_time, current_slot = "", current_epoch = "", - time_to_genesis = genesisTime.offset + time_to_genesis = genesisTime.offset, + seconds_per_slot = vc.timeConfig.SECONDS_PER_SLOT else: info "Initializing beacon clock", genesis_time = vc.beaconGenesis.genesis_time, - current_slot = currentSlot, current_epoch = currentEpoch + current_slot = currentSlot, current_epoch = currentEpoch, + seconds_per_slot = vc.timeConfig.SECONDS_PER_SLOT res proc shutdownSlashingProtection(vc: ValidatorClientRef) = @@ -312,7 +385,11 @@ proc asyncInit(vc: ValidatorClientRef): Future[ValidatorClientRef] {. else: notice "Cannot initialize beacon node", node = node, status = node.status - vc.beaconGenesis = await vc.initGenesis() + let (nodes, genesis) = await vc.initGenesis() + vc.timeConfig = (await nodes.initTimeConfig()).valueOr: + raise newException(ValidatorClientError, + "Could not obtain time configuration settings") + vc.beaconGenesis = genesis info "Genesis information", genesis_time = vc.beaconGenesis.genesis_time, genesis_fork_version = vc.beaconGenesis.genesis_fork_version, genesis_root = vc.beaconGenesis.genesis_validators_root @@ -409,54 +486,49 @@ proc asyncInit(vc: ValidatorClientRef): Future[ValidatorClientRef] {. 
proc runPreGenesisWaitingLoop( vc: ValidatorClientRef ) {.async: (raises: [CancelledError]).} = - var breakLoop = false - while not(breakLoop): + while true: let - genesisTime = vc.beaconClock.fromNow(Slot(0)) - currentEpoch = vc.beaconClock.now().toSlot().slot.epoch() + currentTime = vc.beaconClock.now() + currentSlot = currentTime.toSlot() + currentEpoch = currentSlot.slot.epoch() - if not(genesisTime.inFuture) or currentEpoch < PREGENESIS_EPOCHS_COUNT: + if currentSlot.afterGenesis or currentEpoch < PREGENESIS_EPOCHS_COUNT: break notice "Waiting for genesis", genesis_time = vc.beaconGenesis.genesis_time, - time_to_genesis = genesisTime.offset + time_to_genesis = GENESIS_SLOT.start_beacon_time() - currentTime - breakLoop = - try: - await sleepAsync(vc.beaconClock.durationToNextSlot()) - false - except CancelledError as exc: - debug "Pre-genesis waiting loop was interrupted" - raise exc + try: + await vc.waitForNextSlot(currentSlot) + except CancelledError as exc: + debug "Pre-genesis waiting loop was interrupted" + raise exc - if not(breakLoop): - vc.preGenesisEvent.fire() + vc.preGenesisEvent.fire() proc runGenesisWaitingLoop( vc: ValidatorClientRef ) {.async: (raises: [CancelledError]).} = - var breakLoop = false - while not(breakLoop): - let genesisTime = vc.beaconClock.fromNow(Slot(0)) + while true: + let + currentTime = vc.beaconClock.now() + currentSlot = currentTime.toSlot() - if not(genesisTime.inFuture): + if currentSlot.afterGenesis: break notice "Waiting for genesis", genesis_time = vc.beaconGenesis.genesis_time, - time_to_genesis = genesisTime.offset + time_to_genesis = GENESIS_SLOT.start_beacon_time() - currentTime - breakLoop = - try: - await sleepAsync(vc.beaconClock.durationToNextSlot()) - false - except CancelledError as exc: - debug "Genesis waiting loop was interrupted" - raise exc + try: + await vc.waitForNextSlot(currentSlot) + except CancelledError as exc: + debug "Genesis waiting loop was interrupted" + raise exc - if not(breakLoop): - vc.genesisEvent.fire() + vc.genesisEvent.fire() proc asyncRun*( vc: ValidatorClientRef @@ -562,15 +634,26 @@ proc runValidatorClient*( if not vc.runWithSignals(asyncRun vc): return -programMain: +# noinline to keep it in stack traces +proc main() {.noinline, raises: [CatchableError].} = + const + banner = "Nimbus validator client " & fullVersionStr + copyright = + "Copyright (c) 2020-" & compileYear & " Status Research & Development GmbH" + let - config = makeBannerAndConfig("Nimbus validator client " & fullVersionStr, - ValidatorClientConf) + config = ValidatorClientConf.loadWithBanners(banner, copyright, [specBanner]).valueOr: + writePanicLine error # Logging not yet set up + quit QuitFailure # Single RNG instance for the application - will be seeded on construction # and avoid using system resources (such as urandom) after that rng = HmacDrbgContext.new() - setupFileLimits() setupLogging(config.logLevel, config.logStdout, config.logFile) + setupFileLimits() + waitFor runValidatorClient(config, rng) + +when isMainModule: + main() diff --git a/beacon_chain/nimbus_validator_client.nim.cfg b/beacon_chain/nimbus_validator_client.nim.cfg index 4c0d442473..30c8770614 100644 --- a/beacon_chain/nimbus_validator_client.nim.cfg +++ b/beacon_chain/nimbus_validator_client.nim.cfg @@ -1,6 +1,6 @@ -d:"chronicles_sinks=textlines[dynamic],json[dynamic]" -d:"chronicles_runtime_filtering=on" --d:"chronicles_disable_thread_id" +-d:"chronicles_thread_ids=no" @if release: -d:"chronicles_line_numbers:0" diff --git a/beacon_chain/process_state.nim 
b/beacon_chain/process_state.nim new file mode 100644 index 0000000000..8d525ad96f --- /dev/null +++ b/beacon_chain/process_state.nim @@ -0,0 +1,198 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +## Process state helper using a global variable to coordinate multithreaded +## shutdown in the presence of C signals. +## +## The high-level idea is the following: +## +## * OS signals are monitored using `signal` - signals may be either +## process-directed or thread-directed, but all of them end up in the +## same `signal` handler as long as they're not masked +## * When the main thread launches another thread, it passes a "stop event" to +## the thread - this can be a chronos ThreadSignalPtr, a condvar/lock or any +## other cross-thread "wake-up" mechanism that can tell the thread that it's +## time to go +## * When a signal is activated, a global flag is set indicating that the +## polling loop of the main thread should stop +## * The main thread wakes up any threads it started and notifies them of the +## imminent shutdown then waits for them to terminate +## +## `chronos` has a `waitSignal` function that could be use to wake it when a +## signal arrives - at the time of writing, it only works in a single-threaded +## application when chronos is the only signal handler and requires using +## its own raising mechanism instead of the standard `raise`/`pthread_kill` +## functions which makes it difficult to use: +## https://github.com/status-im/nim-chronos/issues/581 +## +## As such, polling `ProcessState.stopping` ends up being the more reliable +## cross-platform solution in spite of its downsides. + +{.push raises: [].} + +import std/atomics, results + +export results + +type ProcessState* {.pure.} = enum + Starting + Running + Stopping + +var processState: Atomic[ProcessState] +var shutdownSource: Atomic[pointer] + +import system/ansi_c + +proc scheduleStop*(_: type ProcessState, source: cstring) = + ## Schedule that the process should stop in a thread-safe way. This function + ## can be used from non-nim threads as well. + ## + # TODO in theory, we could use `raise`/`kill`/`etc` depending on the platform + # to set `processState` from within the signal handler - if we were + # a kqueue/epoll-based signal handler, this would be the way to go so + # as to provide a wakeup notification - there are platform-based + # differences to take into account however, ie on kqueue, only process- + # directed signals are woken up whereas on linux, the signal has to + # reach the correct thread that is doing the waiting which requires + # special care. + var nilptr: pointer + discard shutdownSource.compareExchange(nilptr, source, moRelaxed) + processState.store(ProcessState.Stopping) + +proc notifyRunning*(_: type ProcessState) = + processState.store(ProcessState.Running, moRelaxed) + +proc setupStopHandlers*(_: type ProcessState) = + ## Install signal handlers for SIGINT/SIGTERM such that the application + ## updates `processState` on CTRL-C and similar, allowing it to gracefully + ## shut down by monitoring `ProcessState.stopping` at regular intervals. 
+ ## + ## This function should be called early on from the main thread to avoid the + ## default Nim signal handlers from being used as these will crash or close + ## the application. + + proc controlCHandler(a: cint) {.noconv.} = + # Cannot log in here because that would imply memory allocations and system + # calls + let sourceName = + if a == ansi_c.SIGINT: + cstring("SIGINT") + else: + cstring("SIGTERM") + + var nilptr: pointer + discard shutdownSource.compareExchange(nilptr, sourceName) + # Should also provide synchronization for the shutdownSource write.. + processState.store(Stopping) + + # Nim sets signal handlers using `c_signal`, but unfortunately these are broken + # since they perform memory allocations and call unsafe system functions: + # https://github.com/nim-lang/Nim/blob/c6352ce0ab5fef061b43c8ca960ff7728541b30b/lib/system/excpt.nim#L622 + + # Avoid using `setControlCHook` since it has an exception effect + c_signal(ansi_c.SIGINT, controlCHandler) + + # equivalent SIGTERM handler - this is only set on posix systems since on + # windows, SIGTERM is not generated - however, chronos may generate them so + # below, in the chronos version, we do monitor it on all platforms. + # https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/signal?view=msvc-170 + when defined(posix): + c_signal(ansi_c.SIGTERM, controlCHandler) + +proc running*(_: type ProcessState): bool = + processState.load(moRelaxed) == ProcessState.Running + +proc stopping*(_: type ProcessState): Opt[cstring] = + if processState.load(moRelaxed) == ProcessState.Stopping: + var source = cast[cstring](shutdownSource.load(moRelaxed)) + if source == nil: + source = "Stopped" + ok source + else: + Opt.none(cstring) + +template stopIt*(_: type ProcessState, body: untyped): bool = + let state = ProcessState.stopping() + if state.isSome(): + let it {.inject.} = state.get() + body + true + else: + false + +when isMainModule: # Test case + import os, chronos, chronos/threadsync + + proc threadWork() {.async.} = + var todo = 2 + while todo > 0: + # A few seconds to test ctrl-c-by-hand + echo "Terminating in ", todo + + await sleepAsync(1.seconds) + todo -= 1 + + echo "notification from thread" + # Sends signal from non-main thread + ProcessState.scheduleStop("thread") + + echo "Waiting for the end... " + await sleepAsync(10.seconds) + + raiseAssert "Should not reach here, ie stopping the thread should not take 10s" + + proc worker(p: ThreadSignalPtr) {.thread.} = + let + stop = p.wait() + work = threadWork() + discard waitFor noCancel race(stop, work) + + waitFor noCancel stop.cancelAndWait() + waitFor noCancel work.cancelAndWait() + + proc main() {.raises: [CatchableError].} = + let stopper = ThreadSignalPtr.new().expect("working thread signal") + + var workerThread: Thread[ThreadSignalPtr] + createThread(workerThread, worker, stopper) + + # Setup sync stop handlers - these are used whenever `waitSignal` is not + # used - whenever a `waitSignals` future is active, these signals should be + # masked - even if they are not masked, they are harmless in that they + # set the same flag as `waitStopSignals` does. 
+ ProcessState.setupStopHandlers() + + echo "main thread waiting" + while ProcessState.stopping.isNone: + os.sleep(100) + + echo "main thread firing stopper" + + # Notify the thread should stop itself as well using a ThreadSignalPtr + # rather than an OS signal - this is more portable + waitFor stopper.fire() + + workerThread.joinThread() + + echo "notification from main thread" + # Now let's reset and try the sync API + ProcessState.notifyRunning() + ProcessState.scheduleStop("done") + + # poll for 10s, this should be enough even on platforms with async signal + # delivery (like windows, presumably?) + for i in 0 ..< 100: + if ProcessState.stopping().isSome: + break + os.sleep(100) + + echo "done" + + doAssert ProcessState.stopping().isSome + + main() diff --git a/beacon_chain/rpc/rest_beacon_api.nim b/beacon_chain/rpc/rest_beacon_api.nim index 512d37e204..ec3ae5c865 100644 --- a/beacon_chain/rpc/rest_beacon_api.nim +++ b/beacon_chain/rpc/rest_beacon_api.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import std/[typetraits, sequtils, sets], @@ -15,8 +15,9 @@ import ./state_ttl_cache, ../beacon_node, ../consensus_object_pools/[blockchain_dag, spec_cache, validator_change_pool], - ../spec/[deposit_snapshots, eth2_merkleization, forks, network, validator], - ../spec/mev/[bellatrix_mev, capella_mev], + ../spec/[ + peerdas_helpers, eth2_merkleization, + forks, network, validator], ../validators/message_router_mev export rest_utils @@ -129,19 +130,69 @@ proc toString*(kind: ValidatorFilterKind): string = of ValidatorFilterKind.WithdrawalDone: "withdrawal_done" +proc handleDataSidecarRequest*[ + InvalidIndexValueError: static string, + DataSidecarsType: typedesc[List]; + getDataSidecar: static proc +]( + node: BeaconNode, + mediaType: Result[MediaType, cstring], + block_id: Result[BlockIdent, cstring], + indices: Result[seq[uint64], cstring], + maxDataSidecars: uint64): RestApiResponse = + let + contentType = mediaType.valueOr: + return RestApiResponse.jsonError( + Http406, ContentNotAcceptableError) + blockIdent = block_id.valueOr: + return RestApiResponse.jsonError( + Http400, InvalidBlockIdValueError, $error) + bid = node.getBlockId(blockIdent).valueOr: + return RestApiResponse.jsonError( + Http404, BlockNotFoundError) + indexFilter = (block: indices.valueOr: + return RestApiResponse.jsonError( + Http400, InvalidIndexValueError, $error)).toHashSet() + + data = newClone(default(DataSidecarsType)) + for dataIndex in 0'u64 ..< maxDataSidecars: + if indexFilter.len > 0 and dataIndex notin indexFilter: + continue + let dataSidecar = new DataSidecarsType.T + if getDataSidecar(node.dag.db, bid.root, dataIndex, dataSidecar[]): + discard data[].add dataSidecar[] + let consensusFork = node.dag.cfg.consensusForkAtEpoch(bid.slot.epoch) + + if contentType == sszMediaType: + RestApiResponse.sszResponse( + data[], consensusFork, node.hasRestAllowedOrigin) + elif contentType == jsonMediaType: + RestApiResponse.jsonResponseFinalizedWVersion( + data[].asSeq(), + Opt.some(node.dag.is_optimistic(bid)), node.dag.isFinalized(bid), + consensusFork, node.hasRestAllowedOrigin) + else: + RestApiResponse.jsonError(Http500, InvalidAcceptError) + +proc handleDataSidecarRequest*[ + InvalidIndexValueError: static string, + DataSidecarsType: typedesc[List]; + getDataSidecar: static proc 
+]( + node: BeaconNode, + mediaType: Result[MediaType, cstring], + block_id: Result[BlockIdent, cstring], + indices: Result[seq[uint64], cstring]): RestApiResponse = + handleDataSidecarRequest[ + InvalidIndexValueError, DataSidecarsType, getDataSidecar + ](node, mediaType, block_id, indices, DataSidecarsType.maxLen.uint64) + proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-4881.md router.api2(MethodGet, "/eth/v1/beacon/deposit_snapshot") do ( ) -> RestApiResponse: - let snapshot = node.db.getDepositContractSnapshot().valueOr: - # This can happen in a very short window after the client is started, - # but the snapshot record still haven't been upgraded in the database. - # Returning 404 should be easy to handle for the clients - they just need - # to retry. - return RestApiResponse.jsonError(Http404, - NoFinalizedSnapshotAvailableError) - - RestApiResponse.jsonResponse(snapshot.getTreeSnapshot()) + return RestApiResponse.jsonError(Http404, + NoFinalizedSnapshotAvailableError) # https://ethereum.github.io/beacon-APIs/#/Beacon/getGenesis router.api2(MethodGet, "/eth/v1/beacon/genesis") do () -> RestApiResponse: @@ -337,6 +388,44 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = ) RestApiResponse.jsonError(Http404, StateNotFoundError) + proc getValidatorIdentities( + node: BeaconNode, + bslot: BlockSlotId, + validatorIds: openArray[ValidatorIdent] + ): RestApiResponse = + node.withStateForBlockSlotId(bslot): + let + indices = node.getIndices(validatorIds, state).valueOr: + return RestApiResponse.jsonError(error) + response = + block: + var res: seq[RestValidatorIdentity] + if len(indices) == 0: + # Case when `len(indices) == 0 and len(validatorIds) != 0` means + # that we can't find validator identifiers in state, so we should + # return empty response. + if len(validatorIds) == 0: + # There are no indices, so we're going to filter all the + # validators. 
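+                # (i.e. no index filter was supplied, so identities for every
+                # validator in the state are returned)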
+ for index, validator in getStateField(state, validators): + res.add(RestValidatorIdentity.init(ValidatorIndex(index), + validator.pubkeyData.pubkey(), + validator.activation_epoch)) + else: + for index in indices: + let + validator = getStateField(state, validators).item(index) + res.add(RestValidatorIdentity.init(index, + validator.pubkeyData.pubkey(), + validator.activation_epoch)) + res + return RestApiResponse.jsonResponseFinalized( + response, + node.getStateOptimistic(state), + node.dag.isFinalized(bslot.bid) + ) + RestApiResponse.jsonError(Http404, StateNotFoundError) + proc getBalances( node: BeaconNode, bslot: BlockSlotId, @@ -561,6 +650,31 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = return RestApiResponse.jsonError(Http404, StateNotFoundError, $error) getBalances(node, bslot, validatorIds) + # https://ethereum.github.io/beacon-APIs/#/Beacon/postStateValidatorIdentities + router.metricsApi2( + MethodPost, "/eth/v1/beacon/states/{state_id}/validator_identities", + {RestServerMetricsType.Status, Response}) do ( + state_id: StateIdent, contentBody: Option[ContentBody]) -> RestApiResponse: + let + validatorIds = + block: + if contentBody.isNone(): + return RestApiResponse.jsonError(Http400, EmptyRequestBodyError) + let body = contentBody.get() + decodeBody(seq[ValidatorIdent], body).valueOr: + return RestApiResponse.jsonError( + Http400, InvalidValidatorIdValueError, $error) + sid = state_id.valueOr: + return RestApiResponse.jsonError(Http400, InvalidStateIdValueError, + $error) + bslot = node.getBlockSlotId(sid).valueOr: + if sid.kind == StateQueryKind.Root: + # TODO (cheatfate): Its impossible to retrieve state by `state_root` + # in current version of database. + return RestApiResponse.jsonError(Http500, NoImplementationError) + return RestApiResponse.jsonError(Http404, StateNotFoundError, $error) + getValidatorIdentities(node, bslot, validatorIds) + # https://ethereum.github.io/beacon-APIs/#/Beacon/getEpochCommittees router.metricsApi2( MethodGet, "/eth/v1/beacon/states/{state_id}/committees", @@ -764,8 +878,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = RestApiResponse.jsonError(Http404, StateNotFoundError) - # https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/getStateRandao - # https://github.com/ethereum/beacon-APIs/blob/b3c4defa238aaa74bf22aa602aa1b24b68a4c78e/apis/beacon/states/randao.yaml + # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v3.1.0#/Beacon/getStateRandao + # https://github.com/ethereum/beacon-APIs/blob/v3.1.0/apis/beacon/states/randao.yaml router.metricsApi2( MethodGet, "/eth/v1/beacon/states/{state_id}/randao", {RestServerMetricsType.Status, Response}) do ( @@ -808,9 +922,11 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = if bsi.isSome: let mix = node.dag.computeRandaoMix(bsi.get.bid) if mix.isSome: - return RestApiResponse.jsonResponseWOpt( + let bid = bsi.get.bid + return RestApiResponse.jsonResponseFinalized( RestEpochRandao(randao: mix.get), - node.getBidOptimistic(bsi.get.bid) + node.getBidOptimistic(bid), + node.dag.isFinalized(bid) ) # Fall back to full state computation @@ -925,15 +1041,15 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = doAssert strictVerification notin node.dag.updateFlags return RestApiResponse.jsonError(Http400, InvalidBlockObjectError) - when consensusFork >= ConsensusFork.Deneb: + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: await 
node.router.routeSignedBeaconBlock( forkyBlck, Opt.some( forkyBlck.create_blob_sidecars(kzg_proofs, blobs)), - checkValidator = true) + Opt.none(seq[fulu.DataColumnSidecar]), checkValidator = true) else: await node.router.routeSignedBeaconBlock( forkyBlck, Opt.none(seq[BlobSidecar]), - checkValidator = true) + Opt.none(seq[fulu.DataColumnSidecar]), checkValidator = true) if res.isErr(): return RestApiResponse.jsonError( @@ -951,7 +1067,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = block: let version = request.headers.getString("eth-consensus-version") - validation = + validation {.used.} = block: let res = if broadcast_validation.isNone(): @@ -982,14 +1098,24 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = doAssert strictVerification notin node.dag.updateFlags return RestApiResponse.jsonError(Http400, InvalidBlockObjectError) - when consensusFork >= ConsensusFork.Deneb: + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: await node.router.routeSignedBeaconBlock( forkyBlck, Opt.some( forkyBlck.create_blob_sidecars(kzg_proofs, blobs)), + Opt.none(seq[fulu.DataColumnSidecar]), + checkValidator = true) + elif consensusFork >= ConsensusFork.Fulu: + let data_columns = assemble_data_column_sidecars( + forkyBlck, blobs.mapIt(kzg.KzgBlob(bytes: it)), + @(kzg_proofs.mapIt(kzg.KzgProof(it)))) + await node.router.routeSignedBeaconBlock( + forkyBlck, Opt.none(seq[BlobSidecar]), + Opt.some(data_columns), checkValidator = true) else: await node.router.routeSignedBeaconBlock( forkyBlck, Opt.none(seq[BlobSidecar]), + Opt.none(seq[fulu.DataColumnSidecar]), checkValidator = true) if res.isErr(): @@ -1024,20 +1150,21 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = signedMaybeBlindedBlck: auto, consensusFork: ConsensusFork): untyped = if contentType == sszMediaType: RestApiResponse.sszResponse( - signedMaybeBlindedBlck, - [("eth-consensus-version", consensusFork.toString())]) + signedMaybeBlindedBlck, consensusFork, node.hasRestAllowedOrigin) elif contentType == jsonMediaType: RestApiResponse.jsonResponseBlock( signedMaybeBlindedBlck, - consensusFork, node.getBlockOptimistic(bdata), - node.dag.isFinalized(bid) - ) + node.dag.isFinalized(bid), + consensusFork, node.hasRestAllowedOrigin) else: RestApiResponse.jsonError(Http500, InvalidAcceptError) withBlck(bdata.asSigned()): - when consensusFork <= ConsensusFork.Altair: + when consensusFork == ConsensusFork.Gloas: + debugGloasComment "" + return RestApiResponse.jsonError(Http404, BlockNotFoundError) + elif consensusFork <= ConsensusFork.Altair: respondSszOrJson(forkyBlck, consensusFork) else: respondSszOrJson(toSignedBlindedBeaconBlock(forkyBlck), consensusFork) @@ -1078,7 +1205,11 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = return RestApiResponse.jsonError(Http400, BlockIncorrectFork) withConsensusFork(currentEpochFork): - when consensusFork >= ConsensusFork.Deneb: + when consensusFork == ConsensusFork.Gloas: + debugGloasComment "" + return RestApiResponse.jsonError( + Http400, $consensusFork & " builder API unsupported") + elif consensusFork >= ConsensusFork.Electra: let restBlock = decodeBodyJsonOrSsz( consensusFork.SignedBlindedBeaconBlock, body).valueOr: @@ -1119,7 +1250,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = forkyBlck.root = hash_tree_root(forkyBlck.message) await node.router.routeSignedBeaconBlock( forkyBlck, Opt.none(seq[BlobSidecar]), - checkValidator = true) + 
Opt.none(seq[fulu.DataColumnSidecar]), checkValidator = true) if res.isErr(): return RestApiResponse.jsonError( @@ -1140,7 +1271,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = currentEpochFork = node.dag.cfg.consensusForkAtEpoch(node.currentSlot().epoch()) version = request.headers.getString("eth-consensus-version") - validation = + validation {.used.} = if broadcast_validation.isNone(): BroadcastValidationType.Gossip else: @@ -1161,7 +1292,11 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = withConsensusFork(currentEpochFork): # TODO (cheatfate): handle broadcast_validation flag - when consensusFork >= ConsensusFork.Deneb: + when consensusFork >= ConsensusFork.Gloas: + debugGloasComment "" + return RestApiResponse.jsonError( + Http400, $consensusFork & " builder API unsupported") + elif consensusFork >= ConsensusFork.Electra: let restBlock = decodeBodyJsonOrSsz( consensusFork.SignedBlindedBeaconBlock, body).valueOr: @@ -1202,7 +1337,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = forkyBlck.root = hash_tree_root(forkyBlck.message) await node.router.routeSignedBeaconBlock( forkyBlck, Opt.none(seq[BlobSidecar]), - checkValidator = true) + Opt.none(seq[fulu.DataColumnSidecar]), checkValidator = true) if res.isErr(): return RestApiResponse.jsonError( @@ -1228,24 +1363,21 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = bid = node.getBlockId(blockIdent).valueOr: return RestApiResponse.jsonError(Http404, BlockNotFoundError) - let contentType = - block: + contentType = block: let res = preferredContentType(jsonMediaType, sszMediaType) if res.isErr(): return RestApiResponse.jsonError(Http406, ContentNotAcceptableError) res.get() + consensusFork = node.dag.cfg.consensusForkAtEpoch(bid.slot.epoch) if contentType == sszMediaType: var data: seq[byte] if not node.dag.getBlockSSZ(bid, data): return RestApiResponse.jsonError(Http404, BlockNotFoundError) - let - fork = node.dag.cfg.consensusForkAtEpoch(bid.slot.epoch) - headers = [("eth-consensus-version", fork.toString())] - - RestApiResponse.sszResponsePlain(data, headers) + RestApiResponse.sszResponsePlain( + data, consensusFork, node.hasRestAllowedOrigin) elif contentType == jsonMediaType: let bdata = node.dag.getForkedBlock(bid).valueOr: return RestApiResponse.jsonError(Http404, BlockNotFoundError) @@ -1253,8 +1385,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = RestApiResponse.jsonResponseBlock( bdata.asSigned(), node.getBlockOptimistic(bdata), - node.dag.isFinalized(bid) - ) + node.dag.isFinalized(bid), + node.hasRestAllowedOrigin) else: RestApiResponse.jsonError(Http500, InvalidAcceptError) @@ -1314,8 +1446,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = forkyBlck.message.body.attestations.asSeq(), node.getBlockOptimistic(bdata), node.dag.isFinalized(bid), - consensusFork - ) + consensusFork, node.hasRestAllowedOrigin) # https://ethereum.github.io/beacon-APIs/#/Beacon/getPoolAttestations router.api2(MethodGet, "/eth/v1/beacon/pool/attestations") do ( @@ -1378,11 +1509,11 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = if consensusFork < ConsensusFork.Electra: return RestApiResponse.jsonResponseWVersion( toSeq(node.attestationPool[].attestations(vslot, vindex)), - consensusFork) + consensusFork, node.hasRestAllowedOrigin) else: return RestApiResponse.jsonResponseWVersion( toSeq(node.attestationPool[].electraAttestations(vslot, 
vindex)), - consensusFork) + consensusFork, node.hasRestAllowedOrigin) # https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolAttestations router.api2(MethodPost, "/eth/v1/beacon/pool/attestations") do ( @@ -1432,7 +1563,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = contentBody: Option[ContentBody]) -> RestApiResponse: let - headerVersion = request.headers.getString("Eth-Consensus-Version") + headerVersion = request.headers.getString("eth-consensus-version") consensusVersion = ConsensusFork.init(headerVersion) if consensusVersion.isNone(): return RestApiResponse.jsonError(Http400, FailedToObtainConsensusForkError) @@ -1455,7 +1586,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = case consensusVersion.get(): of ConsensusFork.Phase0 .. ConsensusFork.Deneb: decodeAttestations(phase0.Attestation) - of ConsensusFork.Electra .. ConsensusFork.Fulu: + of ConsensusFork.Electra .. ConsensusFork.Gloas: decodeAttestations(electra.SingleAttestation) let failures = @@ -1520,18 +1651,18 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = when consensusFork < ConsensusFork.Electra: RestApiResponse.jsonResponseWVersion( toSeq(node.validatorChangePool.phase0_attester_slashings), - contextFork) + contextFork, node.hasRestAllowedOrigin) else: RestApiResponse.jsonResponseWVersion( toSeq(node.validatorChangePool.electra_attester_slashings), - contextFork) + contextFork, node.hasRestAllowedOrigin) # https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/submitPoolAttesterSlashingsV2 router.api(MethodPost, "/eth/v2/beacon/pool/attester_slashings") do ( contentBody: Option[ContentBody]) -> RestApiResponse: let - headerVersion = request.headers.getString("Eth-Consensus-Version") + headerVersion = request.headers.getString("eth-consensus-version") consensusVersion = ConsensusFork.init(headerVersion) if consensusVersion.isNone(): return RestApiResponse.jsonError(Http400, FailedToObtainConsensusForkError) @@ -1555,7 +1686,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = case consensusVersion.get(): of ConsensusFork.Phase0 .. ConsensusFork.Deneb: decodeAttesterSlashing(phase0.AttesterSlashing) - of ConsensusFork.Electra .. ConsensusFork.Fulu: + of ConsensusFork.Electra .. 
ConsensusFork.Gloas: decodeAttesterSlashing(electra.AttesterSlashing) # https://ethereum.github.io/beacon-APIs/#/Beacon/getPoolProposerSlashings @@ -1686,54 +1817,21 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.2#/Beacon/getBlobSidecars # https://github.com/ethereum/beacon-APIs/blob/v2.4.2/apis/beacon/blob_sidecars/blob_sidecars.yaml router.api2(MethodGet, "/eth/v1/beacon/blob_sidecars/{block_id}") do ( - block_id: BlockIdent, indices: seq[uint64]) -> RestApiResponse: - let - blockIdent = block_id.valueOr: - return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError, - $error) - bid = node.getBlockId(blockIdent).valueOr: - return RestApiResponse.jsonError(Http404, BlockNotFoundError) - - contentType = block: - let res = preferredContentType(jsonMediaType, - sszMediaType) - if res.isErr(): - return RestApiResponse.jsonError(Http406, ContentNotAcceptableError) - res.get() - + block_id: BlockIdent, indices: seq[uint64]) -> RestApiResponse: # https://github.com/ethereum/beacon-APIs/blob/v2.4.2/types/deneb/blob_sidecar.yaml#L2-L28 # The merkleization limit of the list is `MAX_BLOB_COMMITMENTS_PER_BLOCK`, # the serialization limit is configurable and is: # - `MAX_BLOBS_PER_BLOCK` from Deneb onward # - `MAX_BLOBS_PER_BLOCK_ELECTRA` from Electra. - let data = newClone(default( - List[BlobSidecar, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK])) - - if indices.isErr: - return RestApiResponse.jsonError(Http400, - InvalidSidecarIndexValueError) - - let indexFilter = indices.get.toHashSet - - for blobIndex in 0'u64 ..< node.dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA: - if indexFilter.len > 0 and blobIndex notin indexFilter: - continue - - var blobSidecar = new BlobSidecar - - if node.dag.db.getBlobSidecar(bid.root, blobIndex, blobSidecar[]): - discard data[].add blobSidecar[] - - if contentType == sszMediaType: - RestApiResponse.sszResponse( - data[], headers = [("eth-consensus-version", - node.dag.cfg.consensusForkAtEpoch(bid.slot.epoch).toString())]) - elif contentType == jsonMediaType: - RestApiResponse.jsonResponse(data) - else: - RestApiResponse.jsonError(Http500, InvalidAcceptError) - - # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v3.0.0#/Beacon/getPendingDeposits + handleDataSidecarRequest[ + InvalidBlobSidecarIndexValueError, + List[BlobSidecar, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK], + getBlobSidecar + ]( + node, preferredContentType(jsonMediaType, sszMediaType), + block_id, indices, node.dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + + # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v3.1.0#/Beacon/getPendingDeposits router.metricsApi2( MethodGet, "/eth/v1/beacon/states/{state_id}/pending_deposits", {RestServerMetricsType.Status, Response}) do ( @@ -1753,17 +1851,18 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = node.withStateForBlockSlotId(bslot): return withState(state): when consensusFork >= ConsensusFork.Electra: - RestApiResponse.jsonResponseFinalized( + RestApiResponse.jsonResponseFinalizedWVersion( forkyState.data.pending_deposits, node.getStateOptimistic(state), - node.dag.isFinalized(bslot.bid)) + node.dag.isFinalized(bslot.bid), + consensusFork, node.hasRestAllowedOrigin) else: RestApiResponse.jsonError(Http400, SlotFromTheIncorrectForkError, $error) RestApiResponse.jsonError(Http404, StateNotFoundError) - # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v3.0.0#/Beacon/getPendingPartialWithdrawals + # 
https://ethereum.github.io/beacon-APIs/?urls.primaryName=v3.1.0#/Beacon/getPendingPartialWithdrawals router.metricsApi2( MethodGet, "/eth/v1/beacon/states/{state_id}/pending_partial_withdrawals", {RestServerMetricsType.Status, Response}) do ( @@ -1783,10 +1882,42 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = node.withStateForBlockSlotId(bslot): return withState(state): when consensusFork >= ConsensusFork.Electra: - RestApiResponse.jsonResponseFinalized( + RestApiResponse.jsonResponseFinalizedWVersion( forkyState.data.pending_partial_withdrawals, node.getStateOptimistic(state), - node.dag.isFinalized(bslot.bid)) + node.dag.isFinalized(bslot.bid), + consensusFork, node.hasRestAllowedOrigin) + else: + RestApiResponse.jsonError(Http400, SlotFromTheIncorrectForkError, + $error) + + RestApiResponse.jsonError(Http404, StateNotFoundError) + + # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v3.1.0#/Beacon/getPendingConsolidations + router.metricsApi2( + MethodGet, "/eth/v1/beacon/states/{state_id}/pending_consolidations", + {RestServerMetricsType.Status, Response}) do ( + state_id: StateIdent) -> RestApiResponse: + let + sid = state_id.valueOr: + return RestApiResponse.jsonError(Http400, InvalidStateIdValueError, + $error) + bslot = node.getBlockSlotId(sid).valueOr: + if sid.kind == StateQueryKind.Root: + # TODO (cheatfate): Its impossible to retrieve state by `state_root` + # in current version of database. + return RestApiResponse.jsonError(Http500, NoImplementationError) + return RestApiResponse.jsonError(Http404, StateNotFoundError, + $error) + + node.withStateForBlockSlotId(bslot): + return withState(state): + when consensusFork >= ConsensusFork.Electra: + RestApiResponse.jsonResponseFinalizedWVersion( + forkyState.data.pending_consolidations, + node.getStateOptimistic(state), + node.dag.isFinalized(bslot.bid), + consensusFork, node.hasRestAllowedOrigin) else: RestApiResponse.jsonError(Http400, SlotFromTheIncorrectForkError, $error) diff --git a/beacon_chain/rpc/rest_builder_api.nim b/beacon_chain/rpc/rest_builder_api.nim index cc9482ec66..0e3e24a233 100644 --- a/beacon_chain/rpc/rest_builder_api.nim +++ b/beacon_chain/rpc/rest_builder_api.nim @@ -1,10 +1,12 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. +{.push raises: [], gcsafe.} + import ./rest_utils, ./state_ttl_cache, diff --git a/beacon_chain/rpc/rest_config_api.nim b/beacon_chain/rpc/rest_config_api.nim index 817e88668d..eb4996cfcf 100644 --- a/beacon_chain/rpc/rest_config_api.nim +++ b/beacon_chain/rpc/rest_config_api.nim @@ -5,8 +5,9 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} +import std/algorithm, json, sequtils import stew/[byteutils, base10], chronicles import ".."/beacon_node, ".."/spec/forks, @@ -16,11 +17,28 @@ export rest_utils logScope: topics = "rest_config" +func cmpBPOconfig(x, y: BlobParameters): int = + cmp(x.EPOCH.distinctBase, y.EPOCH.distinctBase) + proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = template cfg(): auto = node.dag.cfg let cachedForkSchedule = RestApiResponse.prepareJsonResponse(getForkSchedule(cfg)) + + # This has been intentionally copied and sorted in ascending order + # as the spec demands the endpoint to be sorted in this fashion. + # The spec says: + # There MUST NOT exist multiple blob schedule entries with the same epoch value. + # The maximum blobs per block limit for blob schedules entries MUST be less than + # or equal to `MAX_BLOB_COMMITMENTS_PER_BLOCK`. The blob schedule entries SHOULD + # be sorted by epoch in ascending order. The blob schedule MAY be empty. + sortedBlobSchedule = cfg.BLOB_SCHEDULE.sorted(cmp=cmpBPOconfig) + restBlobSchedule = sortedBlobSchedule.mapIt(%*{ + "EPOCH": Base10.toString(uint64(it.EPOCH)), + "MAX_BLOBS_PER_BLOCK": Base10.toString(uint64(it.MAX_BLOBS_PER_BLOCK)) + }) + cachedConfigSpec = RestApiResponse.prepareJsonResponse( ( @@ -194,7 +212,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = Base10.toString(uint64(cfg.FULU_FORK_EPOCH)), SECONDS_PER_SLOT: - Base10.toString(SECONDS_PER_SLOT), + Base10.toString(cfg.time.SECONDS_PER_SLOT), SECONDS_PER_ETH1_BLOCK: Base10.toString(cfg.SECONDS_PER_ETH1_BLOCK), MIN_VALIDATOR_WITHDRAWABILITY_DELAY: @@ -220,11 +238,11 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = PROPOSER_SCORE_BOOST: Base10.toString(PROPOSER_SCORE_BOOST), REORG_HEAD_WEIGHT_THRESHOLD: - Base10.toString(REORG_HEAD_WEIGHT_THRESHOLD), + Base10.toString(cfg.REORG_HEAD_WEIGHT_THRESHOLD), REORG_PARENT_WEIGHT_THRESHOLD: Base10.toString(REORG_PARENT_WEIGHT_THRESHOLD), REORG_MAX_EPOCHS_SINCE_FINALIZATION: - Base10.toString(uint64(REORG_MAX_EPOCHS_SINCE_FINALIZATION)), + Base10.toString(cfg.REORG_MAX_EPOCHS_SINCE_FINALIZATION), DEPOSIT_CHAIN_ID: Base10.toString(cfg.DEPOSIT_CHAIN_ID), @@ -241,10 +259,6 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = Base10.toString(EPOCHS_PER_SUBNET_SUBSCRIPTION), MIN_EPOCHS_FOR_BLOCK_REQUESTS: Base10.toString(cfg.MIN_EPOCHS_FOR_BLOCK_REQUESTS), - TTFB_TIMEOUT: - Base10.toString(TTFB_TIMEOUT), - RESP_TIMEOUT: - Base10.toString(RESP_TIMEOUT), ATTESTATION_PROPAGATION_SLOT_RANGE: Base10.toString(ATTESTATION_PROPAGATION_SLOT_RANGE), MAXIMUM_GOSSIP_CLOCK_DISPARITY: @@ -285,26 +299,25 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = Base10.toString(cfg.MAX_REQUEST_BLOB_SIDECARS_ELECTRA), NUMBER_OF_COLUMNS: - Base10.toString(NUMBER_OF_COLUMNS.uint64), + Base10.toString(cfg.NUMBER_OF_COLUMNS.uint64), NUMBER_OF_CUSTODY_GROUPS: - Base10.toString(NUMBER_OF_CUSTODY_GROUPS.uint64), + Base10.toString(cfg.NUMBER_OF_CUSTODY_GROUPS.uint64), DATA_COLUMN_SIDECAR_SUBNET_COUNT: - Base10.toString(DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64), + Base10.toString(cfg.DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64), MAX_REQUEST_DATA_COLUMN_SIDECARS: - Base10.toString(MAX_REQUEST_DATA_COLUMN_SIDECARS), + Base10.toString(cfg.MAX_REQUEST_DATA_COLUMN_SIDECARS), SAMPLES_PER_SLOT: - Base10.toString(SAMPLES_PER_SLOT.uint64), + Base10.toString(cfg.SAMPLES_PER_SLOT.uint64), CUSTODY_REQUIREMENT: - 
Base10.toString(CUSTODY_REQUIREMENT.uint64), + Base10.toString(cfg.CUSTODY_REQUIREMENT.uint64), VALIDATOR_CUSTODY_REQUIREMENT: - Base10.toString(VALIDATOR_CUSTODY_REQUIREMENT.uint64), + Base10.toString(cfg.VALIDATOR_CUSTODY_REQUIREMENT.uint64), BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: - Base10.toString(BALANCE_PER_ADDITIONAL_CUSTODY_GROUP), - # MAX_BLOBS_PER_BLOCK_FULU: - # Base10.toString(cfg.MAX_BLOBS_PER_BLOCK_FULU), - # MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: - # Base10.toString(cfg.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS), - + Base10.toString(cfg.BALANCE_PER_ADDITIONAL_CUSTODY_GROUP.uint64), + BLOB_SCHEDULE: + restBlobSchedule, + MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: + Base10.toString(cfg.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS.uint64), # https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#constants # GENESIS_SLOT # GENESIS_EPOCH diff --git a/beacon_chain/rpc/rest_constants.nim b/beacon_chain/rpc/rest_constants.nim index dfa155d102..8f240567fe 100644 --- a/beacon_chain/rpc/rest_constants.nim +++ b/beacon_chain/rpc/rest_constants.nim @@ -267,8 +267,10 @@ const "Failed to obtain fork information" InvalidTimestampValue* = "Invalid or missing timestamp value" - InvalidSidecarIndexValueError* = + InvalidBlobSidecarIndexValueError* = "Invalid blob index" + InvalidDataColumnSidecarIndexValueError* = + "Invalid data column index" InvalidBroadcastValidationType* = "Invalid broadcast_validation type value" PathNotFoundError* = @@ -279,3 +281,5 @@ const "Unable to load state for parent block, database corrupt?" RewardOverflowError* = "Reward value overflow" + HistoricalSummariesUnavailable* = + "Historical summaries unavailable" diff --git a/beacon_chain/rpc/rest_debug_api.nim b/beacon_chain/rpc/rest_debug_api.nim index 1013c09e75..6b5f6754c7 100644 --- a/beacon_chain/rpc/rest_debug_api.nim +++ b/beacon_chain/rpc/rest_debug_api.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -11,7 +11,7 @@ import std/sequtils import chronicles, metrics import ".."/beacon_node, ".."/spec/forks, - "."/[rest_utils, state_ttl_cache] + "."/[rest_beacon_api, rest_utils, state_ttl_cache] from ../fork_choice/proto_array import ProtoArrayItem, items @@ -20,19 +20,33 @@ export rest_utils logScope: topics = "rest_debug" proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) = + # https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Debug/getDebugDataColumnSidecars + # https://github.com/ethereum/beacon-APIs/blob/v4.0.0-alpha.0/apis/debug/data_column_sidecars.yaml + router.api2( + MethodGet, "/eth/v1/debug/beacon/data_column_sidecars/{block_id}") do ( + block_id: BlockIdent, indices: seq[uint64]) -> RestApiResponse: + handleDataSidecarRequest[ + InvalidDataColumnSidecarIndexValueError, + List[fulu.DataColumnSidecar, NUMBER_OF_COLUMNS], + getDataColumnSidecar + ]( + node, preferredContentType(jsonMediaType, sszMediaType), + block_id, indices) + # https://ethereum.github.io/beacon-APIs/#/Debug/getState router.api2(MethodGet, "/eth/v1/debug/beacon/states/{state_id}") do ( state_id: StateIdent) -> RestApiResponse: RestApiResponse.jsonError( Http410, DeprecatedRemovalBeaconBlocksDebugStateV1) - # https://ethereum.github.io/beacon-APIs/#/Debug/getStateV2 + # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v3.1.0#/Debug/getStateV2 + # https://github.com/ethereum/beacon-APIs/blob/v4.0.0-alpha.0/apis/debug/state.v2.yaml router.metricsApi2( MethodGet, "/eth/v2/debug/beacon/states/{state_id}", {RestServerMetricsType.Status, Response}) do ( state_id: StateIdent) -> RestApiResponse: - let bslot = - block: + let + bslot = block: if state_id.isErr(): return RestApiResponse.jsonError(Http400, InvalidStateIdValueError, $state_id.error()) @@ -41,8 +55,7 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) = return RestApiResponse.jsonError(Http404, StateNotFoundError, $bres.error()) bres.get() - let contentType = - block: + contentType = block: let res = preferredContentType(jsonMediaType, sszMediaType) if res.isErr(): @@ -53,11 +66,13 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) = return if contentType == jsonMediaType: RestApiResponse.jsonResponseState( - state, node.getStateOptimistic(state)) + state, node.getStateOptimistic(state), + node.dag.isFinalized(bslot.bid), + node.hasRestAllowedOrigin) elif contentType == sszMediaType: - let headers = [("eth-consensus-version", state.kind.toString())] withState(state): - RestApiResponse.sszResponse(forkyState.data, headers) + RestApiResponse.sszResponse( + forkyState.data, state.kind, node.hasRestAllowedOrigin) else: RestApiResponse.jsonError(Http500, InvalidAcceptError) @@ -97,14 +112,14 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) = unrealized = item.unrealized.get(item.checkpoints) u_justified_checkpoint = if unrealized.justified != item.checkpoints.justified: - some unrealized.justified + Opt.some unrealized.justified else: - none(Checkpoint) + Opt.none(Checkpoint) u_finalized_checkpoint = if unrealized.finalized != item.checkpoints.finalized: - some unrealized.finalized + Opt.some unrealized.finalized else: - none(Checkpoint) + Opt.none(Checkpoint) response.fork_choice_nodes.add RestNode( slot: item.bid.slot, @@ -129,12 +144,12 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) = RestNodeValidity.valid, execution_block_hash: node.dag.loadExecutionBlockHash(item.bid).get(ZERO_HASH), - extra_data: some 
RestNodeExtraData( + extra_data: Opt.some RestNodeExtraData( justified_root: item.checkpoints.justified.root, finalized_root: item.checkpoints.finalized.root, u_justified_checkpoint: u_justified_checkpoint, u_finalized_checkpoint: u_finalized_checkpoint, best_child: item.bestChild, - bestDescendant: item.bestDescendant)) + best_descendant: item.bestDescendant)) RestApiResponse.jsonResponsePlain(response) diff --git a/beacon_chain/rpc/rest_event_api.nim b/beacon_chain/rpc/rest_event_api.nim index a6aa4f9e4b..6c627b5173 100644 --- a/beacon_chain/rpc/rest_event_api.nim +++ b/beacon_chain/rpc/rest_event_api.nim @@ -133,7 +133,7 @@ proc installEventApiHandlers*(router: var RestRouter, node: BeaconNode) = let handler = response.eventHandler(node.eventBus.phase0AttestQueue, "attestation") res.add(handler) - if EventTopic.Attestation in eventTopics: + if EventTopic.SingleAttestation in eventTopics: let handler = response.eventHandler(node.eventBus.singleAttestQueue, "single_attestation") res.add(handler) @@ -163,6 +163,10 @@ proc installEventApiHandlers*(router: var RestRouter, node: BeaconNode) = let handler = response.eventHandler(node.eventBus.blobSidecarQueue, "blob_sidecar") res.add(handler) + if EventTopic.DataColumnSidecar in eventTopics: + let handler = response.eventHandler(node.eventBus.columnSidecarQueue, + "data_column_sidecar") + res.add(handler) if EventTopic.FinalizedCheckpoint in eventTopics: let handler = response.eventHandler(node.eventBus.finalQueue, "finalized_checkpoint") diff --git a/beacon_chain/rpc/rest_light_client_api.nim b/beacon_chain/rpc/rest_light_client_api.nim index 0d0c15ad52..9c50d93594 100644 --- a/beacon_chain/rpc/rest_light_client_api.nim +++ b/beacon_chain/rpc/rest_light_client_api.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -41,10 +41,11 @@ proc installLightClientApiHandlers*(router: var RestRouter, node: BeaconNode) = contextFork = node.dag.cfg.consensusForkAtEpoch(contextEpoch) return if contentType == sszMediaType: - let headers = [("eth-consensus-version", contextFork.toString())] - RestApiResponse.sszResponse(forkyBootstrap, headers) + RestApiResponse.sszResponse( + forkyBootstrap, contextFork, node.hasRestAllowedOrigin) elif contentType == jsonMediaType: - RestApiResponse.jsonResponseWVersion(forkyBootstrap, contextFork) + RestApiResponse.jsonResponseWVersion( + forkyBootstrap, contextFork, node.hasRestAllowedOrigin) else: RestApiResponse.jsonError(Http500, InvalidAcceptError) else: @@ -102,10 +103,9 @@ proc installLightClientApiHandlers*(router: var RestRouter, node: BeaconNode) = else: continue contextFork = node.dag.cfg.consensusForkAtEpoch(contextEpoch) + contextBytes = node.dag.forkDigestAtEpoch(contextEpoch) updates.add RestVersioned[ForkedLightClientUpdate]( - data: update, - jsonVersion: contextFork, - sszContext: node.dag.forkDigests[].atConsensusFork(contextFork)) + data: update, jsonVersion: contextFork, sszContext: contextBytes) return if contentType == sszMediaType: @@ -136,11 +136,11 @@ proc installLightClientApiHandlers*(router: var RestRouter, node: BeaconNode) = contextFork = node.dag.cfg.consensusForkAtEpoch(contextEpoch) return if contentType == sszMediaType: - let headers = [("eth-consensus-version", contextFork.toString())] - RestApiResponse.sszResponse(forkyFinalityUpdate, headers) + RestApiResponse.sszResponse( + forkyFinalityUpdate, contextFork, node.hasRestAllowedOrigin) elif contentType == jsonMediaType: RestApiResponse.jsonResponseWVersion( - forkyFinalityUpdate, contextFork) + forkyFinalityUpdate, contextFork, node.hasRestAllowedOrigin) else: RestApiResponse.jsonError(Http500, InvalidAcceptError) else: @@ -167,11 +167,11 @@ proc installLightClientApiHandlers*(router: var RestRouter, node: BeaconNode) = contextFork = node.dag.cfg.consensusForkAtEpoch(contextEpoch) return if contentType == sszMediaType: - let headers = [("eth-consensus-version", contextFork.toString())] - RestApiResponse.sszResponse(forkyOptimisticUpdate, headers) + RestApiResponse.sszResponse( + forkyOptimisticUpdate, contextFork, node.hasRestAllowedOrigin) elif contentType == jsonMediaType: RestApiResponse.jsonResponseWVersion( - forkyOptimisticUpdate, contextFork) + forkyOptimisticUpdate, contextFork, node.hasRestAllowedOrigin) else: RestApiResponse.jsonError(Http500, InvalidAcceptError) else: diff --git a/beacon_chain/rpc/rest_nimbus_api.nim b/beacon_chain/rpc/rest_nimbus_api.nim index 027ae72a62..e531ac38da 100644 --- a/beacon_chain/rpc/rest_nimbus_api.nim +++ b/beacon_chain/rpc/rest_nimbus_api.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -8,7 +8,7 @@ {.push raises: [].} import - std/[sequtils], + std/sequtils, stew/base10, chronicles, chronos/apps/http/httpdebug, @@ -16,7 +16,6 @@ import libp2p/protocols/pubsub/pubsubpeer, ./rest_utils, ../el/el_manager, - ../validators/beacon_validators, ../spec/[forks, beacon_time], ../beacon_node, ../nimbus_binary_common @@ -107,8 +106,6 @@ type connected*: bool RestJson.useDefaultSerializationFor( - BlockProposalEth1Data, - Eth1BlockObj, RestChronosMetricsInfo, RestConnectionInfo, RestFutureInfo, @@ -256,32 +253,6 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) = return RestApiResponse.jsonResponse((result: false)) RestApiResponse.jsonResponse((result: true)) - router.api2(MethodGet, "/nimbus/v1/eth1/chain") do () -> RestApiResponse: - let res = mapIt(node.elManager.eth1ChainBlocks, it) - RestApiResponse.jsonResponse(res) - - router.api2(MethodGet, "/nimbus/v1/eth1/proposal_data") do ( - ) -> RestApiResponse: - let wallSlot = node.beaconClock.now.slotOrZero - let head = - block: - let res = node.getSyncedHead(wallSlot) - if res.isErr(): - return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError, - $res.error()) - let tres = res.get() - if not tres.executionValid: - return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError) - tres - let proposalState = assignClone(node.dag.headState) - node.dag.withUpdatedState( - proposalState[], - head.atSlot(wallSlot).toBlockSlotId().expect("not nil")): - return RestApiResponse.jsonResponse( - node.getBlockProposalEth1Data(updatedState)) - do: - return RestApiResponse.jsonError(Http400, PrunedStateError) - router.api2(MethodGet, "/nimbus/v1/debug/chronos/futures") do ( ) -> RestApiResponse: when defined(chronosFutureTracking): @@ -531,3 +502,47 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) = delay: uint64(delay.nanoseconds) ) RestApiResponse.jsonResponsePlain(response) + + router.metricsApi2( + MethodGet, + "/nimbus/v1/debug/beacon/states/{state_id}/historical_summaries", + {RestServerMetricsType.Status, Response}, + ) do(state_id: StateIdent) -> RestApiResponse: + let + sid = state_id.valueOr: + return RestApiResponse.jsonError(Http400, InvalidStateIdValueError, $error) + bslot = node.getBlockSlotId(sid).valueOr: + return RestApiResponse.jsonError(Http404, StateNotFoundError, $error) + contentType = preferredContentType(jsonMediaType, sszMediaType).valueOr: + return RestApiResponse.jsonError(Http406, ContentNotAcceptableError) + + node.withStateForBlockSlotId(bslot): + return withState(state): + when consensusFork >= ConsensusFork.Capella: + const historicalSummariesFork = + historicalSummariesForkAtConsensusFork(consensusFork) + .expect("HistoricalSummariesFork for Capella onwards") + + let response = getHistoricalSummariesResponse(historicalSummariesFork)( + historical_summaries: forkyState.data.historical_summaries, + proof: forkyState.data + .build_proof(historicalSummariesFork.historical_summaries_gindex) + .expect("Valid gindex"), + slot: bslot.slot, + ) + + if contentType == jsonMediaType: + RestApiResponse.jsonResponseFinalizedWVersion( + response, + node.getStateOptimistic(state), + node.dag.isFinalized(bslot.bid), + consensusFork, node.hasRestAllowedOrigin) + elif contentType == sszMediaType: + RestApiResponse.sszResponse( + response, consensusFork, node.hasRestAllowedOrigin) + else: + RestApiResponse.jsonError(Http500, InvalidAcceptError) + else: + RestApiResponse.jsonError(Http404, HistoricalSummariesUnavailable) + + RestApiResponse.jsonError(Http404, 
StateNotFoundError) diff --git a/beacon_chain/rpc/rest_node_api.nim b/beacon_chain/rpc/rest_node_api.nim index ceb4bbc7c0..a104f04cab 100644 --- a/beacon_chain/rpc/rest_node_api.nim +++ b/beacon_chain/rpc/rest_node_api.nim @@ -9,7 +9,7 @@ import stew/byteutils, chronicles, - eth/p2p/discoveryv5/enr, + eth/enr/enr, libp2p/[multiaddress, multicodec, peerstore], ../version, ../beacon_node, ../sync/sync_manager, ../networking/[eth2_network, peer_pool], @@ -269,14 +269,14 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) = node.syncManager.inProgress isOptimistic = if node.currentSlot().epoch() >= node.dag.cfg.BELLATRIX_FORK_EPOCH: - some(not node.dag.head.executionValid) + Opt.some(not node.dag.head.executionValid) else: - none[bool]() + Opt.none(bool) elOffline = if node.currentSlot().epoch() >= node.dag.cfg.CAPELLA_FORK_EPOCH: - some(not node.elManager.hasAnyWorkingConnection) + Opt.some(not node.elManager.hasAnyWorkingConnection) else: - none[bool]() # Added with ethereum/beacon-APIs v2.4.0 + Opt.none(bool) # Added with ethereum/beacon-APIs v2.4.0 info = RestSyncInfo( head_slot: headSlot, sync_distance: distance, diff --git a/beacon_chain/rpc/rest_validator_api.nim b/beacon_chain/rpc/rest_validator_api.nim index df34536e3a..f4843a6d0f 100644 --- a/beacon_chain/rpc/rest_validator_api.nim +++ b/beacon_chain/rpc/rest_validator_api.nim @@ -4,7 +4,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import std/[typetraits, sets, sequtils] import stew/base10, chronicles @@ -12,7 +12,7 @@ import ".."/[beacon_chain_db, beacon_node], ".."/networking/eth2_network, ".."/consensus_object_pools/[blockchain_dag, spec_cache, attestation_pool, sync_committee_msg_pool], - ".."/validators/beacon_validators, + ".."/validators/[beacon_validators, block_payloads], ".."/spec/[beaconstate, forks, network, state_transition_block], "."/[rest_utils, state_ttl_cache] @@ -213,7 +213,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = # If the requested validator index was not valid within this old # state, it's not possible that it will sit on the sync committee. # Since this API must omit results for validators that don't have - # duties, we can simply ingnore this requested index. + # duties, we can simply ignore this requested index. # (we won't bother to validate it against a more recent state). 
continue @@ -331,24 +331,6 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = RestApiResponse.jsonError( Http410, DeprecatedRemovalProduceBlindedBlockV1) - func getMaybeBlindedHeaders( - consensusFork: ConsensusFork, - isBlinded: bool, - executionValue: Opt[UInt256], - consensusValue: Opt[UInt256]): HttpTable = - var res = HttpTable.init() - res.add("eth-consensus-version", consensusFork.toString()) - if isBlinded: - res.add("eth-execution-payload-blinded", "true") - else: - res.add("eth-execution-payload-blinded", "false") - if executionValue.isSome(): - res.add( - "eth-execution-payload-value", toString(executionValue.get(), 10)) - if consensusValue.isSome(): - res.add("eth-consensus-block-value", toString(consensusValue.get(), 10)) - res - # https://ethereum.github.io/beacon-APIs/#/Validator/produceBlockV3 router.api(MethodGet, "/eth/v3/validator/blocks/{slot}") do ( slot: Slot, randao_reveal: Option[ValidatorSig], @@ -429,66 +411,74 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = return RestApiResponse.jsonError(Http400, InvalidRandaoRevealValue) withConsensusFork(node.dag.cfg.consensusForkAtEpoch(qslot.epoch)): - when consensusFork >= ConsensusFork.Deneb: + when consensusFork >= ConsensusFork.Gloas: + # https://github.com/ethereum/beacon-APIs/pull/552 notes that + # produceBlockV3 won't work past Fulu. + return RestApiResponse.jsonError( + Http500, "Unsupported fork for block production: " & $consensusFork) + elif consensusFork >= ConsensusFork.Electra: let message = (await node.makeMaybeBlindedBeaconBlockForHeadAndSlot( - consensusFork, qrandao, qgraffiti, qhead, qslot, + consensusFork, proposer, qrandao, qgraffiti, qhead, qslot, qboostFactor)).valueOr: # HTTP 400 error is only for incorrect parameters. 
return RestApiResponse.jsonError(Http500, error) - headers = consensusFork.getMaybeBlindedHeaders( - message.blck.isBlinded, - message.executionValue, - message.consensusValue) if contentType == sszMediaType: if message.blck.isBlinded: - RestApiResponse.sszResponse(message.blck.blindedData, headers) + RestApiResponse.sszResponse( + message.blck.blindedData, consensusFork, isBlinded = true, + message.executionValue, message.consensusValue, + node.hasRestAllowedOrigin) else: - RestApiResponse.sszResponse(message.blck.data, headers) + RestApiResponse.sszResponse( + message.blck.data, consensusFork, isBlinded = false, + message.executionValue, message.consensusValue, + node.hasRestAllowedOrigin) elif contentType == jsonMediaType: let forked = if message.blck.isBlinded: ForkedMaybeBlindedBeaconBlock.init( message.blck.blindedData, - message.executionValue, - message.consensusValue) + Opt.some message.executionValue, + Opt.some message.consensusValue) else: ForkedMaybeBlindedBeaconBlock.init( message.blck.data, - message.executionValue, - message.consensusValue) - RestApiResponse.jsonResponsePlain(forked, headers) + Opt.some message.executionValue, + Opt.some message.consensusValue) + RestApiResponse.jsonResponsePlain( + forked, consensusFork, message.blck.isBlinded, + message.executionValue, message.consensusValue, + node.hasRestAllowedOrigin) else: raiseAssert "preferredContentType() returns invalid content type" - else: - when consensusFork >= ConsensusFork.Bellatrix: - type PayloadType = consensusFork.ExecutionPayloadForSigning - else: - type PayloadType = bellatrix.ExecutionPayloadForSigning + elif consensusFork >= ConsensusFork.Bellatrix: let - message = (await PayloadType.makeBeaconBlockForHeadAndSlot( - node, qrandao, proposer, qgraffiti, qhead, qslot)).valueOr: + message = (await node.makeBeaconBlockForHeadAndSlot( + consensusFork, proposer, qrandao, qgraffiti, qhead, qslot)).valueOr: return RestApiResponse.jsonError(Http500, error) - executionValue = Opt.some(message.executionPayloadValue) - consensusValue = Opt.some(message.consensusBlockValue) - headers = consensusFork.getMaybeBlindedHeaders( - isBlinded = false, executionValue, consensusValue) - doAssert message.blck.kind == consensusFork - template forkyBlck: untyped = message.blck.forky(consensusFork) if contentType == sszMediaType: - RestApiResponse.sszResponse(forkyBlck, headers) + RestApiResponse.sszResponse( + message.blck, consensusFork, isBlinded = false, + message.executionValue, message.consensusValue, + node.hasRestAllowedOrigin) elif contentType == jsonMediaType: - let forked = - when consensusFork >= ConsensusFork.Bellatrix: - ForkedMaybeBlindedBeaconBlock.init( - forkyBlck, executionValue, consensusValue) - else: - ForkedMaybeBlindedBeaconBlock.init(forkyBlck) - RestApiResponse.jsonResponsePlain(forked, headers) + let forked = ForkedMaybeBlindedBeaconBlock.init( + message.blck, + Opt.some message.executionValue, + Opt.some message.consensusValue) + + RestApiResponse.jsonResponsePlain( + forked, consensusFork, isBlinded = false, + message.executionValue, message.consensusValue, + node.hasRestAllowedOrigin) else: raiseAssert "preferredContentType() returns invalid content type" + else: + return RestApiResponse.jsonError( + Http500, "Unsupported fork for block production: " & $consensusFork) # https://ethereum.github.io/beacon-APIs/#/Validator/produceAttestationData router.api2(MethodGet, "/eth/v1/validator/attestation_data") do ( @@ -644,8 +634,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: 
BeaconNode) = UnableToGetAggregatedAttestationError) ForkedAttestation.init(phase0_attestation, qfork) - let headers = HttpTable.init([("eth-consensus-version", qfork.toString())]) - RestApiResponse.jsonResponsePlain(forked, headers) + RestApiResponse.jsonResponsePlain(forked, qfork, node.hasRestAllowedOrigin) # https://ethereum.github.io/beacon-APIs/#/Validator/publishAggregateAndProofs router.api2(MethodPost, "/eth/v1/validator/aggregate_and_proofs") do ( @@ -689,7 +678,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = return RestApiResponse.jsonError(Http400, EmptyRequestBodyError) let - headerVersion = request.headers.getString("Eth-Consensus-Version") + headerVersion = request.headers.getString("eth-consensus-version") consensusVersion = ConsensusFork.init(headerVersion) if consensusVersion.isNone(): return RestApiResponse.jsonError(Http400, FailedToObtainConsensusForkError) @@ -707,7 +696,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = case consensusVersion.get(): of ConsensusFork.Phase0 .. ConsensusFork.Deneb: addDecodedProofs(phase0.SignedAggregateAndProof) - of ConsensusFork.Electra .. ConsensusFork.Fulu: + of ConsensusFork.Electra .. ConsensusFork.Gloas: addDecodedProofs(electra.SignedAggregateAndProof) await allFutures(proofs) @@ -1101,4 +1090,4 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = # able to use it when a feature flag is turned on, the intercepting # middleware can handle and swallow the request. I suggest a CL either # returns 501 Not Implemented [or] 400 Bad Request." - RestApiResponse.jsonError(Http501, AggregationSelectionNotImplemented) \ No newline at end of file + RestApiResponse.jsonError(Http501, AggregationSelectionNotImplemented) diff --git a/beacon_chain/spec/beacon_time.nim b/beacon_chain/spec/beacon_time.nim index 19a919f776..a35d820058 100644 --- a/beacon_chain/spec/beacon_time.nim +++ b/beacon_chain/spec/beacon_time.nim @@ -150,7 +150,7 @@ const # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/light-client/p2p-interface.md#sync-committee lightClientFinalityUpdateSlotOffset* = TimeDiff(nanoseconds: NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/light-client/p2p-interface.md#sync-committee + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/altair/light-client/p2p-interface.md#sync-committee lightClientOptimisticUpdateSlotOffset* = TimeDiff(nanoseconds: NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) @@ -164,19 +164,31 @@ func start_beacon_time*(s: Slot): BeaconTime = if s > maxSlot: FAR_FUTURE_BEACON_TIME else: BeaconTime(ns_since_genesis: int64(uint64(s) * NANOSECONDS_PER_SLOT)) -func block_deadline*(s: Slot): BeaconTime = +func block_deadline*(s: Slot, timeConfig: TimeConfig): BeaconTime = s.start_beacon_time -func attestation_deadline*(s: Slot): BeaconTime = + +func attestation_deadline*( + s: Slot, timeConfig: TimeConfig): BeaconTime = s.start_beacon_time + attestationSlotOffset -func aggregate_deadline*(s: Slot): BeaconTime = + +func aggregate_deadline*( + s: Slot, timeConfig: TimeConfig): BeaconTime = s.start_beacon_time + aggregateSlotOffset -func sync_committee_message_deadline*(s: Slot): BeaconTime = + +func sync_committee_message_deadline*( + s: Slot, timeConfig: TimeConfig): BeaconTime = s.start_beacon_time + syncCommitteeMessageSlotOffset -func sync_contribution_deadline*(s: Slot): BeaconTime = + +func 
sync_contribution_deadline*( + s: Slot, timeConfig: TimeConfig): BeaconTime = s.start_beacon_time + syncContributionSlotOffset -func light_client_finality_update_time*(s: Slot): BeaconTime = + +func light_client_finality_update_time*( + s: Slot, timeConfig: TimeConfig): BeaconTime = s.start_beacon_time + lightClientFinalityUpdateSlotOffset -func light_client_optimistic_update_time*(s: Slot): BeaconTime = + +func light_client_optimistic_update_time*( + s: Slot, timeConfig: TimeConfig): BeaconTime = s.start_beacon_time + lightClientOptimisticUpdateSlotOffset func slotOrZero*(time: BeaconTime): Slot = @@ -184,7 +196,7 @@ func slotOrZero*(time: BeaconTime): Slot = if exSlot.afterGenesis: exSlot.slot else: Slot(0) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#compute_epoch_at_slot +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#compute_epoch_at_slot func epoch*(slot: Slot): Epoch = # aka compute_epoch_at_slot ## Return the epoch number at ``slot``. if slot == FAR_FUTURE_SLOT: FAR_FUTURE_EPOCH diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index a4809f6753..afc815f11e 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import stew/assign2, @@ -13,7 +13,7 @@ import chronicles, "."/[eth2_merkleization, forks, signatures, validator] -from std/algorithm import fill, sort +from std/algorithm import fill, isSorted, sort from std/sequtils import anyIt, mapIt, toSeq export extras, forks, validator, chronicles @@ -48,16 +48,29 @@ func is_compounding_withdrawal_credential*( withdrawal_credentials: Eth2Digest): bool = withdrawal_credentials.data[0] == COMPOUNDING_WITHDRAWAL_PREFIX -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#new-has_compounding_withdrawal_credential -func has_compounding_withdrawal_credential*(validator: Validator): bool = - ## Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal - ## credential. - is_compounding_withdrawal_credential(validator.withdrawal_credentials) +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#new-is_builder_withdrawal_credential +func is_builder_withdrawal_credential*( + withdrawal_credentials: Eth2Digest): bool = + withdrawal_credentials.data[0] == BUILDER_WITHDRAWAL_PREFIX + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/electra/beacon-chain.md#new-has_compounding_withdrawal_credential +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#modified-has_compounding_withdrawal_credential +func has_compounding_withdrawal_credential*( + consensusFork: static ConsensusFork, validator: Validator): bool = + when consensusFork >= ConsensusFork.Gloas: + ## Check if ``validator`` has an 0x02 or 0x03 prefixed withdrawal credential. + is_compounding_withdrawal_credential(validator.withdrawal_credentials) or + is_builder_withdrawal_credential(validator.withdrawal_credentials) + else: + ## Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal + ## credential. 
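+    ##
+    ## (For reference, the other prefixes in use - 0x00 BLS and 0x01 eth1
+    ## address - are never considered compounding.)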
+ is_compounding_withdrawal_credential(validator.withdrawal_credentials) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/electra/beacon-chain.md#new-get_max_effective_balance -func get_max_effective_balance*(validator: Validator): Gwei = +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/electra/beacon-chain.md#new-get_max_effective_balance +func get_max_effective_balance*( + consensusFork: static ConsensusFork, validator: Validator): Gwei = ## Get max effective balance for ``validator``. - if has_compounding_withdrawal_credential(validator): + if has_compounding_withdrawal_credential(consensusFork, validator): MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei else: MIN_ACTIVATION_BALANCE.Gwei @@ -84,9 +97,9 @@ func get_validator_from_deposit*( effective_balance: effective_balance ) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#deposits +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/electra/beacon-chain.md#modified-get_validator_from_deposit func get_validator_from_deposit*( - _: electra.BeaconState | fulu.BeaconState, + state: electra.BeaconState | fulu.BeaconState | gloas.BeaconState, pubkey: ValidatorPubKey, withdrawal_credentials: Eth2Digest, amount: Gwei): Validator = var validator = Validator( @@ -100,7 +113,7 @@ func get_validator_from_deposit*( ) # [Modified in Electra:EIP7251] - let max_effective_balance = get_max_effective_balance(validator) + let max_effective_balance = get_max_effective_balance(type(state).kind, validator) validator.effective_balance = min( amount - amount mod static(Gwei(EFFECTIVE_BALANCE_INCREMENT)), max_effective_balance) @@ -130,7 +143,7 @@ func add_validator_to_registry*( ok() -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#compute_activation_exit_epoch +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#compute_activation_exit_epoch func compute_activation_exit_epoch*(epoch: Epoch): Epoch = ## Return the epoch during which validator activations and exits initiated in ## ``epoch`` take effect. 
@@ -146,7 +159,7 @@ func get_validator_churn_limit*( count_active_validators( state, state.get_current_epoch(), cache) div cfg.CHURN_LIMIT_QUOTIENT) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#new-get_validator_activation_churn_limit +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/deneb/beacon-chain.md#new-get_validator_activation_churn_limit func get_validator_activation_churn_limit*( cfg: RuntimeConfig, state: deneb.BeaconState | electra.BeaconState, cache: var StateCache): uint64 = @@ -185,14 +198,14 @@ func get_state_exit_queue_info*( ExitQueueInfo( exit_queue_epoch: exit_queue_epoch, exit_queue_churn: exit_queue_churn) -func get_state_exit_queue_info*(state: electra.BeaconState | - fulu.BeaconState): - ExitQueueInfo = +func get_state_exit_queue_info*( + state: electra.BeaconState | fulu.BeaconState | gloas.BeaconState): + ExitQueueInfo = # Electra initiate_validator_exit doesn't have same quadratic aspect given # StateCache balance caching default(ExitQueueInfo) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#initiate_validator_exit +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#initiate_validator_exit func initiate_validator_exit*( cfg: RuntimeConfig, state: var (phase0.BeaconState | altair.BeaconState | @@ -237,8 +250,8 @@ func get_total_active_balance*(state: ForkyBeaconState, cache: var StateCache): # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#new-get_balance_churn_limit func get_balance_churn_limit( - cfg: RuntimeConfig, state: electra.BeaconState | - fulu.BeaconState, + cfg: RuntimeConfig, + state: electra.BeaconState | fulu.BeaconState | gloas.BeaconState, cache: var StateCache): Gwei = ## Return the churn limit for the current epoch. let churn = max( @@ -249,7 +262,9 @@ func get_balance_churn_limit( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/beacon-chain.md#new-get_activation_exit_churn_limit func get_activation_exit_churn_limit*( - cfg: RuntimeConfig, state: electra.BeaconState | fulu.BeaconState, cache: var StateCache): + cfg: RuntimeConfig, + state: electra.BeaconState | fulu.BeaconState | gloas.BeaconState, + cache: var StateCache): Gwei = ## Return the churn limit for the current epoch dedicated to activations and ## exits. 
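The churn-limit helpers extended above follow a small piece of arithmetic: take the larger of a fixed floor and total-active-balance divided by the churn quotient, round down to a whole effective-balance increment, then cap the activation/exit share. A rough standalone sketch; the constants are the mainnet values as I understand them, stated here as assumptions:

```nim
type Gwei = uint64

const
  MinPerEpochChurnLimitElectra = 128_000_000_000'u64        # 128 ETH
  MaxPerEpochActivationExitChurnLimit = 256_000_000_000'u64 # 256 ETH
  ChurnLimitQuotient = 65_536'u64
  EffectiveBalanceIncrement = 1_000_000_000'u64              # 1 ETH

func balanceChurnLimit(totalActiveBalance: Gwei): Gwei =
  # max(floor, total/quotient), rounded down to a full increment
  let churn = max(MinPerEpochChurnLimitElectra,
                  totalActiveBalance div ChurnLimitQuotient)
  churn - churn mod EffectiveBalanceIncrement

func activationExitChurnLimit(totalActiveBalance: Gwei): Gwei =
  min(MaxPerEpochActivationExitChurnLimit,
      balanceChurnLimit(totalActiveBalance))

when isMainModule:
  # ~34M ETH of active stake puts the churn well above both floors
  let total = 34_000_000'u64 * 1_000_000_000'u64
  doAssert balanceChurnLimit(total) == 518_000_000_000'u64
  doAssert activationExitChurnLimit(total) == 256_000_000_000'u64
```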
@@ -259,14 +274,17 @@ func get_activation_exit_churn_limit*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-get_consolidation_churn_limit func get_consolidation_churn_limit*( - cfg: RuntimeConfig, state: electra.BeaconState | fulu.BeaconState, cache: var StateCache): + cfg: RuntimeConfig, + state: electra.BeaconState | fulu.BeaconState | gloas.BeaconState, + cache: var StateCache): Gwei = get_balance_churn_limit(cfg, state, cache) - get_activation_exit_churn_limit(cfg, state, cache) # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-compute_exit_epoch_and_update_churn func compute_exit_epoch_and_update_churn*( - cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), + cfg: RuntimeConfig, + state: var (electra.BeaconState | fulu.BeaconState | gloas.BeaconState), exit_balance: Gwei, cache: var StateCache): Epoch = var earliest_exit_epoch = max(state.earliest_exit_epoch, @@ -296,7 +314,8 @@ func compute_exit_epoch_and_update_churn*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-compute_consolidation_epoch_and_update_churn func compute_consolidation_epoch_and_update_churn*( - cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), + cfg: RuntimeConfig, + state: var (electra.BeaconState | fulu.BeaconState | gloas.BeaconState), consolidation_balance: Gwei, cache: var StateCache): Epoch = var earliest_consolidation_epoch = max(state.earliest_consolidation_epoch, compute_activation_exit_epoch(get_current_epoch(state))) @@ -326,7 +345,8 @@ func compute_consolidation_epoch_and_update_churn*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/electra/beacon-chain.md#modified-initiate_validator_exit func initiate_validator_exit*( - cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), + cfg: RuntimeConfig, + state: var (electra.BeaconState | fulu.BeaconState | gloas.BeaconState), index: ValidatorIndex, exit_queue_info: ExitQueueInfo, cache: var StateCache): Result[ExitQueueInfo, cstring] = ## Initiate the exit of the validator with index ``index``. 
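The exit-queue accounting in compute_exit_epoch_and_update_churn above can be restated over a stripped-down state record: if the exit balance does not fit in the remaining churn for the earliest available epoch, the exit is pushed out by however many extra epochs of churn it needs. A hedged sketch under that reading; field names and the miniature state type are illustrative, not the Nimbus types:

```nim
type
  Gwei = uint64
  Epoch = uint64
  MiniState = object
    earliestExitEpoch: Epoch
    exitBalanceToConsume: Gwei

func computeExitEpochAndUpdateChurn(
    s: var MiniState, exitBalance: Gwei,
    earliestPossibleExit: Epoch, perEpochChurn: Gwei): Epoch =
  var earliest = max(s.earliestExitEpoch, earliestPossibleExit)
  var toConsume =
    if s.earliestExitEpoch < earliest: perEpochChurn  # fresh epoch, full churn
    else: s.exitBalanceToConsume
  # Exit doesn't fit in the remaining churn: push it out by extra epochs.
  if exitBalance > toConsume:
    let
      balanceToProcess = exitBalance - toConsume
      additionalEpochs = (balanceToProcess - 1'u64) div perEpochChurn + 1'u64
    earliest += additionalEpochs
    toConsume += additionalEpochs * perEpochChurn
  s.exitBalanceToConsume = toConsume - exitBalance
  s.earliestExitEpoch = earliest
  earliest

when isMainModule:
  var s = MiniState(earliestExitEpoch: 0, exitBalanceToConsume: 0)
  # 256 ETH of churn per epoch; a 2048 ETH exit needs 7 additional epochs
  let e = computeExitEpochAndUpdateChurn(
    s, 2048'u64 * 1_000_000_000'u64,
    earliestPossibleExit = 10, perEpochChurn = 256'u64 * 1_000_000_000'u64)
  doAssert e == 17
```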
@@ -364,15 +384,15 @@ func get_slashing_penalty*( elif state is bellatrix.BeaconState or state is capella.BeaconState or state is deneb.BeaconState: validator_effective_balance div MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX - elif state is electra.BeaconState or - state is fulu.BeaconState: + elif state is electra.BeaconState or state is fulu.BeaconState or + state is gloas.BeaconState: validator_effective_balance div MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA else: {.fatal: "invalid BeaconState type".} -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#slash_validator +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#modified-slash_validator -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#modified-slash_validator +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/bellatrix/beacon-chain.md#modified-slash_validator func get_whistleblower_reward*( state: phase0.BeaconState | altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState, @@ -381,7 +401,7 @@ func get_whistleblower_reward*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#modified-slash_validator func get_whistleblower_reward*( - state: electra.BeaconState | fulu.BeaconState, + state: electra.BeaconState | fulu.BeaconState | gloas.BeaconState, validator_effective_balance: Gwei): Gwei = validator_effective_balance div WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA @@ -393,7 +413,8 @@ func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): G whistleblower_reward div PROPOSER_REWARD_QUOTIENT elif state is altair.BeaconState or state is bellatrix.BeaconState or state is capella.BeaconState or state is deneb.BeaconState or - state is electra.BeaconState or state is fulu.BeaconState: + state is electra.BeaconState or state is fulu.BeaconState or + state is gloas.BeaconState: whistleblower_reward * PROPOSER_WEIGHT div WEIGHT_DENOMINATOR else: {.fatal: "invalid BeaconState type".} @@ -461,7 +482,6 @@ func get_initial_beacon_block*(state: phase0.HashedBeaconState): phase0.TrustedSignedBeaconBlock( message: message, root: hash_tree_root(message)) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#initialize-state-for-pure-altair-testnets-and-test-vectors func get_initial_beacon_block*(state: altair.HashedBeaconState): altair.TrustedSignedBeaconBlock = # The genesis block is implicitly trusted @@ -473,7 +493,6 @@ func get_initial_beacon_block*(state: altair.HashedBeaconState): altair.TrustedSignedBeaconBlock( message: message, root: hash_tree_root(message)) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#testing func get_initial_beacon_block*(state: bellatrix.HashedBeaconState): bellatrix.TrustedSignedBeaconBlock = # The genesis block is implicitly trusted @@ -532,6 +551,17 @@ func get_initial_beacon_block*(state: fulu.HashedBeaconState): fulu.TrustedSignedBeaconBlock( message: message, root: hash_tree_root(message)) +func get_initial_beacon_block*(state: gloas.HashedBeaconState): + gloas.TrustedSignedBeaconBlock = + # The genesis block is implicitly trusted + let message = gloas.TrustedBeaconBlock( + slot: state.data.slot, + state_root: state.root) + # parent_root, randao_reveal, eth1_data, signature, 
and body automatically + # initialized to default values. + gloas.TrustedSignedBeaconBlock( + message: message, root: hash_tree_root(message)) + func get_initial_beacon_block*(state: ForkedHashedBeaconState): ForkedTrustedSignedBeaconBlock = withState(state): @@ -663,7 +693,7 @@ iterator get_attesting_indices_iter*(state: ForkyBeaconState, # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-get_attesting_indices iterator get_attesting_indices_iter*( - state: electra.BeaconState | fulu.BeaconState, + state: electra.BeaconState | fulu.BeaconState | gloas.BeaconState, data: AttestationData, aggregation_bits: ElectraCommitteeValidatorsBits, committee_bits: auto, @@ -719,11 +749,11 @@ func get_attesting_indices*(state: ForkedHashedBeaconState; cache: var StateCache): seq[ValidatorIndex] = # TODO when https://github.com/nim-lang/Nim/issues/18188 fixed, use an # iterator - var idxBuf: seq[ValidatorIndex] withState(state): when consensusFork >= ConsensusFork.Electra: - for vidx in forkyState.data.get_attesting_indices(data, aggregation_bits, committee_bits, cache): + for vidx in forkyState.data.get_attesting_indices( + data, aggregation_bits, committee_bits, cache): idxBuf.add vidx idxBuf @@ -787,7 +817,7 @@ proc is_valid_indexed_attestation( # Attestation validation # ------------------------------------------------------------------------------------------ -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#attestations +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#attestations # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id func check_attestation_slot_target*(data: AttestationData): Result[Slot, cstring] = @@ -835,6 +865,23 @@ func check_attestation_index( Result[CommitteeIndex, cstring] = check_attestation_index(data.index, committees_per_slot) + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#new-is_attestation_same_slot +func is_attestation_same_slot( + state: gloas.BeaconState, data: AttestationData): bool = + ## Checks if the attestation was for the block + ## proposed at the attestation slot. + if data.slot == 0: + return true + + let + is_matching_blockroot = + data.beacon_block_root == get_block_root_at_slot(state, data.slot) + is_current_blockroot = + data.beacon_block_root != get_block_root_at_slot(state, data.slot - 1) + + is_matching_blockroot and is_current_blockroot + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices func get_attestation_participation_flag_indices( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState, @@ -908,6 +955,52 @@ func get_attestation_participation_flag_indices( # TODO these duplicate some stuff in state_transition_epoch which uses TotalBalances # better to centralize around that if feasible +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#modified-get_attestation_participation_flag_indices +func get_attestation_participation_flag_indices( + state: gloas.BeaconState, data: AttestationData, + inclusion_delay: uint64): set[TimelyFlag] = + ## Return the flag indices that are satisfied by an attestation. 
+ let justified_checkpoint = + if data.target.epoch == get_current_epoch(state): + state.current_justified_checkpoint + else: + state.previous_justified_checkpoint + + # Matching roots + let + is_matching_source = data.source == justified_checkpoint + is_matching_target = is_matching_source and + data.target.root == get_block_root(state, data.target.epoch) + is_matching_blockroot = is_matching_target and + data.beacon_block_root == get_block_root_at_slot(state, data.slot) + + var is_matching_payload = false + if is_attestation_same_slot(state, data): + doAssert data.index == 0 + is_matching_payload = true + else: + let availability_bit = + if state.execution_payload_availability[ + data.slot mod SLOTS_PER_HISTORICAL_ROOT]: 1'u64 + else: 0'u64 + is_matching_payload = (data.index == availability_bit) + + let is_matching_head = is_matching_blockroot and is_matching_payload + + # Checked by check_attestation + doAssert is_matching_source + + var participation_flag_indices: set[TimelyFlag] + if is_matching_source and inclusion_delay <= + integer_squareroot(SLOTS_PER_EPOCH): + participation_flag_indices.incl(TIMELY_SOURCE_FLAG_INDEX) + if is_matching_target: + participation_flag_indices.incl(TIMELY_TARGET_FLAG_INDEX) + if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY: + participation_flag_indices.incl(TIMELY_HEAD_FLAG_INDEX) + + participation_flag_indices + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#get_total_active_balance func get_total_active_balance*(state: ForkyBeaconState, cache: var StateCache): Gwei = ## Return the combined effective balance of the active validators. @@ -935,10 +1028,11 @@ func get_base_reward_per_increment*( get_base_reward_per_increment_sqrt( integer_squareroot(distinctBase(total_active_balance))) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#get_base_reward +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/beacon-chain.md#get_base_reward func get_base_reward( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState, + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState, index: ValidatorIndex, base_reward_per_increment: Gwei): Gwei = ## Return the base reward for the validator defined by ``index`` with respect ## to the current ``state``. 
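The Gloas flag-index function above keeps the familiar timely-source/target/head rules, with head timeliness additionally requiring the payload-availability match (`is_matching_payload`). A hedged sketch of the flag assignment with the matching predicates precomputed and folded into `matchingHead`; SLOTS_PER_EPOCH = 32 and a minimum inclusion delay of 1 are assumed mainnet values:

```nim
type TimelyFlag = enum
  TimelySource, TimelyTarget, TimelyHead

const
  TimelySourceDeadline = 5'u64  # integer_squareroot(32)
  MinAttestationInclusionDelay = 1'u64

func participationFlags(
    matchingSource, matchingTarget, matchingHead: bool,
    inclusionDelay: uint64): set[TimelyFlag] =
  # check_attestation has already rejected non-matching sources upstream
  doAssert matchingSource
  if inclusionDelay <= TimelySourceDeadline:
    result.incl TimelySource
  if matchingTarget:
    result.incl TimelyTarget
  if matchingHead and inclusionDelay == MinAttestationInclusionDelay:
    result.incl TimelyHead

when isMainModule:
  doAssert participationFlags(true, true, true, 1) ==
    {TimelySource, TimelyTarget, TimelyHead}
  doAssert participationFlags(true, true, false, 5) ==
    {TimelySource, TimelyTarget}
```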
@@ -947,7 +1041,7 @@ func get_base_reward( EFFECTIVE_BALANCE_INCREMENT.Gwei increments * base_reward_per_increment -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#attestations proc check_attestation*( state: ForkyBeaconState, attestation: SomeAttestation, flags: UpdateFlags, cache: var StateCache, on_chain: static bool = true): Result[void, cstring] = @@ -982,11 +1076,13 @@ proc check_attestation*( ok() -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/electra/beacon-chain.md#modified-process_attestation +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/electra/beacon-chain.md#modified-process_attestation +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#modified-process_attestation proc check_attestation*( - state: electra.BeaconState | fulu.BeaconState, + state: electra.BeaconState | fulu.BeaconState | gloas.BeaconState, attestation: electra.Attestation | electra.TrustedAttestation, - flags: UpdateFlags, cache: var StateCache, on_chain: static bool): Result[void, cstring] = + flags: UpdateFlags, cache: var StateCache, on_chain: static bool): + Result[void, cstring] = ## Check that an attestation follows the rules of being included in the state ## at the current slot. When acting as a proposer, the same rules need to ## be followed! @@ -998,9 +1094,16 @@ proc check_attestation*( ? check_attestation_inclusion((typeof state).kind, slot, state.slot) - # [Modified in Electra:EIP7549] - if not (data.index == 0): - return err("Electra attestation data index not 0") + # [Modified in Gloas:EIP7732] + when state is gloas.BeaconState: + if not (data.index < 2): + return err("Gloas attestation data index must be less than 2") + if is_attestation_same_slot(state, data) and data.index != 0: + return err("Same-slot attestation must have index 0") + else: + # [Modified in Electra:EIP7549] + if not (data.index == 0): + return err("Electra attestation data index not 0") when on_chain: var committee_offset = 0 @@ -1054,7 +1157,7 @@ proc check_attestation*( proc check_bls_to_execution_change*( genesisFork: Fork, state: capella.BeaconState | deneb.BeaconState | electra.BeaconState | - fulu.BeaconState, + fulu.BeaconState | gloas.BeaconState, signed_address_change: SignedBLSToExecutionChange, flags: UpdateFlags): Result[void, cstring] = let address_change = signed_address_change.message @@ -1187,7 +1290,7 @@ proc process_attestation*( ok(proposer_reward) proc process_attestation*( - state: var ForkyBeaconState, + state: var (electra.BeaconState | fulu.BeaconState), attestation: electra.Attestation | electra.TrustedAttestation, flags: UpdateFlags, base_reward_per_increment: Gwei, cache: var StateCache): Result[Gwei, cstring] = @@ -1211,6 +1314,78 @@ proc process_attestation*( ok(proposer_reward) +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#modified-process_attestation +proc process_attestation*( + state: var gloas.BeaconState, + attestation: electra.Attestation | electra.TrustedAttestation, + flags: UpdateFlags, base_reward_per_increment: Gwei, + cache: var StateCache): Result[Gwei, cstring] = + ? 
check_attestation(state, attestation, flags, cache, true) + + let proposer_index = get_beacon_proposer_index(state, cache).valueOr: + return err("process_attestation: no beacon proposer index and probably no active validators") + + # [Modified in Gloas:EIP7732] + let + current_epoch_target = + attestation.data.target.epoch == get_current_epoch(state) + payment_index = + if current_epoch_target: + SLOTS_PER_EPOCH + (attestation.data.slot mod SLOTS_PER_EPOCH) + else: + attestation.data.slot mod SLOTS_PER_EPOCH + participation_flag_indices = get_attestation_participation_flag_indices( + state, attestation.data, state.slot - attestation.data.slot) + + var payment = state.builder_pending_payments.item(payment_index.int) + + template updateParticipationFlags(epoch_participation: untyped): Gwei = + var proposer_reward_numerator = 0.Gwei + for index in get_attesting_indices_iter( + state, attestation.data, attestation.aggregation_bits, + attestation.committee_bits, cache): + # [New in Gloas:EIP7732] + # For same-slot attestations, check if we're setting any new flags + # If we are, this validator hasn't contributed to this slot's quorum yet + var will_set_new_flag = false + for flag_index, weight in PARTICIPATION_FLAG_WEIGHTS: + if flag_index in participation_flag_indices and + not has_flag(epoch_participation.item(index), flag_index): + asList(epoch_participation)[index] = + add_flag(epoch_participation.item(index), flag_index) + proposer_reward_numerator += + get_base_reward( + state, index, base_reward_per_increment) * weight.uint64 + will_set_new_flag = true + + # [New in Gloas:EIP7732] + # Add weight for same-slot attestations when any new flag is set + # This ensures each validator contributes exactly once per slot + if will_set_new_flag and + is_attestation_same_slot(state, attestation.data): + payment.weight += state.validators.item(index).effective_balance + + let + proposer_reward_denominator = + (WEIGHT_DENOMINATOR.uint64 - PROPOSER_WEIGHT.uint64) * + WEIGHT_DENOMINATOR.uint64 div PROPOSER_WEIGHT.uint64 + proposer_reward = + proposer_reward_numerator div proposer_reward_denominator + increase_balance(state, proposer_index, proposer_reward) + proposer_reward + + doAssert base_reward_per_increment > 0.Gwei + let proposer_reward = + if current_epoch_target: + updateParticipationFlags(state.current_epoch_participation) + else: + updateParticipationFlags(state.previous_epoch_participation) + + # Update builder payment weight + state.builder_pending_payments[payment_index.int] = payment + + ok(proposer_reward) + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#get_next_sync_committee_indices func get_next_sync_committee_keys( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | @@ -1249,9 +1424,9 @@ func get_next_sync_committee_keys( i += 1'u64 res -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#modified-get_next_sync_committee_indices +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/electra/beacon-chain.md#modified-get_next_sync_committee_indices func get_next_sync_committee_keys( - state: electra.BeaconState | fulu.BeaconState): + state: electra.BeaconState | fulu.BeaconState | gloas.BeaconState): array[SYNC_COMMITTEE_SIZE, ValidatorPubKey] = ## Return the sequence of sync committee indices, with possible duplicates, ## for the next sync committee. 
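The proposer reward credited in the Gloas process_attestation above uses the standard Altair weights: the numerator accumulates base_reward times the weight of each newly set flag, and the denominator is (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR / PROPOSER_WEIGHT. A back-of-the-envelope sketch; the base-reward figure is invented for illustration:

```nim
type Gwei = uint64

const
  TimelySourceWeight = 14'u64
  TimelyTargetWeight = 26'u64
  TimelyHeadWeight = 14'u64
  WeightDenominator = 64'u64
  ProposerWeight = 8'u64

func proposerReward(baseReward: Gwei, newFlagsWeight: Gwei): Gwei =
  # numerator: base_reward * weight, summed over newly set flags
  let
    numerator = baseReward * newFlagsWeight
    denominator = (WeightDenominator - ProposerWeight) *
                  WeightDenominator div ProposerWeight   # = 448
  numerator div denominator

when isMainModule:
  # one attester newly earning all three flags, hypothetical 20_000 Gwei base reward
  let weight = TimelySourceWeight + TimelyTargetWeight + TimelyHeadWeight
  doAssert proposerReward(20_000, weight) == 2_410
```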
@@ -1297,12 +1472,18 @@ func has_eth1_withdrawal_credential*(validator: Validator): bool = ## Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential. validator.withdrawal_credentials.data[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#new-has_execution_withdrawal_credential -func has_execution_withdrawal_credential*(validator: Validator): bool = +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/electra/beacon-chain.md#new-has_execution_withdrawal_credential +func has_execution_withdrawal_credential*( + consensusFork: static ConsensusFork, validator: Validator): bool = ## Check if ``validator`` has a 0x01 or 0x02 prefixed withdrawal credential. - has_compounding_withdrawal_credential(validator) or + has_compounding_withdrawal_credential(consensusFork, validator) or has_eth1_withdrawal_credential(validator) +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#new-has_builder_withdrawal_credential +func has_builder_withdrawal_credential*(validator: Validator): bool = + ## Check if ``validator`` has an 0x03 prefixed "builder" withdrawal credential. + validator.withdrawal_credentials.data[0] == BUILDER_WITHDRAWAL_PREFIX + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/beacon-chain.md#is_fully_withdrawable_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-is_fully_withdrawable_validator func is_fully_withdrawable_validator( @@ -1311,26 +1492,26 @@ func is_fully_withdrawable_validator( ## Check if ``validator`` is fully withdrawable. when fork >= ConsensusFork.Electra: # [Modified in Electra:EIP7251] - has_execution_withdrawal_credential(validator) and + has_execution_withdrawal_credential(fork, validator) and validator.withdrawable_epoch <= epoch and balance > 0.Gwei else: has_eth1_withdrawal_credential(validator) and validator.withdrawable_epoch <= epoch and balance > 0.Gwei # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#is_partially_withdrawable_validator -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#modified-is_partially_withdrawable_validator +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/electra/beacon-chain.md#modified-is_partially_withdrawable_validator func is_partially_withdrawable_validator( fork: static ConsensusFork, validator: Validator, balance: Gwei): bool = ## Check if ``validator`` is partially withdrawable. 
when fork >= ConsensusFork.Electra: # [Modified in Electra:EIP7251] let - max_effective_balance = get_max_effective_balance(validator) + max_effective_balance = get_max_effective_balance(fork, validator) has_max_effective_balance = validator.effective_balance == max_effective_balance has_excess_balance = balance > max_effective_balance # [Modified in Electra:EIP7251] - has_execution_withdrawal_credential(validator) and + has_execution_withdrawal_credential(fork, validator) and has_max_effective_balance and has_excess_balance else: let @@ -1342,7 +1523,7 @@ func is_partially_withdrawable_validator( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#new-queue_excess_active_balance func queue_excess_active_balance( - state: var (electra.BeaconState | fulu.BeaconState), + state: var (electra.BeaconState | fulu.BeaconState | gloas.BeaconState), index: uint64) = let balance = state.balances.item(index) if balance > static(MIN_ACTIVATION_BALANCE.Gwei): @@ -1360,21 +1541,30 @@ func queue_excess_active_balance( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/beacon-chain.md#new-switch_to_compounding_validator func switch_to_compounding_validator*( - state: var (electra.BeaconState | fulu.BeaconState), + state: var (electra.BeaconState | fulu.BeaconState | gloas.BeaconState), index: ValidatorIndex) = let validator = addr state.validators.mitem(index) validator.withdrawal_credentials.data[0] = COMPOUNDING_WITHDRAWAL_PREFIX queue_excess_active_balance(state, index.uint64) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#new-get_pending_balance_to_withdraw +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/electra/beacon-chain.md#new-get_pending_balance_to_withdraw +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#modified-get_pending_balance_to_withdraw func get_pending_balance_to_withdraw*( - state: electra.BeaconState | fulu.BeaconState, + state: electra.BeaconState | fulu.BeaconState | gloas.BeaconState, validator_index: ValidatorIndex): Gwei = var pending_balance: Gwei for withdrawal in state.pending_partial_withdrawals: if withdrawal.validator_index == validator_index: pending_balance += withdrawal.amount + when type(state).kind >= ConsensusFork.Gloas: + for withdrawal in state.builder_pending_withdrawals: + if withdrawal.builder_index == validator_index: + pending_balance += withdrawal.amount + for payment in state.builder_pending_payments: + if payment.withdrawal.builder_index == validator_index: + pending_balance += payment.withdrawal.amount + pending_balance # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#effective-balances-updates @@ -1399,7 +1589,7 @@ template get_effective_balance_update*( MAX_EFFECTIVE_BALANCE.Gwei) else: let effective_balance_limit = - if has_compounding_withdrawal_credential(state.validators.item(vidx)): + if has_compounding_withdrawal_credential(consensusFork, state.validators.item(vidx)): MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei else: MIN_ACTIVATION_BALANCE.Gwei @@ -1416,20 +1606,20 @@ template get_updated_effective_balance*( balance # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#new-get_expected_withdrawals -template get_expected_withdrawals_aux*( - state: capella.BeaconState | deneb.BeaconState, epoch: Epoch, - fetch_balance: untyped): seq[Withdrawal] = +proc get_expected_withdrawals*( + state: 
capella.BeaconState | deneb.BeaconState): seq[Withdrawal] = let + epoch = get_current_epoch(state) num_validators = lenu64(state.validators) bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP) var withdrawal_index = state.next_withdrawal_index - validator_index {.inject.} = state.next_withdrawal_validator_index + validator_index = state.next_withdrawal_validator_index withdrawals: seq[Withdrawal] = @[] for _ in 0 ..< bound: let validator = state.validators[validator_index] - balance = fetch_balance + balance = state.balances[validator_index] if is_fully_withdrawable_validator( typeof(state).kind, validator, balance, epoch): var w = Withdrawal( @@ -1453,12 +1643,7 @@ template get_expected_withdrawals_aux*( validator_index = (validator_index + 1) mod num_validators withdrawals -func get_expected_withdrawals*( - state: capella.BeaconState | deneb.BeaconState): seq[Withdrawal] = - get_expected_withdrawals_aux(state, get_current_epoch(state)) do: - state.balances[validator_index] - -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/electra/beacon-chain.md#modified-get_expected_withdrawals +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/electra/beacon-chain.md#modified-get_expected_withdrawals # This partials count is used in exactly one place, while in general being able # to cleanly treat the results of get_expected_withdrawals as a seq[Withdrawal] # are valuable enough to make that the default version of this spec function. @@ -1466,7 +1651,7 @@ template get_expected_withdrawals_with_partial_count_aux*( state: electra.BeaconState | fulu.BeaconState, epoch: Epoch, fetch_balance: untyped): (seq[Withdrawal], uint64) = - doAssert epoch - get_current_epoch(state) in [0'u64, 1'u64] + doAssert epoch == get_current_epoch(state) var withdrawal_index = state.next_withdrawal_index @@ -1498,12 +1683,19 @@ template get_expected_withdrawals_with_partial_count_aux*( has_sufficient_effective_balance = effective_balance_at_slot >= static(MIN_ACTIVATION_BALANCE.Gwei) - has_excess_balance = fetch_balance > static(MIN_ACTIVATION_BALANCE.Gwei) + total_withdrawn = block: + var res: Gwei + for w in withdrawals: + if w.validator_index == validator_index: + res += w.amount + res + balance = fetch_balance - total_withdrawn + has_excess_balance = balance > static(MIN_ACTIVATION_BALANCE.Gwei) if validator.exit_epoch == FAR_FUTURE_EPOCH and has_sufficient_effective_balance and has_excess_balance: let withdrawable_balance = min( - fetch_balance - static(MIN_ACTIVATION_BALANCE.Gwei), + balance - static(MIN_ACTIVATION_BALANCE.Gwei), withdrawal.amount) var w = Withdrawal( index: withdrawal_index, @@ -1547,7 +1739,7 @@ template get_expected_withdrawals_with_partial_count_aux*( index: withdrawal_index, validator_index: validator_index, # [Modified in Electra:EIP7251] - amount: balance - get_max_effective_balance(validator)) + amount: balance - get_max_effective_balance(type(state).kind, validator)) w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1] withdrawals.add w withdrawal_index = WithdrawalIndex(withdrawal_index + 1) @@ -1558,19 +1750,162 @@ template get_expected_withdrawals_with_partial_count_aux*( (withdrawals, processed_partial_withdrawals_count) template get_expected_withdrawals_with_partial_count*( - state: electra.BeaconState | fulu.BeaconState): (seq[Withdrawal], uint64) = + state: electra.BeaconState | fulu.BeaconState): + (seq[Withdrawal], uint64) = get_expected_withdrawals_with_partial_count_aux( state, 
get_current_epoch(state)) do: state.balances.item(validator_index) -func get_expected_withdrawals*(state: electra.BeaconState | fulu.BeaconState): - seq[Withdrawal] = +func get_expected_withdrawals*( + state: electra.BeaconState | fulu.BeaconState): + seq[Withdrawal] = get_expected_withdrawals_with_partial_count(state)[0] -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_next_sync_committee +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#modified-get_expected_withdrawals +template get_expected_withdrawals_with_builder_count_aux( + state: gloas.BeaconState, + epoch: Epoch, fetch_balance: untyped): + (seq[Withdrawal], uint64, uint64) = + doAssert epoch == get_current_epoch(state) + + var + withdrawal_index = state.next_withdrawal_index + validator_index {.inject.} = state.next_withdrawal_validator_index + withdrawals = newSeqOfCap[Withdrawal](MAX_WITHDRAWALS_PER_PAYLOAD) + processed_partial_withdrawals_count = 0'u64 + processed_builder_withdrawals_count = 0'u64 + + # [New in Gloas:EIP7732] + # Sweep for builder payments + for withdrawal in state.builder_pending_withdrawals: + if withdrawal.withdrawable_epoch > epoch or + len(withdrawals) + 1 == MAX_WITHDRAWALS_PER_PAYLOAD: + break + + if is_builder_payment_withdrawable(state, withdrawal): + let + total_withdrawn = block: + var res: Gwei + for w in withdrawals: + if w.validator_index == withdrawal.builder_index: + res += w.amount + res + balance = fetch_balance - total_withdrawn + builder = state.validators.item(withdrawal.builder_index) + + let withdrawable_balance = + if builder.slashed: + min(balance, withdrawal.amount) + elif balance > static(MIN_ACTIVATION_BALANCE.Gwei): + min(balance - static(MIN_ACTIVATION_BALANCE.Gwei), withdrawal.amount) + else: + 0.Gwei + + var w = Withdrawal( + index: withdrawal_index, + validator_index: withdrawal.builder_index, + amount: withdrawable_balance) + w.address = withdrawal.fee_recipient + withdrawals.add w + withdrawal_index += 1 + + processed_builder_withdrawals_count += 1 + + # Sweep for pending partial withdrawals + let bound = min( + len(withdrawals) + MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP, + MAX_WITHDRAWALS_PER_PAYLOAD - 1) + + for withdrawal in state.pending_partial_withdrawals: + if withdrawal.withdrawable_epoch > epoch or len(withdrawals) == bound: + break + + let + validator = state.validators.item(withdrawal.validator_index) + validator_index {.inject.} = withdrawal.validator_index + has_sufficient_effective_balance = + validator.effective_balance >= static(MIN_ACTIVATION_BALANCE.Gwei) + total_withdrawn = block: + var res: Gwei + for w in withdrawals: + if w.validator_index == validator_index: + res += w.amount + res + balance = fetch_balance - total_withdrawn + has_excess_balance = balance > static(MIN_ACTIVATION_BALANCE.Gwei) + + if validator.exit_epoch == FAR_FUTURE_EPOCH and + has_sufficient_effective_balance and has_excess_balance: + let withdrawable_balance = min( + balance - static(MIN_ACTIVATION_BALANCE.Gwei), + withdrawal.amount) + var w = Withdrawal( + index: withdrawal_index, + validator_index: withdrawal.validator_index, + amount: withdrawable_balance) + w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1] + withdrawals.add w + withdrawal_index += 1 + + processed_partial_withdrawals_count += 1 + + # Sweep for remaining + let + sweep_bound = min(len(state.validators), + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP) + num_validators = lenu64(state.validators) + validator_index = 
state.next_withdrawal_validator_index + + for _ in 0 ..< sweep_bound: + let + validator = state.validators.item(validator_index) + total_withdrawn = block: + var subtot: Gwei + for withdrawal in withdrawals: + if withdrawal.validator_index == validator_index: + subtot += withdrawal.amount + subtot + balance = fetch_balance - total_withdrawn + + if is_fully_withdrawable_validator( + typeof(state).kind, validator, balance, epoch): + var w = Withdrawal( + index: withdrawal_index, + validator_index: validator_index, + amount: balance) + w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1] + withdrawals.add w + withdrawal_index = WithdrawalIndex(withdrawal_index + 1) + elif is_partially_withdrawable_validator( + typeof(state).kind, validator, balance): + var w = Withdrawal( + index: withdrawal_index, + validator_index: validator_index, + amount: balance - get_max_effective_balance(type(state).kind, validator)) + w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1] + withdrawals.add w + withdrawal_index = WithdrawalIndex(withdrawal_index + 1) + + if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD: + break + validator_index = (validator_index + 1) mod num_validators + + (withdrawals, + processed_builder_withdrawals_count, + processed_partial_withdrawals_count) + +template get_expected_withdrawals*( + state: gloas.BeaconState): + (seq[Withdrawal], uint64, uint64) = + get_expected_withdrawals_with_builder_count_aux( + state, get_current_epoch(state)) do: + state.balances.item(validator_index) + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/beacon-chain.md#get_next_sync_committee func get_next_sync_committee*( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState): + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState): SyncCommittee = ## Return the next sync committee, with possible pubkey duplicates. var res: SyncCommittee @@ -1696,11 +2031,12 @@ proc initialize_hashed_beacon_state_from_eth1*( # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#testing proc initialize_beacon_state_from_eth1*( cfg: RuntimeConfig, + consensusFork: static ConsensusFork, eth1_block_hash: Eth2Digest, eth1_timestamp: uint64, deposits: openArray[DepositData], execution_payload_header: ForkyExecutionPayloadHeader, - flags: UpdateFlags = {}): auto = + flags: UpdateFlags = {}): consensusFork.BeaconState = ## Get the genesis ``BeaconState``. 
## ## Before the beacon chain starts, validators will register in the Eth1 chain @@ -1717,7 +2053,6 @@ proc initialize_beacon_state_from_eth1*( # at that point :) doAssert deposits.lenu64 >= SLOTS_PER_EPOCH - const consensusFork = typeof(execution_payload_header).kind let forkVersion = cfg.forkVersion(consensusFork) fork = Fork( @@ -2123,26 +2458,7 @@ func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState): func upgrade_to_electra*( cfg: RuntimeConfig, pre: deneb.BeaconState, cache: var StateCache): ref electra.BeaconState = - let - epoch = get_current_epoch(pre) - latest_execution_payload_header = electra.ExecutionPayloadHeader( - parent_hash: pre.latest_execution_payload_header.parent_hash, - fee_recipient: pre.latest_execution_payload_header.fee_recipient, - state_root: pre.latest_execution_payload_header.state_root, - receipts_root: pre.latest_execution_payload_header.receipts_root, - logs_bloom: pre.latest_execution_payload_header.logs_bloom, - prev_randao: pre.latest_execution_payload_header.prev_randao, - block_number: pre.latest_execution_payload_header.block_number, - gas_limit: pre.latest_execution_payload_header.gas_limit, - gas_used: pre.latest_execution_payload_header.gas_used, - timestamp: pre.latest_execution_payload_header.timestamp, - extra_data: pre.latest_execution_payload_header.extra_data, - base_fee_per_gas: pre.latest_execution_payload_header.base_fee_per_gas, - block_hash: pre.latest_execution_payload_header.block_hash, - transactions_root: pre.latest_execution_payload_header.transactions_root, - withdrawals_root: pre.latest_execution_payload_header.withdrawals_root, - blob_gas_used: pre.latest_execution_payload_header.blob_gas_used, - excess_blob_gas: pre.latest_execution_payload_header.excess_blob_gas) + let epoch = get_current_epoch(pre) var earliest_exit_epoch = compute_activation_exit_epoch(get_current_epoch(pre)) @@ -2202,7 +2518,7 @@ func upgrade_to_electra*( next_sync_committee: pre.next_sync_committee, # Execution-layer - latest_execution_payload_header: latest_execution_payload_header, + latest_execution_payload_header: pre.latest_execution_payload_header, # Withdrawals next_withdrawal_index: pre.next_withdrawal_index, @@ -2257,34 +2573,16 @@ func upgrade_to_electra*( # Ensure early adopters of compounding credentials go through the activation # churn for index, validator in post.validators: - if has_compounding_withdrawal_credential(validator): + if has_compounding_withdrawal_credential(type(post[]).kind, validator): queue_excess_active_balance(post[], index.uint64) post +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/fulu/fork.md#upgrading-the-state func upgrade_to_fulu*( cfg: RuntimeConfig, pre: electra.BeaconState, cache: var StateCache): ref fulu.BeaconState = - let - epoch = get_current_epoch(pre) - latest_execution_payload_header = fulu.ExecutionPayloadHeader( - parent_hash: pre.latest_execution_payload_header.parent_hash, - fee_recipient: pre.latest_execution_payload_header.fee_recipient, - state_root: pre.latest_execution_payload_header.state_root, - receipts_root: pre.latest_execution_payload_header.receipts_root, - logs_bloom: pre.latest_execution_payload_header.logs_bloom, - prev_randao: pre.latest_execution_payload_header.prev_randao, - block_number: pre.latest_execution_payload_header.block_number, - gas_limit: pre.latest_execution_payload_header.gas_limit, - gas_used: pre.latest_execution_payload_header.gas_used, - timestamp: pre.latest_execution_payload_header.timestamp, - extra_data: 
pre.latest_execution_payload_header.extra_data, - base_fee_per_gas: pre.latest_execution_payload_header.base_fee_per_gas, - block_hash: pre.latest_execution_payload_header.block_hash, - transactions_root: pre.latest_execution_payload_header.transactions_root, - withdrawals_root: pre.latest_execution_payload_header.withdrawals_root, - blob_gas_used: pre.latest_execution_payload_header.blob_gas_used, - excess_blob_gas: pre.latest_execution_payload_header.excess_blob_gas) + let epoch = get_current_epoch(pre) let post = (ref fulu.BeaconState)( # Versioning @@ -2336,7 +2634,7 @@ func upgrade_to_fulu*( next_sync_committee: pre.next_sync_committee, # Execution-layer - latest_execution_payload_header: latest_execution_payload_header, + latest_execution_payload_header: pre.latest_execution_payload_header, # Withdrawals next_withdrawal_index: pre.next_withdrawal_index, @@ -2356,7 +2654,93 @@ func upgrade_to_fulu*( earliest_consolidation_epoch: pre.earliest_consolidation_epoch, pending_deposits: pre.pending_deposits, pending_partial_withdrawals: pre.pending_partial_withdrawals, - pending_consolidations: pre.pending_consolidations + pending_consolidations: pre.pending_consolidations, + proposer_lookahead: initialize_proposer_lookahead(pre, cache) + ) + + post + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/fork.md#upgrading-the-state +func upgrade_to_gloas*( + cfg: RuntimeConfig, pre: fulu.BeaconState): ref gloas.BeaconState = + let epoch = get_current_epoch(pre) + + const full_execution_payload_availability = block: + var res: BitArray[int(SLOTS_PER_HISTORICAL_ROOT)] + for i in 0 ..< res.len: + setBit(res, i) + res + + let post = (ref gloas.BeaconState)( + # Versioning + genesis_time: pre.genesis_time, + genesis_validators_root: pre.genesis_validators_root, + slot: pre.slot, + fork: Fork( + previous_version: pre.fork.current_version, + current_version: cfg.GLOAS_FORK_VERSION, + epoch: epoch + ), + + # History + latest_block_header: pre.latest_block_header, + block_roots: pre.block_roots, + state_roots: pre.state_roots, + historical_roots: pre.historical_roots, + + # Eth1 + eth1_data: pre.eth1_data, + eth1_data_votes: pre.eth1_data_votes, + eth1_deposit_index: pre.eth1_deposit_index, + + # Registry + validators: pre.validators, + balances: pre.balances, + + # Randomness + randao_mixes: pre.randao_mixes, + + # Slashings + slashings: pre.slashings, + + # Participation + previous_epoch_participation: pre.previous_epoch_participation, + current_epoch_participation: pre.current_epoch_participation, + + # Finality + justification_bits: pre.justification_bits, + previous_justified_checkpoint: pre.previous_justified_checkpoint, + current_justified_checkpoint: pre.current_justified_checkpoint, + finalized_checkpoint: pre.finalized_checkpoint, + + # Inactivity + inactivity_scores: pre.inactivity_scores, + + # Sync + current_sync_committee: pre.current_sync_committee, + next_sync_committee: pre.next_sync_committee, + + # [Modified in Gloas:EIP7732] + latest_execution_payload_bid: gloas.ExecutionPayloadBid(), + next_withdrawal_index: pre.next_withdrawal_index, + next_withdrawal_validator_index: pre.next_withdrawal_validator_index, + historical_summaries: pre.historical_summaries, + deposit_requests_start_index: pre.deposit_requests_start_index, + deposit_balance_to_consume: pre.deposit_balance_to_consume, + exit_balance_to_consume: pre.exit_balance_to_consume, + earliest_exit_epoch: pre.earliest_exit_epoch, + consolidation_balance_to_consume: pre.consolidation_balance_to_consume, + 
earliest_consolidation_epoch: pre.earliest_consolidation_epoch, + pending_deposits: pre.pending_deposits, + pending_partial_withdrawals: pre.pending_partial_withdrawals, + pending_consolidations: pre.pending_consolidations, + proposer_lookahead: pre.proposer_lookahead, + + # [New in Gloas:EIP7732] + # builder_pending_payments, builder_pending_withdrawals, and + # latest_withdrawals_root are default() values; omit. + execution_payload_availability: full_execution_payload_availability, + latest_block_hash: pre.latest_execution_payload_header.block_hash ) post @@ -2392,7 +2776,8 @@ func latest_block_root*(state: ForkedHashedBeaconState): Eth2Digest = func get_sync_committee_cache*( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState, + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState, cache: var StateCache): SyncCommitteeCache = let period = state.slot.sync_committee_period() @@ -2484,4 +2869,77 @@ func can_advance_slots( target_slot >= state.data.slot and block_root == state.latest_block_root func can_advance_slots*( state: ForkedHashedBeaconState, block_root: Eth2Digest, target_slot: Slot): bool = - withState(state): forkyState.can_advance_slots(block_root, target_slot) \ No newline at end of file + withState(state): forkyState.can_advance_slots(block_root, target_slot) + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#new-get_ptc +iterator get_ptc(state: gloas.BeaconState, slot: Slot, cache: var StateCache): + ValidatorIndex = + ## Get the payload timeliness committee for the given ``slot`` + let epoch = slot.epoch() + var buffer {.noinit.}: array[40, byte] + buffer[0..31] = get_seed(state, epoch, DOMAIN_PTC_ATTESTER).data + buffer[32..39] = uint_to_bytes(slot.uint64) + let seed = eth2digest(buffer) + + var indices = newSeqOfCap[ValidatorIndex](PTC_SIZE) + + # Concatenate all committees for this slot in order + let committees_per_slot = get_committee_count_per_slot(state, epoch, cache) + for committee_index in get_committee_indices(committees_per_slot): + let committee = get_beacon_committee(state, slot, committee_index, cache) + indices.add(committee) + + for candidate_index in compute_balance_weighted_selection( + state, indices, seed, size=PTC_SIZE, shuffle_indices=false): + yield candidate_index + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#new-get_indexed_payload_attestation +func get_indexed_payload_attestation*( + state: gloas.BeaconState, slot: Slot, + payload_attestation: PayloadAttestation, + cache: var StateCache): IndexedPayloadAttestation = + ## Return the indexed payload attestation corresponding to ``payload_attestation``. 
+ var + attesting_indices = newSeqOfCap[uint64](PTC_SIZE) + i = 0 + + for index in get_ptc(state, slot, cache): + if payload_attestation.aggregation_bits[i]: + attesting_indices.add(index.uint64) + inc i + + attesting_indices.sort() + + IndexedPayloadAttestation( + attesting_indices: List[uint64, Limit PTC_SIZE].init(attesting_indices), + data: payload_attestation.data, + signature: payload_attestation.signature + ) + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#new-is_valid_indexed_payload_attestation +proc is_valid_indexed_payload_attestation*( + state: gloas.BeaconState, + indexed_payload_attestation: IndexedPayloadAttestation): bool = + ## Check if ``indexed_payload_attestation`` is not empty, has sorted + ## and unique indices and has a valid aggregate signature. + + # Verify indices are non-empty and sorted + if indexed_payload_attestation.attesting_indices.len == 0: + return false + + if not toSeq(indexed_payload_attestation.attesting_indices).isSorted: + return false + + # Verify aggregate signature + let + pubkeys = mapIt( + indexed_payload_attestation.attesting_indices, + state.validators[it].pubkey) + domain = get_domain( + state.fork, DOMAIN_PTC_ATTESTER, + GENESIS_EPOCH, state.genesis_validators_root) + signing_root = compute_signing_root( + indexed_payload_attestation.data, domain) + + blsFastAggregateVerify( + pubkeys, signing_root.data, indexed_payload_attestation.signature) diff --git a/beacon_chain/spec/column_map.nim b/beacon_chain/spec/column_map.nim new file mode 100644 index 0000000000..277f6b0d93 --- /dev/null +++ b/beacon_chain/spec/column_map.nim @@ -0,0 +1,83 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
+ +{.push raises: [], gcsafe.} + +import + stew/bitops2, + ../spec/datatypes/fulu + +from std/sequtils import mapIt, toSeq +from std/strutils import join + +static: + doAssert(NUMBER_OF_COLUMNS == 2 * 64, "ColumnMap should be updated") + +type + ColumnMap* = object + data: array[2, uint64] + +func init*(t: typedesc[ColumnMap], columns: openArray[ColumnIndex]): ColumnMap = + var res: ColumnMap + for column in columns: + let + index = int(uint64(column) shr 6) + offset = int(uint64(column) and 0x3F'u64) + res.data[index].setBit(offset) + res + +func `and`*(a, b: ColumnMap): ColumnMap = + ColumnMap(data: [a.data[0] and b.data[0], a.data[1] and b.data[1]]) + +func `or`*(a, b: ColumnMap): ColumnMap = + ColumnMap(data: [a.data[0] or b.data[0], a.data[1] or b.data[1]]) + +func `xor`*(a, b: ColumnMap): ColumnMap = + ColumnMap(data: [a.data[0] xor b.data[0], a.data[1] xor b.data[1]]) + +func `not`*(a: ColumnMap): ColumnMap = + ColumnMap(data: [not(a.data[0]), not(a.data[1])]) + +func contains*(a: ColumnMap, column: ColumnIndex): bool = + if uint64(column) >= NUMBER_OF_COLUMNS: + return false + + let + index = int(uint64(column) shr 6) + offset = int(uint64(column) and 0x3F'u64) + a.data[index].getBit(offset) + +iterator items*(a: ColumnMap): ColumnIndex = + var + data0 = a.data[0] + data1 = a.data[1] + + while data0 != 0'u64: + let + # t = data0 and -data0 + t = data0 and (not(data0) + 1'u64) + res = firstOne(data0) + yield ColumnIndex(res - 1) + data0 = data0 xor t + + while data1 != 0'u64: + let + # t = data0 and -data0 + t = data1 and (not(data1) + 1'u64) + res = firstOne(data1) + yield ColumnIndex(64 + res - 1) + data1 = data1 xor t + +func len*(a: ColumnMap): int = + # Returns number of columns in map. + countOnes(a.data[0]) + countOnes(a.data[1]) + +func `$`*(a: ColumnMap): string = + "[" & a.items().toSeq().mapIt($it).join(", ") & "]" + +func shortLog*(a: ColumnMap): string = + $a diff --git a/beacon_chain/spec/crypto.nim b/beacon_chain/spec/crypto.nim index 09d6095217..39ed5fa39b 100644 --- a/beacon_chain/spec/crypto.nim +++ b/beacon_chain/spec/crypto.nim @@ -391,7 +391,7 @@ template toRaw*(x: ValidatorPubKey | SomeSig): auto = x.blob func toHex*(x: BlsCurveType): string = - toHex(toRaw(x)) + byteutils.toHex(toRaw(x)) func toHex*(x: CookedPubKey): string = toHex(x.toPubKey()) diff --git a/beacon_chain/spec/datatypes/altair.nim b/beacon_chain/spec/datatypes/altair.nim index 9862224bf2..c4f191041c 100644 --- a/beacon_chain/spec/datatypes/altair.nim +++ b/beacon_chain/spec/datatypes/altair.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # Types specific to altair (i.e. known to have changed across hard forks) - see # `base` for types and guidelines common across forks @@ -60,7 +60,7 @@ const # The first member (`genesis_time`) is 32, subsequent members +1 each. # If there are ever more than 32 members in `BeaconState`, indices change! # `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `52 * 2 + 1`. 
- # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/ssz/merkle-proofs.md # finalized_checkpoint > root FINALIZED_ROOT_GINDEX* = 105.GeneralizedIndex # current_sync_committee @@ -455,7 +455,7 @@ type SyncnetBits* = BitArray[SYNC_COMMITTEE_SUBNET_COUNT] - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#metadata + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/p2p-interface.md#metadata MetaData* = object seq_number*: uint64 attnets*: AttnetBits @@ -506,12 +506,6 @@ type root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - MsgTrustedSignedBeaconBlock* = object - message*: TrustedBeaconBlock - signature*: ValidatorSig - - root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - TrustedSignedBeaconBlock* = object message*: TrustedBeaconBlock signature*: TrustedSig @@ -521,7 +515,6 @@ type SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock SomeBeaconBlock* = BeaconBlock | @@ -719,13 +712,11 @@ func clear*(info: var EpochInfo) = template asSigned*( x: SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SignedBeaconBlock = isomorphicCast[SignedBeaconBlock](x) template asSigVerified*( x: SignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = isomorphicCast[SigVerifiedSignedBeaconBlock](x) @@ -733,16 +724,9 @@ template asSigVerified*( x: BeaconBlock | TrustedBeaconBlock): SigVerifiedBeaconBlock = isomorphicCast[SigVerifiedBeaconBlock](x) -template asMsgTrusted*( - x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock = - isomorphicCast[MsgTrustedSignedBeaconBlock](x) - template asTrusted*( x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock = + SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock = isomorphicCast[TrustedSignedBeaconBlock](x) template asTrusted*( diff --git a/beacon_chain/spec/datatypes/base.nim b/beacon_chain/spec/datatypes/base.nim index 8497cef6f6..3bdd85428f 100644 --- a/beacon_chain/spec/datatypes/base.nim +++ b/beacon_chain/spec/datatypes/base.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # This file contains data types that are part of the spec and thus subject to # serialization and spec updates. 
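The new spec/column_map.nim module earlier in this diff packs the 128 possible column indices into two 64-bit words and walks set bits with the lowest-set-bit trick. A rough standalone sketch of the same idea; it leans on std/bitops rather than stew/bitops2, so the helper names here are mine, not the module's:

```nim
import std/bitops

type MiniColumnMap = object
  data: array[2, uint64]

func incl(m: var MiniColumnMap, column: int) =
  m.data[column shr 6].setBit(column and 0x3f)

func contains(m: MiniColumnMap, column: int): bool =
  m.data[column shr 6].testBit(column and 0x3f)

iterator items(m: MiniColumnMap): int =
  for w in 0 ..< 2:
    var v = m.data[w]
    while v != 0'u64:
      let offset = countTrailingZeroBits(v)  # position of the lowest set bit
      yield w * 64 + offset
      v = v and (v - 1'u64)                  # clear that bit

when isMainModule:
  var m: MiniColumnMap
  for c in [3, 64, 127]: m.incl c
  doAssert 64 in m and 5 notin m
  var seen: seq[int]
  for c in m: seen.add c
  doAssert seen == @[3, 64, 127]
```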
@@ -68,13 +68,18 @@ import json_serialization, ssz_serialization/types as sszTypes, ../../version, - ".."/[beacon_time, crypto, digest, presets] + ../[beacon_time, crypto, digest, presets] + +from eth/common/eth_types_json_serialization import readValue, writeValue + +from std/algorithm import isSorted export tables, results, endians2, json_serialization, sszTypes, beacon_time, crypto, - digest, presets + digest, presets, eth, eth_types_json_serialization.readValue, + eth_types_json_serialization.writeValue -const SPEC_VERSION* = "1.5.0-beta.5" +const SPEC_VERSION* = "1.6.0-beta.0" ## Spec version we're aiming to be compatible with, right now const @@ -400,23 +405,6 @@ type beacon_proposer_indices*: Table[Slot, Opt[ValidatorIndex]] sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache] - # This matches the mutable state of the Solidity deposit contract - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/solidity_deposit_contract/deposit_contract.sol - DepositContractState* = object - branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest] - deposit_count*: array[32, byte] # Uint256 - - # https://eips.ethereum.org/EIPS/eip-4881 - FinalizedDepositTreeBranch* = - List[Eth2Digest, Limit DEPOSIT_CONTRACT_TREE_DEPTH] - - DepositTreeSnapshot* = object - finalized*: FinalizedDepositTreeBranch - deposit_root*: Eth2Digest - deposit_count*: uint64 - execution_block_hash*: Eth2Digest - execution_block_height*: uint64 - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#validator ValidatorStatus* = object # This is a validator without the expensive, immutable, append-only parts @@ -962,7 +950,7 @@ func checkForkConsistency*(cfg: RuntimeConfig) = [cfg.GENESIS_FORK_VERSION, cfg.ALTAIR_FORK_VERSION, cfg.BELLATRIX_FORK_VERSION, cfg.CAPELLA_FORK_VERSION, cfg.DENEB_FORK_VERSION, cfg.ELECTRA_FORK_VERSION, - cfg.FULU_FORK_VERSION] + cfg.FULU_FORK_VERSION, cfg.GLOAS_FORK_VERSION] for i in 0 ..< forkVersions.len: for j in i+1 ..< forkVersions.len: @@ -982,6 +970,9 @@ func checkForkConsistency*(cfg: RuntimeConfig) = assertForkEpochOrder(cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH) assertForkEpochOrder(cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH) assertForkEpochOrder(cfg.ELECTRA_FORK_EPOCH, cfg.FULU_FORK_EPOCH) + assertForkEpochOrder(cfg.FULU_FORK_EPOCH, cfg.GLOAS_FORK_EPOCH) + + doAssert isSorted(cfg.BLOB_SCHEDULE, cmp = cmpBlobParameters) func ofLen*[T, N](ListType: type List[T, N], n: int): ListType = if n < N: @@ -989,5 +980,5 @@ func ofLen*[T, N](ListType: type List[T, N], n: int): ListType = else: raise newException(SszSizeMismatchError) -# Specifically has the `Fulu` naming, for easy debugging. -template debugFuluComment* (s: string) = discard \ No newline at end of file +template debugFuluComment*(s: string) = discard +template debugGloasComment*(s: string) = discard diff --git a/beacon_chain/spec/datatypes/bellatrix.nim b/beacon_chain/spec/datatypes/bellatrix.nim index 50565a8ef2..cb804c9c30 100644 --- a/beacon_chain/spec/datatypes/bellatrix.nim +++ b/beacon_chain/spec/datatypes/bellatrix.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # Types specific to bellatrix (i.e. 
known to have changed across hard forks) - see # `base` for types and guidelines common across forks @@ -38,8 +38,7 @@ type # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#custom-types Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION] - ExecutionAddress* = object - data*: array[20, byte] # TODO there's a network_metadata type, but the import hierarchy's inconvenient + ExecutionAddress* = presets.Eth1Address BloomLogs* = object data*: array[BYTES_PER_LOGS_BLOOM, byte] @@ -331,12 +330,6 @@ type root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - MsgTrustedSignedBeaconBlock* = object - message*: TrustedBeaconBlock - signature*: ValidatorSig - - root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - TrustedSignedBeaconBlock* = object message*: TrustedBeaconBlock signature*: TrustedSig @@ -346,7 +339,6 @@ type SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock SomeBeaconBlock* = BeaconBlock | @@ -365,25 +357,6 @@ func fromHex*(T: typedesc[BloomLogs], s: string): T {. raises: [ValueError].} = hexToByteArray(s, result.data) -func fromHex*(T: typedesc[ExecutionAddress], s: string): T {. - raises: [ValueError].} = - hexToByteArray(s, result.data) - -proc writeValue*( - writer: var JsonWriter, value: ExecutionAddress) {.raises: [IOError].} = - writer.writeValue to0xHex(value.data) - -proc readValue*(reader: var JsonReader, value: var ExecutionAddress) {. - raises: [IOError, SerializationError].} = - try: - hexToByteArray(reader.readValue(string), value.data) - except ValueError: - raiseUnexpectedValue(reader, - "ExecutionAddress value should be a valid hex string") - -func `$`*(v: ExecutionAddress): string = - v.data.toHex() - func shortLog*(v: SomeBeaconBlock): auto = ( slot: shortLog(v.slot), @@ -430,15 +403,30 @@ func shortLog*(v: ExecutionPayload): auto = num_transactions: len(v.transactions) ) +func shortLog*(v: ExecutionPayloadHeader): auto = + ( + parent_hash: shortLog(v.parent_hash), + fee_recipient: $v.fee_recipient, + state_root: shortLog(v.state_root), + receipts_root: shortLog(v.receipts_root), + prev_randao: shortLog(v.prev_randao), + block_number: v.block_number, + gas_limit: v.gas_limit, + gas_used: v.gas_used, + timestamp: v.timestamp, + extra_data: toPrettyString(distinctBase v.extra_data), + base_fee_per_gas: $(v.base_fee_per_gas), + block_hash: shortLog(v.block_hash), + transactions_root: shortLog(v.transactions_root), + ) + template asSigned*( x: SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SignedBeaconBlock = isomorphicCast[SignedBeaconBlock](x) template asSigVerified*( x: SignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = isomorphicCast[SigVerifiedSignedBeaconBlock](x) @@ -446,14 +434,7 @@ template asSigVerified*( x: BeaconBlock | TrustedBeaconBlock): SigVerifiedBeaconBlock = isomorphicCast[SigVerifiedBeaconBlock](x) -template asMsgTrusted*( - x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock = - isomorphicCast[MsgTrustedSignedBeaconBlock](x) - template asTrusted*( x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock = + SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock = isomorphicCast[TrustedSignedBeaconBlock](x) diff --git a/beacon_chain/spec/datatypes/capella.nim 
b/beacon_chain/spec/datatypes/capella.nim index cdc4f720ff..7357575ce6 100644 --- a/beacon_chain/spec/datatypes/capella.nim +++ b/beacon_chain/spec/datatypes/capella.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # Types specific to capella (i.e. known to have changed across hard forks) - see # `base` for types and guidelines common across forks @@ -22,17 +22,17 @@ import json_serialization, ssz_serialization/[merkleization, proofs], ssz_serialization/types as sszTypes, - ../digest, + ../[digest, ssz_codec], "."/[base, phase0, altair, bellatrix] -export json_serialization, base +export json_serialization, base, ssz_codec const - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#constants + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/capella/light-client/sync-protocol.md#constants # This index is rooted in `BeaconBlockBody`. # The first member (`randao_reveal`) is 16, subsequent members +1 each. # If there are ever more than 16 members in `BeaconBlockBody`, indices change! - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/ssz/merkle-proofs.md + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/ssz/merkle-proofs.md # execution_payload EXECUTION_PAYLOAD_GINDEX* = 25.GeneralizedIndex @@ -47,7 +47,7 @@ type address*: ExecutionAddress amount*: Gwei - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#blstoexecutionchange + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/capella/beacon-chain.md#blstoexecutionchange BLSToExecutionChange* = object validator_index*: uint64 from_bls_pubkey*: ValidatorPubKey @@ -58,7 +58,7 @@ type message*: BLSToExecutionChange signature*: ValidatorSig - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/beacon-chain.md#historicalsummary + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/capella/beacon-chain.md#historicalsummary HistoricalSummary* = object # `HistoricalSummary` matches the components of the phase0 # `HistoricalBatch` making the two hash_tree_root-compatible. 
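A quick stand-alone check of the generalized-index comment above ("the first member (`randao_reveal`) is 16, subsequent members +1 each"), assuming the capella `BeaconBlockBody` layout of 11 members padded to 16 leaves in the SSZ tree, with `execution_payload` as the tenth member:

import std/math

func leafGindex(memberIndex, memberCount: int): int =
  # container leaves sit at gindex nextPowerOfTwo(memberCount) + memberIndex
  nextPowerOfTwo(memberCount) + memberIndex

doAssert leafGindex(0, 11) == 16   # randao_reveal, the first member
doAssert leafGindex(9, 11) == 25   # execution_payload -> EXECUTION_PAYLOAD_GINDEX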
@@ -133,7 +133,7 @@ type ## Execution payload header corresponding to `beacon.body_root` (from Capella onward) execution_branch*: ExecutionBranch - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientbootstrap + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/light-client/sync-protocol.md#lightclientbootstrap LightClientBootstrap* = object header*: LightClientHeader ## Header matching the requested beacon block root @@ -472,12 +472,6 @@ type root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - MsgTrustedSignedBeaconBlock* = object - message*: TrustedBeaconBlock - signature*: ValidatorSig - - root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - TrustedSignedBeaconBlock* = object message*: TrustedBeaconBlock signature*: TrustedSig @@ -487,7 +481,6 @@ type SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock SomeBeaconBlock* = BeaconBlock | @@ -636,6 +629,24 @@ func shortLog*(v: ExecutionPayload): auto = num_withdrawals: len(v.withdrawals) ) +func shortLog*(v: ExecutionPayloadHeader): auto = + ( + parent_hash: shortLog(v.parent_hash), + fee_recipient: $v.fee_recipient, + state_root: shortLog(v.state_root), + receipts_root: shortLog(v.receipts_root), + prev_randao: shortLog(v.prev_randao), + block_number: v.block_number, + gas_limit: v.gas_limit, + gas_used: v.gas_used, + timestamp: v.timestamp, + extra_data: toPrettyString(distinctBase v.extra_data), + base_fee_per_gas: $(v.base_fee_per_gas), + block_hash: shortLog(v.block_hash), + transactions_root: shortLog(v.transactions_root), + withdrawals_root: shortLog(v.withdrawals_root), + ) + func shortLog*(v: BLSToExecutionChange): auto = ( validator_index: v.validator_index, @@ -782,13 +793,11 @@ func upgrade_lc_store_to_capella*( template asSigned*( x: SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SignedBeaconBlock = isomorphicCast[SignedBeaconBlock](x) template asSigVerified*( x: SignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = isomorphicCast[SigVerifiedSignedBeaconBlock](x) @@ -796,14 +805,7 @@ template asSigVerified*( x: BeaconBlock | TrustedBeaconBlock): SigVerifiedBeaconBlock = isomorphicCast[SigVerifiedBeaconBlock](x) -template asMsgTrusted*( - x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock = - isomorphicCast[MsgTrustedSignedBeaconBlock](x) - template asTrusted*( x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock = + SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock = isomorphicCast[TrustedSignedBeaconBlock](x) diff --git a/beacon_chain/spec/datatypes/constants.nim b/beacon_chain/spec/datatypes/constants.nim index 442d67c6ff..6c47d43e10 100644 --- a/beacon_chain/spec/datatypes/constants.nim +++ b/beacon_chain/spec/datatypes/constants.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
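The `shortLog` overload added above returns a compact tuple view of the payload header (digests shortened, variable-length fields reduced to lengths or pretty strings) that the chronicles logger can render directly. A stand-alone sketch of the same pattern with an illustrative miniature type, not the real beacon_chain one:

type MiniHeader = object
  block_number: uint64
  gas_used: uint64
  extra_data: seq[byte]

func shortLog(v: MiniHeader): auto =
  # flat, cheap-to-render tuple instead of the full object
  (
    block_number: v.block_number,
    gas_used: v.gas_used,
    extra_data_len: v.extra_data.len
  )

when isMainModule:
  echo shortLog(MiniHeader(block_number: 123, gas_used: 45, extra_data: @[0xde'u8, 0xad]))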
-{.push raises: [].} +{.push raises: [], gcsafe.} import chronos/timer @@ -23,7 +23,7 @@ const # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#constants NODE_ID_BITS* = 256 - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#configuration + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/p2p-interface.md#configuration EPOCHS_PER_SUBNET_SUBSCRIPTION* = 256'u64 SUBNETS_PER_NODE* = 2'u64 ATTESTATION_SUBNET_COUNT*: uint64 = 64 @@ -58,11 +58,13 @@ const # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/capella/beacon-chain.md#domain-types DOMAIN_BLS_TO_EXECUTION_CHANGE* = DomainType([byte 0x0a, 0x00, 0x00, 0x00]) + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#domain-types + DOMAIN_PTC_ATTESTER* = DomainType([byte 0x0c, 0x00, 0x00, 0x00]) + DOMAIN_BEACON_BUILDER* = DomainType([byte 0x1b, 0x00, 0x00, 0x00]) + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/fork-choice.md#configuration PROPOSER_SCORE_BOOST*: uint64 = 40 - REORG_HEAD_WEIGHT_THRESHOLD*: uint64 = 20 REORG_PARENT_WEIGHT_THRESHOLD*: uint64 = 160 - REORG_MAX_EPOCHS_SINCE_FINALIZATION* = Epoch(2) # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#configuration MAX_REQUEST_BLOCKS* = 1024'u64 @@ -83,7 +85,14 @@ const # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#withdrawal-prefixes COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02'u8 + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#withdrawal-prefixes + BUILDER_WITHDRAWAL_PREFIX* = 0x03'u8 + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/electra/beacon-chain.md#execution-layer-triggered-requests DEPOSIT_REQUEST_TYPE* = 0x00'u8 WITHDRAWAL_REQUEST_TYPE* = 0x01'u8 CONSOLIDATION_REQUEST_TYPE* = 0x02'u8 + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#misc + BUILDER_PAYMENT_THRESHOLD_NUMERATOR* = 6'u64 + BUILDER_PAYMENT_THRESHOLD_DENOMINATOR* = 10'u64 diff --git a/beacon_chain/spec/datatypes/deneb.nim b/beacon_chain/spec/datatypes/deneb.nim index 3298c5d719..8e7b0fca47 100644 --- a/beacon_chain/spec/datatypes/deneb.nim +++ b/beacon_chain/spec/datatypes/deneb.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # Types specific to Deneb (i.e. 
known to have changed across hard forks) - see # `base` for types and guidelines common across forks @@ -39,21 +39,13 @@ type # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/deneb/beacon-chain.md#beaconblockbody KzgCommitments* = List[KzgCommitment, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK] - # TODO this apparently is suppposed to be SSZ-equivalent to Bytes32, but - # current spec doesn't ever SSZ-serialize it or hash_tree_root it - # TODO make `distinct` then add a REST serialization for it specifically, via - # basically to0xHex, then fix BlobSidecarInfoObject to use VersionedHash, not - # string, and rely on REST serialization, rather than serialize VersionedHash - # field manually - VersionedHash* = array[32, byte] - - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#custom-types + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/deneb/beacon-chain.md#custom-types BlobIndex* = uint64 # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/deneb/polynomial-commitments.md#custom-types Blob* = array[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB, byte] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/deneb/p2p-interface.md#blobsidecar + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/deneb/p2p-interface.md#blobsidecar BlobSidecar* = object index*: BlobIndex ## Index of blob in block @@ -107,7 +99,7 @@ type blob_gas_used*: uint64 # [New in Deneb] excess_blob_gas*: uint64 # [New in Deneb] - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/validator.md#blobsbundle + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/deneb/validator.md#blobsbundle KzgProofs* = List[KzgProof, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK] Blobs* = List[Blob, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK] BlobRoots* = List[Eth2Digest, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK] @@ -186,7 +178,7 @@ type signature_slot*: Slot ## Slot at which the aggregate signature was created (untrusted) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate LightClientFinalityUpdate* = object # Header attested to by the sync committee attested_header*: LightClientHeader @@ -325,7 +317,7 @@ type data*: BeaconState root*: Eth2Digest # hash_tree_root(data) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#beaconblock + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#beaconblock BeaconBlock* = object ## For each slot, a proposer is chosen from the validator pool to propose ## a new block. 
Once the block as been proposed, it is transmitted to @@ -492,12 +484,6 @@ type root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - MsgTrustedSignedBeaconBlock* = object - message*: TrustedBeaconBlock - signature*: ValidatorSig - - root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - TrustedSignedBeaconBlock* = object message*: TrustedBeaconBlock signature*: TrustedSig @@ -507,7 +493,6 @@ type SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock SomeBeaconBlock* = BeaconBlock | @@ -598,6 +583,26 @@ func shortLog*(v: ExecutionPayload): auto = excess_blob_gas: $(v.excess_blob_gas) ) +func shortLog*(v: ExecutionPayloadHeader): auto = + ( + parent_hash: shortLog(v.parent_hash), + fee_recipient: $v.fee_recipient, + state_root: shortLog(v.state_root), + receipts_root: shortLog(v.receipts_root), + prev_randao: shortLog(v.prev_randao), + block_number: v.block_number, + gas_limit: v.gas_limit, + gas_used: v.gas_used, + timestamp: v.timestamp, + extra_data: toPrettyString(distinctBase v.extra_data), + base_fee_per_gas: $(v.base_fee_per_gas), + block_hash: shortLog(v.block_hash), + transactions_root: shortLog(v.transactions_root), + withdrawals_root: shortLog(v.withdrawals_root), + blob_gas_used: $(v.blob_gas_used), + excess_blob_gas: $(v.excess_blob_gas) + ) + func shortLog*(x: seq[BlobIdentifier]): string = "[" & x.mapIt(shortLog(it.block_root) & "/" & $it.index).join(", ") & "]" @@ -805,13 +810,11 @@ func upgrade_lc_store_to_deneb*( template asSigned*( x: SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SignedBeaconBlock = isomorphicCast[SignedBeaconBlock](x) template asSigVerified*( x: SignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = isomorphicCast[SigVerifiedSignedBeaconBlock](x) @@ -819,14 +822,7 @@ template asSigVerified*( x: BeaconBlock | TrustedBeaconBlock): SigVerifiedBeaconBlock = isomorphicCast[SigVerifiedBeaconBlock](x) -template asMsgTrusted*( - x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock = - isomorphicCast[MsgTrustedSignedBeaconBlock](x) - template asTrusted*( x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock = + SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock = isomorphicCast[TrustedSignedBeaconBlock](x) diff --git a/beacon_chain/spec/datatypes/electra.nim b/beacon_chain/spec/datatypes/electra.nim index 0aff68a265..ae075fcf94 100644 --- a/beacon_chain/spec/datatypes/electra.nim +++ b/beacon_chain/spec/datatypes/electra.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # Types specific to Electra (i.e. 
known to have changed across hard forks) - see # `base` for types and guidelines common across forks @@ -22,7 +22,7 @@ import ssz_serialization/[merkleization, proofs], ssz_serialization/types as sszTypes, ../digest, - "."/[base, phase0] + "."/[base, phase0, bellatrix] from kzg4844 import KzgCommitment, KzgProof from stew/bitops2 import log2trunc @@ -30,11 +30,12 @@ from stew/byteutils import to0xHex from ./altair import EpochParticipationFlags, InactivityScores, SyncAggregate, SyncCommittee, TrustedSyncAggregate, num_active_participants -from ./bellatrix import BloomLogs, ExecutionAddress, Transaction from ./capella import ExecutionBranch, HistoricalSummary, SignedBLSToExecutionChange, SignedBLSToExecutionChangeList, Withdrawal, EXECUTION_PAYLOAD_GINDEX -from ./deneb import Blobs, BlobsBundle, KzgCommitments, KzgProofs +from ./deneb import + Blobs, BlobsBundle, ExecutionPayload, ExecutionPayloadHeader, KzgCommitments, + KzgProofs export json_serialization, base, kzg4844 @@ -89,65 +90,12 @@ type attestation_1*: TrustedIndexedAttestation # Modified in Electra:EIP7549] attestation_2*: TrustedIndexedAttestation # Modified in Electra:EIP7549] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#executionpayload - ExecutionPayload* = object - # Execution block header fields - parent_hash*: Eth2Digest - fee_recipient*: ExecutionAddress - ## 'beneficiary' in the yellow paper - state_root*: Eth2Digest - receipts_root*: Eth2Digest - logs_bloom*: BloomLogs - prev_randao*: Eth2Digest - ## 'difficulty' in the yellow paper - block_number*: uint64 - ## 'number' in the yellow paper - gas_limit*: uint64 - gas_used*: uint64 - timestamp*: uint64 - extra_data*: List[byte, MAX_EXTRA_DATA_BYTES] - base_fee_per_gas*: UInt256 - - # Extra payload fields - block_hash*: Eth2Digest # Hash of execution block - transactions*: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] - withdrawals*: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] - blob_gas_used*: uint64 - excess_blob_gas*: uint64 - ExecutionPayloadForSigning* = object - executionPayload*: ExecutionPayload + executionPayload*: deneb.ExecutionPayload blockValue*: Wei blobsBundle*: BlobsBundle executionRequests*: seq[seq[byte]] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/deneb/beacon-chain.md#executionpayloadheader - ExecutionPayloadHeader* = object - # Execution block header fields - parent_hash*: Eth2Digest - fee_recipient*: ExecutionAddress - state_root*: Eth2Digest - receipts_root*: Eth2Digest - logs_bloom*: BloomLogs - prev_randao*: Eth2Digest - block_number*: uint64 - gas_limit*: uint64 - gas_used*: uint64 - timestamp*: uint64 - extra_data*: List[byte, MAX_EXTRA_DATA_BYTES] - base_fee_per_gas*: UInt256 - - # Extra payload fields - block_hash*: Eth2Digest - ## Hash of execution block - transactions_root*: Eth2Digest - withdrawals_root*: Eth2Digest - blob_gas_used*: uint64 - excess_blob_gas*: uint64 - - ExecutePayload* = proc( - execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].} - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#pendingdeposit PendingDeposit* = object pubkey*: ValidatorPubKey @@ -156,7 +104,7 @@ type signature*: ValidatorSig slot*: Slot - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#pendingpartialwithdrawal + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/electra/beacon-chain.md#pendingpartialwithdrawal PendingPartialWithdrawal* = object 
validator_index*: uint64 amount*: Gwei @@ -168,7 +116,7 @@ type validator_pubkey*: ValidatorPubKey amount*: Gwei - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/electra/beacon-chain.md#pendingconsolidation + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/electra/beacon-chain.md#pendingconsolidation PendingConsolidation* = object source_index*: uint64 target_index*: uint64 @@ -192,7 +140,7 @@ type aggregate*: Attestation selection_proof*: ValidatorSig - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#signedaggregateandproof + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/validator.md#signedaggregateandproof SignedAggregateAndProof* = object message*: AggregateAndProof signature*: ValidatorSig @@ -211,11 +159,11 @@ type beacon*: BeaconBlockHeader ## Beacon block header - execution*: electra.ExecutionPayloadHeader + execution*: deneb.ExecutionPayloadHeader ## Execution payload header corresponding to `beacon.body_root` (from Capella onward) execution_branch*: capella.ExecutionBranch - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#lightclientbootstrap + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/light-client/sync-protocol.md#lightclientbootstrap LightClientBootstrap* = object header*: LightClientHeader ## Header matching the requested beacon block root @@ -372,8 +320,7 @@ type next_sync_committee*: SyncCommittee # Execution - latest_execution_payload_header*: ExecutionPayloadHeader - ## [Modified in Electra:EIP6110:EIP7002] + latest_execution_payload_header*: deneb.ExecutionPayloadHeader # Withdrawals next_withdrawal_index*: WithdrawalIndex @@ -489,7 +436,7 @@ type sync_aggregate*: SyncAggregate # Execution - execution_payload*: electra.ExecutionPayload # [Modified in Electra:EIP6110:EIP7002] + execution_payload*: deneb.ExecutionPayload bls_to_execution_changes*: SignedBLSToExecutionChangeList blob_kzg_commitments*: KzgCommitments execution_requests*: ExecutionRequests # [New in Electra] @@ -529,7 +476,7 @@ type sync_aggregate*: TrustedSyncAggregate # Execution - execution_payload*: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002] + execution_payload*: deneb.ExecutionPayload bls_to_execution_changes*: SignedBLSToExecutionChangeList blob_kzg_commitments*: KzgCommitments execution_requests*: ExecutionRequests # [New in Electra] @@ -557,12 +504,12 @@ type sync_aggregate*: TrustedSyncAggregate # Execution - execution_payload*: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002] + execution_payload*: deneb.ExecutionPayload bls_to_execution_changes*: SignedBLSToExecutionChangeList blob_kzg_commitments*: KzgCommitments execution_requests*: ExecutionRequests # [New in Electra] - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#signedbeaconblock + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#signedbeaconblock SignedBeaconBlock* = object message*: BeaconBlock signature*: ValidatorSig @@ -588,12 +535,6 @@ type root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - MsgTrustedSignedBeaconBlock* = object - message*: TrustedBeaconBlock - signature*: ValidatorSig - - root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - TrustedSignedBeaconBlock* = object message*: TrustedBeaconBlock signature*: TrustedSig @@ -626,7 +567,6 @@ type 
SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock SomeBeaconBlock* = BeaconBlock | @@ -699,56 +639,15 @@ func shortLog*(v: SomeSignedBeaconBlock): auto = signature: shortLog(v.signature) ) -func shortLog*(v: ExecutionPayload): auto = - ( - parent_hash: shortLog(v.parent_hash), - fee_recipient: $v.fee_recipient, - state_root: shortLog(v.state_root), - receipts_root: shortLog(v.receipts_root), - prev_randao: shortLog(v.prev_randao), - block_number: v.block_number, - gas_limit: v.gas_limit, - gas_used: v.gas_used, - timestamp: v.timestamp, - extra_data: toPrettyString(distinctBase v.extra_data), - base_fee_per_gas: $(v.base_fee_per_gas), - block_hash: shortLog(v.block_hash), - num_transactions: len(v.transactions), - num_withdrawals: len(v.withdrawals), - blob_gas_used: $(v.blob_gas_used), - excess_blob_gas: $(v.excess_blob_gas) - ) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/sync-protocol.md#modified-get_lc_execution_root func get_lc_execution_root*( header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest = let epoch = header.beacon.slot.epoch # [New in Electra] - if epoch >= cfg.ELECTRA_FORK_EPOCH: - return hash_tree_root(header.execution) - - # [Modified in Electra] + # TODO https://github.com/ethereum/consensus-specs/issues/4557 if epoch >= cfg.DENEB_FORK_EPOCH: - let execution_header = deneb.ExecutionPayloadHeader( - parent_hash: header.execution.parent_hash, - fee_recipient: header.execution.fee_recipient, - state_root: header.execution.state_root, - receipts_root: header.execution.receipts_root, - logs_bloom: header.execution.logs_bloom, - prev_randao: header.execution.prev_randao, - block_number: header.execution.block_number, - gas_limit: header.execution.gas_limit, - gas_used: header.execution.gas_used, - timestamp: header.execution.timestamp, - extra_data: header.execution.extra_data, - base_fee_per_gas: header.execution.base_fee_per_gas, - block_hash: header.execution.block_hash, - transactions_root: header.execution.transactions_root, - withdrawals_root: header.execution.withdrawals_root, - blob_gas_used: header.execution.blob_gas_used, - excess_blob_gas: header.execution.excess_blob_gas) - return hash_tree_root(execution_header) + return hash_tree_root(header.execution) if epoch >= cfg.CAPELLA_FORK_EPOCH: let execution_header = capella.ExecutionPayloadHeader( @@ -783,7 +682,7 @@ func is_valid_light_client_header*( if epoch < cfg.CAPELLA_FORK_EPOCH: return - header.execution == static(default(electra.ExecutionPayloadHeader)) and + header.execution == static(default(deneb.ExecutionPayloadHeader)) and header.execution_branch == static(default(ExecutionBranch)) is_valid_merkle_branch( @@ -814,24 +713,7 @@ func upgrade_lc_header_to_electra*( pre: deneb.LightClientHeader): LightClientHeader = LightClientHeader( beacon: pre.beacon, - execution: ExecutionPayloadHeader( - parent_hash: pre.execution.parent_hash, - fee_recipient: pre.execution.fee_recipient, - state_root: pre.execution.state_root, - receipts_root: pre.execution.receipts_root, - logs_bloom: pre.execution.logs_bloom, - prev_randao: pre.execution.prev_randao, - block_number: pre.execution.block_number, - gas_limit: pre.execution.gas_limit, - gas_used: pre.execution.gas_used, - timestamp: pre.execution.timestamp, - extra_data: pre.execution.extra_data, - base_fee_per_gas: pre.execution.base_fee_per_gas, - block_hash: pre.execution.block_hash, - transactions_root: 
pre.execution.transactions_root, - withdrawals_root: pre.execution.withdrawals_root, - blob_gas_used: pre.execution.blob_gas_used, - excess_blob_gas: pre.execution.excess_blob_gas), + execution: pre.execution, execution_branch: pre.execution_branch) # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data @@ -938,13 +820,11 @@ func upgrade_lc_store_to_electra*( template asSigned*( x: SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SignedBeaconBlock = isomorphicCast[SignedBeaconBlock](x) template asSigVerified*( x: SignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = isomorphicCast[SigVerifiedSignedBeaconBlock](x) @@ -952,16 +832,9 @@ template asSigVerified*( x: BeaconBlock | TrustedBeaconBlock): SigVerifiedBeaconBlock = isomorphicCast[SigVerifiedBeaconBlock](x) -template asMsgTrusted*( - x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock = - isomorphicCast[MsgTrustedSignedBeaconBlock](x) - template asTrusted*( x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock = + SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock = isomorphicCast[TrustedSignedBeaconBlock](x) from std/sets import toHashSet diff --git a/beacon_chain/spec/datatypes/fulu.nim b/beacon_chain/spec/datatypes/fulu.nim index 05a4019d0e..fccf0739d3 100644 --- a/beacon_chain/spec/datatypes/fulu.nim +++ b/beacon_chain/spec/datatypes/fulu.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # Types specific to Fulu (i.e. 
known to have changed across hard forks) - see # `base` for types and guidelines common across forks @@ -16,8 +16,8 @@ {.experimental: "notnil".} import - std/[sequtils, typetraits], - "."/[phase0, base, electra], + std/typetraits, + "."/[phase0, base, bellatrix, electra], chronicles, json_serialization, ssz_serialization/[merkleization, proofs], @@ -25,17 +25,17 @@ import ../digest, kzg4844/[kzg, kzg_abi] +from std/sequtils import mapIt from std/strutils import join -from stew/bitops2 import log2trunc from stew/byteutils import to0xHex from ./altair import EpochParticipationFlags, InactivityScores, SyncAggregate, SyncCommittee, TrustedSyncAggregate, SyncnetBits, num_active_participants -from ./bellatrix import BloomLogs, ExecutionAddress, Transaction from ./capella import ExecutionBranch, HistoricalSummary, SignedBLSToExecutionChange, SignedBLSToExecutionChangeList, Withdrawal, EXECUTION_PAYLOAD_GINDEX -from ./deneb import Blobs, BlobsBundle, KzgCommitments, KzgProofs +from ./deneb import + Blobs, ExecutionPayload, ExecutionPayloadHeader, KzgCommitments, KzgProofs export json_serialization, base @@ -49,20 +49,18 @@ const CELLS_PER_EXT_BLOB* = FIELD_ELEMENTS_PER_EXT_BLOB div FIELD_ELEMENTS_PER_CELL # The number of cells in an extended blob | - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#preset + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/fulu/p2p-interface.md#preset KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH* = 4 KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH_GINDEX* = 27 # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#data-size NUMBER_OF_COLUMNS* = 128 - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#configuration + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/fulu/p2p-interface.md#configuration DATA_COLUMN_SIDECAR_SUBNET_COUNT* = 128 - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#custody-setting - SAMPLES_PER_SLOT* = 8 + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/fulu/das-core.md#custody-setting CUSTODY_REQUIREMENT* = 4 - NUMBER_OF_CUSTODY_GROUPS* = 128 # Minimum number of custody groups an honest node with # validators attached custodies and serves samples from @@ -72,11 +70,8 @@ const # 2**5 * 10**9 (= 32,000,000,000) Gwei BALANCE_PER_ADDITIONAL_CUSTODY_GROUP*: uint64 = 32000000000'u64 - # Number of columns in the network per custody group - COLUMNS_PER_GROUP* = NUMBER_OF_COLUMNS div NUMBER_OF_CUSTODY_GROUPS - type - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/fulu/polynomial-commitments-sampling.md#custom-types + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/fulu/polynomial-commitments-sampling.md#custom-types BLSFieldElement* = KzgBytes32 G2Point* = array[96, byte] PolynomialCoeff* = List[BLSFieldElement, FIELD_ELEMENTS_PER_EXT_BLOB] @@ -92,119 +87,78 @@ type CellIndex* = uint64 CustodyIndex* = uint64 -type DataColumn* = List[KzgCell, Limit(MAX_BLOB_COMMITMENTS_PER_BLOCK)] + DataColumnIndices* = List[ColumnIndex, Limit(NUMBER_OF_COLUMNS)] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#datacolumnsidecar + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.5/specs/fulu/das-core.md#datacolumnsidecar DataColumnSidecar* = object index*: ColumnIndex # Index of column in extended matrix column*: DataColumn kzg_commitments*: 
KzgCommitments - kzg_proofs*: KzgProofs + kzg_proofs*: deneb.KzgProofs signed_block_header*: SignedBeaconBlockHeader kzg_commitments_inclusion_proof*: array[KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH, Eth2Digest] DataColumnSidecars* = seq[ref DataColumnSidecar] + DataColumnSidecarInfoObject* = object + block_root*: Eth2Digest + index*: ColumnIndex + slot*: Slot + kzg_commitments*: KzgCommitments + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#datacolumnidentifier DataColumnIdentifier* = object block_root*: Eth2Digest index*: ColumnIndex - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#matrixentry + # https://github.com/ethereum/consensus-specs/blob/b8b5fbb8d16f52d42a716fa93289062fe2124c7c/specs/fulu/p2p-interface.md#datacolumnsbyrootidentifier + DataColumnsByRootIdentifier* = object + block_root*: Eth2Digest + indices*: DataColumnIndices + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/fulu/das-core.md#matrixentry MatrixEntry* = object cell*: Cell kzg_proof*: KzgProof column_index*: ColumnIndex row_index*: RowIndex + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.5/specs/fulu/validator.md#blobsbundle + KzgProofs* = List[KzgProof, + Limit FIELD_ELEMENTS_PER_EXT_BLOB * MAX_BLOB_COMMITMENTS_PER_BLOCK] + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.5/specs/fulu/validator.md#blobsbundle + BlobsBundle* = object + commitments*: KzgCommitments + proofs*: fulu.KzgProofs + blobs*: Blobs + # Not in spec, defined in order to compute custody subnets CgcBits* = BitArray[DATA_COLUMN_SIDECAR_SUBNET_COUNT] CgcCount* = uint8 - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#metadata + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/fulu/p2p-interface.md#enr-structure MetaData* = object seq_number*: uint64 attnets*: AttnetBits syncnets*: SyncnetBits custody_group_count*: uint64 - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/deneb/beacon-chain.md#executionpayload - ExecutionPayload* = object - # Execution block header fields - parent_hash*: Eth2Digest - fee_recipient*: ExecutionAddress - ## 'beneficiary' in the yellow paper - state_root*: Eth2Digest - receipts_root*: Eth2Digest - logs_bloom*: BloomLogs - prev_randao*: Eth2Digest - ## 'difficulty' in the yellow paper - block_number*: uint64 - ## 'number' in the yellow paper - gas_limit*: uint64 - gas_used*: uint64 - timestamp*: uint64 - extra_data*: List[byte, MAX_EXTRA_DATA_BYTES] - base_fee_per_gas*: UInt256 - - # Extra payload fields - block_hash*: Eth2Digest # Hash of execution block - transactions*: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] - withdrawals*: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] - blob_gas_used*: uint64 - excess_blob_gas*: uint64 - ExecutionPayloadForSigning* = object - executionPayload*: ExecutionPayload + executionPayload*: deneb.ExecutionPayload blockValue*: Wei - blobsBundle*: BlobsBundle + blobsBundle*: fulu.BlobsBundle # [New in Fulu] executionRequests*: seq[seq[byte]] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/deneb/beacon-chain.md#executionpayloadheader - ExecutionPayloadHeader* = object - # Execution block header fields - parent_hash*: Eth2Digest - fee_recipient*: ExecutionAddress - state_root*: Eth2Digest - receipts_root*: Eth2Digest - logs_bloom*: BloomLogs - prev_randao*: Eth2Digest - block_number*: uint64 - gas_limit*: uint64 - 
gas_used*: uint64 - timestamp*: uint64 - extra_data*: List[byte, MAX_EXTRA_DATA_BYTES] - base_fee_per_gas*: UInt256 - - # Extra payload fields - block_hash*: Eth2Digest - ## Hash of execution block - transactions_root*: Eth2Digest - withdrawals_root*: Eth2Digest - blob_gas_used*: uint64 - excess_blob_gas*: uint64 - - ExecutePayload* = proc( - execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].} - - FinalityBranch* = - array[log2trunc(FINALIZED_ROOT_GINDEX_ELECTRA), Eth2Digest] - - CurrentSyncCommitteeBranch* = - array[log2trunc(CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA), Eth2Digest] - - NextSyncCommitteeBranch* = - array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA), Eth2Digest] - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader LightClientHeader* = object beacon*: BeaconBlockHeader ## Beacon block header - execution*: ExecutionPayloadHeader + execution*: deneb.ExecutionPayloadHeader ## Execution payload header corresponding to `beacon.body_root` (from Capella onward) execution_branch*: capella.ExecutionBranch @@ -215,7 +169,7 @@ type current_sync_committee*: SyncCommittee ## Current sync committee corresponding to `header.beacon.state_root` - current_sync_committee_branch*: CurrentSyncCommitteeBranch + current_sync_committee_branch*: electra.CurrentSyncCommitteeBranch # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#lightclientupdate LightClientUpdate* = object @@ -225,25 +179,25 @@ type next_sync_committee*: SyncCommittee ## Next sync committee corresponding to ## `attested_header.beacon.state_root` - next_sync_committee_branch*: NextSyncCommitteeBranch + next_sync_committee_branch*: electra.NextSyncCommitteeBranch # Finalized header corresponding to `attested_header.beacon.state_root` finalized_header*: LightClientHeader - finality_branch*: FinalityBranch + finality_branch*: electra.FinalityBranch sync_aggregate*: SyncAggregate ## Sync committee aggregate signature signature_slot*: Slot ## Slot at which the aggregate signature was created (untrusted) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate LightClientFinalityUpdate* = object # Header attested to by the sync committee attested_header*: LightClientHeader # Finalized header corresponding to `attested_header.beacon.state_root` finalized_header*: LightClientHeader - finality_branch*: FinalityBranch + finality_branch*: electra.FinalityBranch # Sync committee aggregate signature sync_aggregate*: SyncAggregate @@ -353,8 +307,7 @@ type next_sync_committee*: SyncCommittee # Execution - latest_execution_payload_header*: ExecutionPayloadHeader - ## [Modified in Electra:EIP6110:EIP7002] + latest_execution_payload_header*: deneb.ExecutionPayloadHeader # Withdrawals next_withdrawal_index*: WithdrawalIndex @@ -378,6 +331,11 @@ type HashList[PendingPartialWithdrawal, Limit PENDING_PARTIAL_WITHDRAWALS_LIMIT] pending_consolidations*: HashList[PendingConsolidation, Limit PENDING_CONSOLIDATIONS_LIMIT] + + # [New in Fulu:EIP7917] + proposer_lookahead*: + HashArray[Limit ((MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH), uint64] + ## [New in Electra:EIP7251] # TODO Careful, not nil analysis is broken / incomplete and the semantics will @@ -470,7 +428,7 @@ type sync_aggregate*: 
SyncAggregate # Execution - execution_payload*: fulu.ExecutionPayload # [Modified in Electra:EIP6110:EIP7002] + execution_payload*: deneb.ExecutionPayload bls_to_execution_changes*: SignedBLSToExecutionChangeList blob_kzg_commitments*: KzgCommitments execution_requests*: ExecutionRequests # [New in Electra] @@ -510,7 +468,7 @@ type sync_aggregate*: TrustedSyncAggregate # Execution - execution_payload*: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002] + execution_payload*: deneb.ExecutionPayload bls_to_execution_changes*: SignedBLSToExecutionChangeList blob_kzg_commitments*: KzgCommitments execution_requests*: ExecutionRequests # [New in Electra] @@ -538,7 +496,7 @@ type sync_aggregate*: TrustedSyncAggregate # Execution - execution_payload*: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002] + execution_payload*: deneb.ExecutionPayload bls_to_execution_changes*: SignedBLSToExecutionChangeList blob_kzg_commitments*: KzgCommitments execution_requests*: ExecutionRequests # [New in Electra] @@ -569,12 +527,6 @@ type root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - MsgTrustedSignedBeaconBlock* = object - message*: TrustedBeaconBlock - signature*: ValidatorSig - - root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - TrustedSignedBeaconBlock* = object message*: TrustedBeaconBlock signature*: TrustedSig @@ -584,7 +536,6 @@ type SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock SomeBeaconBlock* = BeaconBlock | @@ -597,7 +548,7 @@ type BlockContents* = object `block`*: BeaconBlock - kzg_proofs*: KzgProofs + kzg_proofs*: fulu.KzgProofs blobs*: Blobs func shortLog*(v: DataColumnSidecar): auto = @@ -614,6 +565,15 @@ func shortLog*(v: seq[DataColumnSidecar]): auto = func shortLog*(x: seq[DataColumnIdentifier]): string = "[" & x.mapIt(shortLog(it.block_root) & "/" & $it.index).join(", ") & "]" +func shortLog*(xs: seq[DataColumnsByRootIdentifier]): string = + ## Formats like: [abcd…/0,2,4, ef09…/1,3] + "[" & + xs.mapIt( + shortLog(it.block_root) & "/" & + it.indices.mapIt($it).join(",") + ).join(", ") & + "]" + func shortLog*(x: seq[ColumnIndex]): string = "<" & x.mapIt($it).join(", ") & ">" @@ -650,35 +610,13 @@ func shortLog*(v: SomeSignedBeaconBlock): auto = signature: shortLog(v.signature) ) -func shortLog*(v: ExecutionPayload): auto = - ( - parent_hash: shortLog(v.parent_hash), - fee_recipient: $v.fee_recipient, - state_root: shortLog(v.state_root), - receipts_root: shortLog(v.receipts_root), - prev_randao: shortLog(v.prev_randao), - block_number: v.block_number, - gas_limit: v.gas_limit, - gas_used: v.gas_used, - timestamp: v.timestamp, - extra_data: toPrettyString(distinctBase v.extra_data), - base_fee_per_gas: $(v.base_fee_per_gas), - block_hash: shortLog(v.block_hash), - num_transactions: len(v.transactions), - num_withdrawals: len(v.withdrawals), - blob_gas_used: $(v.blob_gas_used), - excess_blob_gas: $(v.excess_blob_gas) - ) - template asSigned*( x: SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SignedBeaconBlock = isomorphicCast[SignedBeaconBlock](x) template asSigVerified*( x: SignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = isomorphicCast[SigVerifiedSignedBeaconBlock](x) @@ -686,14 +624,7 @@ template asSigVerified*( x: BeaconBlock | TrustedBeaconBlock): SigVerifiedBeaconBlock = isomorphicCast[SigVerifiedBeaconBlock](x) -template asMsgTrusted*( - x: 
SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock = - isomorphicCast[MsgTrustedSignedBeaconBlock](x) - template asTrusted*( x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock = - isomorphicCast[TrustedSignedBeaconBlock](x) \ No newline at end of file + SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock = + isomorphicCast[TrustedSignedBeaconBlock](x) diff --git a/beacon_chain/spec/datatypes/gloas.nim b/beacon_chain/spec/datatypes/gloas.nim new file mode 100644 index 0000000000..4e98d0aca6 --- /dev/null +++ b/beacon_chain/spec/datatypes/gloas.nim @@ -0,0 +1,626 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [], gcsafe.} + +# Types specific to Fulu (i.e. known to have changed across hard forks) - see +# `base` for types and guidelines common across forks + +# TODO Careful, not nil analysis is broken / incomplete and the semantics will +# likely change in future versions of the language: +# https://github.com/nim-lang/RFCs/issues/250 +{.experimental: "notnil".} + +import + std/typetraits, + "."/[phase0, base, bellatrix, electra, fulu], + chronicles, + json_serialization, + ssz_serialization/[merkleization, proofs], + ssz_serialization/types as sszTypes, + ../digest, + kzg4844/[kzg, kzg_abi] + +from ./altair import + EpochParticipationFlags, InactivityScores, SyncAggregate, SyncCommittee, + TrustedSyncAggregate, SyncnetBits, num_active_participants +from ./capella import + ExecutionBranch, HistoricalSummary, SignedBLSToExecutionChange, + SignedBLSToExecutionChangeList, Withdrawal, EXECUTION_PAYLOAD_GINDEX +from ./deneb import + Blobs, ExecutionPayload, ExecutionPayloadHeader, KzgCommitments, KzgProofs + +export json_serialization, base + +type + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/fork-choice.md#custom-types + PayloadStatus* = uint8 + +const + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#state-list-lengths + BUILDER_PENDING_WITHDRAWALS_LIMIT*: uint64 = 1_048_576 + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/fork-choice.md#constants + PAYLOAD_TIMELY_THRESHOLD*: uint64 = PTC_SIZE div 2 + PAYLOAD_STATUS_PENDING* = PayloadStatus(0) + PAYLOAD_STATUS_EMPTY* = PayloadStatus(1) + PAYLOAD_STATUS_FULL* = PayloadStatus(2) + +type + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/p2p-interface.md#modified-datacolumnsidecar + DataColumnSidecar* = object + index*: ColumnIndex + column*: DataColumn + kzg_commitments*: KzgCommitments + kzg_proofs*: deneb.KzgProofs + beacon_block_root*: Eth2Digest + + ExecutionPayloadForSigning* = object + executionPayload*: deneb.ExecutionPayload + blockValue*: Wei + blobsBundle*: fulu.BlobsBundle # [New in Fulu] + executionRequests*: seq[seq[byte]] + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#executionpayloadbid + ExecutionPayloadBid* = object + # Execution block header fields + parent_block_hash*: Eth2Digest + parent_block_root*: Eth2Digest + 
block_hash*: Eth2Digest + fee_recipient*: ExecutionAddress + gas_limit*: uint64 + builder_index*: uint64 + slot*: Slot + value*: Gwei + blob_kzg_commitments_root*: Eth2Digest + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#signedexecutionpayloadbid + SignedExecutionPayloadBid* = object + message*: ExecutionPayloadBid + signature*: ValidatorSig + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#executionpayloadenvelope + ExecutionPayloadEnvelope* = object + payload*: deneb.ExecutionPayload + execution_requests*: ExecutionRequests + builder_index*: uint64 + beacon_block_root*: Eth2Digest + slot*: Slot + blob_kzg_commitments*: KzgCommitments + state_root*: Eth2Digest + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#signedexecutionpayloadenvelope + SignedExecutionPayloadEnvelope* = object + message*: ExecutionPayloadEnvelope + signature*: ValidatorSig + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#payloadattestationdata + PayloadAttestationData* = object + beacon_block_root*: Eth2Digest + slot*: Slot + payload_present*: bool + blob_data_available*: bool + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#payloadattestation + PayloadAttestation* = object + aggregation_bits*: BitArray[int PTC_SIZE] + data*: PayloadAttestationData + signature*: ValidatorSig + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#payloadattestationmessage + PayloadAttestationMessage* = object + validatorIndex*: uint64 + data*: PayloadAttestationData + signature*: ValidatorSig + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#indexedpayloadattestation + IndexedPayloadAttestation* = object + attesting_indices*: List[uint64, Limit PTC_SIZE] + data*: PayloadAttestationData + signature*: ValidatorSig + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#builderpendingwithdrawal + BuilderPendingWithdrawal* = object + fee_recipient*: ExecutionAddress + amount*: Gwei + builder_index*: uint64 + withdrawable_epoch*: Epoch + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#builderpendingpayment + BuilderPendingPayment* = object + weight*: Gwei + withdrawal*: BuilderPendingWithdrawal + + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader + LightClientHeader* = object + beacon*: BeaconBlockHeader + ## Beacon block header + execution*: deneb.ExecutionPayloadHeader + ## Execution payload header corresponding to `beacon.body_root` (from Capella onward) + execution_branch*: capella.ExecutionBranch + + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#lightclientbootstrap + LightClientBootstrap* = object + header*: LightClientHeader + ## Header matching the requested beacon block root + + current_sync_committee*: SyncCommittee + ## Current sync committee corresponding to `header.beacon.state_root` + current_sync_committee_branch*: electra.CurrentSyncCommitteeBranch + + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#lightclientupdate + LightClientUpdate* = object + attested_header*: LightClientHeader + ## Header 
attested to by the sync committee + + next_sync_committee*: SyncCommittee + ## Next sync committee corresponding to + ## `attested_header.beacon.state_root` + next_sync_committee_branch*: electra.NextSyncCommitteeBranch + + # Finalized header corresponding to `attested_header.beacon.state_root` + finalized_header*: LightClientHeader + finality_branch*: electra.FinalityBranch + + sync_aggregate*: SyncAggregate + ## Sync committee aggregate signature + signature_slot*: Slot + ## Slot at which the aggregate signature was created (untrusted) + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate + LightClientFinalityUpdate* = object + # Header attested to by the sync committee + attested_header*: LightClientHeader + + # Finalized header corresponding to `attested_header.beacon.state_root` + finalized_header*: LightClientHeader + finality_branch*: electra.FinalityBranch + + # Sync committee aggregate signature + sync_aggregate*: SyncAggregate + # Slot at which the aggregate signature was created (untrusted) + signature_slot*: Slot + + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate + LightClientOptimisticUpdate* = object + # Header attested to by the sync committee + attested_header*: LightClientHeader + + # Sync committee aggregate signature + sync_aggregate*: SyncAggregate + # Slot at which the aggregate signature was created (untrusted) + signature_slot*: Slot + + SomeLightClientUpdateWithSyncCommittee* = + LightClientUpdate + + SomeLightClientUpdateWithFinality* = + LightClientUpdate | + LightClientFinalityUpdate + + SomeLightClientUpdate* = + LightClientUpdate | + LightClientFinalityUpdate | + LightClientOptimisticUpdate + + SomeLightClientObject* = + LightClientBootstrap | + SomeLightClientUpdate + + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientstore + LightClientStore* = object + finalized_header*: LightClientHeader + ## Header that is finalized + + current_sync_committee*: SyncCommittee + ## Sync committees corresponding to the finalized header + next_sync_committee*: SyncCommittee + + best_valid_update*: Opt[LightClientUpdate] + ## Best available header to switch finalized head to + ## if we see nothing else + + optimistic_header*: LightClientHeader + ## Most recent available reasonably-safe header + + previous_max_active_participants*: uint64 + ## Max number of active participants in a sync committee + ## (used to compute safety threshold) + current_max_active_participants*: uint64 + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#beaconstate + BeaconState* = object + # Versioning + genesis_time*: uint64 + genesis_validators_root*: Eth2Digest + slot*: Slot + fork*: Fork + + # History + latest_block_header*: BeaconBlockHeader + ## `latest_block_header.state_root == ZERO_HASH` temporarily + + block_roots*: HashArray[Limit SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] + ## Needed to process attestations, older to newer + + state_roots*: HashArray[Limit SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] + historical_roots*: HashList[Eth2Digest, Limit HISTORICAL_ROOTS_LIMIT] + ## Frozen in Capella, replaced by historical_summaries + + # Eth1 + eth1_data*: Eth1Data + eth1_data_votes*: + HashList[Eth1Data, Limit(EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH)] + eth1_deposit_index*: uint64 + + # Registry + validators*: 
HashList[Validator, Limit VALIDATOR_REGISTRY_LIMIT] + balances*: HashList[Gwei, Limit VALIDATOR_REGISTRY_LIMIT] + + # Randomness + randao_mixes*: HashArray[Limit EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest] + + # Slashings + slashings*: HashArray[Limit EPOCHS_PER_SLASHINGS_VECTOR, Gwei] + ## Per-epoch sums of slashed effective balances + + # Participation + previous_epoch_participation*: EpochParticipationFlags + current_epoch_participation*: EpochParticipationFlags + + # Finality + justification_bits*: JustificationBits + ## Bit set for every recent justified epoch + + previous_justified_checkpoint*: Checkpoint + current_justified_checkpoint*: Checkpoint + finalized_checkpoint*: Checkpoint + + # Inactivity + inactivity_scores*: InactivityScores + + # Light client sync committees + current_sync_committee*: SyncCommittee + next_sync_committee*: SyncCommittee + + # Execution + latest_execution_payload_bid*: gloas.ExecutionPayloadBid + + # Withdrawals + next_withdrawal_index*: WithdrawalIndex + next_withdrawal_validator_index*: uint64 + + # Deep history valid from Capella onwards + historical_summaries*: + HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT] + + deposit_requests_start_index*: uint64 # [New in Electra:EIP6110] + deposit_balance_to_consume*: Gwei # [New in Electra:EIP7251] + exit_balance_to_consume*: Gwei # [New in Electra:EIP7251] + earliest_exit_epoch*: Epoch # [New in Electra:EIP7251] + consolidation_balance_to_consume*: Gwei # [New in Electra:EIP7251] + earliest_consolidation_epoch*: Epoch # [New in Electra:EIP7251] + pending_deposits*: HashList[PendingDeposit, Limit PENDING_DEPOSITS_LIMIT] + ## [New in Electra:EIP7251] + + # [New in Electra:EIP7251] + pending_partial_withdrawals*: + HashList[PendingPartialWithdrawal, Limit PENDING_PARTIAL_WITHDRAWALS_LIMIT] + pending_consolidations*: + HashList[PendingConsolidation, Limit PENDING_CONSOLIDATIONS_LIMIT] + + # [New in Fulu:EIP7917] + proposer_lookahead*: + HashArray[Limit ((MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH), uint64] + + # [New in Gloas:EIP7732] + execution_payload_availability*: BitArray[int(SLOTS_PER_HISTORICAL_ROOT)] + # [New in Gloas:EIP7732] + builder_pending_payments*: + HashArray[Limit 2 * SLOTS_PER_EPOCH, BuilderPendingPayment] + # [New in Gloas:EIP7732] + builder_pending_withdrawals*: + HashList[BuilderPendingWithdrawal, Limit BUILDER_PENDING_WITHDRAWALS_LIMIT] + # [New in Gloas:EIP7732] + latest_block_hash*: Eth2Digest + # [New in Gloas:EIP7732] + latest_withdrawals_root*: Eth2Digest + + # TODO Careful, not nil analysis is broken / incomplete and the semantics will + # likely change in future versions of the language: + # https://github.com/nim-lang/RFCs/issues/250 + BeaconStateRef* = ref BeaconState not nil + NilableBeaconStateRef* = ref BeaconState + + # TODO: There should be only a single generic HashedBeaconState definition + HashedBeaconState* = object + data*: BeaconState + root*: Eth2Digest # hash_tree_root(data) + + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#beaconblock + BeaconBlock* = object + ## For each slot, a proposer is chosen from the validator pool to propose + ## a new block. Once the block as been proposed, it is transmitted to + ## validators that will have a chance to vote on it through attestations. + ## Each block collects attestations, or votes, on past blocks, thus a chain + ## is formed. 
+ + slot*: Slot + proposer_index*: uint64 # `ValidatorIndex` after validation + + parent_root*: Eth2Digest + ## Root hash of the previous block + + state_root*: Eth2Digest + ## The state root, _after_ this block has been processed + + body*: BeaconBlockBody + + SigVerifiedBeaconBlock* = object + ## A BeaconBlock that contains verified signatures + ## but that has not been verified for state transition + + slot*: Slot + proposer_index*: uint64 # `ValidatorIndex` after validation + + parent_root*: Eth2Digest + ## Root hash of the previous block + + state_root*: Eth2Digest + ## The state root, _after_ this block has been processed + + body*: SigVerifiedBeaconBlockBody + + TrustedBeaconBlock* = object + ## When we receive blocks from outside sources, they are untrusted and go + ## through several layers of validation. Blocks that have gone through + ## validations can be trusted to be well-formed, with a correct signature, + ## having a parent and applying cleanly to the state that their parent + ## left them with. + ## + ## When loading such blocks from the database, to rewind states for example, + ## it is expensive to redo the validations (in particular, the signature + ## checks), thus `TrustedBlock` uses a `TrustedSig` type to mark that these + ## checks can be skipped. + ## + ## TODO this could probably be solved with some type trickery, but there + ## too many bugs in nim around generics handling, and we've used up + ## the trickery budget in the serialization library already. Until + ## then, the type must be manually kept compatible with its untrusted + ## cousin. + slot*: Slot + proposer_index*: uint64 # `ValidatorIndex` after validation + parent_root*: Eth2Digest + state_root*: Eth2Digest + body*: TrustedBeaconBlockBody + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#beaconblockbody + BeaconBlockBody* = object + randao_reveal*: ValidatorSig + eth1_data*: Eth1Data + ## Eth1 data vote + + graffiti*: GraffitiBytes + ## Arbitrary data + + # Operations + proposer_slashings*: List[ProposerSlashing, Limit MAX_PROPOSER_SLASHINGS] + attester_slashings*: + List[electra.AttesterSlashing, Limit MAX_ATTESTER_SLASHINGS_ELECTRA] + ## [Modified in Electra:EIP7549] + attestations*: List[electra.Attestation, Limit MAX_ATTESTATIONS_ELECTRA] + ## [Modified in Electra:EIP7549] + deposits*: List[Deposit, Limit MAX_DEPOSITS] + voluntary_exits*: List[SignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS] + + sync_aggregate*: SyncAggregate + + # Execution + bls_to_execution_changes*: SignedBLSToExecutionChangeList + + # [New in Gloas:EIP7732] + signed_execution_payload_bid*: SignedExecutionPayloadBid + # [New in Gloas:EIP7732] + payload_attestations*: + List[PayloadAttestation, Limit MAX_PAYLOAD_ATTESTATIONS] + + SigVerifiedBeaconBlockBody* = object + ## A BeaconBlock body with signatures verified + ## including: + ## - Randao reveal + ## - Attestations + ## - ProposerSlashing (SignedBeaconBlockHeader) + ## - AttesterSlashing (IndexedAttestation) + ## - SignedVoluntaryExits + ## - SyncAggregate + ## + ## However: + ## - ETH1Data (Deposits) can contain invalid BLS signatures + ## + ## The block state transition has NOT been verified + randao_reveal*: TrustedSig + eth1_data*: Eth1Data + ## Eth1 data vote + + graffiti*: GraffitiBytes + ## Arbitrary data + + # Operations + proposer_slashings*: + List[TrustedProposerSlashing, Limit MAX_PROPOSER_SLASHINGS] + attester_slashings*: + List[electra.TrustedAttesterSlashing, Limit MAX_ATTESTER_SLASHINGS_ELECTRA] + ## [Modified 
in Electra:EIP7549] + attestations*: List[electra.TrustedAttestation, Limit MAX_ATTESTATIONS_ELECTRA] + ## [Modified in Electra:EIP7549] + deposits*: List[Deposit, Limit MAX_DEPOSITS] + voluntary_exits*: List[TrustedSignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS] + + sync_aggregate*: TrustedSyncAggregate + + # Execution + bls_to_execution_changes*: SignedBLSToExecutionChangeList + + # [New in Gloas:EIP7732] + signed_execution_payload_bid*: SignedExecutionPayloadBid + # [New in Gloas:EIP7732] + payload_attestations*: + List[PayloadAttestation, Limit MAX_PAYLOAD_ATTESTATIONS] + + TrustedBeaconBlockBody* = object + ## A full verified block + randao_reveal*: TrustedSig + eth1_data*: Eth1Data + ## Eth1 data vote + + graffiti*: GraffitiBytes + ## Arbitrary data + + # Operations + proposer_slashings*: + List[TrustedProposerSlashing, Limit MAX_PROPOSER_SLASHINGS] + attester_slashings*: + List[electra.TrustedAttesterSlashing, Limit MAX_ATTESTER_SLASHINGS_ELECTRA] + ## [Modified in Electra:EIP7549] + attestations*: List[electra.TrustedAttestation, Limit MAX_ATTESTATIONS_ELECTRA] + ## [Modified in Electra:EIP7549] + deposits*: List[Deposit, Limit MAX_DEPOSITS] + voluntary_exits*: List[TrustedSignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS] + + sync_aggregate*: TrustedSyncAggregate + + # Execution + bls_to_execution_changes*: SignedBLSToExecutionChangeList + + # [New in Gloas:EIP7732] + signed_execution_payload_bid*: SignedExecutionPayloadBid + # [New in Gloas:EIP7732] + payload_attestations*: + List[PayloadAttestation, Limit MAX_PAYLOAD_ATTESTATIONS] + + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#signedbeaconblock + SignedBeaconBlock* = object + message*: BeaconBlock + signature*: ValidatorSig + + root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block + + SigVerifiedSignedBeaconBlock* = object + ## A SignedBeaconBlock with signatures verified + ## including: + ## - Block signature + ## - BeaconBlockBody + ## - Randao reveal + ## - Attestations + ## - ProposerSlashing (SignedBeaconBlockHeader) + ## - AttesterSlashing (IndexedAttestation) + ## - SignedVoluntaryExits + ## + ## - ETH1Data (Deposits) can contain invalid BLS signatures + ## + ## The block state transition has NOT been verified + message*: SigVerifiedBeaconBlock + signature*: TrustedSig + + root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block + + TrustedSignedBeaconBlock* = object + message*: TrustedBeaconBlock + signature*: TrustedSig + + root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block + + SomeSignedBeaconBlock* = + SignedBeaconBlock | + SigVerifiedSignedBeaconBlock | + TrustedSignedBeaconBlock + SomeBeaconBlock* = + BeaconBlock | + SigVerifiedBeaconBlock | + TrustedBeaconBlock + SomeBeaconBlockBody* = + BeaconBlockBody | + SigVerifiedBeaconBlockBody | + TrustedBeaconBlockBody + + BlockContents* = object + `block`*: gloas.BeaconBlock + kzg_proofs*: fulu.KzgProofs + blobs*: Blobs + +# TODO: There should be only a single generic HashedBeaconState definition +func initHashedBeaconState*(s: BeaconState): HashedBeaconState = + HashedBeaconState(data: s) + +func shortLog*(v: DataColumnSidecar): auto = + ( + index: v.index, + kzg_commitments: v.kzg_commitments.len, + kzg_proofs: v.kzg_proofs.len, + beacon_block_root: shortLog(v.beacon_block_root), + ) + +func shortLog*(v: SomeBeaconBlock): auto = + ( + slot: shortLog(v.slot), + proposer_index: v.proposer_index, + parent_root: shortLog(v.parent_root), + state_root: 
shortLog(v.state_root), + eth1data: v.body.eth1_data, + graffiti: $v.body.graffiti, + proposer_slashings_len: v.body.proposer_slashings.len(), + attester_slashings_len: v.body.attester_slashings.len(), + attestations_len: v.body.attestations.len(), + deposits_len: v.body.deposits.len(), + voluntary_exits_len: v.body.voluntary_exits.len(), + sync_committee_participants: v.body.sync_aggregate.num_active_participants, + block_number: 0'u64, + # TODO checksum hex? shortlog? + block_hash: "", + parent_hash: "", + fee_recipient: "", + bls_to_execution_changes_len: v.body.bls_to_execution_changes.len(), + blob_kzg_commitments_len: 0, + ) + +func shortLog*(v: SomeSignedBeaconBlock): auto = + ( + blck: shortLog(v.message), + signature: shortLog(v.signature) + ) + +func shortLog*(v: ExecutionPayloadBid): auto = + ( + parent_block_hash: shortLog(v.parent_block_hash), + parent_block_root: shortLog(v.parent_block_root), + block_hash: shortLog(v.block_hash), + fee_recipient: $v.fee_recipient, + gas_limit: v.gas_limit, + builder_index: v.builder_index, + slot: v.slot, + value: v.value, + blob_kzg_commitments_root: shortLog(v.blob_kzg_commitments_root), + ) + +template asSigned*( + x: SigVerifiedSignedBeaconBlock | + TrustedSignedBeaconBlock): SignedBeaconBlock = + isomorphicCast[SignedBeaconBlock](x) + +template asSigVerified*( + x: SignedBeaconBlock | + TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = + isomorphicCast[SigVerifiedSignedBeaconBlock](x) + +template asSigVerified*( + x: BeaconBlock | TrustedBeaconBlock): SigVerifiedBeaconBlock = + isomorphicCast[SigVerifiedBeaconBlock](x) + +template asTrusted*( + x: SignedBeaconBlock | + SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock = + isomorphicCast[TrustedSignedBeaconBlock](x) diff --git a/beacon_chain/spec/datatypes/phase0.nim b/beacon_chain/spec/datatypes/phase0.nim index a99186ab99..c7f059f4a3 100644 --- a/beacon_chain/spec/datatypes/phase0.nim +++ b/beacon_chain/spec/datatypes/phase0.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # Types specific to phase0 (i.e. 
known to have changed across hard forks) - see # `base` for types and guidelines common across forks @@ -247,12 +247,6 @@ type root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - MsgTrustedSignedBeaconBlock* = object - message*: TrustedBeaconBlock - signature*: ValidatorSig - - root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - TrustedSignedBeaconBlock* = object message*: TrustedBeaconBlock signature*: TrustedSig @@ -315,7 +309,6 @@ type SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock SomeBeaconBlock* = BeaconBlock | @@ -406,13 +399,11 @@ template asTrusted*(x: Attestation): TrustedAttestation = template asSigned*( x: SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SignedBeaconBlock = isomorphicCast[SignedBeaconBlock](x) template asSigVerified*( x: SignedBeaconBlock | - MsgTrustedSignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = isomorphicCast[SigVerifiedSignedBeaconBlock](x) @@ -420,16 +411,9 @@ template asSigVerified*( x: BeaconBlock | TrustedBeaconBlock): SigVerifiedBeaconBlock = isomorphicCast[SigVerifiedBeaconBlock](x) -template asMsgTrusted*( - x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock = - isomorphicCast[MsgTrustedSignedBeaconBlock](x) - template asTrusted*( x: SignedBeaconBlock | - SigVerifiedSignedBeaconBlock | - MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock = + SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock = isomorphicCast[TrustedSignedBeaconBlock](x) func init*( diff --git a/beacon_chain/spec/deposit_snapshots.nim b/beacon_chain/spec/deposit_snapshots.nim deleted file mode 100644 index e6c1d0e7d7..0000000000 --- a/beacon_chain/spec/deposit_snapshots.nim +++ /dev/null @@ -1,151 +0,0 @@ -# beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. 
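# A minimal usage sketch of the trust-level conversions that remain after the
# removal of `MsgTrustedSignedBeaconBlock` above (`asSigVerified`, `asTrusted`
# and `asSigned` from the phase0 hunk). Illustrative only: it assumes the
# repository root is on the compiler's search path, and `onFullyValidated` is
# a hypothetical helper used purely for demonstration.
import beacon_chain/spec/datatypes/phase0

proc onFullyValidated(signedBlock: phase0.SignedBeaconBlock) =
  # Once both the signature and the state transition have been checked, the
  # block can be reinterpreted at a higher trust level without re-validation:
  let trusted = signedBlock.asTrusted()
  # Converting back yields the plain wire-level view again; the cached root
  # travels along unchanged:
  doAssert trusted.asSigned().root == signedBlock.root

onFullyValidated(phase0.SignedBeaconBlock())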
- -{.push raises: [].} - -from stew/objects import isZeroMemory - -import ./eth2_merkleization -from ./datatypes/base import Eth1Data, DepositContractState -from ./digest import Eth2Digest - -export - depositCountBytes, depositCountU64 - -type - OldDepositContractSnapshot* = object - eth1Block*: Eth2Digest - depositContractState*: DepositContractState - - DepositContractSnapshot* = object - eth1Block*: Eth2Digest - depositContractState*: DepositContractState - blockHeight*: uint64 - -func toDepositContractSnapshot*( - d: OldDepositContractSnapshot, - blockHeight: uint64): DepositContractSnapshot = - DepositContractSnapshot( - eth1Block: d.eth1Block, - depositContractState: d.depositContractState, - blockHeight: blockHeight) - -func toOldDepositContractSnapshot*( - d: DepositContractSnapshot): OldDepositContractSnapshot = - OldDepositContractSnapshot( - eth1Block: d.eth1Block, - depositContractState: d.depositContractState) - -template getDepositCountU64*( - d: OldDepositContractSnapshot | DepositContractSnapshot): uint64 = - depositCountU64(d.depositContractState.deposit_count) - -func getDepositRoot*( - d: OldDepositContractSnapshot | DepositContractSnapshot): Eth2Digest = - var merk = DepositsMerkleizer.init(d.depositContractState) - let hash = merk.getFinalHash() - # TODO: mixInLength should accept unsigned int instead of int as - # this right now cuts in half the theoretical number of deposits. - return mixInLength(hash, int(merk.getChunkCount())) - -func isValid*(d: DepositContractSnapshot, wantedDepositRoot: Eth2Digest): bool = - ## `isValid` requires the snapshot to be self-consistent and - ## to point to a specific Ethereum block - not d.eth1Block.isZeroMemory and d.getDepositRoot() == wantedDepositRoot - -func matches*(snapshot: DepositContractSnapshot, eth1_data: Eth1Data): bool = - snapshot.getDepositCountU64() == eth1_data.deposit_count and - snapshot.getDepositRoot() == eth1_data.deposit_root - -# https://eips.ethereum.org/EIPS/eip-4881 -func getExpandedBranch( - finalized: FinalizedDepositTreeBranch, - deposit_count: uint64 -): Opt[array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest]] = - var - branch: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest] - idx = finalized.len - for i in 0 ..< DEPOSIT_CONTRACT_TREE_DEPTH: - if (deposit_count and (1'u64 shl i)) != 0: - dec idx - branch[i] = finalized[idx] - if idx != 0: - return Opt.none array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest] - Opt.some branch - -func init( - T: type DepositsMerkleizer, - finalized: FinalizedDepositTreeBranch, - deposit_root: Eth2Digest, - deposit_count: uint64): Opt[DepositsMerkleizer] = - let branch = ? getExpandedBranch(finalized, deposit_count) - var res = Opt.some DepositsMerkleizer.init(branch, deposit_count) - if res.get().getDepositsRoot() != deposit_root: - res.reset() - res - -func init*( - T: type DepositsMerkleizer, - snapshot: DepositTreeSnapshot): Opt[DepositsMerkleizer] = - DepositsMerkleizer.init( - snapshot.finalized, snapshot.deposit_root, snapshot.deposit_count) - -func init*( - T: type DepositContractSnapshot, - snapshot: DepositTreeSnapshot): Opt[DepositContractSnapshot] = - var res = Opt.some DepositContractSnapshot( - eth1Block: snapshot.execution_block_hash, - depositContractState: DepositContractState( - branch: ? 
getExpandedBranch(snapshot.finalized, snapshot.deposit_count), - deposit_count: depositCountBytes(snapshot.deposit_count)), - blockHeight: snapshot.execution_block_height) - if not res.get.isValid(snapshot.deposit_root): - res.reset() - res - -func getFinalizedBranch( - branch: openArray[Eth2Digest], - deposit_count: uint64): FinalizedDepositTreeBranch = - doAssert branch.len == DEPOSIT_CONTRACT_TREE_DEPTH - var - finalized: FinalizedDepositTreeBranch - i = branch.high - while i > 0: - dec i - if (deposit_count and (1'u64 shl i)) != 0: - doAssert finalized.add branch[i.int] - finalized - -func getFinalizedBranch( - merkleizer: DepositsMerkleizer): FinalizedDepositTreeBranch = - let chunks = merkleizer.getCombinedChunks() - doAssert chunks.len == DEPOSIT_CONTRACT_TREE_DEPTH + 1 - getFinalizedBranch( - chunks[0 ..< DEPOSIT_CONTRACT_TREE_DEPTH], - merkleizer.getChunkCount()) - -func getTreeSnapshot*( - merkleizer: var DepositsMerkleizer, - execution_block_hash: Eth2Digest, - execution_block_height: uint64): DepositTreeSnapshot = - DepositTreeSnapshot( - finalized: merkleizer.getFinalizedBranch(), - deposit_root: merkleizer.getDepositsRoot(), - deposit_count: merkleizer.getChunkCount(), - execution_block_hash: execution_block_hash, - execution_block_height: execution_block_height) - -func getTreeSnapshot*( - snapshot: DepositContractSnapshot): DepositTreeSnapshot = - let deposit_count = snapshot.getDepositCountU64() - DepositTreeSnapshot( - finalized: getFinalizedBranch( - snapshot.depositContractState.branch, deposit_count), - deposit_root: snapshot.getDepositRoot(), - deposit_count: deposit_count, - execution_block_hash: snapshot.eth1Block, - execution_block_height: snapshot.blockHeight) diff --git a/beacon_chain/spec/eth2_apis/eth2_rest_json_serialization.nim b/beacon_chain/spec/eth2_apis/eth2_rest_json_serialization.nim new file mode 100644 index 0000000000..e621cbe564 --- /dev/null +++ b/beacon_chain/spec/eth2_apis/eth2_rest_json_serialization.nim @@ -0,0 +1,1375 @@ +# beacon_chain +# Copyright (c) 2018-2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [], gcsafe.} + +import + std/[macros, strformat, strutils], + results, + stew/[assign2, base10, byteutils], + faststreams/textio, + json_serialization, + json_serialization/pkg/results, + ../../validators/slashing_protection_common, + ../../consensus_object_pools/block_pools_types, + ../[forks, keystore], + ./[rest_keymanager_types, rest_types] + +export + results, json_serialization, results, slashing_protection_common, block_pools_types, + forks, keystore, rest_keymanager_types, rest_types + +## The RestJson format implements JSON serialization in the way specified +## by the Beacon API: +## +## https://ethereum.github.io/beacon-APIs/ +## +## In this format, we must always set `allowUnknownFields = true` in the +## decode calls in order to conform the following spec: +## +## All JSON responses return the requested data under a data key in the top +## level of their response. Additional metadata may or may not be present +## in other keys at the top level of the response, dependent on the endpoint. 
+## The rules that require an increase in version number are as follows: +## +## - no field that is listed in an endpoint shall be removed without an increase +## in the version number +## +## - no field that is listed in an endpoint shall be altered in terms of format +## (e.g. from a string to an array) without an increase in the version number +## +## Note that it is possible for a field to be added to an endpoint's data or +## metadata without an increase in the version number. +## +## This also means that when new fields are introduced to the object definitions +## below, one must use the `Opt[T]` type so as not to trigger `requiresAllFields`. + +createJsonFlavor RestJson, + automaticObjectSerialization = false, + requireAllFields = true, + omitOptionalFields = true, + allowUnknownFields = true + +#!fmt: off +RestJson.useDefaultSerializationFor( + AttestationData, + BLSToExecutionChange, + BeaconBlockHeader, + BlobSidecar, + BlobSidecarInfoObject, + BuilderPendingPayment, + BuilderPendingWithdrawal, + Checkpoint, + ConsolidationRequest, + ContributionAndProof, + DataColumnSidecarInfoObject, + DataEnclosedObject, + DataMetaEnclosedObject, + DataOptimisticAndFinalizedObject, + DataOptimisticObject, + DataRootEnclosedObject, + DataVersionEnclosedObject, + DeleteKeystoresBody, + DeleteKeystoresResponse, + DeleteRemoteKeystoresResponse, + DenebSignedBlockContents, + Deposit, + DepositData, + DepositRequest, + DistributedKeystoreInfo, + ElectraSignedBlockContents, + EmptyBody, + Eth1Data, + EventBeaconBlockObject, + EventBeaconBlockGossipObject, + ExecutionPayloadEnvelope, + ExecutionRequests, + FinalizationInfoObject, + Fork, + FuluSignedBlockContents, + GetBlockAttestationsResponse, + GetBlockHeaderResponse, + GetBlockHeadersResponse, + GetDistributedKeystoresResponse, + GetEpochCommitteesResponse, + GetEpochSyncCommitteesResponse, + GetForkChoiceResponse, + GetForkScheduleResponse, + GetGenesisResponse, + GetHistoricalSummariesV1Response, + GetHistoricalSummariesV1ResponseElectra, + GetKeystoresResponse, + GetNextWithdrawalsResponse, + GetPoolAttesterSlashingsResponse, + GetPoolProposerSlashingsResponse, + GetPoolVoluntaryExitsResponse, + GetRemoteKeystoresResponse, + GetSpecVCResponse, + GetStateFinalityCheckpointsResponse, + GetStateForkResponse, + GetStateRandaoResponse, + GetStateRootResponse, + GetStateValidatorBalancesResponse, + GetStateValidatorResponse, + GetStateValidatorsResponse, + GetValidatorGasLimitResponse, + GloasSignedBlockContents, + HeadChangeInfoObject, + HistoricalSummary, + ImportDistributedKeystoresBody, + ImportRemoteKeystoresBody, + IndexedPayloadAttestation, + KeymanagerGenericError, + KeystoreInfo, + ListFeeRecipientResponse, + ListGasLimitResponse, + GetGraffitiResponse, + GraffitiResponse, + PayloadAttestation, + PayloadAttestationData, + PayloadAttestationMessage, + PendingAttestation, + PendingConsolidation, + PendingDeposit, + PendingPartialWithdrawal, + PostKeystoresResponse, + PrepareBeaconProposer, + ProposerSlashing, + RemoteKeystoreInfo, + RemoteSignerInfo, + RequestItemStatus, + RestActivityItem, + RestAttesterDuty, + RestBeaconCommitteeSelection, + RestBeaconStatesCommittees, + RestBeaconStatesFinalityCheckpoints, + RestBlockHeader, + RestBlockHeaderInfo, + RestChainHeadV2, + RestCommitteeSubscription, + RestContributionAndProof, + RestEpochRandao, + RestEpochSyncCommittee, + RestExtraData, + RestGenesis, + RestIndexedErrorMessage, + RestIndexedErrorMessageItem, + RestLivenessItem, + RestMetadata, + RestNetworkIdentity, + RestNimbusTimestamp1, + 
RestNimbusTimestamp2, + RestNode, + RestNodeExtraData, + RestNodePeer, + RestNodeVersion, + RestPeerCount, + RestProposerDuty, + RestRoot, + RestSignedBlockHeader, + RestSignedContributionAndProof, + RestSyncCommitteeContribution, + RestSyncCommitteeDuty, + RestSyncCommitteeMessage, + RestSyncCommitteeReward, + RestSyncCommitteeSelection, + RestSyncCommitteeSubscription, + RestSyncInfo, + RestValidator, + RestValidatorIdentity, + RestValidatorBalance, + ReorgInfoObject, + SPDIR, + SPDIR_Meta, + SPDIR_SignedAttestation, + SPDIR_SignedBlock, + SPDIR_Validator, + SetFeeRecipientRequest, + SetGasLimitRequest, + SetGraffitiRequest, + SignedBLSToExecutionChange, + SignedBeaconBlockHeader, + SignedContributionAndProof, + SignedExecutionPayloadBid, + SignedExecutionPayloadEnvelope, + SignedValidatorRegistrationV1, + SignedVoluntaryExit, + SyncAggregate, + SyncAggregatorSelectionData, + SyncCommittee, + SyncCommitteeContribution, + SyncCommitteeMessage, + Validator, + ValidatorRegistrationV1, + VoluntaryExit, + Web3SignerAggregationSlotData, + Web3SignerDepositData, + Web3SignerErrorResponse, + Web3SignerForkInfo, + Web3SignerMerkleProof, + Web3SignerRandaoRevealData, + Web3SignerSignatureResponse, + Web3SignerStatusResponse, + Web3SignerSyncCommitteeMessageData, + Web3SignerValidatorRegistration, + Withdrawal, + WithdrawalRequest, + altair.BeaconBlock, + altair.BeaconBlockBody, + altair.BeaconState, + altair.LightClientBootstrap, + altair.LightClientFinalityUpdate, + altair.LightClientHeader, + altair.LightClientOptimisticUpdate, + altair.LightClientUpdate, + bellatrix.BeaconBlock, + bellatrix.BeaconBlockBody, + bellatrix.BeaconState, + bellatrix.ExecutionPayload, + bellatrix.ExecutionPayloadHeader, + bellatrix_mev.BlindedBeaconBlockBody, + bellatrix_mev.BlindedBeaconBlock, + bellatrix_mev.SignedBlindedBeaconBlock, + capella.BeaconBlock, + capella.BeaconBlockBody, + capella.BeaconState, + capella.ExecutionPayload, + capella.ExecutionPayloadHeader, + capella.LightClientBootstrap, + capella.LightClientFinalityUpdate, + capella.LightClientHeader, + capella.LightClientOptimisticUpdate, + capella.LightClientUpdate, + capella_mev.BlindedBeaconBlock, + capella_mev.BlindedBeaconBlockBody, + capella_mev.SignedBlindedBeaconBlock, + deneb.BeaconBlock, + deneb.BeaconBlockBody, + deneb.BeaconState, + deneb.BlobsBundle, + deneb.BlockContents, + deneb.ExecutionPayload, + deneb.ExecutionPayloadHeader, + deneb.LightClientBootstrap, + deneb.LightClientFinalityUpdate, + deneb.LightClientHeader, + deneb.LightClientOptimisticUpdate, + deneb.LightClientUpdate, + deneb_mev.BlindedBeaconBlock, + deneb_mev.BlindedBeaconBlockBody, + deneb_mev.SignedBlindedBeaconBlock, + electra.AggregateAndProof, + electra.Attestation, + electra.AttesterSlashing, + electra.BeaconBlock, + electra.BeaconState, + electra.BeaconBlockBody, + electra.BlockContents, + electra.IndexedAttestation, + electra.LightClientBootstrap, + electra.LightClientFinalityUpdate, + electra.LightClientHeader, + electra.LightClientOptimisticUpdate, + electra.LightClientUpdate, + electra.SignedAggregateAndProof, + electra.SingleAttestation, + electra.TrustedAttestation, + electra_mev.BlindedBeaconBlock, + electra_mev.BlindedBeaconBlockBody, + electra_mev.BuilderBid, + electra_mev.ExecutionPayloadAndBlobsBundle, + electra_mev.SignedBlindedBeaconBlock, + electra_mev.SignedBuilderBid, + fulu.BeaconBlock, + fulu.BeaconBlockBody, + fulu.BeaconState, + fulu.BlobsBundle, + fulu.BlockContents, + fulu.DataColumnSidecar, + fulu_mev.BlindedBeaconBlock, + 
fulu_mev.BlindedBeaconBlockBody, + fulu_mev.BuilderBid, + fulu_mev.SignedBlindedBeaconBlock, + fulu_mev.SignedBuilderBid, + gloas.BeaconBlock, + gloas.BeaconBlockBody, + gloas.BeaconState, + gloas.BlockContents, + gloas.DataColumnSidecar, + gloas.ExecutionPayloadBid, + phase0.AggregateAndProof, + phase0.Attestation, + phase0.AttesterSlashing, + phase0.BeaconBlock, + phase0.BeaconBlockBody, + phase0.BeaconState, + phase0.IndexedAttestation, + phase0.SignedAggregateAndProof, + phase0.TrustedAttestation +) +#!fmt: on + +type + RestJsonWriter = RestJson.Writer() + RestJsonReader = RestJson.Reader() + +{.pragma: reader, raises: [IOError, SerializationError].} +{.pragma: writer, raises: [IOError].} + +## https://github.com/ethereum/beacon-APIs/blob/v3.1.0/types/primitive.yaml#L57 +proc write0xHex*(w: var RestJsonWriter, value: openArray[byte]) {.writer.} = + w.streamElement(s): + s.write("\"0x") + s.writeHex(value) + s.write('"') + +# TODO +# Tuples are widely used in the responses of the REST server +# If we switch to concrete types there, it would be possible +# to remove this overly generic definition. +template writeValue*(w: RestJsonWriter, value: tuple) = + writeRecordValue(w, value) + +## https://github.com/ethereum/beacon-APIs/blob/v3.1.0/types/primitive.yaml#L31 +proc writeValue*( + w: var RestJsonWriter, value: uint64 | uint32 | uint16 | uint8 +) {.writer.} = + w.streamElement(s): + s.write('"') + s.writeText(value) + s.write('"') + +proc readValue*[T: uint64 | uint32 | uint16 | uint8]( + r: var RestJsonReader, value: var T +) {.reader.} = + let svalue = r.readValue(string) + value = Base10.decode(T, svalue).valueOr: + r.raiseUnexpectedValue($error & ": " & svalue) + +proc writeValue*(w: var RestJsonWriter, value: ConsensusFork) {.writer.} = + w.writeValue(value.toString()) + +proc readValue*(r: var RestJsonReader, value: var ConsensusFork) {.reader.} = + let svalue = r.readValue(string) + # toLowerAscii because Web3Signer uses uppercase (!) + value = ConsensusFork.init(svalue.toLowerAscii).valueOr: + r.raiseUnexpectedValue("Invalid or unknown consensus fork: " & $svalue) + +proc writeValue*(w: var RestJsonWriter, value: RestReward) {.writer.} = + w.streamElement(s): + s.write('"') + s.writeText(int64(value)) + s.write('"') + +proc readValue*(r: var RestJsonReader, value: var RestReward) {.reader.} = + let svalue = r.readValue(string) + if svalue.startsWith("-"): + let res = Base10.decode(uint64, svalue.toOpenArray(1, len(svalue) - 1)).valueOr: + r.raiseUnexpectedValue($error & ": " & svalue) + if res > uint64(high(int64)): + r.raiseUnexpectedValue("Integer value overflow " & svalue) + value = RestReward(-int64(res)) + else: + let res = Base10.decode(uint64, svalue).valueOr: + r.raiseUnexpectedValue($error & ": " & svalue) + if res > uint64(high(int64)): + r.raiseUnexpectedValue("Integer value overflow " & svalue) + value = RestReward(int64(res)) + +proc writeValue*(w: var RestJsonWriter, value: RestNumeric) {.writer.} = + w.streamElement(s): + s.writeText(int(value)) + +proc readValue*(r: var RestJsonReader, value: var RestNumeric) {.reader.} = + if r.tokKind == JsonValueKind.String: + # Nimbus earlier than v23.11.0 erroneously used a string in some number + # fields - provide backwards compatibilty.. 
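# A rough round-trip sketch of the quoted-decimal convention implemented
# above: Beacon API primitives serialize unsigned integers as JSON strings.
# Assumes the repository root is on the compiler's search path so that
# `RestJson` and the serializers in this file are in scope.
import beacon_chain/spec/eth2_apis/eth2_rest_json_serialization

let encoded = RestJson.encode(12345'u64)
doAssert encoded == "\"12345\""        # integers travel as quoted strings
doAssert RestJson.decode(encoded, uint64) == 12345'u64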
+ let svalue = r.readValue(string) + try: + value = RestNumeric(parseInt(svalue)) + except ValueError: + r.raiseUnexpectedValue("Expected number/string") + else: + value = RestNumeric(r.parseInt(int)) + +proc writeValue*(w: var RestJsonWriter, value: JustificationBits) {.writer.} = + w.write0xHex([uint8(value)]) + +proc readValue*(r: var RestJsonReader, value: var JustificationBits) {.reader.} = + let hex = r.readValue(string) + try: + value = JustificationBits(hexToByteArray(hex, 1)[0]) + except ValueError: + r.raiseUnexpectedValue("The `justification_bits` value must be a hex string") + +proc writeValue*(w: var RestJsonWriter, value: UInt256) {.writer.} = + w.writeValue(toString(value)) + +proc readValue*(r: var RestJsonReader, value: var UInt256) {.reader.} = + let svalue = r.readValue(string) + try: + value = parse(svalue, UInt256, 10) + except ValueError: + r.raiseUnexpectedValue("UInt256 value should be a valid decimal string") + +proc writeValue*(w: var RestJsonWriter, value: Gwei | Epoch | Slot) {.writer.} = + w.writeValue(distinctBase(value)) + +proc readValue*(r: var RestJsonReader, value: var (Gwei | Epoch | Slot)) {.reader.} = + r.readValue(distinctBase(value)) + +proc writeValue*(w: var RestJsonWriter, value: EpochParticipationFlags) {.writer.} = + for e in w.stepwiseArrayCreation(value.asList): + w.writeValue e + +proc readValue*( + r: var RestJsonReader, value: var EpochParticipationFlags +) {.raises: [SerializationError, IOError].} = + for e in r.readArray(uint8): + if not value.asList.add(e): + r.raiseUnexpectedValue("The participation flags list size exceeds limit") + +proc writeValue*( + w: var RestJsonWriter, + value: RestValidatorIndex | ValidatorIndex | IndexInSyncCommittee | CommitteeIndex, +) {.writer.} = + w.writeValue(distinctBase(value)) + +proc readValue*[T: ValidatorIndex | IndexInSyncCommittee | CommitteeIndex]( + r: var RestJsonReader, value: var T +) {.reader.} = + let v = r.readValue(uint64) + value = T.init(v).valueOr: + r.raiseUnexpectedValue($error) + +proc readValue*(r: var RestJsonReader, value: var RestValidatorIndex) {.reader.} = + r.readValue(uint64(value)) + +proc writeValue*( + w: var RestJsonWriter, value: ValidatorSig | TrustedSig | ValidatorPubKey +) {.writer.} = + w.write0xHex(toRaw(value)) + +proc readValue*[T: ValidatorSig | ValidatorPubKey]( + r: var RestJsonReader, value: var T +) {.reader.} = + let hexValue = r.readValue(string) + value = T.fromHex(hexValue).valueOr: + r.raiseUnexpectedValue($error) + +proc readValue*(r: var RestJsonReader, value: var TrustedSig) {.reader.} = + let hexValue = r.readValue(string) + let sig = ValidatorSig.fromHex(hexValue).valueOr: + r.raiseUnexpectedValue($error) + + value = TrustedSig(blob: sig.blob) + +proc readValue*(r: var RestJsonReader, value: var HashedValidatorPubKey) {.reader.} = + let key = r.readValue(ValidatorPubKey) + + value = HashedValidatorPubKey.init(key) + +proc writeValue*(w: var RestJsonWriter, value: HashedValidatorPubKey) {.writer.} = + w.writeValue(value.pubkey) + +proc readValue*[T: BitSeq | BitList](r: var RestJsonReader, value: var T) {.reader.} = + try: + value = T hexToSeqByte(r.readValue(string)) + except ValueError: + r.raiseUnexpectedValue(&"{$type(value)} should be a valid hex string") + +proc writeValue*(w: var RestJsonWriter, value: BitSeq | BitList | BitArray) {.writer.} = + w.write0xHex(value.bytes) + +proc readValue*(r: var RestJsonReader, value: var BitArray) {.reader.} = + try: + hexToByteArray(r.readValue(string), value.bytes) + except ValueError: + 
r.raiseUnexpectedValue("BitArray value should be a valid hex string") + +proc readValue*( + r: var RestJsonReader, value: var (Eth2Digest | BloomLogs | Eth1Address | Blob) +) {.reader.} = + try: + hexToByteArray(r.readValue(string), value.data) + except ValueError: + r.raiseUnexpectedValue(&"{$type(value)} should be a valid hex string") + +proc writeValue*( + w: var RestJsonWriter, value: Eth2Digest | BloomLogs | Eth1Address | Blob +) {.writer.} = + w.write0xHex(value.data) + +proc readValue*(r: var RestJsonReader, value: var Blob) {.reader.} = + try: + hexToByteArray(r.readValue(string), value) + except ValueError: + r.raiseUnexpectedValue(&"{$type(value)} should be a valid hex string") + +proc writeValue*(w: var RestJsonWriter, value: Blob) {.writer.} = + w.write0xHex(value) + +proc readValue*(r: var RestJsonReader, value: var (HashArray | HashList)) {.reader.} = + r.readValue(value.data) + value.resetCache() + +proc writeValue*(w: var RestJsonWriter, value: HashArray | HashList) {.writer.} = + w.writeValue(value.data) + +## https://github.com/ethereum/beacon-APIs/blob/v2.4.2/types/primitive.yaml#L135-L146 +proc readValue*( + r: var RestJsonReader, value: var (KzgCommitment | KzgProof | KzgCell) +) {.reader.} = + try: + hexToByteArray(r.readValue(string), distinctBase(value.bytes)) + except ValueError: + r.raiseUnexpectedValue(&"{$typeof(value)} should be a valid hex string") + +proc writeValue*( + w: var RestJsonWriter, value: KzgCommitment | KzgProof | KzgCell +) {.writer.} = + w.write0xHex(value.bytes) + +proc writeValue*(w: var RestJsonWriter, value: GraffitiBytes) {.writer.} = + w.write0xHex(distinctBase(value)) + +proc readValue*(r: var RestJsonReader, value: var GraffitiBytes) {.reader.} = + try: + value = init(GraffitiBytes, r.readValue(string)) + except ValueError as err: + r.raiseUnexpectedValue err.msg + +proc readValue*( + r: var RestJsonReader, + value: var (Version | ForkDigest | DomainType | RestWithdrawalPrefix), +) {.reader.} = + try: + hexToByteArray(r.readValue(string), distinctBase(value)) + except ValueError: + r.raiseUnexpectedValue( + &"Expected a valid hex string with {distinctBase(value).len()} bytes" + ) + +template unrecognizedFieldWarning(fieldNameParam, typeNameParam: string) = + # TODO: There should be a different notification mechanism for informing the + # caller of a deserialization routine for unexpected fields. + # The chonicles import in this module should be removed. + trace "JSON field not recognized by the current version of Nimbus. Consider upgrading", + fieldName = fieldNameParam, typeName = typeNameParam + +template unrecognizedFieldIgnore() = + discard r.readValue(JsonString) + +type + VersionedData = object + version: ConsensusFork + data: JsonString + +RestJson.useDefaultSerializationFor VersionedData + +type Web3SignerVersionedBeaconBlock = object + version: string # Uppercase! 
+ `block`: Opt[JsonString] + block_header: Opt[JsonString] + +RestJson.useDefaultSerializationFor Web3SignerVersionedBeaconBlock + +proc readValue*( + r: var RestJsonReader, value: var Web3SignerForkedBeaconBlock +) {.reader.} = + var tmp: Web3SignerVersionedBeaconBlock + r.readValue(tmp) + let version = ConsensusFork.init(tmp.version.toLowerAscii).valueOr: + r.raiseUnexpectedValue($error) + + if version <= ConsensusFork.Altair: + r.raiseUnexpectedValue("Web3Signer implementation supports Bellatrix and newer") + if tmp.block_header.isNone(): + r.raiseUnexpectedValue("Missing `block_header`") + + let res = RestJson.decode(string(tmp.block_header.get()), BeaconBlockHeader) + value = Web3SignerForkedBeaconBlock(kind: version, data: res) + +proc writeValue*(w: var RestJsonWriter, value: Web3SignerForkedBeaconBlock) {.writer.} = + # https://consensys.github.io/web3signer/web3signer-eth2.html#tag/Signing/operation/ETH2_SIGN + # https://github.com/ConsenSys/web3signer/blob/d51337e96ba5ce410222943556bed7c4856b8e57/core/src/main/java/tech/pegasys/web3signer/core/service/http/handlers/signing/eth2/json/BlockRequestDeserializer.java#L42-L58 + w.writeObject: + w.writeField("version", value.kind.toString.toUpperAscii) + w.writeField("block_header", value.data) + +type VersionedSignedBeaconBlock = VersionedData + +proc readValue*( + r: var RestJsonReader, value: var SomeForkySignedBeaconBlock +) {.reader.} = + # Keep `root` up to date! + r.readRecordValue(value) + value.root = hash_tree_root(value.message) + +proc writeValue*(w: var RestJsonWriter, value: SomeForkySignedBeaconBlock) {.writer.} = + w.writeRecordValue(value) + +proc readValue*(r: var RestJsonReader, value: var ForkedSignedBeaconBlock) {.reader.} = + let v = r.readValue(VersionedSignedBeaconBlock) + + if value.kind != v.version: + value = ForkedSignedBeaconBlock(kind: v.version) + + try: + withBlck(value): + forkyBlck = RestJson.decode(string(v.data), typeof(forkyBlck)) + except SerializationError as exc: + r.raiseUnexpectedValue( + &"""Incorrect {v.version} block format, [{exc.formatMsg("SignedBeaconBlock")}]""" + ) + +proc writeValue*(w: var RestJsonWriter, value: ForkedSignedBeaconBlock) {.writer.} = + w.writeObject: + w.writeField("version", value.kind.toString) + withBlck(value): + w.writeField("data", forkyBlck) + +type VersionedHashedBeaconState = VersionedData +proc readValue*(r: var RestJsonReader, value: var ForkedHashedBeaconState) {.reader.} = + let v = r.readValue(VersionedHashedBeaconState) + + # Use a temporary to avoid stack instances and `value` mutation in case of + # exception + let tmp = (ref ForkedHashedBeaconState)(kind: v.version) + + template toValue(field: untyped) = + tmp[].field.data = RestJson.decode(string(v.data), typeof(tmp[].field.data)) + if tmp[].kind == value.kind: + assign(value.field, tmp[].field) + else: + value = tmp[] # slow, but rare (hopefully) + value.field.root = hash_tree_root(value.field.data) + + try: + case v.version + of ConsensusFork.Phase0: + toValue(phase0Data) + of ConsensusFork.Altair: + toValue(altairData) + of ConsensusFork.Bellatrix: + toValue(bellatrixData) + of ConsensusFork.Capella: + toValue(capellaData) + of ConsensusFork.Deneb: + toValue(denebData) + of ConsensusFork.Electra: + toValue(electraData) + of ConsensusFork.Fulu: + toValue(fuluData) + of ConsensusFork.Gloas: + toValue(gloasData) + except SerializationError: + r.raiseUnexpectedValue(&"Incorrect {v.version} beacon state format") + +proc writeValue*(w: var RestJsonWriter, value: ForkedHashedBeaconState) {.writer.} = + 
w.writeObject: + w.writeField("version", value.kind.toString) + withState(value): + w.writeField("data", forkyState.data) + +type VersionedLightClientObject = VersionedData + +proc readValue*[T: SomeForkedLightClientObject]( + r: var RestJsonReader, value: var T +) {.reader.} = + let v = r.readValue(VersionedLightClientObject) + + withLcDataFork(lcDataForkAtConsensusFork(v.version)): + when lcDataFork > LightClientDataFork.None: + try: + value = T.init(RestJson.decode(string(v.data), T.Forky(lcDataFork))) + except SerializationError: + r.raiseUnexpectedValue("Incorrect format (" & $lcDataFork & ")") + else: + r.raiseUnexpectedValue("Unsupported fork " & $v.version) + +type VersionedAggregateAndProof = VersionedData +proc readValue*(r: var RestJsonReader, value: var ForkedAggregateAndProof) {.reader.} = + let v = r.readValue(VersionedAggregateAndProof) + + if value.kind != v.version: + value = ForkedAggregateAndProof(kind: v.version) + + try: + withAggregateAndProof(value): + forkyProof = RestJson.decode(string(v.data), typeof(forkyProof)) + except SerializationError as exc: + r.raiseUnexpectedValue( + &"""Incorrect {v.version} aggregated attestation format, [{exc.formatMsg("ForkedAggregateAndProof")}]""" + ) + +proc writeValue*(w: var RestJsonWriter, proof: ForkedAggregateAndProof) {.writer.} = + w.writeObject: + w.writeField("version", proof.kind.toString()) + withAggregateAndProof(proof): + w.writeField("data", forkyProof) + +proc writeValue*(w: var RestJsonWriter, value: Web3SignerRequest) {.writer.} = + w.writeObject: + w.writeField("type", value.kind) + w.writeField("fork_info", value.forkInfo) + w.writeField("signingRoot", value.signingRoot) + + case value.kind + of Web3SignerRequestKind.AggregationSlot: + doAssert(value.forkInfo.isSome(), "forkInfo should be set for " & $value.kind) + w.writeField("aggregation_slot", value.aggregationSlot) + of Web3SignerRequestKind.AggregateAndProof: + doAssert(value.forkInfo.isSome(), "forkInfo should be set for " & $value.kind) + w.writeField("aggregate_and_proof", value.aggregateAndProof) + of Web3SignerRequestKind.AggregateAndProofV2: + doAssert(value.forkInfo.isSome(), "forkInfo should be set for " & $value.kind) + w.writeField("aggregate_and_proof", value.forkedAggregateAndProof) + of Web3SignerRequestKind.Attestation: + doAssert(value.forkInfo.isSome(), "forkInfo should be set for " & $value.kind) + w.writeField("attestation", value.attestation) + of Web3SignerRequestKind.BlockV2: + doAssert(value.forkInfo.isSome(), "forkInfo should be set for " & $value.kind) + # https://github.com/Consensys/web3signer/blob/2d956c019663ac70f60640d23196d1d321c1b1fa/core/src/main/resources/openapi-specs/eth2/signing/schemas.yaml#L483-L500 + w.writeField("beacon_block", value.beaconBlockHeader) + + w.writeField("proofs", value.proofs) + of Web3SignerRequestKind.Deposit: + w.writeField("deposit", value.deposit) + of Web3SignerRequestKind.RandaoReveal: + doAssert(value.forkInfo.isSome(), "forkInfo should be set for " & $value.kind) + w.writeField("randao_reveal", value.randaoReveal) + of Web3SignerRequestKind.VoluntaryExit: + doAssert(value.forkInfo.isSome(), "forkInfo should be set for " & $value.kind) + w.writeField("voluntary_exit", value.voluntaryExit) + of Web3SignerRequestKind.SyncCommitteeMessage: + doAssert(value.forkInfo.isSome(), "forkInfo should be set for " & $value.kind) + w.writeField("sync_committee_message", value.syncCommitteeMessage) + of Web3SignerRequestKind.SyncCommitteeSelectionProof: + doAssert(value.forkInfo.isSome(), "forkInfo should be 
set for " & $value.kind) + w.writeField("sync_aggregator_selection_data", value.syncAggregatorSelectionData) + of Web3SignerRequestKind.SyncCommitteeContributionAndProof: + doAssert(value.forkInfo.isSome(), "forkInfo should be set for " & $value.kind) + w.writeField("contribution_and_proof", value.syncCommitteeContributionAndProof) + of Web3SignerRequestKind.ValidatorRegistration: + # https://consensys.github.io/web3signer/web3signer-eth2.html#operation/ETH2_SIGN + w.writeField("validator_registration", value.validatorRegistration) + +type RawWeb3SignerRequest = object + `type`: Web3SignerRequestKind + fork_info: Opt[Web3SignerForkInfo] + signingRoot: Opt[Eth2Digest] # Capitalized like so in spec! + proofs: Opt[seq[Web3SignerMerkleProof]] + + # The following fields are present or not depending on the type + aggregation_slot: Opt[Web3SignerAggregationSlotData] + attestation: Opt[AttestationData] + aggregate_and_proof: Opt[JsonString] + beacon_block: Opt[Web3SignerForkedBeaconBlock] + deposit: Opt[Web3SignerDepositData] + randao_reveal: Opt[Web3SignerRandaoRevealData] + voluntary_exit: Opt[VoluntaryExit] + sync_committee_message: Opt[Web3SignerSyncCommitteeMessageData] + sync_aggregator_selection_data: Opt[SyncAggregatorSelectionData] + contribution_and_proof: Opt[ContributionAndProof] + validator_registration: Opt[Web3SignerValidatorRegistration] + +RestJson.useDefaultSerializationFor RawWeb3SignerRequest +proc readValue*(r: var RestJsonReader, value: var Web3SignerRequest) {.reader.} = + let v = r.readValue(RawWeb3SignerRequest) + + template expectedForkInfo(): untyped = + if v.fork_info.isNone(): + r.raiseUnexpectedValue("Field `fork_info` is missing") + v.fork_info + + template expectedField(name: untyped): untyped = + const fieldName = astToStr(name) + + v.name.valueOr: + r.raiseUnexpectedValue("Field `" & fieldName & "` is missing") + + value = + case v.`type` + of Web3SignerRequestKind.AggregationSlot: + Web3SignerRequest( + kind: Web3SignerRequestKind.AggregationSlot, + forkInfo: expectedForkInfo, + signingRoot: v.signingRoot, + aggregationSlot: expectedField(aggregation_slot), + ) + of Web3SignerRequestKind.AggregateAndProof: + let aggregate_and_proof = RestJson.decode( + string expectedField(aggregate_and_proof), phase0.AggregateAndProof + ) + + Web3SignerRequest( + kind: Web3SignerRequestKind.AggregateAndProof, + forkInfo: expectedForkInfo, + signingRoot: v.signingRoot, + aggregateAndProof: aggregate_and_proof, + ) + of Web3SignerRequestKind.AggregateAndProofV2: + let aggregate_and_proof = RestJson.decode( + string expectedField(aggregate_and_proof), ForkedAggregateAndProof + ) + Web3SignerRequest( + kind: Web3SignerRequestKind.AggregateAndProofV2, + forkInfo: expectedForkInfo, + signingRoot: v.signingRoot, + forkedAggregateAndProof: aggregate_and_proof, + ) + of Web3SignerRequestKind.Attestation: + Web3SignerRequest( + kind: Web3SignerRequestKind.Attestation, + forkInfo: expectedForkInfo, + signingRoot: v.signingRoot, + attestation: expectedField(attestation), + ) + of Web3SignerRequestKind.BlockV2: + # https://github.com/ConsenSys/web3signer/blob/41834a927088f1bde7a097e17d19e954d0058e54/core/src/main/resources/openapi-specs/eth2/signing/schemas.yaml#L421-L425 (branch v22.7.0) + # It's the "beacon_block" field even when it's not a block, but a header + Web3SignerRequest( + kind: Web3SignerRequestKind.BlockV2, + forkInfo: expectedForkInfo, + signingRoot: v.signingRoot, + beaconBlockHeader: expectedField(beacon_block), + proofs: v.proofs, + ) + of Web3SignerRequestKind.Deposit: + 
Web3SignerRequest( + kind: Web3SignerRequestKind.Deposit, + signingRoot: v.signingRoot, + deposit: expectedField(deposit), + ) + of Web3SignerRequestKind.RandaoReveal: + Web3SignerRequest( + kind: Web3SignerRequestKind.RandaoReveal, + forkInfo: expectedForkInfo, + signingRoot: v.signingRoot, + randaoReveal: expectedField(randao_reveal), + ) + of Web3SignerRequestKind.VoluntaryExit: + Web3SignerRequest( + kind: Web3SignerRequestKind.VoluntaryExit, + forkInfo: expectedForkInfo, + signingRoot: v.signingRoot, + voluntaryExit: expectedField(voluntary_exit), + ) + of Web3SignerRequestKind.SyncCommitteeMessage: + Web3SignerRequest( + kind: Web3SignerRequestKind.SyncCommitteeMessage, + forkInfo: expectedForkInfo, + signingRoot: v.signingRoot, + syncCommitteeMessage: expectedField(sync_committee_message), + ) + of Web3SignerRequestKind.SyncCommitteeSelectionProof: + Web3SignerRequest( + kind: Web3SignerRequestKind.SyncCommitteeSelectionProof, + forkInfo: expectedForkInfo, + signingRoot: v.signingRoot, + syncAggregatorSelectionData: expectedField(sync_aggregator_selection_data), + ) + of Web3SignerRequestKind.SyncCommitteeContributionAndProof: + Web3SignerRequest( + kind: Web3SignerRequestKind.SyncCommitteeContributionAndProof, + forkInfo: expectedForkInfo, + signingRoot: v.signingRoot, + syncCommitteeContributionAndProof: expectedField(contribution_and_proof), + ) + of Web3SignerRequestKind.ValidatorRegistration: + Web3SignerRequest( + kind: Web3SignerRequestKind.ValidatorRegistration, + signingRoot: v.signingRoot, + validatorRegistration: expectedField(validator_registration), + ) + +proc writeValue*(w: var RestJsonWriter, value: RemoteKeystoreStatus) {.writer.} = + w.writeObject: + w.writeField("status", $value.status) + w.writeField("message", value.message) + +proc readValue*(r: var RestJsonReader, value: var RemoteKeystoreStatus) {.reader.} = + var message: Opt[string] + var status: Opt[KeystoreStatus] + + for fieldName in readObjectFields(r): + case fieldName + of "message": + if message.isSome(): + r.raiseUnexpectedField( + "Multiple `message` fields found", "RemoteKeystoreStatus" + ) + message = Opt.some(r.readValue(string)) + of "status": + if status.isSome(): + r.raiseUnexpectedField("Multiple `status` fields found", "RemoteKeystoreStatus") + let res = r.readValue(string) + status = Opt.some( + case res + of "error": + KeystoreStatus.error + of "not_active": + KeystoreStatus.notActive + of "not_found": + KeystoreStatus.notFound + of "deleted": + KeystoreStatus.deleted + of "duplicate": + KeystoreStatus.duplicate + of "imported": + KeystoreStatus.imported + else: + r.raiseUnexpectedValue("Invalid `status` value") + ) + else: + unrecognizedFieldWarning(fieldName, typeof(value).name) + + if status.isNone(): + r.raiseUnexpectedValue("Field `status` is missing") + + value = RemoteKeystoreStatus(status: status.get(), message: message) + +proc readValue*( + r: var RestJsonReader, value: var ScryptSalt +) {.raises: [SerializationError, IOError].} = + let res = + try: + hexToSeqByte(r.readValue(string)) + except ValueError: + r.raiseUnexpectedValue("Invalid scrypt salt value") + + if len(res) == 0: + r.raiseUnexpectedValue("Invalid scrypt salt value") + value = ScryptSalt(res) + +proc writeValue*(w: var RestJsonWriter, value: Pbkdf2Params) {.writer.} = + w.writeObject: + w.writeField("dklen", JsonString(Base10.toString(value.dklen))) + w.writeField("c", JsonString(Base10.toString(value.c))) + w.writeField("prf", value.prf) + w.writeField("salt", value.salt) + +proc readValue*( + r: var 
RestJsonReader, value: var Pbkdf2Params +) {.raises: [SerializationError, IOError].} = + var + dklen: Opt[uint64] + c: Opt[uint64] + prf: Opt[PrfKind] + salt: Opt[Pbkdf2Salt] + + for fieldName in readObjectFields(r): + case fieldName + of "dklen": + if dklen.isSome(): + r.raiseUnexpectedField("Multiple `dklen` fields found", "Pbkdf2Params") + dklen = Opt.some(r.readValue(uint64)) + of "c": + if c.isSome(): + r.raiseUnexpectedField("Multiple `c` fields found", "Pbkdf2Params") + c = Opt.some(r.readValue(uint64)) + of "prf": + if prf.isSome(): + r.raiseUnexpectedField("Multiple `prf` fields found", "Pbkdf2Params") + prf = Opt.some(r.readValue(PrfKind)) + of "salt": + if salt.isSome(): + r.raiseUnexpectedField("Multiple `salt` fields found", "Pbkdf2Params") + salt = Opt.some(r.readValue(Pbkdf2Salt)) + else: + unrecognizedFieldWarning(fieldName, typeof(value).name) + + if dklen.isNone(): + r.raiseUnexpectedValue("Field `dklen` is missing") + if c.isNone(): + r.raiseUnexpectedValue("Field `c` is missing") + if prf.isNone(): + r.raiseUnexpectedValue("Field `prf` is missing") + if salt.isNone(): + r.raiseUnexpectedValue("Field `salt` is missing") + + value = Pbkdf2Params(dklen: dklen.get(), c: c.get(), prf: prf.get(), salt: salt.get()) + +proc writeValue*(w: var RestJsonWriter, value: ScryptParams) {.writer.} = + w.writeObject: + w.writeField("dklen", JsonString(Base10.toString(value.dklen))) + w.writeField("n", JsonString(Base10.toString(uint64(value.n)))) + w.writeField("p", JsonString(Base10.toString(uint64(value.p)))) + w.writeField("r", JsonString(Base10.toString(uint64(value.r)))) + w.writeField("salt", value.salt) + +proc readValue*( + r: var RestJsonReader, value: var ScryptParams +) {.raises: [SerializationError, IOError].} = + var + dklen: Opt[uint64] + n, p, rv: Opt[int] + salt: Opt[ScryptSalt] + + for fieldName in readObjectFields(r): + case fieldName + of "dklen": + if dklen.isSome(): + r.raiseUnexpectedField("Multiple `dklen` fields found", "ScryptParams") + dklen = Opt.some(r.readValue(uint64)) + of "n": + if n.isSome(): + r.raiseUnexpectedField("Multiple `n` fields found", "ScryptParams") + let res = r.readValue(int) + if res < 0: + r.raiseUnexpectedValue("Unexpected negative `n` value") + n = Opt.some(res) + of "p": + if p.isSome(): + r.raiseUnexpectedField("Multiple `p` fields found", "ScryptParams") + let res = r.readValue(int) + if res < 0: + r.raiseUnexpectedValue("Unexpected negative `p` value") + p = Opt.some(res) + of "r": + if rv.isSome(): + r.raiseUnexpectedField("Multiple `r` fields found", "ScryptParams") + let res = r.readValue(int) + if res < 0: + r.raiseUnexpectedValue("Unexpected negative `r` value") + rv = Opt.some(res) + of "salt": + if salt.isSome(): + r.raiseUnexpectedField("Multiple `salt` fields found", "ScryptParams") + salt = Opt.some(r.readValue(ScryptSalt)) + else: + unrecognizedFieldWarning(fieldName, typeof(value).name) + + if dklen.isNone(): + r.raiseUnexpectedValue("Field `dklen` is missing") + if n.isNone(): + r.raiseUnexpectedValue("Field `n` is missing") + if p.isNone(): + r.raiseUnexpectedValue("Field `p` is missing") + if rv.isNone(): + r.raiseUnexpectedValue("Field `r` is missing") + if salt.isNone(): + r.raiseUnexpectedValue("Field `salt` is missing") + + value = ScryptParams( + dklen: dklen.get(), n: n.get(), p: p.get(), r: rv.get(), salt: salt.get() + ) + +proc writeValue*( + w: var RestJsonWriter, value: Keystore +) {. + error: + "keystores must be converted to json with Json.encode(keystore). 
" & + "There is no REST-specific encoding" +.} + +proc readValue*( + r: var RestJsonReader, value: var Keystore +) {. + error: + "Keystores must be loaded with `parseKeystore`. " & + "There is no REST-specific encoding" +.} + +proc writeValue*( + w: var RestJsonWriter, value: KeystoresAndSlashingProtection +) {.writer.} = + let keystores = block: + var res: seq[string] + for keystore in value.keystores: + let encoded = Json.encode(keystore) + res.add(encoded) + res + + w.writeObject: + w.writeField("keystores", keystores) + w.writeField("passwords", value.passwords) + if value.slashing_protection.isSome(): + let slashingProtection = RestJson.encode(value.slashing_protection.get) + w.writeField("slashing_protection", slashingProtection) + +proc readValue*( + r: var RestJsonReader, value: var KeystoresAndSlashingProtection +) {.raises: [SerializationError, IOError].} = + var + strKeystores: seq[string] + passwords: seq[string] + strSlashing: Opt[string] + + for fieldName in readObjectFields(r): + case fieldName + of "keystores": + strKeystores = r.readValue(seq[string]) + of "passwords": + passwords = r.readValue(seq[string]) + of "slashing_protection": + if strSlashing.isSome(): + r.raiseUnexpectedField( + "Multiple `slashing_protection` fields found", + "KeystoresAndSlashingProtection", + ) + strSlashing = Opt.some(r.readValue(string)) + else: + unrecognizedFieldWarning(fieldName, typeof(value).name) + + if len(strKeystores) == 0: + r.raiseUnexpectedValue("Missing or empty `keystores` value") + if len(passwords) == 0: + r.raiseUnexpectedValue("Missing or empty `passwords` value") + + let keystores = block: + var res: seq[Keystore] + for item in strKeystores: + let key = + try: + parseKeystore(item) + except SerializationError: + # TODO re-raise the exception by adjusting the column index, so the user + # will get an accurate syntax error within the larger message + r.raiseUnexpectedValue("Invalid keystore format") + res.add(key) + res + + let slashing = + if strSlashing.isSome(): + let db = + try: + RestJson.decode(strSlashing.get(), SPDIR) + except SerializationError: + r.raiseUnexpectedValue("Invalid slashing protection format") + Opt.some(db) + else: + Opt.none(SPDIR) + + value = KeystoresAndSlashingProtection( + keystores: keystores, passwords: passwords, slashing_protection: slashing + ) + +proc writeValue*(w: var RestJsonWriter, value: RestNodeValidity) {.writer.} = + w.writeValue($value) + +proc readValue*( + r: var RestJsonReader, value: var RestErrorMessage +) {.raises: [SerializationError, IOError].} = + var + code: Opt[int] + message: Opt[string] + stacktraces: Opt[seq[string]] + + for fieldName in readObjectFields(r): + case fieldName + of "code": + if code.isSome(): + r.raiseUnexpectedField("Multiple `code` fields found", "RestErrorMessage") + let ires = + try: + let res = r.readValue(int) + if res < 0: + r.raiseUnexpectedValue("Invalid `code` field value") + Opt.some(res) + except SerializationError: + Opt.none(int) + if ires.isNone(): + let sres = + try: + parseInt(r.readValue(string)) + except ValueError: + r.raiseUnexpectedValue("Invalid `code` field format") + if sres < 0: + r.raiseUnexpectedValue("Invalid `code` field value") + code = Opt.some(sres) + else: + code = ires + of "message": + if message.isSome(): + r.raiseUnexpectedField("Multiple `message` fields found", "RestErrorMessage") + message = Opt.some(r.readValue(string)) + of "stacktraces": + if stacktraces.isSome(): + r.raiseUnexpectedField( + "Multiple `stacktraces` fields found", "RestErrorMessage" + ) + 
stacktraces = Opt.some(r.readValue(seq[string])) + else: + unrecognizedFieldIgnore() + + if code.isNone(): + r.raiseUnexpectedValue("Missing or invalid `code` value") + if message.isNone(): + r.raiseUnexpectedValue("Missing or invalid `message` value") + + value = + RestErrorMessage(code: code.get(), message: message.get(), stacktraces: stacktraces) + +proc writeValue*(w: var RestJsonWriter, value: RestErrorMessage) {.writer.} = + w.writeObject: + w.writeField("code", value.code) + w.writeField("message", value.message) + w.writeField("stacktraces", value.stacktraces) + +proc readValue*( + r: var RestJsonReader, value: var VCRuntimeConfig +) {.raises: [SerializationError, IOError].} = + for fieldName in readObjectFields(r): + let fieldValue = + case toLowerAscii(fieldName) + of "blob_schedule": + string(r.readValue(JsonString)) + else: + r.readValue(string) + + if value.hasKeyOrPut(toUpperAscii(fieldName), fieldValue): + let msg = "Multiple `" & fieldName & "` fields found" + r.raiseUnexpectedField(msg, "VCRuntimeConfig") + +type VersionedMaybeBlindedBeaconBlock = object + version: ConsensusFork + execution_payload_blinded: bool + execution_payload_value: UInt256 + consensus_block_value: Opt[UInt256] + data: JsonString + +RestJson.useDefaultSerializationFor VersionedMaybeBlindedBeaconBlock + +proc readValue*( + r: var RestJsonReader, value: var ProduceBlockResponseV3 +) {.raises: [SerializationError, IOError].} = + let v = r.readValue(VersionedMaybeBlindedBeaconBlock) + + # TODO (cheatfate): At some point we should add check for missing + # `consensus_block_value` + + withConsensusFork(v.version): + debugGloasComment "re-add gloas mev" + value = + when consensusFork >= ConsensusFork.Gloas: + if v.execution_payload_blinded: + r.raiseUnexpectedValue( + &"`execution_payload_blinded` unsupported for {v.version}" + ) + ForkedMaybeBlindedBeaconBlock.init( + RestJson.decode(string(v.data), consensusFork.BlockContents) + ) + elif consensusFork >= ConsensusFork.Electra: + if v.execution_payload_blinded: + ForkedMaybeBlindedBeaconBlock.init( + RestJson.decode(string(v.data), consensusFork.BlindedBlockContents), + Opt.some v.execution_payload_value, + v.consensus_block_value, + ) + else: + ForkedMaybeBlindedBeaconBlock.init( + RestJson.decode(string(v.data), consensusFork.BlockContents), + Opt.some v.execution_payload_value, + v.consensus_block_value, + ) + elif consensusFork >= ConsensusFork.Bellatrix: + if v.execution_payload_blinded: + r.raiseUnexpectedValue( + &"`execution_payload_blinded` unsupported for {v.version}" + ) + ForkedMaybeBlindedBeaconBlock.init( + RestJson.decode(string(v.data), consensusFork.BlockContents), + Opt.some v.execution_payload_value, + v.consensus_block_value, + ) + else: + if v.execution_payload_blinded: + r.raiseUnexpectedValue( + &"`execution_payload_blinded` unsupported for {v.version}" + ) + ForkedMaybeBlindedBeaconBlock.init( + RestJson.decode(string(v.data), consensusFork.BlockContents) + ) + +proc writeValue*(w: var RestJsonWriter, value: ProduceBlockResponseV3) {.writer.} = + w.writeObject: + withForkyMaybeBlindedBlck(value): + w.writeField("version", consensusFork.toString()) + w.writeField("execution_payload_blinded", isBlinded) + if value.executionValue.isSome(): + w.writeField("execution_payload_value", $(value.executionValue.get())) + if value.consensusValue.isSome(): + w.writeField("consensus_block_value", $(value.consensusValue.get())) + w.writeField("data", forkyMaybeBlindedBlck) + +proc writeValue*(w: var RestJsonWriter, value: GraffitiString) 
{.writer.} = + w.writeValue($value) + +proc readValue*(r: var RestJsonReader, T: type GraffitiString): T {.reader.} = + let res = init(GraffitiString, r.readValue(string)) + if res.isErr(): + r.raiseUnexpectedValue res.error + res.get + +proc writeValue*(w: var RestJsonWriter, value: ValidatorIdent) {.writer.} = + case value.kind + of ValidatorQueryKind.Index: + w.writeValue(value.index) + of ValidatorQueryKind.Key: + w.writeValue(value.key) + +proc readValue*(r: var RestJsonReader, value: var ValidatorIdent) {.reader.} = + value = ValidatorIdent.parse(r.readValue(string)).valueOr: + r.raiseUnexpectedValue($error) + +type RawRestValidatorRequest = object + ids: Opt[seq[ValidatorIdent]] + statuses: Opt[seq[string]] + +RestJson.useDefaultSerializationFor RawRestValidatorRequest +proc readValue*(r: var RestJsonReader, value: var RestValidatorRequest) {.reader.} = + let + v = r.readValue(RawRestValidatorRequest) + filter = block: + if v.statuses.isSome(): + var res: ValidatorFilter + for item in v.statuses.get(): + let value = ValidatorFilter.parse(item).valueOr: + r.raiseUnexpectedValue($error) + # Test for uniqueness of value. + if value * res != {}: + r.raiseUnexpectedValue( + "The `statuses` array should consist of only unique values" + ) + res.incl(value) + Opt.some(res) + else: + Opt.none(ValidatorFilter) + + # Uniqueness of these values is checked at a higher layer. + value = RestValidatorRequest(ids: v.ids, status: filter) + +proc writeValue*(w: var RestJsonWriter, value: RestValidatorRequest) {.writer.} = + w.writeObject: + w.writeField("ids", value.ids) + if value.status.isSome(): + let res = value.status.get().toList() + if len(res) > 0: + w.writeField("statuses", res) + +type VersionedAttestation = VersionedData + +proc readValue*(r: var RestJsonReader, value: var ForkedAttestation) {.reader.} = + let v = r.readValue(VersionedAttestation) + if value.kind != v.version: + value = ForkedAttestation(kind: v.version) + + try: + withAttestation(value): + forkyAttestation = RestJson.decode(string(v.data), typeof(forkyAttestation)) + except SerializationError as exc: + r.raiseUnexpectedValue( + &"""Incorrect {v.version} attestation format, [{exc.formatMsg("ForkedAttestation")}]""" + ) + +proc writeValue*(w: var RestJsonWriter, value: ForkedAttestation) {.writer.} = + w.writeObject: + w.writeField("version", value.kind.toString()) + withAttestation(value): + w.writeField("data", forkyAttestation) diff --git a/beacon_chain/spec/eth2_apis/eth2_rest_serialization.nim b/beacon_chain/spec/eth2_apis/eth2_rest_serialization.nim index df939c1c83..24a68add84 100644 --- a/beacon_chain/spec/eth2_apis/eth2_rest_serialization.nim +++ b/beacon_chain/spec/eth2_apis/eth2_rest_serialization.nim @@ -5,29 +5,17 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} - -import std/[typetraits, strutils] -import results, stew/[assign2, base10, byteutils, endians2], presto/common, - libp2p/peerid, serialization, json_serialization, - json_serialization/std/[net, sets], - json_serialization/stew/results as jsonSerializationResults, - stint, chronicles -import ".."/[eth2_ssz_serialization, forks, keystore], - ".."/../consensus_object_pools/block_pools_types, - ".."/mev/[bellatrix_mev, capella_mev], - ".."/../validators/slashing_protection_common, - "."/[rest_types, rest_keymanager_types] -import nimcrypto/utils as ncrutils - -export - eth2_ssz_serialization, results, peerid, common, serialization, chronicles, - json_serialization, net, sets, rest_types, slashing_protection_common, - jsonSerializationResults, rest_keymanager_types - -from web3/primitives import Hash32, Quantity -from json import getStr, newJString -export primitives.Hash32, primitives.Quantity +{.push raises: [], gcsafe.} + +import + std/[json, strutils], + stew/[base10, byteutils], + libp2p/peerid, + presto/common as presto_common, + ../eth2_ssz_serialization, + ./eth2_rest_json_serialization + +export peerid, presto_common, eth2_ssz_serialization, eth2_rest_json_serialization func decodeMediaType*( contentType: Opt[ContentTypeData]): Result[MediaType, string] = @@ -35,293 +23,6 @@ func decodeMediaType*( return err("Missing or incorrect Content-Type") ok contentType.get.mediaType -type - EmptyBody* = object - -createJsonFlavor RestJson - -RestJson.useDefaultSerializationFor( - AttestationData, - BLSToExecutionChange, - BeaconBlockHeader, - BlobSidecar, - BlobSidecarInfoObject, - BlobsBundle, - Checkpoint, - ConsolidationRequest, - ContributionAndProof, - DataEnclosedObject, - DataMetaEnclosedObject, - DataOptimisticAndFinalizedObject, - DataOptimisticObject, - DataRootEnclosedObject, - DataVersionEnclosedObject, - DeleteKeystoresBody, - DeleteKeystoresResponse, - DeleteRemoteKeystoresResponse, - DenebSignedBlockContents, - Deposit, - DepositData, - DepositRequest, - DepositTreeSnapshot, - DistributedKeystoreInfo, - ElectraSignedBlockContents, - EmptyBody, - Eth1Data, - EventBeaconBlockObject, - EventBeaconBlockGossipObject, - ExecutionRequests, - Fork, - FuluSignedBlockContents, - GetBlockAttestationsResponse, - GetBlockHeaderResponse, - GetBlockHeadersResponse, - GetDepositContractResponse, - GetDepositSnapshotResponse, - GetDistributedKeystoresResponse, - GetEpochCommitteesResponse, - GetEpochSyncCommitteesResponse, - GetForkChoiceResponse, - GetForkScheduleResponse, - GetGenesisResponse, - GetKeystoresResponse, - GetNextWithdrawalsResponse, - GetPoolAttesterSlashingsResponse, - GetPoolProposerSlashingsResponse, - GetPoolVoluntaryExitsResponse, - GetRemoteKeystoresResponse, - GetSpecVCResponse, - GetStateFinalityCheckpointsResponse, - GetStateForkResponse, - GetStateRandaoResponse, - GetStateRootResponse, - GetStateValidatorBalancesResponse, - GetStateValidatorResponse, - GetStateValidatorsResponse, - GetValidatorGasLimitResponse, - HistoricalSummary, - ImportDistributedKeystoresBody, - ImportRemoteKeystoresBody, - KeymanagerGenericError, - KeystoreInfo, - ListFeeRecipientResponse, - ListGasLimitResponse, - GetGraffitiResponse, - GraffitiResponse, - PendingAttestation, - PendingConsolidation, - PendingDeposit, - PendingPartialWithdrawal, - PostKeystoresResponse, - PrepareBeaconProposer, - ProposerSlashing, - RemoteKeystoreInfo, - RemoteSignerInfo, - RequestItemStatus, - RestAttesterDuty, - RestBeaconCommitteeSelection, - RestBeaconStatesCommittees, - 
RestBeaconStatesFinalityCheckpoints, - RestBlockHeader, - RestBlockHeaderInfo, - RestChainHeadV2, - RestCommitteeSubscription, - RestContributionAndProof, - RestDepositContract, - RestEpochRandao, - RestEpochSyncCommittee, - RestExtraData, - RestGenesis, - RestIndexedErrorMessage, - RestIndexedErrorMessageItem, - RestMetadata, - RestNetworkIdentity, - RestNimbusTimestamp1, - RestNimbusTimestamp2, - RestNode, - RestNodeExtraData, - RestNodePeer, - RestNodeVersion, - RestPeerCount, - RestProposerDuty, - RestRoot, - RestSignedBlockHeader, - RestSignedContributionAndProof, - RestSyncCommitteeContribution, - RestSyncCommitteeDuty, - RestSyncCommitteeMessage, - RestSyncCommitteeSelection, - RestSyncCommitteeSubscription, - RestSyncInfo, - RestValidator, - RestValidatorBalance, - SPDIR, - SPDIR_Meta, - SPDIR_SignedAttestation, - SPDIR_SignedBlock, - SPDIR_Validator, - SetFeeRecipientRequest, - SetGasLimitRequest, - SetGraffitiRequest, - SignedBLSToExecutionChange, - SignedBeaconBlockHeader, - SignedContributionAndProof, - SignedValidatorRegistrationV1, - SignedVoluntaryExit, - SyncAggregate, - SyncAggregatorSelectionData, - SyncCommittee, - SyncCommitteeContribution, - SyncCommitteeMessage, - Validator, - ValidatorRegistrationV1, - VoluntaryExit, - Web3SignerAggregationSlotData, - Web3SignerDepositData, - Web3SignerErrorResponse, - Web3SignerForkInfo, - Web3SignerMerkleProof, - Web3SignerRandaoRevealData, - Web3SignerSignatureResponse, - Web3SignerStatusResponse, - Web3SignerSyncCommitteeMessageData, - Web3SignerValidatorRegistration, - Withdrawal, - WithdrawalRequest, - altair.BeaconBlock, - altair.BeaconBlockBody, - altair.BeaconState, - altair.LightClientBootstrap, - altair.LightClientFinalityUpdate, - altair.LightClientHeader, - altair.LightClientOptimisticUpdate, - altair.LightClientUpdate, - altair.SignedBeaconBlock, - bellatrix.BeaconBlock, - bellatrix.BeaconBlockBody, - bellatrix.BeaconState, - bellatrix.ExecutionPayload, - bellatrix.ExecutionPayloadHeader, - bellatrix.SignedBeaconBlock, - bellatrix_mev.BlindedBeaconBlockBody, - bellatrix_mev.BlindedBeaconBlock, - bellatrix_mev.SignedBlindedBeaconBlock, - capella.BeaconBlock, - capella.BeaconBlockBody, - capella.BeaconState, - capella.ExecutionPayload, - capella.ExecutionPayloadHeader, - capella.LightClientBootstrap, - capella.LightClientFinalityUpdate, - capella.LightClientHeader, - capella.LightClientOptimisticUpdate, - capella.LightClientUpdate, - capella.SignedBeaconBlock, - capella_mev.BlindedBeaconBlock, - capella_mev.BlindedBeaconBlockBody, - capella_mev.SignedBlindedBeaconBlock, - deneb.BeaconBlock, - deneb.BeaconBlockBody, - deneb.BeaconState, - deneb.BlockContents, - deneb.ExecutionPayload, - deneb.ExecutionPayloadHeader, - deneb.LightClientBootstrap, - deneb.LightClientFinalityUpdate, - deneb.LightClientHeader, - deneb.LightClientOptimisticUpdate, - deneb.LightClientUpdate, - deneb.SignedBeaconBlock, - deneb_mev.BlindedBeaconBlock, - deneb_mev.BlindedBeaconBlockBody, - deneb_mev.BuilderBid, - deneb_mev.ExecutionPayloadAndBlobsBundle, - deneb_mev.SignedBlindedBeaconBlock, - deneb_mev.SignedBuilderBid, - electra.AggregateAndProof, - electra.Attestation, - electra.AttesterSlashing, - electra.BeaconBlock, - electra.BeaconState, - electra.BeaconBlockBody, - electra.BlockContents, - electra.ExecutionPayload, - electra.ExecutionPayloadHeader, - electra.IndexedAttestation, - electra.LightClientBootstrap, - electra.LightClientFinalityUpdate, - electra.LightClientHeader, - electra.LightClientOptimisticUpdate, - electra.LightClientUpdate, 
- electra.SignedAggregateAndProof, - electra.SignedBeaconBlock, - electra.SingleAttestation, - electra.TrustedAttestation, - electra_mev.BlindedBeaconBlock, - electra_mev.BlindedBeaconBlockBody, - electra_mev.BuilderBid, - electra_mev.ExecutionPayloadAndBlobsBundle, - electra_mev.SignedBlindedBeaconBlock, - electra_mev.SignedBuilderBid, - fulu.BeaconBlock, - fulu.BeaconBlockBody, - fulu.BeaconState, - fulu.BlockContents, - fulu.ExecutionPayload, - fulu.ExecutionPayloadHeader, - fulu.SignedBeaconBlock, - fulu_mev.BlindedBeaconBlock, - fulu_mev.BlindedBeaconBlockBody, - fulu_mev.BuilderBid, - fulu_mev.ExecutionPayloadAndBlobsBundle, - fulu_mev.SignedBlindedBeaconBlock, - fulu_mev.SignedBuilderBid, - phase0.AggregateAndProof, - phase0.Attestation, - phase0.AttesterSlashing, - phase0.BeaconBlock, - phase0.BeaconBlockBody, - phase0.BeaconState, - phase0.IndexedAttestation, - phase0.SignedAggregateAndProof, - phase0.SignedBeaconBlock, - phase0.TrustedAttestation -) - -# TODO -# Tuples are widely used in the responses of the REST server -# If we switch to concrete types there, it would be possible -# to remove this overly generic definition. -template writeValue*(w: JsonWriter[RestJson], value: tuple) = - writeRecordValue(w, value) - -## The RestJson format implements JSON serialization in the way specified -## by the Beacon API: -## -## https://ethereum.github.io/beacon-APIs/ -## -## In this format, we must always set `allowUnknownFields = true` in the -## decode calls in order to conform the following spec: -## -## All JSON responses return the requested data under a data key in the top -## level of their response. Additional metadata may or may not be present -## in other keys at the top level of the response, dependent on the endpoint. -## The rules that require an increase in version number are as follows: -## -## - no field that is listed in an endpoint shall be removed without an increase -## in the version number -## -## - no field that is listed in an endpoint shall be altered in terms of format -## (e.g. from a string to an array) without an increase in the version number -## -## Note that it is possible for a field to be added to an endpoint's data or -## metadata without an increase in the version number. -## -## TODO nim-json-serializations should allow setting up this policy per format -## -## This also means that when new fields are introduced to the object definitions -## below, one must use the `Opt[T]` type. - const DecimalSet = {'0' .. 
'9'} # Base10 (decimal) set of chars @@ -338,13 +39,13 @@ const UrlEncodedMediaType* = MediaType.init("application/x-www-form-urlencoded") UnableDecodeVersionError = "Unable to decode version" UnableDecodeError = "Unable to decode data" - UnexpectedDecodeError = "Unexpected decoding error" InvalidContentTypeError* = "Invalid content type" UnexpectedForkVersionError* = "Unexpected fork version received" type EncodeTypes* = BlobSidecarInfoObject | + DataColumnSidecarInfoObject | DeleteKeystoresBody | EmptyBody | ImportDistributedKeystoresBody | @@ -371,6 +72,7 @@ type DenebSignedBlockContents | ElectraSignedBlockContents | FuluSignedBlockContents | + GloasSignedBlockContents | ForkedMaybeBlindedBeaconBlock | deneb_mev.SignedBlindedBeaconBlock | electra_mev.SignedBlindedBeaconBlock | @@ -393,12 +95,9 @@ type seq[RestSyncCommitteeSelection] MevDecodeTypes* = - GetHeaderResponseDeneb | GetHeaderResponseElectra | GetHeaderResponseFulu | - SubmitBlindedBlockResponseDeneb | - SubmitBlindedBlockResponseElectra | - SubmitBlindedBlockResponseFulu + SubmitBlindedBlockResponseElectra DecodeTypes* = DataEnclosedObject | @@ -409,6 +108,8 @@ type DataOptimisticAndFinalizedObject | GetBlockV2Response | GetDistributedKeystoresResponse | + GetHistoricalSummariesV1Response | + GetHistoricalSummariesV1ResponseElectra | GetKeystoresResponse | GetRemoteKeystoresResponse | GetStateForkResponse | @@ -440,11 +141,36 @@ type RestBlockTypes* = phase0.BeaconBlock | altair.BeaconBlock | bellatrix.BeaconBlock | capella.BeaconBlock | - deneb.BlockContents | deneb_mev.BlindedBeaconBlock | - electra.BlockContents | fulu.BlockContents | - electra_mev.BlindedBeaconBlock | + deneb.BlockContents | electra.BlockContents | + fulu.BlockContents | electra_mev.BlindedBeaconBlock | fulu_mev.BlindedBeaconBlock +func ethHeaders( + consensusFork: ConsensusFork, + hasRestAllowedOrigin: bool): HttpTable = + var headers = HttpTable.init [ + ("eth-consensus-version", consensusFork.toString())] + if hasRestAllowedOrigin: + headers.add("access-control-expose-headers", "eth-consensus-version") + headers + +func ethHeaders( + consensusFork: ConsensusFork, + isBlinded: bool, + executionValue: UInt256, + consensusValue: UInt256, + hasRestAllowedOrigin: bool): HttpTable = + var headers = HttpTable.init [ + ("eth-consensus-version", consensusFork.toString()), + ("eth-execution-payload-blinded", if isBlinded: "true" else: "false"), + ("eth-execution-payload-value", toString(executionValue, 10)), + ("eth-consensus-block-value", toString(consensusValue, 10))] + if hasRestAllowedOrigin: + headers.add("access-control-expose-headers", static( + "eth-consensus-version, eth-execution-payload-blinded, " & + "eth-execution-payload-value, eth-consensus-block-value")) + headers + func readStrictHexChar(c: char, radix: static[uint8]): Result[int8, cstring] = ## Converts an hex char to an int const @@ -530,385 +256,281 @@ func strictParse*[bits: static[int]](input: string, inc(currentIndex) ok(res) -proc prepareJsonResponse*(t: typedesc[RestApiResponse], d: auto): seq[byte] = - let res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("data", d) - writer.endRecord() - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) - res +template withRestJsonWriter(w, typ, body: untyped): untyped = + try: + var stream = memoryOutput() + var w = JsonWriter[RestJson].init(stream) + body + stream.getOutput(typ) + except IOError: + raiseAssert "No IOError from memoryOutput" + +proc 
prepareJsonResponse*(_: typedesc[RestApiResponse], d: auto): seq[byte] = + withRestJsonWriter(w, seq[byte]): + w.writeObject: + w.writeField("data", d) proc prepareJsonStringResponse*[T: SomeForkedLightClientObject]( - t: typedesc[RestApiResponse], d: RestVersioned[T]): string = - let res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - withForkyObject(d.data): - when lcDataFork > LightClientDataFork.None: - writer.beginRecord() - writer.writeField("version", d.jsonVersion.toString()) - writer.writeField("data", forkyObject) - writer.endRecord() - stream.getOutput(string) - except IOError: + _: typedesc[RestApiResponse], d: RestVersioned[T]): string = + withForkyObject(d.data): + when lcDataFork > LightClientDataFork.None: + withRestJsonWriter(w, string): + w.writeObject: + w.writeField("version", d.jsonVersion.toString()) + w.writeField("data", forkyObject) + else: default(string) - res -proc prepareJsonStringResponse*(t: typedesc[RestApiResponse], d: auto): string = - let res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.writeValue(d) - stream.getOutput(string) - except IOError: - default(string) - res +proc prepareJsonStringResponse*(_: typedesc[RestApiResponse], d: auto): string = + RestJson.encode(d) -proc jsonResponseWRoot*(t: typedesc[RestApiResponse], data: auto, +proc jsonResponseWRoot*(_: typedesc[RestApiResponse], data: auto, dependent_root: Eth2Digest, execOpt: Opt[bool]): RestApiResponse = - let res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("dependent_root", dependent_root) - if execOpt.isSome(): - writer.writeField("execution_optimistic", execOpt.get()) - writer.writeField("data", data) - writer.endRecord() - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) + let res = withRestJsonWriter(w, seq[byte]): + w.writeObject: + w.writeField("dependent_root", dependent_root) + w.writeField("execution_optimistic", execOpt) + w.writeField("data", data) + RestApiResponse.response(res, Http200, "application/json") -proc jsonResponse*(t: typedesc[RestApiResponse], data: auto): RestApiResponse = - let res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("data", data) - writer.endRecord() - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) +proc jsonResponse*(_: typedesc[RestApiResponse], data: auto): RestApiResponse = + let res = withRestJsonWriter(w, seq[byte]): + w.writeObject: + w.writeField("data", data) + RestApiResponse.response(res, Http200, "application/json") -proc jsonResponseBlock*(t: typedesc[RestApiResponse], - data: ForkySignedBlindedBeaconBlock, - consensusFork: ConsensusFork, - execOpt: Opt[bool], - finalized: bool): RestApiResponse = +proc jsonResponseBlock*( + _: typedesc[RestApiResponse], + data: ForkySignedBlindedBeaconBlock, + execOpt: Opt[bool], + finalized: bool, + consensusFork: ConsensusFork, + hasRestAllowedOrigin: bool): RestApiResponse = let - headers = [("eth-consensus-version", consensusFork.toString())] - res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("version", consensusFork.toString()) - if execOpt.isSome(): - writer.writeField("execution_optimistic", execOpt.get()) - writer.writeField("finalized", finalized) - writer.writeField("data", data) - writer.endRecord() - 
stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) + headers = consensusFork.ethHeaders(hasRestAllowedOrigin) + res = withRestJsonWriter(w, seq[byte]): + w.writeObject: + w.writeField("version", consensusFork) + w.writeField("execution_optimistic", execOpt) + w.writeField("finalized", finalized) + w.writeField("data", data) + RestApiResponse.response(res, Http200, "application/json", headers = headers) -proc jsonResponseBlock*(t: typedesc[RestApiResponse], - data: ForkedSignedBeaconBlock, - execOpt: Opt[bool], - finalized: bool): RestApiResponse = +proc jsonResponseBlock*( + _: typedesc[RestApiResponse], + data: ForkedSignedBeaconBlock, + execOpt: Opt[bool], + finalized: bool, + hasRestAllowedOrigin: bool): RestApiResponse = let - headers = [("eth-consensus-version", data.kind.toString())] - res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("version", data.kind.toString()) - if execOpt.isSome(): - writer.writeField("execution_optimistic", execOpt.get()) - writer.writeField("finalized", finalized) + headers = data.kind.ethHeaders(hasRestAllowedOrigin) + res = withRestJsonWriter(w, seq[byte]): + w.writeObject: + w.writeField("version", data.kind) + w.writeField("execution_optimistic", execOpt) + w.writeField("finalized", finalized) withBlck(data): - writer.writeField("data", forkyBlck) - writer.endRecord() - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) + w.writeField("data", forkyBlck) + RestApiResponse.response(res, Http200, "application/json", headers = headers) -proc jsonResponseState*(t: typedesc[RestApiResponse], - data: ForkedHashedBeaconState, - execOpt: Opt[bool]): RestApiResponse = +proc jsonResponseState*( + _: typedesc[RestApiResponse], + data: ForkedHashedBeaconState, + execOpt: Opt[bool], + finalized: bool, + hasRestAllowedOrigin: bool): RestApiResponse = let - headers = [("eth-consensus-version", data.kind.toString())] - res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("version", data.kind.toString()) - if execOpt.isSome(): - writer.writeField("execution_optimistic", execOpt.get()) + headers = data.kind.ethHeaders(hasRestAllowedOrigin) + res = withRestJsonWriter(w, seq[byte]): + w.writeObject: + w.writeField("version", data.kind) + w.writeField("execution_optimistic", execOpt) + w.writeField("finalized", finalized) withState(data): - writer.writeField("data", forkyState.data) - writer.endRecord() - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) + w.writeField("data", forkyState.data) + RestApiResponse.response(res, Http200, "application/json", headers = headers) -proc jsonResponseWOpt*(t: typedesc[RestApiResponse], data: auto, +proc jsonResponseWOpt*(_: typedesc[RestApiResponse], data: auto, execOpt: Opt[bool]): RestApiResponse = - let res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - if execOpt.isSome(): - writer.writeField("execution_optimistic", execOpt.get()) - writer.writeField("data", data) - writer.endRecord() - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) + let res = withRestJsonWriter(w, seq[byte]): + w.writeObject: + w.writeField("execution_optimistic", execOpt) + w.writeField("data", data) + RestApiResponse.response(res, Http200, "application/json") proc prepareJsonResponseFinalized*( - t: typedesc[RestApiResponse], data: auto, exec: Opt[bool], + _: 
typedesc[RestApiResponse], data: auto, exec: Opt[bool], finalized: bool ): seq[byte] = - try: - var - stream = memoryOutput() - writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - if exec.isSome(): - writer.writeField("execution_optimistic", exec.get()) - writer.writeField("finalized", finalized) - writer.writeField("data", data) - writer.endRecord() - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) + withRestJsonWriter(w, seq[byte]): + w.writeObject: + w.writeField("execution_optimistic", exec) + w.writeField("finalized", finalized) + w.writeField("data", data) -proc jsonResponseFinalized*(t: typedesc[RestApiResponse], data: auto, +proc jsonResponseFinalized*(_: typedesc[RestApiResponse], data: auto, exec: Opt[bool], finalized: bool): RestApiResponse = let res = RestApiResponse.prepareJsonResponseFinalized(data, exec, finalized) RestApiResponse.response(res, Http200, "application/json") -proc jsonResponseFinalizedWVersion*(t: typedesc[RestApiResponse], - data: auto, - exec: Opt[bool], - finalized: bool, - version: ConsensusFork): RestApiResponse = +proc jsonResponseFinalizedWVersion*( + _: typedesc[RestApiResponse], + data: auto, + exec: Opt[bool], + finalized: bool, + version: ConsensusFork, + hasRestAllowedOrigin: bool): RestApiResponse = let - headers = [("eth-consensus-version", version.toString())] - res = - block: - var default: seq[byte] - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("version", version.toString()) - if exec.isSome(): - writer.writeField("execution_optimistic", exec.get()) - writer.writeField("finalized", finalized) - writer.writeField("data", data) - writer.endRecord() - stream.getOutput(seq[byte]) - except IOError: - default + headers = version.ethHeaders(hasRestAllowedOrigin) + res = withRestJsonWriter(w, seq[byte]): + w.writeObject: + w.writeField("version", version) + w.writeField("execution_optimistic", exec) + w.writeField("finalized", finalized) + w.writeField("data", data) + RestApiResponse.response(res, Http200, "application/json", headers = headers) -proc jsonResponseWVersion*(t: typedesc[RestApiResponse], data: auto, - version: ConsensusFork): RestApiResponse = +proc jsonResponseWVersion*( + _: typedesc[RestApiResponse], + data: auto, + version: ConsensusFork, + hasRestAllowedOrigin: bool): RestApiResponse = let - headers = [("eth-consensus-version", version.toString())] - res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("version", version.toString()) - writer.writeField("data", data) - writer.endRecord() - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) + headers = version.ethHeaders(hasRestAllowedOrigin) + res = withRestJsonWriter(w, seq[byte]): + w.writeObject: + w.writeField("version", version) + w.writeField("data", data) + RestApiResponse.response(res, Http200, "application/json", headers = headers) proc jsonResponseVersioned*[T: SomeForkedLightClientObject]( - t: typedesc[RestApiResponse], + _: typedesc[RestApiResponse], entries: openArray[RestVersioned[T]]): RestApiResponse = - let res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - for e in writer.stepwiseArrayCreation(entries): + let res = withRestJsonWriter(w, seq[byte]): + for e in w.stepwiseArrayCreation(entries): withForkyObject(e.data): when lcDataFork > LightClientDataFork.None: - writer.beginRecord() - 
writer.writeField("version", e.jsonVersion.toString()) - writer.writeField("data", forkyObject) - writer.endRecord() - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) + w.writeObject: + w.writeField("version", e.jsonVersion.toString()) + w.writeField("data", forkyObject) + RestApiResponse.response(res, Http200, "application/json") -proc jsonResponsePlain*(t: typedesc[RestApiResponse], +proc jsonPlainEncoded(data: auto): seq[byte] = + withRestJsonWriter(w, seq[byte]): + w.writeValue(data) + +proc jsonResponsePlain*(_: typedesc[RestApiResponse], data: auto): RestApiResponse = - let res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.writeValue(data) - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) + let res = data.jsonPlainEncoded() RestApiResponse.response(res, Http200, "application/json") -proc jsonResponsePlain*(t: typedesc[RestApiResponse], - data: auto, headers: HttpTable): RestApiResponse = - let res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.writeValue(data) - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) +proc jsonResponsePlain*( + _: typedesc[RestApiResponse], + data: auto, + consensusFork: ConsensusFork, + hasRestAllowedOrigin: bool): RestApiResponse = + let + res = data.jsonPlainEncoded() + headers = consensusFork.ethHeaders(hasRestAllowedOrigin) + RestApiResponse.response(res, Http200, "application/json", headers = headers) + +proc jsonResponsePlain*( + _: typedesc[RestApiResponse], + data: auto, + consensusFork: ConsensusFork, + isBlinded: bool, + executionValue: UInt256, + consensusValue: UInt256, + hasRestAllowedOrigin: bool): RestApiResponse = + let + res = data.jsonPlainEncoded() + headers = consensusFork.ethHeaders( + isBlinded, executionValue, consensusValue, hasRestAllowedOrigin) RestApiResponse.response(res, Http200, "application/json", headers = headers) -proc jsonResponseWMeta*(t: typedesc[RestApiResponse], +proc jsonResponseWMeta*(_: typedesc[RestApiResponse], data: auto, meta: auto): RestApiResponse = - let res = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("data", data) - writer.writeField("meta", meta) - writer.endRecord() - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) + let res = withRestJsonWriter(w, seq[byte]): + w.writeObject: + w.writeField("data", data) + w.writeField("meta", meta) + RestApiResponse.response(res, Http200, "application/json") -proc jsonMsgResponse*(t: typedesc[RestApiResponse], +proc jsonMsgResponse*(_: typedesc[RestApiResponse], msg: string = ""): RestApiResponse = - let data = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("code", 200) - writer.writeField("message", msg) - writer.endRecord() - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) + let data = withRestJsonWriter(w, seq[byte]): + w.writeObject: + w.writeField("code", 200) + w.writeField("message", msg) + RestApiResponse.response(data, Http200, "application/json") -proc jsonError*(t: typedesc[RestApiResponse], status: HttpCode = Http200, +proc jsonError*(_: typedesc[RestApiResponse], status: HttpCode = Http200, msg: string = ""): RestApiResponse = - let data = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("code", int(status.toInt())) - 
writer.writeField("message", msg) - writer.endRecord() - stream.getOutput(string) - except IOError: - default(string) + let data = withRestJsonWriter(w, string): + w.writeObject: + w.writeField("code", int(status.toInt())) + w.writeField("message", msg) + RestApiResponse.error(status, data, "application/json") -proc jsonError*(t: typedesc[RestApiResponse], status: HttpCode = Http200, +proc jsonError*(_: typedesc[RestApiResponse], status: HttpCode = Http200, msg: string = "", stacktrace: string): RestApiResponse = - let data = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("code", int(status.toInt())) - writer.writeField("message", msg) + let data = withRestJsonWriter(w, string): + w.writeObject: + w.writeField("code", int(status.toInt())) + w.writeField("message", msg) if len(stacktrace) > 0: - writer.writeField("stacktraces", [stacktrace]) - writer.endRecord() - stream.getOutput(string) - except IOError: - default(string) + w.writeField("stacktraces", [stacktrace]) + RestApiResponse.error(status, data, "application/json") -proc jsonError*(t: typedesc[RestApiResponse], status: HttpCode = Http200, +proc jsonError*(_: typedesc[RestApiResponse], status: HttpCode = Http200, msg: string = "", stacktraces: openArray[string]): RestApiResponse = - let data = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("code", int(status.toInt())) - writer.writeField("message", msg) - writer.writeField("stacktraces", stacktraces) - writer.endRecord() - stream.getOutput(string) - except IOError: - default(string) + let data = withRestJsonWriter(w, string): + w.writeObject: + w.writeField("code", int(status.toInt())) + w.writeField("message", msg) + w.writeField("stacktraces", stacktraces) + RestApiResponse.error(status, data, "application/json") -proc jsonError*(t: typedesc[RestApiResponse], +proc jsonError*(_: typedesc[RestApiResponse], rmsg: RestErrorMessage): RestApiResponse = - let data = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("code", rmsg.code) - writer.writeField("message", rmsg.message) - if rmsg.stacktraces.isSome(): - writer.writeField("stacktraces", rmsg.stacktraces) - writer.endRecord() - stream.getOutput(string) - except IOError: - default(string) + let data = withRestJsonWriter(w, string): + w.writeObject: + w.writeField("code", rmsg.code) + w.writeField("message", rmsg.message) + w.writeField("stacktraces", rmsg.stacktraces) + RestApiResponse.error(rmsg.code.toHttpCode().get(), data, "application/json") -proc jsonErrorList*(t: typedesc[RestApiResponse], +proc jsonErrorList*(_: typedesc[RestApiResponse], status: HttpCode = Http200, msg: string = "", failures: auto): RestApiResponse = - let data = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.beginRecord() - writer.writeField("code", int(status.toInt())) - writer.writeField("message", msg) - writer.writeField("failures", failures) - writer.endRecord() - stream.getOutput(string) - except IOError: - default(string) + let data = withRestJsonWriter(w, string): + w.writeObject: + w.writeField("code", int(status.toInt())) + w.writeField("message", msg) + w.writeField("failures", failures) + RestApiResponse.error(status, data, "application/json") proc sszResponseVersioned*[T: SomeForkedLightClientObject]( - t: typedesc[RestApiResponse], + _: 
typedesc[RestApiResponse], entries: openArray[RestVersioned[T]]): RestApiResponse = let res = try: @@ -919,2000 +541,48 @@ proc sszResponseVersioned*[T: SomeForkedLightClientObject]( var cursor = stream.delayFixedSizeWrite(sizeof(uint64)) let initPos = stream.pos stream.write e.sszContext.data - var writer = SszWriter.init(stream) - writer.writeValue forkyUpdate + var w = SszWriter.init(stream) + w.writeValue forkyUpdate cursor.finalWrite (stream.pos - initPos).uint64.toBytesLE() stream.getOutput(seq[byte]) except IOError: default(seq[byte]) RestApiResponse.response(res, Http200, "application/octet-stream") -proc sszResponsePlain*(t: typedesc[RestApiResponse], res: seq[byte], - headers: openArray[RestKeyValueTuple] = [] - ): RestApiResponse = - RestApiResponse.response(res, Http200, "application/octet-stream", - headers = headers) - -proc sszResponse*(t: typedesc[RestApiResponse], data: auto, - headers: openArray[RestKeyValueTuple] = [] - ): RestApiResponse = - let res = - try: - var stream = memoryOutput() - var writer = SszWriter.init(stream) - writer.writeValue(data) - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) - RestApiResponse.response(res, Http200, "application/octet-stream", - headers = headers) - -proc sszResponse*(t: typedesc[RestApiResponse], data: auto, - headers: HttpTable): RestApiResponse = - let res = - try: - var stream = memoryOutput() - var writer = SszWriter.init(stream) - writer.writeValue(data) - stream.getOutput(seq[byte]) - except IOError: - default(seq[byte]) - RestApiResponse.response(res, Http200, "application/octet-stream", - headers = headers) - -template hexOriginal(data: openArray[byte]): string = - to0xHex(data) - -proc decodeJsonString*[T](t: typedesc[T], - data: JsonString): Result[T, cstring] = - try: - ok(RestJson.decode(string(data), T, - requireAllFields = true, - allowUnknownFields = true)) - except SerializationError: - err("Unable to deserialize data") - -## uint64 -proc writeValue*( - w: var JsonWriter[RestJson], value: uint64) {.raises: [IOError].} = - writeValue(w, Base10.toString(value)) - -proc readValue*(reader: var JsonReader[RestJson], value: var uint64) {. - raises: [IOError, SerializationError].} = - let svalue = reader.readValue(string) - let res = Base10.decode(uint64, svalue) - if res.isOk(): - value = res.get() - else: - reader.raiseUnexpectedValue($res.error() & ": " & svalue) - -## RestReward -proc writeValue*( - w: var JsonWriter[RestJson], value: RestReward) {.raises: [IOError].} = - writeValue(w, $int64(value)) - -proc readValue*(reader: var JsonReader[RestJson], value: var RestReward) {. - raises: [IOError, SerializationError].} = - let svalue = reader.readValue(string) - if svalue.startsWith("-"): - let res = - Base10.decode(uint64, svalue.toOpenArray(1, len(svalue) - 1)).valueOr: - reader.raiseUnexpectedValue($error & ": " & svalue) - if res > uint64(high(int64)): - reader.raiseUnexpectedValue("Integer value overflow " & svalue) - value = RestReward(-int64(res)) - else: - let res = - Base10.decode(uint64, svalue).valueOr: - reader.raiseUnexpectedValue($error & ": " & svalue) - if res > uint64(high(int64)): - reader.raiseUnexpectedValue("Integer value overflow " & svalue) - value = RestReward(int64(res)) - -## uint8 -proc writeValue*( - w: var JsonWriter[RestJson], value: uint8) {.raises: [IOError].} = - writeValue(w, Base10.toString(value)) - -proc readValue*(reader: var JsonReader[RestJson], value: var uint8) {. 
- raises: [IOError, SerializationError].} = - let svalue = reader.readValue(string) - let res = Base10.decode(uint8, svalue) - if res.isOk(): - value = res.get() - else: - reader.raiseUnexpectedValue($res.error() & ": " & svalue) - -## BlockNumber/Quantity -proc writeValue*( - w: var JsonWriter[RestJson], value: Quantity) {.raises: [IOError].} = - w.writeValue(distinctBase(value)) - -proc readValue*( - reader: var JsonReader[RestJson], - value: var Quantity) {.raises: [IOError, SerializationError].} = - reader.readValue(distinctBase(value)) - -## RestNumeric -proc writeValue*(w: var JsonWriter[RestJson], - value: RestNumeric) {.raises: [IOError].} = - writeValue(w, int(value)) - -proc readValue*(reader: var JsonReader[RestJson], - value: var RestNumeric) {. - raises: [IOError, SerializationError].} = - value = RestNumeric(reader.readValue(int)) - -## JustificationBits -proc writeValue*( - w: var JsonWriter[RestJson], value: JustificationBits -) {.raises: [IOError].} = - w.writeValue hexOriginal([uint8(value)]) - -proc readValue*(reader: var JsonReader[RestJson], value: var JustificationBits) {. - raises: [IOError, SerializationError].} = - let hex = reader.readValue(string) - try: - value = JustificationBits(hexToByteArray(hex, 1)[0]) - except ValueError: - raiseUnexpectedValue(reader, - "The `justification_bits` value must be a hex string") - -## UInt256 -proc writeValue*( - w: var JsonWriter[RestJson], value: UInt256) {.raises: [IOError].} = - writeValue(w, toString(value)) - -proc readValue*(reader: var JsonReader[RestJson], value: var UInt256) {. - raises: [IOError, SerializationError].} = - let svalue = reader.readValue(string) - try: - value = parse(svalue, UInt256, 10) - except ValueError: - raiseUnexpectedValue(reader, - "UInt256 value should be a valid decimal string") - -## Gwei -proc writeValue*( - writer: var JsonWriter[RestJson], value: Gwei) {.raises: [IOError].} = - writer.writeValue(distinctBase(value)) - -proc readValue*( - reader: var JsonReader[RestJson], - value: var Gwei) {.raises: [IOError, SerializationError].} = - reader.readValue(distinctBase(value)) - -## Slot -proc writeValue*( - writer: var JsonWriter[RestJson], value: Slot) {.raises: [IOError].} = - writeValue(writer, Base10.toString(uint64(value))) - -proc readValue*(reader: var JsonReader[RestJson], value: var Slot) {. - raises: [IOError, SerializationError].} = - let svalue = reader.readValue(string) - let res = Base10.decode(uint64, svalue) - if res.isOk(): - value = Slot(res.get()) - else: - reader.raiseUnexpectedValue($res.error()) - -## Epoch -proc writeValue*( - writer: var JsonWriter[RestJson], value: Epoch) {.raises: [IOError].} = - writeValue(writer, Base10.toString(uint64(value))) - -proc readValue*(reader: var JsonReader[RestJson], value: var Epoch) {. 
- raises: [IOError, SerializationError].} = - let svalue = reader.readValue(string) - let res = Base10.decode(uint64, svalue) - if res.isOk(): - value = Epoch(res.get()) - else: - reader.raiseUnexpectedValue($res.error()) - -## EpochParticipationFlags -proc writeValue*( - writer: var JsonWriter[RestJson], epochFlags: EpochParticipationFlags -) {.raises: [IOError].} = - for e in writer.stepwiseArrayCreation(epochFlags.asList): - writer.writeValue $e - -proc readValue*(reader: var JsonReader[RestJson], - epochFlags: var EpochParticipationFlags) - {.raises: [SerializationError, IOError].} = - for e in reader.readArray(string): - let parsed = try: - parseBiggestUInt(e) - except ValueError: - reader.raiseUnexpectedValue( - "A string-encoded 8-bit usigned integer value expected") - - if parsed > uint8.high: - reader.raiseUnexpectedValue( - "The unsigned integer value should fit in 8 bits") - - if not epochFlags.asList.add(uint8(parsed)): - reader.raiseUnexpectedValue( - "The participation flags list size exceeds limit") - -## ValidatorIndex -proc writeValue*( - writer: var JsonWriter[RestJson], value: ValidatorIndex -) {.raises: [IOError].} = - writeValue(writer, Base10.toString(uint64(value))) - -proc readValue*(reader: var JsonReader[RestJson], value: var ValidatorIndex) - {.raises: [IOError, SerializationError].} = - let svalue = reader.readValue(string) - let res = Base10.decode(uint64, svalue) - if res.isOk(): - let v = res.get() - if v < VALIDATOR_REGISTRY_LIMIT: - value = ValidatorIndex(v) - else: - reader.raiseUnexpectedValue( - "Validator index is bigger then VALIDATOR_REGISTRY_LIMIT") - else: - reader.raiseUnexpectedValue($res.error()) - -## IndexInSyncCommittee -proc writeValue*( - writer: var JsonWriter[RestJson], value: IndexInSyncCommittee -) {.raises: [IOError].} = - writeValue(writer, Base10.toString(distinctBase(value))) - -proc readValue*(reader: var JsonReader[RestJson], value: var IndexInSyncCommittee) - {.raises: [IOError, SerializationError].} = - let svalue = reader.readValue(string) - let res = Base10.decode(uint64, svalue) - if res.isOk(): - let v = res.get() - if v < SYNC_COMMITTEE_SIZE: - value = IndexInSyncCommittee(v) - else: - reader.raiseUnexpectedValue( - "Index in committee is bigger than SYNC_COMMITTEE_SIZE") - else: - reader.raiseUnexpectedValue($res.error()) - -## RestValidatorIndex -proc writeValue*( - writer: var JsonWriter[RestJson], value: RestValidatorIndex -) {.raises: [IOError].} = - writeValue(writer, Base10.toString(uint64(value))) - -proc readValue*(reader: var JsonReader[RestJson], - value: var RestValidatorIndex) {. - raises: [IOError, SerializationError].} = - let svalue = reader.readValue(string) - let res = Base10.decode(uint64, svalue) - if res.isOk(): - let v = res.get() - value = RestValidatorIndex(v) - else: - reader.raiseUnexpectedValue($res.error()) - -## CommitteeIndex -proc writeValue*( - writer: var JsonWriter[RestJson], value: CommitteeIndex -) {.raises: [IOError].} = - writeValue(writer, value.asUInt64) - -proc readValue*(reader: var JsonReader[RestJson], value: var CommitteeIndex) {. - raises: [IOError, SerializationError].} = - var v: uint64 - reader.readValue(v) - - let res = CommitteeIndex.init(v) - if res.isOk(): - value = res.get() - else: - reader.raiseUnexpectedValue($res.error()) - -## ValidatorSig -proc writeValue*( - writer: var JsonWriter[RestJson], value: ValidatorSig -) {.raises: [IOError].} = - writeValue(writer, hexOriginal(toRaw(value))) - -proc readValue*(reader: var JsonReader[RestJson], value: var ValidatorSig) {. 
- raises: [IOError, SerializationError].} = - let hexValue = reader.readValue(string) - let res = ValidatorSig.fromHex(hexValue) - if res.isOk(): - value = res.get() - else: - reader.raiseUnexpectedValue($res.error()) - -## TrustedSig -proc writeValue*( - writer: var JsonWriter[RestJson], value: TrustedSig -) {.raises: [IOError].} = - writeValue(writer, hexOriginal(toRaw(value))) - -proc readValue*(reader: var JsonReader[RestJson], value: var TrustedSig) {. - raises: [IOError, SerializationError].} = - let hexValue = reader.readValue(string) - let res = ValidatorSig.fromHex(hexValue) - if res.isOk(): - value = cast[TrustedSig](res.get()) - else: - reader.raiseUnexpectedValue($res.error()) - -## ValidatorPubKey -proc writeValue*( - writer: var JsonWriter[RestJson], value: ValidatorPubKey -) {.raises: [IOError].} = - writeValue(writer, hexOriginal(toRaw(value))) - -proc readValue*(reader: var JsonReader[RestJson], value: var ValidatorPubKey) {. - raises: [IOError, SerializationError].} = - let hexValue = reader.readValue(string) - let res = ValidatorPubKey.fromHex(hexValue) - if res.isOk(): - value = res.get() - else: - reader.raiseUnexpectedValue($res.error()) - -proc readValue*(reader: var JsonReader[RestJson], value: var HashedValidatorPubKey) {. - raises: [IOError, SerializationError].} = - var key: ValidatorPubKey - readValue(reader, key) - - value = HashedValidatorPubKey.init(key) - -proc writeValue*( - writer: var JsonWriter[RestJson], value: HashedValidatorPubKey) {.raises: [IOError].} = - writeValue(writer, value.pubkey) - -## BitSeq -proc readValue*(reader: var JsonReader[RestJson], value: var BitSeq) {. - raises: [IOError, SerializationError].} = - try: - value = BitSeq hexToSeqByte(reader.readValue(string)) - except ValueError: - raiseUnexpectedValue(reader, "A BitSeq value should be a valid hex string") - -proc writeValue*( - writer: var JsonWriter[RestJson], value: BitSeq) {.raises: [IOError].} = - writeValue(writer, hexOriginal(value.bytes())) - -## BitList -proc readValue*(reader: var JsonReader[RestJson], value: var BitList) {. - raises: [IOError, SerializationError].} = - type T = type(value) - value = T readValue(reader, BitSeq) - -proc writeValue*( - writer: var JsonWriter[RestJson], value: BitList) {.raises: [IOError].} = - writeValue(writer, BitSeq value) - -## BitArray -proc readValue*(reader: var JsonReader[RestJson], value: var BitArray) {. - raises: [IOError, SerializationError].} = - try: - hexToByteArray(readValue(reader, string), value.bytes) - except ValueError: - raiseUnexpectedValue(reader, - "A BitArray value should be a valid hex string") - -proc writeValue*( - writer: var JsonWriter[RestJson], value: BitArray) {.raises: [IOError].} = - writeValue(writer, hexOriginal(value.bytes)) - -## BlockHash/Hash32 -proc readValue*(reader: var JsonReader[RestJson], value: var Hash32) {. - raises: [IOError, SerializationError].} = - try: - hexToByteArray(reader.readValue(string), distinctBase(value)) - except ValueError: - raiseUnexpectedValue(reader, - "Hash32 value should be a valid hex string") - -proc writeValue*( - writer: var JsonWriter[RestJson], value: Hash32) {.raises: [IOError].} = - writeValue(writer, hexOriginal(distinctBase(value))) - -## Eth2Digest -proc readValue*(reader: var JsonReader[RestJson], value: var Eth2Digest) {. 
- raises: [IOError, SerializationError].} = - try: - hexToByteArray(reader.readValue(string), value.data) - except ValueError: - raiseUnexpectedValue(reader, - "Eth2Digest value should be a valid hex string") - -proc writeValue*( - writer: var JsonWriter[RestJson], value: Eth2Digest) {.raises: [IOError].} = - writeValue(writer, hexOriginal(value.data)) - -## BloomLogs -proc readValue*(reader: var JsonReader[RestJson], value: var BloomLogs) {. - raises: [IOError, SerializationError].} = - try: - hexToByteArray(reader.readValue(string), value.data) - except ValueError: - raiseUnexpectedValue(reader, - "BloomLogs value should be a valid hex string") - -proc writeValue*( - writer: var JsonWriter[RestJson], value: BloomLogs) {.raises: [IOError].} = - writeValue(writer, hexOriginal(value.data)) - -## HashArray -proc readValue*(reader: var JsonReader[RestJson], value: var HashArray) {. - raises: [IOError, SerializationError].} = - readValue(reader, value.data) - -proc writeValue*( - writer: var JsonWriter[RestJson], value: HashArray) {.raises: [IOError].} = - writeValue(writer, value.data) - -## HashList -proc readValue*(reader: var JsonReader[RestJson], value: var HashList) {. - raises: [IOError, SerializationError].} = - readValue(reader, value.data) - value.resetCache() - -proc writeValue*( - writer: var JsonWriter[RestJson], value: HashList) {.raises: [IOError].} = - writeValue(writer, value.data) - -## Eth1Address -proc readValue*(reader: var JsonReader[RestJson], value: var Eth1Address) {. - raises: [IOError, SerializationError].} = - try: - hexToByteArray(reader.readValue(string), distinctBase(value)) - except ValueError: - raiseUnexpectedValue(reader, - "Eth1Address value should be a valid hex string") - -proc writeValue*( - writer: var JsonWriter[RestJson], value: Eth1Address -) {.raises: [IOError].} = - writeValue(writer, hexOriginal(distinctBase(value))) - -## Blob -## https://github.com/ethereum/beacon-APIs/blob/v2.4.2/types/primitive.yaml#L129-L133 -proc readValue*(reader: var JsonReader[RestJson], value: var Blob) {. - raises: [IOError, SerializationError].} = - try: - hexToByteArray(reader.readValue(string), distinctBase(value)) - except ValueError: - raiseUnexpectedValue(reader, - "Blob value should be a valid hex string") - -proc writeValue*( - writer: var JsonWriter[RestJson], value: Blob -) {.raises: [IOError].} = - writeValue(writer, hexOriginal(distinctBase(value))) - -## KzgCommitment and KzgProof; both are the same type, but this makes it -## explicit. -## https://github.com/ethereum/beacon-APIs/blob/v2.4.2/types/primitive.yaml#L135-L146 -proc readValue*(reader: var JsonReader[RestJson], - value: var (KzgCommitment|KzgProof)) {. 
- raises: [IOError, SerializationError].} = - try: - hexToByteArray(reader.readValue(string), distinctBase(value.bytes)) - except ValueError: - raiseUnexpectedValue(reader, - "KzgCommitment value should be a valid hex string") - -proc writeValue*( - writer: var JsonWriter[RestJson], value: KzgCommitment | KzgProof -) {.raises: [IOError].} = - writeValue(writer, hexOriginal(distinctBase(value.bytes))) - -## GraffitiBytes -proc writeValue*( - writer: var JsonWriter[RestJson], value: GraffitiBytes -) {.raises: [IOError].} = - writeValue(writer, hexOriginal(distinctBase(value))) - -proc readValue*(reader: var JsonReader[RestJson], T: type GraffitiBytes): T - {.raises: [IOError, SerializationError].} = - try: - init(GraffitiBytes, reader.readValue(string)) - except ValueError as err: - reader.raiseUnexpectedValue err.msg - -## Version | ForkDigest | DomainType | GraffitiBytes | RestWithdrawalPrefix -proc readValue*( - reader: var JsonReader[RestJson], - value: var (Version | ForkDigest | DomainType | GraffitiBytes | - RestWithdrawalPrefix)) {. - raises: [IOError, SerializationError].} = - try: - hexToByteArray(reader.readValue(string), distinctBase(value)) - except ValueError: - raiseUnexpectedValue( - reader, "Expected a valid hex string with " & $value.len() & " bytes") - -template unrecognizedFieldWarning(fieldNameParam, typeNameParam: string) = - # TODO: There should be a different notification mechanism for informing the - # caller of a deserialization routine for unexpected fields. - # The chonicles import in this module should be removed. - trace "JSON field not recognized by the current version of Nimbus. Consider upgrading", - fieldName = fieldNameParam, typeName = typeNameParam - -template unrecognizedFieldIgnore = - discard readValue(reader, JsonString) - -## ForkedBeaconBlock -template prepareForkedBlockReading(blockType: typedesc, - reader: var JsonReader[RestJson], - version: var Opt[ConsensusFork], - data: var Opt[JsonString], - blinded: var Opt[bool], - payloadValue: var Opt[UInt256], - blockValue: var Opt[UInt256]) = - for fieldName {.inject.} in readObjectFields(reader): - case fieldName - of "version": - if version.isSome(): - reader.raiseUnexpectedField("Multiple version fields found", - blockType.name) - let vres = reader.readValue(string).toLowerAscii() - version = ConsensusFork.init(vres) - if version.isNone(): - reader.raiseUnexpectedValue("Incorrect version field value") - of "data": - when (blockType is ForkedBlindedBeaconBlock) or - (blockType is ProduceBlockResponseV3): - if data.isSome(): - reader.raiseUnexpectedField( - "Multiple '" & fieldName & "' fields found", blockType.name) - data = Opt.some(reader.readValue(JsonString)) - else: - unrecognizedFieldWarning(fieldName, blockType.name) - of "block_header", "block": - when (blockType is Web3SignerForkedBeaconBlock): - if data.isSome(): - reader.raiseUnexpectedField( - "Multiple '" & fieldName & "' fields found", blockType.name) - data = Opt.some(reader.readValue(JsonString)) - else: - unrecognizedFieldWarning(fieldName, blockType.name) - of "execution_payload_blinded": - when (blockType is ProduceBlockResponseV3): - if blinded.isSome(): - reader.raiseUnexpectedField( - "Multiple `execution_payload_blinded` fields found", blockType.name) - blinded = Opt.some(reader.readValue(bool)) - else: - unrecognizedFieldWarning(fieldName, blockType.name) - of "execution_payload_value": - when (blockType is ProduceBlockResponseV3): - if payloadValue.isSome(): - reader.raiseUnexpectedField( - "Multiple `execution_payload_value` 
fields found", blockType.name) - payloadValue = Opt.some(reader.readValue(UInt256)) - else: - unrecognizedFieldWarning(fieldName, blockType.name) - of "consensus_block_value": - when (blockType is ProduceBlockResponseV3): - if blockValue.isSome(): - reader.raiseUnexpectedField( - "Multiple `consensus_block_value` fields found", blockType.name) - blockValue = Opt.some(reader.readValue(UInt256)) - else: - unrecognizedFieldWarning(fieldName, blockType.name) - else: - unrecognizedFieldWarning(fieldName, blockType.name) - - if version.isNone(): - reader.raiseUnexpectedValue("Field `version` is missing") - if data.isNone(): - reader.raiseUnexpectedValue("Field `data` is missing") - -proc readValue*[BlockType: ForkedBlindedBeaconBlock]( - reader: var JsonReader[RestJson], - value: var BlockType - ) {.raises: [IOError, SerializationError].} = - var - version: Opt[ConsensusFork] - data: Opt[JsonString] - blinded: Opt[bool] - payloadValue: Opt[UInt256] - blockValue: Opt[UInt256] - - prepareForkedBlockReading(BlockType, reader, version, data, blinded, - payloadValue, blockValue) - - case version.get(): - of ConsensusFork.Phase0: - let res = - try: - RestJson.decode(string(data.get()), - phase0.BeaconBlock, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError as exc: - reader.raiseUnexpectedValue("Incorrect phase0 block format, [" & - exc.formatMsg("BlindedBlock") & "]") - - value = ForkedBlindedBeaconBlock(kind: ConsensusFork.Phase0, - phase0Data: res) - of ConsensusFork.Altair: - let res = - try: - RestJson.decode(string(data.get()), - altair.BeaconBlock, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError as exc: - reader.raiseUnexpectedValue("Incorrect altair block format, [" & - exc.formatMsg("BlindedBlock") & "]") - value = ForkedBlindedBeaconBlock(kind: ConsensusFork.Altair, - altairData: res) - of ConsensusFork.Bellatrix .. 
ConsensusFork.Capella: - reader.raiseUnexpectedValue("pre-Deneb blinded block formats unsupported") - of ConsensusFork.Deneb: - let res = - try: - RestJson.decode(string(data.get()), - deneb_mev.BlindedBeaconBlock, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError as exc: - reader.raiseUnexpectedValue("Incorrect deneb block format, [" & - exc.formatMsg("BlindedBlock") & "]") - value = ForkedBlindedBeaconBlock(kind: ConsensusFork.Deneb, - denebData: res) - of ConsensusFork.Electra: - let res = - try: - RestJson.decode(string(data.get()), - electra_mev.BlindedBeaconBlock, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError as exc: - reader.raiseUnexpectedValue("Incorrect electra block format, [" & - exc.formatMsg("BlindedBlock") & "]") - value = ForkedBlindedBeaconBlock(kind: ConsensusFork.Electra, - electraData: res) - of ConsensusFork.Fulu: - let res = - try: - RestJson.decode(string(data.get()), - fulu_mev.BlindedBeaconBlock, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError as exc: - reader.raiseUnexpectedValue("Incorrect electra block format, [" & - exc.formatMsg("BlindedBlock") & "]") - value = ForkedBlindedBeaconBlock(kind: ConsensusFork.Fulu, - fuluData: res) - -proc readValue*[BlockType: Web3SignerForkedBeaconBlock]( - reader: var JsonReader[RestJson], - value: var BlockType) {.raises: [IOError, SerializationError].} = - var - version: Opt[ConsensusFork] - data: Opt[JsonString] - blinded: Opt[bool] - payloadValue: Opt[UInt256] - blockValue: Opt[UInt256] - - prepareForkedBlockReading(BlockType, reader, version, data, blinded, - payloadValue, blockValue) - - if version.get() <= ConsensusFork.Altair: - reader.raiseUnexpectedValue( - "Web3Signer implementation supports Capella and newer") - - let res = - try: - RestJson.decode(string(data.get()), - BeaconBlockHeader, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect block header format") - - value = Web3SignerForkedBeaconBlock(kind: version.get(), data: res) - -proc writeValue*[BlockType: Web3SignerForkedBeaconBlock]( - writer: var JsonWriter[RestJson], value: BlockType) {.raises: [IOError].} = - # https://consensys.github.io/web3signer/web3signer-eth2.html#tag/Signing/operation/ETH2_SIGN - # https://github.com/ConsenSys/web3signer/blob/d51337e96ba5ce410222943556bed7c4856b8e57/core/src/main/java/tech/pegasys/web3signer/core/service/http/handlers/signing/eth2/json/BlockRequestDeserializer.java#L42-L58 - writer.beginRecord() - writer.writeField("version", value.kind.toString.toUpperAscii) - writer.writeField("block_header", value.data) - writer.endRecord() - -## ForkedSignedBeaconBlock -proc readValue*(reader: var JsonReader[RestJson], - value: var ForkedSignedBeaconBlock) {. 
- raises: [IOError, SerializationError].} = - var - version: Opt[ConsensusFork] - data: Opt[JsonString] - - for fieldName in readObjectFields(reader): - case fieldName - of "version": - if version.isSome(): - reader.raiseUnexpectedField("Multiple version fields found", - "ForkedSignedBeaconBlock") - version = ConsensusFork.init(reader.readValue(string)) - if version.isNone: - reader.raiseUnexpectedValue("Incorrect version field value") - of "data": - if data.isSome(): - reader.raiseUnexpectedField("Multiple data fields found", - "ForkedSignedBeaconBlock") - data = Opt.some(reader.readValue(JsonString)) - else: - unrecognizedFieldWarning(fieldName, typeof(value).name) - - if version.isNone(): - reader.raiseUnexpectedValue("Field version is missing") - if data.isNone(): - reader.raiseUnexpectedValue("Field data is missing") - - case version.get(): - of ConsensusFork.Phase0: - let res = - try: - RestJson.decode(string(data.get()), - phase0.SignedBeaconBlock, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect phase0 block format") - - value = ForkedSignedBeaconBlock.init(res) - of ConsensusFork.Altair: - let res = - try: - RestJson.decode(string(data.get()), - altair.SignedBeaconBlock, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect altair block format") - - value = ForkedSignedBeaconBlock.init(res) - of ConsensusFork.Bellatrix: - let res = - try: - RestJson.decode(string(data.get()), - bellatrix.SignedBeaconBlock, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect bellatrix block format") - - value = ForkedSignedBeaconBlock.init(res) - of ConsensusFork.Capella: - let res = - try: - RestJson.decode(string(data.get()), - capella.SignedBeaconBlock, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect capella block format") - - value = ForkedSignedBeaconBlock.init(res) - of ConsensusFork.Deneb: - let res = - try: - RestJson.decode(string(data.get()), - deneb.SignedBeaconBlock, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect deneb block format") - - value = ForkedSignedBeaconBlock.init(res) - of ConsensusFork.Electra: - let res = - try: - RestJson.decode(string(data.get()), - electra.SignedBeaconBlock, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect electra block format") - - value = ForkedSignedBeaconBlock.init(res) - of ConsensusFork.Fulu: - let res = - try: - RestJson.decode(string(data.get()), - fulu.SignedBeaconBlock, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect fulu block format") - - value = ForkedSignedBeaconBlock.init(res) - withBlck(value): - forkyBlck.root = hash_tree_root(forkyBlck.message) - -proc writeValue*( - writer: var JsonWriter[RestJson], value: ForkedSignedBeaconBlock -) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("version", value.kind.toString) - case value.kind - of ConsensusFork.Phase0: - writer.writeField("data", value.phase0Data) - of ConsensusFork.Altair: - writer.writeField("data", value.altairData) - of ConsensusFork.Bellatrix: - writer.writeField("data", value.bellatrixData) - of ConsensusFork.Capella: - 
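# The reader above and the writer that follows round-trip a signed block
# through a {"version", "data"} envelope, recomputing the cached block root
# after decoding. A minimal stand-alone sketch; the import paths are
# assumptions (they do not appear in this diff), the symbols are the ones
# used in this file:
import std/strutils
import beacon_chain/spec/forks,
       beacon_chain/spec/eth2_apis/eth2_rest_serialization

let
  blck = ForkedSignedBeaconBlock.init(default(phase0.SignedBeaconBlock))
  json = RestJson.encode(blck)
doAssert "\"version\"" in json and "phase0" in json
# decoding goes through the reader above and recomputes `root`
let decoded = RestJson.decode(json, ForkedSignedBeaconBlock)
doAssert decoded.kind == ConsensusFork.Phase0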
writer.writeField("data", value.capellaData) - of ConsensusFork.Deneb: - writer.writeField("data", value.denebData) - of ConsensusFork.Electra: - writer.writeField("data", value.electraData) - of ConsensusFork.Fulu: - writer.writeField("data", value.fuluData) - writer.endRecord() - -# ForkedHashedBeaconState is used where a `ForkedBeaconState` normally would -# be used, mainly because caching the hash early on is easier to do -proc readValue*(reader: var JsonReader[RestJson], - value: var ForkedHashedBeaconState) {. - raises: [IOError, SerializationError].} = - var - version: Opt[ConsensusFork] - data: Opt[JsonString] - - for fieldName in readObjectFields(reader): - case fieldName - of "version": - if version.isSome(): - reader.raiseUnexpectedField("Multiple version fields found", - "ForkedBeaconState") - version = ConsensusFork.init(reader.readValue(string)) - if version.isNone: - reader.raiseUnexpectedValue("Incorrect version field value") - of "data": - if data.isSome(): - reader.raiseUnexpectedField("Multiple data fields found", - "ForkedBeaconState") - data = Opt.some(reader.readValue(JsonString)) - else: - unrecognizedFieldWarning(fieldName, typeof(value).name) - - if version.isNone(): - reader.raiseUnexpectedValue("Field version is missing") - if data.isNone(): - reader.raiseUnexpectedValue("Field data is missing") - - # Use a temporary to avoid stack instances and `value` mutation in case of - # exception +proc sszResponsePlain*( + _: typedesc[RestApiResponse], + res: seq[byte], + consensusFork: ConsensusFork, + hasRestAllowedOrigin: bool): RestApiResponse = + let headers = consensusFork.ethHeaders(hasRestAllowedOrigin) + RestApiResponse.response( + res, Http200, "application/octet-stream", headers = headers) + +proc sszResponse*( + _: typedesc[RestApiResponse], + data: auto, + consensusFork: ConsensusFork, + hasRestAllowedOrigin: bool): RestApiResponse = let - tmp = (ref ForkedHashedBeaconState)(kind: version.get()) - - template toValue(field: untyped) = - if tmp[].kind == value.kind: - assign(value.field, tmp[].field) - else: - value = tmp[] # slow, but rare (hopefully) - value.field.root = hash_tree_root(value.field.data) - - case version.get(): - of ConsensusFork.Phase0: - try: - tmp[].phase0Data.data = RestJson.decode( - string(data.get()), - phase0.BeaconState, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect phase0 beacon state format") - - toValue(phase0Data) - of ConsensusFork.Altair: - try: - tmp[].altairData.data = RestJson.decode( - string(data.get()), - altair.BeaconState, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect altair beacon state format") - - toValue(altairData) - of ConsensusFork.Bellatrix: - try: - tmp[].bellatrixData.data = RestJson.decode( - string(data.get()), - bellatrix.BeaconState, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect bellatrix beacon state format") - toValue(bellatrixData) - of ConsensusFork.Capella: - try: - tmp[].capellaData.data = RestJson.decode( - string(data.get()), - capella.BeaconState, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect capella beacon state format") - toValue(capellaData) - of ConsensusFork.Deneb: - try: - tmp[].denebData.data = RestJson.decode( - string(data.get()), - deneb.BeaconState, - requireAllFields = 
true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect deneb beacon state format") - toValue(denebData) - of ConsensusFork.Electra: - try: - tmp[].electraData.data = RestJson.decode( - string(data.get()), - electra.BeaconState, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect electra beacon state format") - toValue(electraData) - of ConsensusFork.Fulu: - try: - tmp[].fuluData.data = RestJson.decode( - string(data.get()), - fulu.BeaconState, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect fulu beacon state format") - toValue(fuluData) - -proc writeValue*( - writer: var JsonWriter[RestJson], value: ForkedHashedBeaconState -) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("version", value.kind.toString) - case value.kind - of ConsensusFork.Phase0: - writer.writeField("data", value.phase0Data.data) - of ConsensusFork.Altair: - writer.writeField("data", value.altairData.data) - of ConsensusFork.Bellatrix: - writer.writeField("data", value.bellatrixData.data) - of ConsensusFork.Capella: - writer.writeField("data", value.capellaData.data) - of ConsensusFork.Deneb: - writer.writeField("data", value.denebData.data) - of ConsensusFork.Electra: - writer.writeField("data", value.electraData.data) - of ConsensusFork.Fulu: - writer.writeField("data", value.fuluData.data) - writer.endRecord() - -## SomeForkedLightClientObject -proc readValue*[T: SomeForkedLightClientObject]( - reader: var JsonReader[RestJson], value: var T) {. - raises: [IOError, SerializationError].} = - var - version: Opt[ConsensusFork] - data: Opt[JsonString] - - for fieldName in readObjectFields(reader): - case fieldName - of "version": - if version.isSome: - reader.raiseUnexpectedField("Multiple version fields found", T.name) - let consensusFork = - ConsensusFork.decodeString(reader.readValue(string)).valueOr: - reader.raiseUnexpectedValue("Incorrect version field value") - version.ok consensusFork - of "data": - if data.isSome: - reader.raiseUnexpectedField("Multiple data fields found", T.name) - data.ok reader.readValue(JsonString) - else: - unrecognizedFieldWarning(fieldName, typeof(value).name) - - if version.isNone: - reader.raiseUnexpectedValue("Field version is missing") - if data.isNone: - reader.raiseUnexpectedValue("Field data is missing") - - withLcDataFork(lcDataForkAtConsensusFork(version.get)): - when lcDataFork > LightClientDataFork.None: - try: - value = T.init(RestJson.decode( - string(data.get()), - T.Forky(lcDataFork), - requireAllFields = true, - allowUnknownFields = true)) - except SerializationError: - reader.raiseUnexpectedValue("Incorrect format (" & $lcDataFork & ")") - else: - reader.raiseUnexpectedValue("Unsupported fork " & $version.get) - -## ForkedAggregateAndProof -proc readValue*(reader: var JsonReader[RestJson], - value: var ForkedAggregateAndProof) {. 
- raises: [IOError, SerializationError].} = - var - version: Opt[ConsensusFork] - data: Opt[JsonString] - - for fieldName {.inject.} in readObjectFields(reader): - case fieldName - of "version": - if version.isSome(): - reader.raiseUnexpectedField("Multiple version fields found", - "ForkedAggregateAndProof") - let vres = reader.readValue(string).toLowerAscii() - version = ConsensusFork.init(vres) - if version.isNone(): - reader.raiseUnexpectedValue("Incorrect version field value") - of "data": - if data.isSome(): - reader.raiseUnexpectedField( - "Multiple '" & fieldName & "' fields found", - "ForkedAggregateAndProof") - data = Opt.some(reader.readValue(JsonString)) - else: - unrecognizedFieldWarning(fieldName, "ForkedAggregateAndProof") - - if version.isNone(): - reader.raiseUnexpectedValue("Field `version` is missing") - if data.isNone(): - reader.raiseUnexpectedValue("Field `data` is missing") - - withConsensusFork(version.get()): - when consensusFork < ConsensusFork.Electra: - let res = - try: - RestJson.decode(string(data.get()), - phase0.AggregateAndProof, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError as exc: - reader.raiseUnexpectedValue( - "Incorrect phase0 aggregated attestation format, [" & - exc.formatMsg("ForkedAggregateAndProof") & "]") - value = ForkedAggregateAndProof.init(res, consensusFork) - else: - let res = - try: - RestJson.decode(string(data.get()), - electra.AggregateAndProof, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError as exc: - reader.raiseUnexpectedValue( - "Incorrect electra aggregated attestation format, [" & - exc.formatMsg("ForkedAggregateAndProof") & "]") - value = ForkedAggregateAndProof.init(res, consensusFork) - -proc writeValue*(writer: var JsonWriter[RestJson], - proof: ForkedAggregateAndProof) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("version", proof.kind.toString()) - withAggregateAndProof(proof): - writer.writeField("data", forkyProof) - writer.endRecord() - -## Web3SignerRequest -proc writeValue*( - writer: var JsonWriter[RestJson], value: Web3SignerRequest -) {.raises: [IOError].} = - writer.beginRecord() - case value.kind - of Web3SignerRequestKind.AggregationSlot: - doAssert(value.forkInfo.isSome(), - "forkInfo should be set for this type of request") - writer.writeField("type", "AGGREGATION_SLOT") - writer.writeField("fork_info", value.forkInfo.get()) - if isSome(value.signingRoot): - writer.writeField("signingRoot", value.signingRoot) - writer.writeField("aggregation_slot", value.aggregationSlot) - of Web3SignerRequestKind.AggregateAndProof: - doAssert(value.forkInfo.isSome(), - "forkInfo should be set for this type of request") - writer.writeField("type", "AGGREGATE_AND_PROOF") - writer.writeField("fork_info", value.forkInfo.get()) - if isSome(value.signingRoot): - writer.writeField("signingRoot", value.signingRoot) - writer.writeField("aggregate_and_proof", value.aggregateAndProof) - of Web3SignerRequestKind.AggregateAndProofV2: - doAssert(value.forkInfo.isSome(), - "forkInfo should be set for this type of request") - writer.writeField("type", "AGGREGATE_AND_PROOF_V2") - writer.writeField("fork_info", value.forkInfo.get()) - if isSome(value.signingRoot): - writer.writeField("signingRoot", value.signingRoot) - writer.writeField("aggregate_and_proof", value.forkedAggregateAndProof) - of Web3SignerRequestKind.Attestation: - doAssert(value.forkInfo.isSome(), - "forkInfo should be set for this type of request") - writer.writeField("type", 
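# The aggregate-and-proof envelope accepted above mirrors the block envelopes:
# a fork name (any case, lower-cased before parsing) selects between the
# phase0 and electra `AggregateAndProof` payloads, and the writer emits the
# lower-case fork name. Schematically, payloads elided:
#
#   {"version": "deneb",   "data": { <phase0.AggregateAndProof> }}
#   {"version": "electra", "data": { <electra.AggregateAndProof> }}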
"ATTESTATION") - writer.writeField("fork_info", value.forkInfo.get()) - if isSome(value.signingRoot): - writer.writeField("signingRoot", value.signingRoot) - writer.writeField("attestation", value.attestation) - of Web3SignerRequestKind.BlockV2: - doAssert(value.forkInfo.isSome(), - "forkInfo should be set for this type of request") - writer.writeField("type", "BLOCK_V2") - writer.writeField("fork_info", value.forkInfo.get()) - if isSome(value.signingRoot): - writer.writeField("signingRoot", value.signingRoot) - - # https://github.com/Consensys/web3signer/blob/2d956c019663ac70f60640d23196d1d321c1b1fa/core/src/main/resources/openapi-specs/eth2/signing/schemas.yaml#L483-L500 - writer.writeField("beacon_block", value.beaconBlockHeader) - - if isSome(value.proofs): - writer.writeField("proofs", value.proofs.get()) - of Web3SignerRequestKind.Deposit: - writer.writeField("type", "DEPOSIT") - if isSome(value.signingRoot): - writer.writeField("signingRoot", value.signingRoot) - writer.writeField("deposit", value.deposit) - of Web3SignerRequestKind.RandaoReveal: - doAssert(value.forkInfo.isSome(), - "forkInfo should be set for this type of request") - writer.writeField("type", "RANDAO_REVEAL") - writer.writeField("fork_info", value.forkInfo.get()) - if isSome(value.signingRoot): - writer.writeField("signingRoot", value.signingRoot) - writer.writeField("randao_reveal", value.randaoReveal) - of Web3SignerRequestKind.VoluntaryExit: - doAssert(value.forkInfo.isSome(), - "forkInfo should be set for this type of request") - writer.writeField("type", "VOLUNTARY_EXIT") - writer.writeField("fork_info", value.forkInfo.get()) - if isSome(value.signingRoot): - writer.writeField("signingRoot", value.signingRoot) - writer.writeField("voluntary_exit", value.voluntaryExit) - of Web3SignerRequestKind.SyncCommitteeMessage: - doAssert(value.forkInfo.isSome(), - "forkInfo should be set for this type of request") - writer.writeField("type", "SYNC_COMMITTEE_MESSAGE") - writer.writeField("fork_info", value.forkInfo.get()) - if isSome(value.signingRoot): - writer.writeField("signingRoot", value.signingRoot) - writer.writeField("sync_committee_message", value.syncCommitteeMessage) - of Web3SignerRequestKind.SyncCommitteeSelectionProof: - doAssert(value.forkInfo.isSome(), - "forkInfo should be set for this type of request") - writer.writeField("type", "SYNC_COMMITTEE_SELECTION_PROOF") - writer.writeField("fork_info", value.forkInfo.get()) - if isSome(value.signingRoot): - writer.writeField("signingRoot", value.signingRoot) - writer.writeField("sync_aggregator_selection_data", - value.syncAggregatorSelectionData) - of Web3SignerRequestKind.SyncCommitteeContributionAndProof: - doAssert(value.forkInfo.isSome(), - "forkInfo should be set for this type of request") - writer.writeField("type", "SYNC_COMMITTEE_CONTRIBUTION_AND_PROOF") - writer.writeField("fork_info", value.forkInfo.get()) - if isSome(value.signingRoot): - writer.writeField("signingRoot", value.signingRoot) - writer.writeField("contribution_and_proof", - value.syncCommitteeContributionAndProof) - of Web3SignerRequestKind.ValidatorRegistration: - # https://consensys.github.io/web3signer/web3signer-eth2.html#operation/ETH2_SIGN - doAssert(value.forkInfo.isSome(), - "forkInfo should be set for this type of request") - writer.writeField("type", "VALIDATOR_REGISTRATION") - writer.writeField("fork_info", value.forkInfo.get()) - if isSome(value.signingRoot): - writer.writeField("signingRoot", value.signingRoot) - writer.writeField("validator_registration", 
value.validatorRegistration) - writer.endRecord() - -proc readValue*(reader: var JsonReader[RestJson], - value: var Web3SignerRequest) {. - raises: [IOError, SerializationError].} = - var - requestKind: Opt[Web3SignerRequestKind] - forkInfo: Opt[Web3SignerForkInfo] - signingRoot: Opt[Eth2Digest] - data: Opt[JsonString] - proofs: seq[Web3SignerMerkleProof] - dataName: string - - for fieldName in readObjectFields(reader): - case fieldName - of "type": - if requestKind.isSome(): - reader.raiseUnexpectedField("Multiple `type` fields found", - "Web3SignerRequest") - let vres = reader.readValue(string) - requestKind = Opt.some( - case vres - of "AGGREGATION_SLOT": - Web3SignerRequestKind.AggregationSlot - of "AGGREGATE_AND_PROOF": - Web3SignerRequestKind.AggregateAndProof - of "AGGREGATE_AND_PROOF_V2": - Web3SignerRequestKind.AggregateAndProofV2 - of "ATTESTATION": - Web3SignerRequestKind.Attestation - of "BLOCK_V2": - Web3SignerRequestKind.BlockV2 - of "DEPOSIT": - Web3SignerRequestKind.Deposit - of "RANDAO_REVEAL": - Web3SignerRequestKind.RandaoReveal - of "VOLUNTARY_EXIT": - Web3SignerRequestKind.VoluntaryExit - of "SYNC_COMMITTEE_MESSAGE": - Web3SignerRequestKind.SyncCommitteeMessage - of "SYNC_COMMITTEE_SELECTION_PROOF": - Web3SignerRequestKind.SyncCommitteeSelectionProof - of "SYNC_COMMITTEE_CONTRIBUTION_AND_PROOF": - Web3SignerRequestKind.SyncCommitteeContributionAndProof - of "VALIDATOR_REGISTRATION": - Web3SignerRequestKind.ValidatorRegistration - else: - reader.raiseUnexpectedValue("Unexpected `type` value") - ) - of "fork_info": - if forkInfo.isSome(): - reader.raiseUnexpectedField("Multiple `fork_info` fields found", - "Web3SignerRequest") - forkInfo = Opt.some(reader.readValue(Web3SignerForkInfo)) - of "signingRoot": - if signingRoot.isSome(): - reader.raiseUnexpectedField("Multiple `signingRoot` fields found", - "Web3SignerRequest") - signingRoot = Opt.some(reader.readValue(Eth2Digest)) - of "proofs": - let newProofs = reader.readValue(seq[Web3SignerMerkleProof]) - proofs.add(newProofs) - of "aggregation_slot", "aggregate_and_proof", "block", "beacon_block", - "randao_reveal", "voluntary_exit", "sync_committee_message", - "sync_aggregator_selection_data", "contribution_and_proof", - "attestation", "deposit", "validator_registration": - if data.isSome(): - reader.raiseUnexpectedField("Multiple data fields found", - "Web3SignerRequest") - dataName = fieldName - data = Opt.some(reader.readValue(JsonString)) - - else: - unrecognizedFieldWarning(fieldName, typeof(value).name) - - if requestKind.isNone(): - reader.raiseUnexpectedValue("Field `type` is missing") - - value = - case requestKind.get() - of Web3SignerRequestKind.AggregationSlot: - if dataName != "aggregation_slot": - reader.raiseUnexpectedValue("Field `aggregation_slot` is missing") - if forkInfo.isNone(): - reader.raiseUnexpectedValue("Field `fork_info` is missing") - let data = decodeJsonString(Web3SignerAggregationSlotData, - data.get()).valueOr: - reader.raiseUnexpectedValue( - "Incorrect field `aggregation_slot` format") - Web3SignerRequest(kind: Web3SignerRequestKind.AggregationSlot, - forkInfo: forkInfo, signingRoot: signingRoot, aggregationSlot: data - ) - of Web3SignerRequestKind.AggregateAndProof: - if dataName != "aggregate_and_proof": - reader.raiseUnexpectedValue("Field `aggregate_and_proof` is missing") - if forkInfo.isNone(): - reader.raiseUnexpectedValue("Field `fork_info` is missing") - let data = decodeJsonString(phase0.AggregateAndProof, data.get()).valueOr: - reader.raiseUnexpectedValue( - "Incorrect field 
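# Every Web3Signer signing request written above shares one envelope: a
# `type` discriminator, `fork_info` (required for all kinds except DEPOSIT),
# an optional `signingRoot` and exactly one kind-specific payload field.
# Schematically, for an aggregation-slot request (values elided; the
# `fork_info` field names follow the Web3Signer API):
#
#   {
#     "type": "AGGREGATION_SLOT",
#     "fork_info": { "fork": { ... }, "genesis_validators_root": "0x..." },
#     "signingRoot": "0x...",          # only written when it was set
#     "aggregation_slot": { "slot": "..." }
#   }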
`aggregate_and_proof` format") - Web3SignerRequest( - kind: Web3SignerRequestKind.AggregateAndProof, - forkInfo: forkInfo, signingRoot: signingRoot, aggregateAndProof: data - ) - of Web3SignerRequestKind.AggregateAndProofV2: - if dataName != "aggregate_and_proof": - reader.raiseUnexpectedValue("Field `aggregate_and_proof` is missing") - if forkInfo.isNone(): - reader.raiseUnexpectedValue("Field `fork_info` is missing") - let data = decodeJsonString(ForkedAggregateAndProof, data.get()).valueOr: - reader.raiseUnexpectedValue( - "Incorrect field `aggregate_and_proof` format") - Web3SignerRequest( - kind: Web3SignerRequestKind.AggregateAndProofV2, - forkInfo: forkInfo, signingRoot: signingRoot, - forkedAggregateAndProof: data - ) - of Web3SignerRequestKind.Attestation: - if dataName != "attestation": - reader.raiseUnexpectedValue("Field `attestation` is missing") - if forkInfo.isNone(): - reader.raiseUnexpectedValue("Field `fork_info` is missing") - let data = decodeJsonString(AttestationData, data.get()).valueOr: - reader.raiseUnexpectedValue("Incorrect field `attestation` format") - Web3SignerRequest( - kind: Web3SignerRequestKind.Attestation, - forkInfo: forkInfo, signingRoot: signingRoot, attestation: data - ) - of Web3SignerRequestKind.BlockV2: - # https://github.com/ConsenSys/web3signer/blob/41834a927088f1bde7a097e17d19e954d0058e54/core/src/main/resources/openapi-specs/eth2/signing/schemas.yaml#L421-L425 (branch v22.7.0) - # It's the "beacon_block" field even when it's not a block, but a header - if dataName != "beacon_block": - reader.raiseUnexpectedValue("Field `beacon_block` is missing") - if forkInfo.isNone(): - reader.raiseUnexpectedValue("Field `fork_info` is missing") - let data = decodeJsonString(Web3SignerForkedBeaconBlock, - data.get()).valueOr: - reader.raiseUnexpectedValue("Incorrect field `beacon_block` format") - if len(proofs) > 0: - Web3SignerRequest( - kind: Web3SignerRequestKind.BlockV2, - forkInfo: forkInfo, signingRoot: signingRoot, beaconBlockHeader: data, - proofs: Opt.some(proofs) - ) - else: - Web3SignerRequest( - kind: Web3SignerRequestKind.BlockV2, - forkInfo: forkInfo, signingRoot: signingRoot, beaconBlockHeader: data - ) - of Web3SignerRequestKind.Deposit: - if dataName != "deposit": - reader.raiseUnexpectedValue("Field `deposit` is missing") - let data = decodeJsonString(Web3SignerDepositData, data.get()).valueOr: - reader.raiseUnexpectedValue("Incorrect field `deposit` format") - Web3SignerRequest( - kind: Web3SignerRequestKind.Deposit, - signingRoot: signingRoot, deposit: data - ) - of Web3SignerRequestKind.RandaoReveal: - if dataName != "randao_reveal": - reader.raiseUnexpectedValue("Field `randao_reveal` is missing") - if forkInfo.isNone(): - reader.raiseUnexpectedValue("Field `fork_info` is missing") - let data = decodeJsonString(Web3SignerRandaoRevealData, - data.get()).valueOr: - reader.raiseUnexpectedValue("Incorrect field `randao_reveal` format") - Web3SignerRequest( - kind: Web3SignerRequestKind.RandaoReveal, - forkInfo: forkInfo, signingRoot: signingRoot, randaoReveal: data - ) - of Web3SignerRequestKind.VoluntaryExit: - if dataName != "voluntary_exit": - reader.raiseUnexpectedValue("Field `voluntary_exit` is missing") - if forkInfo.isNone(): - reader.raiseUnexpectedValue("Field `fork_info` is missing") - let data = decodeJsonString(VoluntaryExit, data.get()).valueOr: - reader.raiseUnexpectedValue("Incorrect field `voluntary_exit` format") - Web3SignerRequest( - kind: Web3SignerRequestKind.VoluntaryExit, - forkInfo: forkInfo, signingRoot: 
signingRoot, voluntaryExit: data - ) - of Web3SignerRequestKind.SyncCommitteeMessage: - if dataName != "sync_committee_message": - reader.raiseUnexpectedValue( - "Field `sync_committee_message` is missing") - if forkInfo.isNone(): - reader.raiseUnexpectedValue("Field `fork_info` is missing") - let data = decodeJsonString(Web3SignerSyncCommitteeMessageData, - data.get()).valueOr: - reader.raiseUnexpectedValue( - "Incorrect field `sync_committee_message` format") - Web3SignerRequest( - kind: Web3SignerRequestKind.SyncCommitteeMessage, - forkInfo: forkInfo, signingRoot: signingRoot, - syncCommitteeMessage: data - ) - of Web3SignerRequestKind.SyncCommitteeSelectionProof: - if dataName != "sync_aggregator_selection_data": - reader.raiseUnexpectedValue( - "Field `sync_aggregator_selection_data` is missing") - if forkInfo.isNone(): - reader.raiseUnexpectedValue("Field `fork_info` is missing") - let data = decodeJsonString(SyncAggregatorSelectionData, - data.get()).valueOr: - reader.raiseUnexpectedValue( - "Incorrect field `sync_aggregator_selection_data` format") - Web3SignerRequest( - kind: Web3SignerRequestKind.SyncCommitteeSelectionProof, - forkInfo: forkInfo, signingRoot: signingRoot, - syncAggregatorSelectionData: data - ) - of Web3SignerRequestKind.SyncCommitteeContributionAndProof: - if dataName != "contribution_and_proof": - reader.raiseUnexpectedValue( - "Field `contribution_and_proof` is missing") - if forkInfo.isNone(): - reader.raiseUnexpectedValue("Field `fork_info` is missing") - let data = decodeJsonString(ContributionAndProof, data.get()).valueOr: - reader.raiseUnexpectedValue( - "Incorrect field `contribution_and_proof` format") - Web3SignerRequest( - kind: Web3SignerRequestKind.SyncCommitteeContributionAndProof, - forkInfo: forkInfo, signingRoot: signingRoot, - syncCommitteeContributionAndProof: data - ) - of Web3SignerRequestKind.ValidatorRegistration: - if dataName != "validator_registration": - reader.raiseUnexpectedValue( - "Field `validator_registration` is missing") - if forkInfo.isNone(): - reader.raiseUnexpectedValue("Field `fork_info` is missing") - let data = decodeJsonString(Web3SignerValidatorRegistration, - data.get()).valueOr: - reader.raiseUnexpectedValue( - "Incorrect field `validator_registration` format") - Web3SignerRequest( - kind: Web3SignerRequestKind.ValidatorRegistration, - forkInfo: forkInfo, signingRoot: signingRoot, - validatorRegistration: data - ) - -## RemoteKeystoreStatus -proc writeValue*( - writer: var JsonWriter[RestJson], value: RemoteKeystoreStatus -) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("status", $value.status) - if value.message.isSome(): - writer.writeField("message", value.message.get()) - writer.endRecord() - -proc readValue*(reader: var JsonReader[RestJson], - value: var RemoteKeystoreStatus) {. 
- raises: [IOError, SerializationError].} = - var message: Opt[string] - var status: Opt[KeystoreStatus] - - for fieldName in readObjectFields(reader): - case fieldName - of "message": - if message.isSome(): - reader.raiseUnexpectedField("Multiple `message` fields found", - "RemoteKeystoreStatus") - message = Opt.some(reader.readValue(string)) - of "status": - if status.isSome(): - reader.raiseUnexpectedField("Multiple `status` fields found", - "RemoteKeystoreStatus") - let res = reader.readValue(string) - status = Opt.some( - case res - of "error": - KeystoreStatus.error - of "not_active": - KeystoreStatus.notActive - of "not_found": - KeystoreStatus.notFound - of "deleted": - KeystoreStatus.deleted - of "duplicate": - KeystoreStatus.duplicate - of "imported": - KeystoreStatus.imported - else: - reader.raiseUnexpectedValue("Invalid `status` value") - ) - else: - unrecognizedFieldWarning(fieldName, typeof(value).name) - - if status.isNone(): - reader.raiseUnexpectedValue("Field `status` is missing") - - value = RemoteKeystoreStatus(status: status.get(), message: message) - -## ScryptSalt -proc readValue*(reader: var JsonReader[RestJson], value: var ScryptSalt) {. - raises: [SerializationError, IOError].} = - let res = ncrutils.fromHex(reader.readValue(string)) - if len(res) == 0: - reader.raiseUnexpectedValue("Invalid scrypt salt value") - value = ScryptSalt(res) - -## Pbkdf2Params -proc writeValue*( - writer: var JsonWriter[RestJson], value: Pbkdf2Params -) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("dklen", JsonString(Base10.toString(value.dklen))) - writer.writeField("c", JsonString(Base10.toString(value.c))) - writer.writeField("prf", value.prf) - writer.writeField("salt", value.salt) - writer.endRecord() - -proc readValue*(reader: var JsonReader[RestJson], value: var Pbkdf2Params) {. 
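# The status strings matched above are the keymanager-API literals, so a
# response item decodes directly; a small sketch (module import assumed, as
# in the earlier sketch):
let st = RestJson.decode("""{"status": "imported"}""", RemoteKeystoreStatus)
doAssert st.status == KeystoreStatus.imported
doAssert st.message.isNone()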
- raises: [SerializationError, IOError].} = - var - dklen: Opt[uint64] - c: Opt[uint64] - prf: Opt[PrfKind] - salt: Opt[Pbkdf2Salt] - - for fieldName in readObjectFields(reader): - case fieldName - of "dklen": - if dklen.isSome(): - reader.raiseUnexpectedField("Multiple `dklen` fields found", - "Pbkdf2Params") - dklen = Opt.some(reader.readValue(uint64)) - of "c": - if c.isSome(): - reader.raiseUnexpectedField("Multiple `c` fields found", - "Pbkdf2Params") - c = Opt.some(reader.readValue(uint64)) - of "prf": - if prf.isSome(): - reader.raiseUnexpectedField("Multiple `prf` fields found", - "Pbkdf2Params") - prf = Opt.some(reader.readValue(PrfKind)) - of "salt": - if salt.isSome(): - reader.raiseUnexpectedField("Multiple `salt` fields found", - "Pbkdf2Params") - salt = Opt.some(reader.readValue(Pbkdf2Salt)) - else: - unrecognizedFieldWarning(fieldName, typeof(value).name) - - if dklen.isNone(): - reader.raiseUnexpectedValue("Field `dklen` is missing") - if c.isNone(): - reader.raiseUnexpectedValue("Field `c` is missing") - if prf.isNone(): - reader.raiseUnexpectedValue("Field `prf` is missing") - if salt.isNone(): - reader.raiseUnexpectedValue("Field `salt` is missing") - - value = Pbkdf2Params( - dklen: dklen.get(), - c: c.get(), - prf: prf.get(), - salt: salt.get() - ) - -## ScryptParams -proc writeValue*( - writer: var JsonWriter[RestJson], value: ScryptParams -) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("dklen", JsonString(Base10.toString(value.dklen))) - writer.writeField("n", JsonString(Base10.toString(uint64(value.n)))) - writer.writeField("p", JsonString(Base10.toString(uint64(value.p)))) - writer.writeField("r", JsonString(Base10.toString(uint64(value.r)))) - writer.writeField("salt", value.salt) - writer.endRecord() - -proc readValue*(reader: var JsonReader[RestJson], value: var ScryptParams) {. 
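# Both KDF parameter writers above emit the numeric fields as bare decimal
# numbers (the `JsonString(Base10.toString(...))` wrapping writes them
# unquoted), matching EIP-2335 keystores. Representative shapes, with typical
# EIP-2335 values assumed and the salt elided:
#
#   {"dklen": 32, "c": 262144, "prf": "hmac-sha256", "salt": "<hex>"}    # pbkdf2
#   {"dklen": 32, "n": 262144, "p": 1, "r": 8, "salt": "<hex>"}          # scrypt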
- raises: [SerializationError, IOError].} = - var - dklen: Opt[uint64] - n, p, r: Opt[int] - salt: Opt[ScryptSalt] - - for fieldName in readObjectFields(reader): - case fieldName - of "dklen": - if dklen.isSome(): - reader.raiseUnexpectedField("Multiple `dklen` fields found", - "ScryptParams") - dklen = Opt.some(reader.readValue(uint64)) - of "n": - if n.isSome(): - reader.raiseUnexpectedField("Multiple `n` fields found", - "ScryptParams") - let res = reader.readValue(int) - if res < 0: - reader.raiseUnexpectedValue("Unexpected negative `n` value") - n = Opt.some(res) - of "p": - if p.isSome(): - reader.raiseUnexpectedField("Multiple `p` fields found", - "ScryptParams") - let res = reader.readValue(int) - if res < 0: - reader.raiseUnexpectedValue("Unexpected negative `p` value") - p = Opt.some(res) - of "r": - if r.isSome(): - reader.raiseUnexpectedField("Multiple `r` fields found", - "ScryptParams") - let res = reader.readValue(int) - if res < 0: - reader.raiseUnexpectedValue("Unexpected negative `r` value") - r = Opt.some(res) - of "salt": - if salt.isSome(): - reader.raiseUnexpectedField("Multiple `salt` fields found", - "ScryptParams") - salt = Opt.some(reader.readValue(ScryptSalt)) - else: - unrecognizedFieldWarning(fieldName, typeof(value).name) - - if dklen.isNone(): - reader.raiseUnexpectedValue("Field `dklen` is missing") - if n.isNone(): - reader.raiseUnexpectedValue("Field `n` is missing") - if p.isNone(): - reader.raiseUnexpectedValue("Field `p` is missing") - if r.isNone(): - reader.raiseUnexpectedValue("Field `r` is missing") - if salt.isNone(): - reader.raiseUnexpectedValue("Field `salt` is missing") - - value = ScryptParams( - dklen: dklen.get(), - n: n.get(), p: p.get(), r: r.get(), - salt: salt.get() - ) - -## Keystore -proc writeValue*( - writer: var JsonWriter[RestJson], value: Keystore -) {.error: "keystores must be converted to json with Json.encode(keystore). " & - "There is no REST-specific encoding" .} - -proc readValue*(reader: var JsonReader[RestJson], value: var Keystore) {. - error: "Keystores must be loaded with `parseKeystore`. " & - "There is no REST-specific encoding".} - -## KeystoresAndSlashingProtection -proc writeValue*( - writer: var JsonWriter[RestJson], value: KeystoresAndSlashingProtection -) {.raises: [IOError].} = - writer.beginRecord() - let keystores = - block: - var res: seq[string] - for keystore in value.keystores: - let encoded = Json.encode(keystore) - res.add(encoded) - res - writer.writeField("keystores", keystores) - writer.writeField("passwords", value.passwords) - if value.slashing_protection.isSome(): - let slashingProtection = RestJson.encode(value.slashing_protection.get) - writer.writeField("slashing_protection", slashingProtection) - writer.endRecord() - -proc readValue*(reader: var JsonReader[RestJson], - value: var KeystoresAndSlashingProtection) {. 
- raises: [SerializationError, IOError].} = - var - strKeystores: seq[string] - passwords: seq[string] - strSlashing: Opt[string] - - for fieldName in readObjectFields(reader): - case fieldName - of "keystores": - strKeystores = reader.readValue(seq[string]) - of "passwords": - passwords = reader.readValue(seq[string]) - of "slashing_protection": - if strSlashing.isSome(): - reader.raiseUnexpectedField( - "Multiple `slashing_protection` fields found", - "KeystoresAndSlashingProtection") - strSlashing = Opt.some(reader.readValue(string)) - else: - unrecognizedFieldWarning(fieldName, typeof(value).name) - - if len(strKeystores) == 0: - reader.raiseUnexpectedValue("Missing or empty `keystores` value") - if len(passwords) == 0: - reader.raiseUnexpectedValue("Missing or empty `passwords` value") - - let keystores = - block: - var res: seq[Keystore] - for item in strKeystores: - let key = - try: - parseKeystore(item) - except SerializationError: - # TODO re-raise the exception by adjusting the column index, so the user - # will get an accurate syntax error within the larger message - reader.raiseUnexpectedValue("Invalid keystore format") - res.add(key) - res - - let slashing = - if strSlashing.isSome(): - let db = - try: - RestJson.decode(strSlashing.get(), - SPDIR, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError: - reader.raiseUnexpectedValue("Invalid slashing protection format") - Opt.some(db) - else: - Opt.none(SPDIR) - - value = KeystoresAndSlashingProtection( - keystores: keystores, passwords: passwords, slashing_protection: slashing - ) - -## RestActivityItem -proc writeValue*( - writer: var JsonWriter[RestJson], value: RestActivityItem -) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("index", value.index) - writer.writeField("epoch", value.epoch) - writer.writeField("active", value.active) - writer.endRecord() - -proc readValue*(reader: var JsonReader[RestJson], - value: var RestActivityItem) {. - raises: [SerializationError, IOError].} = - var index: Opt[ValidatorIndex] - var epoch: Opt[Epoch] - var active: Opt[bool] - - for fieldName in readObjectFields(reader): - case fieldName - of "index": - if index.isSome(): - reader.raiseUnexpectedField( - "Multiple `index` fields found", "RestActivityItem") - index = Opt.some(reader.readValue(ValidatorIndex)) - of "epoch": - if epoch.isSome(): - reader.raiseUnexpectedField( - "Multiple `epoch` fields found", "RestActivityItem") - epoch = Opt.some(reader.readValue(Epoch)) - of "active": - if active.isSome(): - reader.raiseUnexpectedField( - "Multiple `active` fields found", "RestActivityItem") - active = Opt.some(reader.readValue(bool)) - else: - unrecognizedFieldIgnore() - - if index.isNone(): - reader.raiseUnexpectedValue("Missing or empty `index` value") - if epoch.isNone(): - reader.raiseUnexpectedValue("Missing or empty `epoch` value") - if active.isNone(): - reader.raiseUnexpectedValue("Missing or empty `active` value") - - value = RestActivityItem(index: index.get(), epoch: epoch.get(), - active: active.get()) - -## RestLivenessItem -proc writeValue*( - writer: var JsonWriter[RestJson], value: RestLivenessItem -) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("index", value.index) - writer.writeField("is_live", value.is_live) - writer.endRecord() - -proc readValue*(reader: var JsonReader[RestJson], - value: var RestLivenessItem) {. 
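# Note that the keymanager import body handled above is doubly encoded: each
# keystore and the slashing-protection blob travel as JSON *strings* inside
# the outer object, which is why the reader round-trips them through
# `parseKeystore` and `RestJson.decode(..., SPDIR, ...)`. Schematically:
#
#   {
#     "keystores": ["{\"version\": 4, ...}", ...],
#     "passwords": ["...", ...],
#     "slashing_protection": "{\"metadata\": {...}, \"data\": [...]}"
#   }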
- raises: [SerializationError, IOError].} = - var index: Opt[ValidatorIndex] - var isLive: Opt[bool] - - for fieldName in readObjectFields(reader): - case fieldName - of "index": - if index.isSome(): - reader.raiseUnexpectedField( - "Multiple `index` fields found", "RestLivenessItem") - index = Opt.some(reader.readValue(ValidatorIndex)) - of "is_live": - if isLive.isSome(): - reader.raiseUnexpectedField( - "Multiple `is_live` fields found", "RestLivenessItem") - isLive = Opt.some(reader.readValue(bool)) - else: - unrecognizedFieldIgnore() - - if index.isNone(): - reader.raiseUnexpectedValue("Missing or empty `index` value") - if isLive.isNone(): - reader.raiseUnexpectedValue("Missing or empty `is_live` value") - - value = RestLivenessItem(index: index.get(), is_live: isLive.get()) - -## HeadChangeInfoObject -proc writeValue*( - writer: var JsonWriter[RestJson], value: HeadChangeInfoObject -) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("slot", value.slot) - writer.writeField("block", value.block_root) - writer.writeField("state", value.state_root) - writer.writeField("epoch_transition", value.epoch_transition) - writer.writeField("previous_duty_dependent_root", - value.previous_duty_dependent_root) - writer.writeField("current_duty_dependent_root", - value.current_duty_dependent_root) - if value.optimistic.isSome(): - writer.writeField("execution_optimistic", value.optimistic.get()) - writer.endRecord() - -## ReorgInfoObject -proc writeValue*( - writer: var JsonWriter[RestJson], value: ReorgInfoObject -) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("slot", value.slot) - writer.writeField("depth", value.depth) - writer.writeField("old_head_block", value.old_head_block) - writer.writeField("new_head_block", value.new_head_block) - writer.writeField("old_head_state", value.old_head_state) - writer.writeField("new_head_state", value.new_head_state) - if value.optimistic.isSome(): - writer.writeField("execution_optimistic", value.optimistic.get()) - writer.endRecord() - -## FinalizationInfoObject -proc writeValue*( - writer: var JsonWriter[RestJson], value: FinalizationInfoObject -) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("block", value.block_root) - writer.writeField("state", value.state_root) - writer.writeField("epoch", value.epoch) - if value.optimistic.isSome(): - writer.writeField("execution_optimistic", value.optimistic.get()) - writer.endRecord() - -## RestNodeValidity -proc writeValue*( - writer: var JsonWriter[RestJson], value: RestNodeValidity -) {.raises: [IOError].} = - writer.writeValue($value) - -## RestErrorMessage -proc readValue*(reader: var JsonReader[RestJson], - value: var RestErrorMessage) {. 
- raises: [SerializationError, IOError].} = - var - code: Opt[int] - message: Opt[string] - stacktraces: Opt[seq[string]] - - for fieldName in readObjectFields(reader): - case fieldName - of "code": - if code.isSome(): - reader.raiseUnexpectedField("Multiple `code` fields found", - "RestErrorMessage") - let ires = - try: - let res = reader.readValue(int) - if res < 0: - reader.raiseUnexpectedValue("Invalid `code` field value") - Opt.some(res) - except SerializationError: - Opt.none(int) - if ires.isNone(): - let sres = - try: parseInt(reader.readValue(string)) - except ValueError: - reader.raiseUnexpectedValue("Invalid `code` field format") - if sres < 0: - reader.raiseUnexpectedValue("Invalid `code` field value") - code = Opt.some(sres) - else: - code = ires - of "message": - if message.isSome(): - reader.raiseUnexpectedField("Multiple `message` fields found", - "RestErrorMessage") - message = Opt.some(reader.readValue(string)) - of "stacktraces": - if stacktraces.isSome(): - reader.raiseUnexpectedField("Multiple `stacktraces` fields found", - "RestErrorMessage") - stacktraces = Opt.some(reader.readValue(seq[string])) - else: - unrecognizedFieldIgnore() - - if code.isNone(): - reader.raiseUnexpectedValue("Missing or invalid `code` value") - if message.isNone(): - reader.raiseUnexpectedValue("Missing or invalid `message` value") - - value = RestErrorMessage( - code: code.get(), message: message.get(), - stacktraces: stacktraces - ) - -proc writeValue*(writer: var JsonWriter[RestJson], value: RestErrorMessage) {. - raises: [IOError].} = - writer.beginRecord() - writer.writeField("code", value.code) - writer.writeField("message", value.message) - if value.stacktraces.isSome(): - writer.writeField("stacktraces", value.stacktraces.get()) - writer.endRecord() - -## VCRuntimeConfig -proc readValue*(reader: var JsonReader[RestJson], - value: var VCRuntimeConfig) {. - raises: [SerializationError, IOError].} = - for fieldName in readObjectFields(reader): - let fieldValue = reader.readValue(string) - if value.hasKeyOrPut(toUpperAscii(fieldName), fieldValue): - let msg = "Multiple `" & fieldName & "` fields found" - reader.raiseUnexpectedField(msg, "VCRuntimeConfig") - -## ForkedMaybeBlindedBeaconBlock -proc writeValue*(writer: var JsonWriter[RestJson], - value: ProduceBlockResponseV3) {.raises: [IOError].} = - writer.beginRecord() - withForkyMaybeBlindedBlck(value): - writer.writeField("version", consensusFork.toString()) - writer.writeField("execution_payload_blinded", isBlinded) - if value.executionValue.isSome(): - writer.writeField("execution_payload_value", - $(value.executionValue.get())) - if value.consensusValue.isSome(): - writer.writeField("consensus_block_value", - $(value.consensusValue.get())) - writer.writeField("data", forkyMaybeBlindedBlck) - writer.endRecord() - -proc readValue*(reader: var JsonReader[RestJson], - value: var ProduceBlockResponseV3) {. 
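# The reader above accepts `code` both as a JSON number and as a numeric
# string, so either error shape seen from other clients decodes to the same
# value; a small sketch (module import assumed, as in the earlier sketch):
let
  a = RestJson.decode("""{"code": 404, "message": "Block not found"}""",
                      RestErrorMessage)
  b = RestJson.decode("""{"code": "404", "message": "Block not found"}""",
                      RestErrorMessage)
doAssert a.code == 404 and b.code == 404
doAssert a.stacktraces.isNone()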
- raises: [SerializationError, IOError].} = - var - version: Opt[ConsensusFork] - blinded: Opt[bool] - executionValue: Opt[UInt256] - consensusValue: Opt[UInt256] - data: Opt[JsonString] - - prepareForkedBlockReading(ProduceBlockResponseV3, reader, version, data, - blinded, executionValue, consensusValue) - - if blinded.isNone(): - reader.raiseUnexpectedValue("Field `execution_payload_blinded` is missing") - if executionValue.isNone(): - reader.raiseUnexpectedValue("Field `execution_payload_value` is missing") - # TODO (cheatfate): At some point we should add check for missing - # `consensus_block_value` too - if data.isNone(): - reader.raiseUnexpectedValue("Field `data` is missing") - - withConsensusFork(version.get): - when consensusFork >= ConsensusFork.Deneb: - if blinded.get: - value = ForkedMaybeBlindedBeaconBlock.init( - RestJson.decode( - string(data.get()), consensusFork.BlindedBlockContents, - requireAllFields = true, allowUnknownFields = true), - executionValue, consensusValue) - else: - value = ForkedMaybeBlindedBeaconBlock.init( - RestJson.decode( - string(data.get()), consensusFork.BlockContents, - requireAllFields = true, allowUnknownFields = true), - executionValue, consensusValue) - elif consensusFork >= ConsensusFork.Bellatrix: - if blinded.get: - reader.raiseUnexpectedValue( - "`execution_payload_blinded` unsupported for `version`") - value = ForkedMaybeBlindedBeaconBlock.init( - RestJson.decode( - string(data.get()), consensusFork.BlockContents, - requireAllFields = true, allowUnknownFields = true), - executionValue, consensusValue) - else: - if blinded.get: - reader.raiseUnexpectedValue( - "`execution_payload_blinded` unsupported for `version`") - value = ForkedMaybeBlindedBeaconBlock.init( - RestJson.decode( - string(data.get()), consensusFork.BlockContents, - requireAllFields = true, allowUnknownFields = true)) + res = SSZ.encode(data) + headers = consensusFork.ethHeaders(hasRestAllowedOrigin) + RestApiResponse.response( + res, Http200, "application/octet-stream", headers = headers) + +proc sszResponse*( + _: typedesc[RestApiResponse], + data: auto, + consensusFork: ConsensusFork, + isBlinded: bool, + executionValue: UInt256, + consensusValue: UInt256, + hasRestAllowedOrigin: bool): RestApiResponse = + let + res = SSZ.encode(data) + headers = consensusFork.ethHeaders( + isBlinded, executionValue, consensusValue, hasRestAllowedOrigin) + RestApiResponse.response( + res, Http200, "application/octet-stream", headers = headers) proc parseRoot(value: string): Result[Eth2Digest, cstring] = try: @@ -2920,20 +590,8 @@ proc parseRoot(value: string): Result[Eth2Digest, cstring] = except ValueError: err("Unable to decode root value") -## GraffitiString -proc writeValue*(writer: var JsonWriter[RestJson], value: GraffitiString) {. - raises: [IOError].} = - writeValue(writer, $value) - -proc readValue*(reader: var JsonReader[RestJson], T: type GraffitiString): T {. 
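# The produceBlockV3 envelope written above and required by this reader pairs
# the fork and blinded flag with the two Wei values (serialized as decimal
# strings) next to the block contents:
#
#   {
#     "version": "electra",
#     "execution_payload_blinded": false,
#     "execution_payload_value": "1234567",
#     "consensus_block_value": "42000000000",
#     "data": { <BlockContents or BlindedBlockContents for that fork> }
#   }
#
# For the SSZ variant, the same metadata presumably travels in the
# `Eth-Consensus-Version`, `Eth-Execution-Payload-Blinded` and the two
# `Eth-*-Value` headers built by `ethHeaders` in the `sszResponse` helpers
# introduced above.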
- raises: [IOError, SerializationError].} = - let res = init(GraffitiString, reader.readValue(string)) - if res.isErr(): - reader.raiseUnexpectedValue res.error - res.get - proc decodeBody*( - t: typedesc[RestPublishedSignedBeaconBlock], + _: typedesc[RestPublishedSignedBeaconBlock], body: ContentBody, version: string ): Result[RestPublishedSignedBeaconBlock, RestErrorMessage] = @@ -2942,113 +600,38 @@ proc decodeBody*( return err(RestErrorMessage.init(Http400, UnableDecodeVersionError, [version, $error])) - template getBlck(blckType: untyped): untyped = - try: - RestPublishedSignedBeaconBlock(ForkedSignedBeaconBlock.init( - RestJson.decode(body.data, blckType, - requireAllFields = true, - allowUnknownFields = true))) - except SerializationError as exc: - debug "Failed to decode JSON data", - err = exc.formatMsg(""), - data = string.fromBytes(body.data) - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - - withConsensusFork(consensusFork): - ok(getBlck(consensusFork.SignedBeaconBlock)) + try: + var res = ForkedSignedBeaconBlock(kind: consensusFork) + withBlck(res): + forkyBlck = RestJson.decode(body.data, typeof(forkyBlck)) + ok RestPublishedSignedBeaconBlock(res) + except SerializationError as exc: + debug "Failed to decode JSON data", + err = exc.formatMsg(""), data = string.fromBytes(body.data) + err RestErrorMessage.init( + Http400, UnableDecodeError, [version, exc.formatMsg("")] + ) elif body.contentType == OctetStreamMediaType: let consensusFork = ConsensusFork.decodeString(version).valueOr: return err(RestErrorMessage.init(Http400, UnableDecodeVersionError, [version, $error])) - case consensusFork - of ConsensusFork.Phase0: - let blck = - try: - SSZ.decode(body.data, phase0.SignedBeaconBlock) - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBeaconBlock(ForkedSignedBeaconBlock.init(blck))) - of ConsensusFork.Altair: - let blck = - try: - SSZ.decode(body.data, altair.SignedBeaconBlock) - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBeaconBlock(ForkedSignedBeaconBlock.init(blck))) - of ConsensusFork.Bellatrix: - let blck = - try: - SSZ.decode(body.data, bellatrix.SignedBeaconBlock) - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBeaconBlock(ForkedSignedBeaconBlock.init(blck))) - of ConsensusFork.Capella: - let blck = - try: - SSZ.decode(body.data, capella.SignedBeaconBlock) - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBeaconBlock(ForkedSignedBeaconBlock.init(blck))) - of ConsensusFork.Deneb: - let blck = - try: 
- SSZ.decode(body.data, deneb.SignedBeaconBlock) - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBeaconBlock(ForkedSignedBeaconBlock.init(blck))) - of ConsensusFork.Electra: - let blck = - try: - SSZ.decode(body.data, electra.SignedBeaconBlock) - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBeaconBlock(ForkedSignedBeaconBlock.init(blck))) - of ConsensusFork.Fulu: - let blck = - try: - SSZ.decode(body.data, fulu.SignedBeaconBlock) - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBeaconBlock(ForkedSignedBeaconBlock.init(blck))) + try: + var res = ForkedSignedBeaconBlock(kind: consensusFork) + withBlck(res): + forkyBlck = SSZ.decode(body.data, typeof(forkyBlck)) + + ok RestPublishedSignedBeaconBlock(res) + except SerializationError as exc: + err RestErrorMessage.init( + Http400, UnableDecodeError, [version, exc.formatMsg("")] + ) else: err(RestErrorMessage.init(Http415, InvalidContentTypeError, [version, $body.contentType])) proc decodeBody*( - t: typedesc[RestPublishedSignedBlockContents], + _: typedesc[RestPublishedSignedBlockContents], body: ContentBody, version: string ): Result[RestPublishedSignedBlockContents, RestErrorMessage] = @@ -3057,188 +640,30 @@ proc decodeBody*( return err(RestErrorMessage.init(Http400, UnableDecodeVersionError, [version, $error])) - template getBlck(blckType: untyped): untyped = - try: - var res = RestJson.decode(body.data, blckType, - requireAllFields = true, - allowUnknownFields = true) - when compiles(res.signed_block.messsage): - {.error: "Deneb and later forks handled in case statement".} - else: - RestPublishedSignedBlockContents.init( - res.message, hash_tree_root(res.message), res.signature) - except SerializationError as exc: - debug "Failed to decode JSON data", - err = exc.formatMsg(""), - data = string.fromBytes(body.data) - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - - let data = - case consensusFork - of ConsensusFork.Phase0: getBlck(phase0.SignedBeaconBlock) - of ConsensusFork.Altair: getBlck(altair.SignedBeaconBlock) - of ConsensusFork.Bellatrix: getBlck(bellatrix.SignedBeaconBlock) - of ConsensusFork.Capella: getBlck(capella.SignedBeaconBlock) - of ConsensusFork.Deneb: - try: - var res = RestJson.decode(body.data, DenebSignedBlockContents, - requireAllFields = true, - allowUnknownFields = true) - res.signed_block.root = hash_tree_root(res.signed_block.message) - RestPublishedSignedBlockContents( - kind: ConsensusFork.Deneb, denebData: res) - except SerializationError as exc: - debug "Failed to decode JSON data", - err = exc.formatMsg(""), - data = string.fromBytes(body.data) - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError 
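# The fork-by-fork SSZ `case` ladder removed above collapses into a single
# `withBlck` dispatch in the replacement code. The same pattern as a
# stand-alone helper (names as used in this module; `payload` is assumed to
# hold the SSZ bytes of a signed block of fork `fork`):
proc decodeSignedBlockSsz(fork: ConsensusFork,
                          payload: openArray[byte]): ForkedSignedBeaconBlock =
  # the runtime discriminator selects the branch, `withBlck` then binds
  # `forkyBlck` to the matching per-fork field
  var res = ForkedSignedBeaconBlock(kind: fork)
  withBlck(res):
    forkyBlck = SSZ.decode(payload, typeof(forkyBlck))
  res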
as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - of ConsensusFork.Electra: - try: - var res = RestJson.decode(body.data, ElectraSignedBlockContents, - requireAllFields = true, - allowUnknownFields = true) - res.signed_block.root = hash_tree_root(res.signed_block.message) - RestPublishedSignedBlockContents( - kind: ConsensusFork.Electra, electraData: res) - except SerializationError as exc: - debug "Failed to decode JSON data", - err = exc.formatMsg(""), - data = string.fromBytes(body.data) - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - of ConsensusFork.Fulu: - try: - var res = RestJson.decode(body.data, FuluSignedBlockContents, - requireAllFields = true, - allowUnknownFields = true) - res.signed_block.root = hash_tree_root(res.signed_block.message) - RestPublishedSignedBlockContents( - kind: ConsensusFork.Fulu, fuluData: res) - except SerializationError as exc: - debug "Failed to decode JSON data", - err = exc.formatMsg(""), - data = string.fromBytes(body.data) - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - - ok(data) + try: + var res = RestPublishedSignedBlockContents(kind: consensusFork) + withForkyBlck(res): + forkyData = RestJson.decode(body.data, typeof(forkyData)) + ok res + except SerializationError as exc: + debug "Failed to decode JSON data", + err = exc.formatMsg(""), data = string.fromBytes(body.data) + err RestErrorMessage.init( + Http400, UnableDecodeError, [version, exc.formatMsg("")] + ) elif body.contentType == OctetStreamMediaType: let consensusFork = ConsensusFork.decodeString(version).valueOr: return err(RestErrorMessage.init(Http400, UnableDecodeVersionError, [version, $error])) - case consensusFork - of ConsensusFork.Phase0: - let blck = - try: - var res = SSZ.decode(body.data, phase0.SignedBeaconBlock) - res.root = hash_tree_root(res.message) - res - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBlockContents( - kind: ConsensusFork.Phase0, phase0Data: blck)) - of ConsensusFork.Altair: - let blck = - try: - var res = SSZ.decode(body.data, altair.SignedBeaconBlock) - res.root = hash_tree_root(res.message) - res - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBlockContents( - kind: ConsensusFork.Altair, altairData: blck)) - of ConsensusFork.Bellatrix: - let blck = - try: - var res = SSZ.decode(body.data, bellatrix.SignedBeaconBlock) - res.root = hash_tree_root(res.message) - res - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBlockContents( - kind: ConsensusFork.Bellatrix, bellatrixData: blck)) - of 
ConsensusFork.Capella: - let blck = - try: - var res = SSZ.decode(body.data, capella.SignedBeaconBlock) - res.root = hash_tree_root(res.message) - res - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBlockContents( - kind: ConsensusFork.Capella, capellaData: blck)) - of ConsensusFork.Deneb: - let blckContents = - try: - var res = SSZ.decode(body.data, DenebSignedBlockContents) - res.signed_block.root = hash_tree_root(res.signed_block.message) - res - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBlockContents( - kind: ConsensusFork.Deneb, denebData: blckContents)) - of ConsensusFork.Electra: - let blckContents = - try: - var res = SSZ.decode(body.data, ElectraSignedBlockContents) - res.signed_block.root = hash_tree_root(res.signed_block.message) - res - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBlockContents( - kind: ConsensusFork.Electra, electraData: blckContents)) - of ConsensusFork.Fulu: - let blckContents = - try: - var res = SSZ.decode(body.data, FuluSignedBlockContents) - res.signed_block.root = hash_tree_root(res.signed_block.message) - res - except SerializationError as exc: - return err(RestErrorMessage.init(Http400, UnableDecodeError, - [version, exc.formatMsg("")])) - except CatchableError as exc: - return err(RestErrorMessage.init(Http400, UnexpectedDecodeError, - [version, $exc.msg])) - ok(RestPublishedSignedBlockContents( - kind: ConsensusFork.Fulu, fuluData: blckContents)) + try: + var res = RestPublishedSignedBlockContents(kind: consensusFork) + withForkyBlck(res): + forkyData = SSZ.decode(body.data, typeof(forkyData)) + ok res + except SerializationError as exc: + err RestErrorMessage.init( + Http400, UnableDecodeError, [version, exc.formatMsg("")] + ) else: err(RestErrorMessage.init(Http415, InvalidContentTypeError, [version, $body.contentType])) @@ -3252,9 +677,7 @@ proc decodeBodyJsonOrSsz*( try: RestJson.decode( body.data, - seq[SignedValidatorRegistrationV1], - requireAllFields = true, - allowUnknownFields = true) + seq[SignedValidatorRegistrationV1]) except SerializationError as exc: debug "Failed to deserialize REST JSON data", err = exc.formatMsg("") @@ -3285,17 +708,13 @@ proc decodeBytesJsonOrSsz*( contentType: Opt[ContentTypeData], version: string ): Result[T, RestErrorMessage] = - var res {.noinit.}: T + var res: T const typeFork = kind(typeof(res.data)) if contentType == ApplicationJsonMediaType: res = try: - RestJson.decode( - data, - T, - requireAllFields = true, - allowUnknownFields = true) + RestJson.decode(data, T) except SerializationError as exc: debug "Failed to deserialize REST JSON data", err = exc.formatMsg("") @@ -3336,55 +755,35 @@ proc decodeBytesJsonOrSsz*( err(RestErrorMessage.init(Http415, InvalidContentTypeError, [$contentType])) -proc decodeBody*[T](t: typedesc[T], - body: ContentBody): Result[T, cstring] = +proc decodeBody*(T: typedesc, body: ContentBody): 
Result[T, cstring] = if body.contentType != ApplicationJsonMediaType: return err("Unsupported content type") - let data = + + try: + ok RestJson.decode(body.data, T) + except SerializationError as exc: + debug "Failed to deserialize REST JSON data", + err = exc.formatMsg(""), + data = string.fromBytes(body.data) + err("Unable to deserialize data") + +proc decodeBodyJsonOrSsz*(T: typedesc, + body: ContentBody): Result[T, RestErrorMessage] = + if body.contentType == ApplicationJsonMediaType: try: - RestJson.decode(body.data, T, - requireAllFields = true, - allowUnknownFields = true) + ok RestJson.decode(body.data, T) except SerializationError as exc: - debug "Failed to deserialize REST JSON data", + debug "Failed to decode JSON data", err = exc.formatMsg(""), data = string.fromBytes(body.data) - return err("Unable to deserialize data") - except CatchableError: - return err("Unexpected deserialization error") - ok(data) - -proc decodeBodyJsonOrSsz*[T](t: typedesc[T], - body: ContentBody): Result[T, RestErrorMessage] = - if body.contentType == ApplicationJsonMediaType: - let data = - try: - RestJson.decode(body.data, T, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError as exc: - debug "Failed to decode JSON data", - err = exc.formatMsg(""), - data = string.fromBytes(body.data) - return err( - RestErrorMessage.init(Http400, UnableDecodeError, + err(RestErrorMessage.init(Http400, UnableDecodeError, [exc.formatMsg("")])) - except CatchableError as exc: - return err( - RestErrorMessage.init(Http400, UnexpectedDecodeError, [$exc.msg])) - ok(data) elif body.contentType == OctetStreamMediaType: - let blck = - try: - SSZ.decode(body.data, T) - except SerializationError as exc: - return err( - RestErrorMessage.init(Http400, UnableDecodeError, + try: + ok SSZ.decode(body.data, T) + except SerializationError as exc: + err(RestErrorMessage.init(Http400, UnableDecodeError, [exc.formatMsg("")])) - except CatchableError as exc: - return err( - RestErrorMessage.init(Http400, UnexpectedDecodeError, [$exc.msg])) - ok(blck) else: err(RestErrorMessage.init(Http415, InvalidContentTypeError, [$body.contentType])) @@ -3393,24 +792,14 @@ proc encodeBytes*(value: seq[SignedValidatorRegistrationV1], contentType: string): RestResult[seq[byte]] = case contentType of "application/json": - try: - var - stream = memoryOutput() - writer = JsonWriter[RestJson].init(stream) - writer.writeArray(value) - ok(stream.getOutput(seq[byte])) - except IOError: - return err("Input/output error") - except SerializationError: - return err("Serialization error") + ok block: + withRestJsonWriter(w, seq[byte]): + w.writeArray(value) of "application/octet-stream": - try: - ok(SSZ.encode( - init( - List[SignedValidatorRegistrationV1, Limit VALIDATOR_REGISTRY_LIMIT], - value))) - except SerializationError: - return err("Serialization error") + ok(SSZ.encode( + init( + List[SignedValidatorRegistrationV1, Limit VALIDATOR_REGISTRY_LIMIT], + value))) else: err("Content-Type not supported") @@ -3418,18 +807,9 @@ proc encodeBytes*[T: EncodeTypes](value: T, contentType: string): RestResult[seq[byte]] = case contentType of "application/json": - let data = - block: - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.writeValue(value) - stream.getOutput(seq[byte]) - except IOError: - return err("Input/output error") - except SerializationError: - return err("Serialization error") - ok(data) + ok block: + withRestJsonWriter(w, seq[byte]): + w.writeValue(value) else: 
err("Content-Type not supported") @@ -3437,18 +817,9 @@ proc encodeBytes*[T: EncodeArrays](value: T, contentType: string): RestResult[seq[byte]] = case contentType of "application/json": - let data = - block: - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.writeArray(value) - stream.getOutput(seq[byte]) - except IOError: - return err("Input/output error") - except SerializationError: - return err("Serialization error") - ok(data) + ok block: + withRestJsonWriter(w, seq[byte]): + w.writeArray(value) else: err("Content-Type not supported") @@ -3458,21 +829,11 @@ proc encodeBytes*[T: EncodeOctetTypes]( ): RestResult[seq[byte]] = case contentType of "application/json": - try: - var - stream = memoryOutput() - writer = JsonWriter[RestJson].init(stream) - writer.writeValue(value) - ok(stream.getOutput(seq[byte])) - except IOError: - err("Input/output error") - except SerializationError: - err("Serialization error") + ok block: + withRestJsonWriter(w, seq[byte]): + w.writeValue(value) of "application/octet-stream": - try: - ok(SSZ.encode(value)) - except CatchableError: - err("Serialization error") + ok(SSZ.encode(value)) else: err("Content-Type not supported") @@ -3506,9 +867,7 @@ proc decodeBytes*[T: ProduceBlockResponseV3]( if mediaType == ApplicationJsonMediaType: try: - ok(RestJson.decode(value, T, - requireAllFields = true, - allowUnknownFields = true)) + ok(RestJson.decode(value, T)) except SerializationError as exc: debug "Failed to deserialize REST JSON data", err = exc.formatMsg(""), @@ -3542,7 +901,10 @@ proc decodeBytes*[T: ProduceBlockResponseV3]( except ValueError: return err("Incorrect `Eth-Consensus-Block-Value` header value") withConsensusFork(fork): - when consensusFork >= ConsensusFork.Deneb: + debugGloasComment "" + when consensusFork == ConsensusFork.Gloas: + return err("gloas produceblockv3 not available yet") + elif consensusFork >= ConsensusFork.Electra: if blinded: let contents = ? 
readSszResBytes(consensusFork.BlindedBlockContents, value) @@ -3587,9 +949,7 @@ proc decodeBytes*[T: DecodeTypes]( if mediaType == ApplicationJsonMediaType: try: - ok RestJson.decode(value, T, - requireAllFields = true, - allowUnknownFields = true) + ok RestJson.decode(value, T) except SerializationError as exc: debug "Failed to deserialize REST JSON data", err = exc.formatMsg(""), @@ -3612,30 +972,30 @@ func encodeString*( ok(Base10.toString(uint64(value))) func encodeString*(value: ValidatorSig): RestResult[string] = - ok(hexOriginal(toRaw(value))) + ok(to0xHex(toRaw(value))) func encodeString*(value: GraffitiBytes): RestResult[string] = - ok(hexOriginal(distinctBase(value))) + ok(to0xHex(distinctBase(value))) func encodeString*(value: Eth2Digest): RestResult[string] = - ok(hexOriginal(value.data)) + ok(to0xHex(value.data)) func encodeString*(value: ValidatorIdent): RestResult[string] = case value.kind of ValidatorQueryKind.Index: ok(Base10.toString(uint64(value.index))) of ValidatorQueryKind.Key: - ok(hexOriginal(toRaw(value.key))) + ok(to0xHex(toRaw(value.key))) func encodeString*(value: ValidatorPubKey): RestResult[string] = - ok(hexOriginal(toRaw(value))) + ok(to0xHex(toRaw(value))) func encodeString*(value: StateIdent): RestResult[string] = case value.kind of StateQueryKind.Slot: ok(Base10.toString(uint64(value.slot))) of StateQueryKind.Root: - ok(hexOriginal(value.root.data)) + ok(to0xHex(value.root.data)) of StateQueryKind.Named: case value.value of StateIdentType.Head: @@ -3661,7 +1021,7 @@ func encodeString*(value: BlockIdent): RestResult[string] = of BlockQueryKind.Slot: ok(Base10.toString(uint64(value.slot))) of BlockQueryKind.Root: - ok(hexOriginal(value.root.data)) + ok(to0xHex(value.root.data)) of BlockQueryKind.Named: case value.value of BlockIdentType.Head: @@ -3739,6 +1099,8 @@ func decodeString*(t: typedesc[EventTopic], ok(EventTopic.AttesterSlashing) of "blob_sidecar": ok(EventTopic.BlobSidecar) + of "data_column_sidecar": + ok(EventTopic.DataColumnSidecar) of "finalized_checkpoint": ok(EventTopic.FinalizedCheckpoint) of "chain_reorg": @@ -3774,6 +1136,8 @@ func encodeString*(value: set[EventTopic]): Result[string, cstring] = res.add("attester_slashing,") if EventTopic.BlobSidecar in value: res.add("blob_sidecar,") + if EventTopic.DataColumnSidecar in value: + res.add("data_column_sidecar,") if EventTopic.FinalizedCheckpoint in value: res.add("finalized_checkpoint,") if EventTopic.ChainReorg in value: @@ -3789,45 +1153,6 @@ func encodeString*(value: set[EventTopic]): Result[string, cstring] = res.setLen(len(res) - 1) ok(res) -func toList*(value: set[ValidatorFilterKind]): seq[string] = - const - pendingSet = {ValidatorFilterKind.PendingInitialized, - ValidatorFilterKind.PendingQueued} - activeSet = {ValidatorFilterKind.ActiveOngoing, - ValidatorFilterKind.ActiveExiting, - ValidatorFilterKind.ActiveSlashed} - exitedSet = {ValidatorFilterKind.ExitedUnslashed, - ValidatorFilterKind.ExitedSlashed} - withdrawSet = {ValidatorFilterKind.WithdrawalPossible, - ValidatorFilterKind.WithdrawalDone} - var - res: seq[string] - v = value - - template processSet(argSet, argName: untyped): untyped = - if argSet * v == argSet: - res.add(argName) - v.excl(argSet) - - template processSingle(argSingle, argName): untyped = - if argSingle in v: - res.add(argName) - - processSet(pendingSet, "pending") - processSet(activeSet, "active") - processSet(exitedSet, "exited") - processSet(withdrawSet, "withdrawal") - processSingle(ValidatorFilterKind.PendingInitialized, "pending_initialized") - 
processSingle(ValidatorFilterKind.PendingQueued, "pending_queued") - processSingle(ValidatorFilterKind.ActiveOngoing, "active_ongoing") - processSingle(ValidatorFilterKind.ActiveExiting, "active_exiting") - processSingle(ValidatorFilterKind.ActiveSlashed, "active_slashed") - processSingle(ValidatorFilterKind.ExitedUnslashed, "exited_unslashed") - processSingle(ValidatorFilterKind.ExitedSlashed, "exited_slashed") - processSingle(ValidatorFilterKind.WithdrawalPossible, "withdrawal_possible") - processSingle(ValidatorFilterKind.WithdrawalDone, "withdrawal_done") - res - func decodeString*(t: typedesc[ValidatorSig], value: string): Result[ValidatorSig, cstring] = if len(value) != ValidatorSigSize + 2: @@ -3948,24 +1273,7 @@ func decodeString*(t: typedesc[BroadcastValidationType], func decodeString*(t: typedesc[ValidatorIdent], value: string): Result[ValidatorIdent, cstring] = - if len(value) > 2: - if (value[0] == '0') and (value[1] == 'x'): - if len(value) != ValidatorKeySize + 2: - err("Incorrect validator's key value length") - else: - let res = ? ValidatorPubKey.fromHex(value) - ok(ValidatorIdent(kind: ValidatorQueryKind.Key, - key: res)) - elif (value[0] in DecimalSet) and (value[1] in DecimalSet): - let res = ? Base10.decode(uint64, value) - ok(ValidatorIdent(kind: ValidatorQueryKind.Index, - index: RestValidatorIndex(res))) - else: - err("Incorrect validator identifier value") - else: - let res = ? Base10.decode(uint64, value) - ok(ValidatorIdent(kind: ValidatorQueryKind.Index, - index: RestValidatorIndex(res))) + ValidatorIdent.parse(value) func decodeString*(t: typedesc[PeerId], value: string): Result[PeerId, cstring] = @@ -3991,215 +1299,15 @@ func decodeString*(t: typedesc[Eth2Digest], func decodeString*(t: typedesc[ValidatorFilter], value: string): Result[ValidatorFilter, cstring] = - case value - of "pending_initialized": - ok({ValidatorFilterKind.PendingInitialized}) - of "pending_queued": - ok({ValidatorFilterKind.PendingQueued}) - of "active_ongoing": - ok({ValidatorFilterKind.ActiveOngoing}) - of "active_exiting": - ok({ValidatorFilterKind.ActiveExiting}) - of "active_slashed": - ok({ValidatorFilterKind.ActiveSlashed}) - of "exited_unslashed": - ok({ValidatorFilterKind.ExitedUnslashed}) - of "exited_slashed": - ok({ValidatorFilterKind.ExitedSlashed}) - of "withdrawal_possible": - ok({ValidatorFilterKind.WithdrawalPossible}) - of "withdrawal_done": - ok({ValidatorFilterKind.WithdrawalDone}) - of "pending": - ok({ - ValidatorFilterKind.PendingInitialized, - ValidatorFilterKind.PendingQueued - }) - of "active": - ok({ - ValidatorFilterKind.ActiveOngoing, - ValidatorFilterKind.ActiveExiting, - ValidatorFilterKind.ActiveSlashed - }) - of "exited": - ok({ - ValidatorFilterKind.ExitedUnslashed, - ValidatorFilterKind.ExitedSlashed - }) - of "withdrawal": - ok({ - ValidatorFilterKind.WithdrawalPossible, - ValidatorFilterKind.WithdrawalDone - }) - else: - err("Incorrect validator state identifier value") - + ValidatorFilter.parse(value) func decodeString*(t: typedesc[ConsensusFork], value: string): Result[ConsensusFork, cstring] = - let vres = ConsensusFork.init(toLowerAscii(value)) - if vres.isSome: - ok(vres.get) - else: + ConsensusFork.init(toLowerAscii(value)) or err("Unsupported or invalid beacon block fork version") proc decodeString*(t: typedesc[EventBeaconBlockObject], value: string): Result[EventBeaconBlockObject, string] = try: - ok(RestJson.decode(value, t, - requireAllFields = true, - allowUnknownFields = true)) + ok(RestJson.decode(value, t)) except SerializationError as 
exc: err(exc.formatMsg("")) - -## ValidatorIdent -proc writeValue*(w: var JsonWriter[RestJson], - value: ValidatorIdent) {.raises: [IOError].} = - writeValue(w, value.encodeString().get()) - -proc readValue*(reader: var JsonReader[RestJson], - value: var ValidatorIdent) {. - raises: [IOError, SerializationError].} = - value = decodeString(ValidatorIdent, reader.readValue(string)).valueOr: - raise newException(SerializationError, $error) - -## RestValidatorRequest -proc readValue*(reader: var JsonReader[RestJson], - value: var RestValidatorRequest) {. - raises: [IOError, SerializationError].} = - var - statuses: Opt[seq[string]] - ids: Opt[seq[string]] - - for fieldName in readObjectFields(reader): - case fieldName - of "ids": - if ids.isSome(): - reader.raiseUnexpectedField("Multiple `ids` fields found", - "RestValidatorRequest") - ids = Opt.some(reader.readValue(seq[string])) - of "statuses": - if statuses.isSome(): - reader.raiseUnexpectedField("Multiple `statuses` fields found", - "RestValidatorRequest") - statuses = Opt.some(reader.readValue(seq[string])) - else: - unrecognizedFieldWarning(fieldName, typeof(value).name) - - let - validatorIds = - block: - # Test for uniqueness of value will be happened on higher layer. - if ids.isSome(): - var res: seq[ValidatorIdent] - for item in ids.get(): - let value = decodeString(ValidatorIdent, item).valueOr: - reader.raiseUnexpectedValue($error) - res.add(value) - Opt.some(res) - else: - Opt.none(seq[ValidatorIdent]) - filter = - block: - if statuses.isSome(): - var res: ValidatorFilter - for item in statuses.get(): - let value = decodeString(ValidatorFilter, item).valueOr: - reader.raiseUnexpectedValue($error) - # Test for uniqueness of value. - if value * res != {}: - reader.raiseUnexpectedValue( - "The `statuses` array should consist of only unique values") - res.incl(value) - Opt.some(res) - else: - Opt.none(ValidatorFilter) - - value = RestValidatorRequest(ids: validatorIds, status: filter) - -proc writeValue*(writer: var JsonWriter[RestJson], - value: RestValidatorRequest) {.raises: [IOError].} = - writer.beginRecord() - if value.ids.isSome(): - var res: seq[string] - for item in value.ids.get(): - res.add(item.encodeString().get()) - writer.writeField("ids", res) - if value.status.isSome(): - let res = value.status.get().toList() - if len(res) > 0: - writer.writeField("statuses", res) - writer.endRecord() - -## RestSyncCommitteeReward -proc writeValue*(writer: var JsonWriter[RestJson], - value: RestSyncCommitteeReward) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("validator_index", value.validator_index) - writer.writeField("reward", value.reward) - writer.endRecord() - -## ForkedAttestation -proc readValue*(reader: var JsonReader[RestJson], - value: var ForkedAttestation) {. 
- raises: [IOError, SerializationError].} = - var - version: Opt[ConsensusFork] - data: Opt[JsonString] - - for fieldName {.inject.} in readObjectFields(reader): - case fieldName - of "version": - if version.isSome(): - reader.raiseUnexpectedField("Multiple version fields found", - "ForkedAttestation") - let vres = reader.readValue(string).toLowerAscii() - version = ConsensusFork.init(vres) - if version.isNone(): - reader.raiseUnexpectedValue("Incorrect version field value") - of "data": - if data.isSome(): - reader.raiseUnexpectedField( - "Multiple '" & fieldName & "' fields found", "ForkedAttestation") - data = Opt.some(reader.readValue(JsonString)) - else: - unrecognizedFieldWarning(fieldName, "ForkedAttestation") - - if version.isNone(): - reader.raiseUnexpectedValue("Field `version` is missing") - if data.isNone(): - reader.raiseUnexpectedValue("Field `data` is missing") - - withConsensusFork(version.get()): - when consensusFork < ConsensusFork.Electra: - let res = - try: - RestJson.decode(string(data.get()), - phase0.Attestation, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError as exc: - reader.raiseUnexpectedValue( - "Incorrect phase0 attestation format, [" & - exc.formatMsg("ForkedAttestation") & "]") - value = ForkedAttestation.init(res, consensusFork) - else: - let res = - try: - RestJson.decode(string(data.get()), - electra.Attestation, - requireAllFields = true, - allowUnknownFields = true) - except SerializationError as exc: - reader.raiseUnexpectedValue( - "Incorrect electra attestation format, [" & - exc.formatMsg("ForkedAttestation") & "]") - value = ForkedAttestation.init(res, consensusFork) - -## ForkedAttestation -proc writeValue*(writer: var JsonWriter[RestJson], - attestation: ForkedAttestation) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("version", attestation.kind.toString()) - withAttestation(attestation): - writer.writeField("data", forkyAttestation) - writer.endRecord() \ No newline at end of file diff --git a/beacon_chain/spec/eth2_apis/rest_beacon_calls.nim b/beacon_chain/spec/eth2_apis/rest_beacon_calls.nim index 3f5a26e456..ff5893c0ca 100644 --- a/beacon_chain/spec/eth2_apis/rest_beacon_calls.nim +++ b/beacon_chain/spec/eth2_apis/rest_beacon_calls.nim @@ -5,28 +5,30 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import chronos, presto/client, chronicles, ".."/".."/validators/slashing_protection_common, - ".."/mev/[bellatrix_mev, capella_mev], ".."/[helpers, forks, keystore, eth2_ssz_serialization], "."/[rest_types, rest_common, eth2_rest_serialization] -from ".."/datatypes/capella import SignedBeaconBlock +from ../mev/bellatrix_mev import SignedBlindedBeaconBlock +from ../mev/capella_mev import SignedBlindedBeaconBlock +from ../mev/deneb_mev import SignedBlindedBeaconBlock export chronos, client, rest_types, eth2_rest_serialization type - ForkySignedBlockContents* = + ForkySignedBlockContents = phase0.SignedBeaconBlock | altair.SignedBeaconBlock | bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock | DenebSignedBlockContents | ElectraSignedBlockContents | - FuluSignedBlockContents + FuluSignedBlockContents | + GloasSignedBlockContents proc getGenesis*(): RestResponse[GetGenesisResponse] {. 
rest, endpoint: "/eth/v1/beacon/genesis", @@ -133,53 +135,6 @@ proc getBlockHeader*( else: raiseRestResponseError(resp) -proc publishBlock*(body: phase0.SignedBeaconBlock): RestPlainResponse {. - rest, endpoint: "/eth/v1/beacon/blocks", - meth: MethodPost.} - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlock - -proc publishBlock*(body: altair.SignedBeaconBlock): RestPlainResponse {. - rest, endpoint: "/eth/v1/beacon/blocks", - meth: MethodPost.} - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlock - -proc publishBlock*(body: bellatrix.SignedBeaconBlock): RestPlainResponse {. - rest, endpoint: "/eth/v1/beacon/blocks", - meth: MethodPost.} - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlock - -proc publishBlock*(body: capella.SignedBeaconBlock): RestPlainResponse {. - rest, endpoint: "/eth/v1/beacon/blocks", - meth: MethodPost.} - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlock - -proc publishBlock*(body: DenebSignedBlockContents): RestPlainResponse {. - rest, endpoint: "/eth/v1/beacon/blocks", - meth: MethodPost.} - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlock - -proc publishBlock*(body: ElectraSignedBlockContents): RestPlainResponse {. - rest, endpoint: "/eth/v1/beacon/blocks", - meth: MethodPost.} - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlock - -proc publishBlock*(body: FuluSignedBlockContents): RestPlainResponse {. - rest, endpoint: "/eth/v1/beacon/blocks", - meth: MethodPost.} - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlock - -proc publishSszBlock*( - client: RestClientRef, - blck: ForkySignedBeaconBlock - ): Future[RestPlainResponse] {.async.} = - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlock - let - consensus = typeof(blck).kind.toString() - resp = await client.publishBlock( - blck, restContentType = $OctetStreamMediaType, - extraHeaders = @[("eth-consensus-version", consensus)]) - return resp - proc publishBlockV2( broadcast_validation: Option[BroadcastValidationType], body: phase0.SignedBeaconBlock @@ -229,6 +184,12 @@ proc publishBlockV2( meth: MethodPost.} ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlockV2 +proc publishBlockV2( + broadcast_validation: Option[BroadcastValidationType], + body: GloasSignedBlockContents +): RestPlainResponse {.rest, endpoint: "/eth/v2/beacon/blocks", + meth: MethodPost.} + ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlockV2 proc publishBlockV2*( client: RestClientRef, @@ -244,6 +205,8 @@ proc publishBlockV2*( ConsensusFork.Electra.toString() elif blck is FuluSignedBlockContents: ConsensusFork.Fulu.toString() + elif blck is GloasSignedBlockContents: + ConsensusFork.Gloas.toString() else: typeof(blck).kind.toString() client.publishBlockV2( @@ -325,41 +288,6 @@ proc publishSszBlindedBlock*( blck, restContentType = $OctetStreamMediaType, extraHeaders = @[("eth-consensus-version", consensus)]) -proc publishBlindedBlockV2*( - broadcast_validation: Option[BroadcastValidationType], - body: phase0.SignedBeaconBlock -): RestPlainResponse {.rest, endpoint: "/eth/v2/beacon/blinded_blocks", - meth: MethodPost.} - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlindedBlock - -proc publishBlindedBlockV2*( - broadcast_validation: Option[BroadcastValidationType], - body: altair.SignedBeaconBlock -): RestPlainResponse {.rest, endpoint: "/eth/v2/beacon/blinded_blocks", - meth: MethodPost.} - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlindedBlock - -proc publishBlindedBlockV2*( - 
broadcast_validation: Option[BroadcastValidationType], - body: bellatrix_mev.SignedBlindedBeaconBlock -): RestPlainResponse {.rest, endpoint: "/eth/v2/beacon/blinded_blocks", - meth: MethodPost.} - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlindedBlock - -proc publishBlindedBlockV2*( - broadcast_validation: Option[BroadcastValidationType], - body: capella_mev.SignedBlindedBeaconBlock -): RestPlainResponse {.rest, endpoint: "/eth/v2/beacon/blinded_blocks", - meth: MethodPost.} - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlindedBlock - -proc publishBlindedBlockV2*( - broadcast_validation: Option[BroadcastValidationType], - body: deneb_mev.SignedBlindedBeaconBlock -): RestPlainResponse {.rest, endpoint: "/eth/v2/beacon/blinded_blocks", - meth: MethodPost.} - ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlindedBlock - proc publishBlindedBlockV2*( broadcast_validation: Option[BroadcastValidationType], body: electra_mev.SignedBlindedBeaconBlock @@ -374,7 +302,7 @@ proc publishBlindedBlockV2*( meth: MethodPost.} ## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlindedBlock -proc publishBlindedBlockV2*( +proc publishJsonBlindedBlockV2*( client: RestClientRef, broadcast_validation: Option[BroadcastValidationType], blck: ForkySignedBlindedBeaconBlock @@ -558,8 +486,3 @@ proc submitPoolVoluntaryExit*(body: SignedVoluntaryExit): RestPlainResponse {. rest, endpoint: "/eth/v1/beacon/pool/voluntary_exits", meth: MethodPost.} ## https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolVoluntaryExit - -proc getDepositSnapshot*(): RestResponse[GetDepositSnapshotResponse] {. - rest, endpoint: "/eth/v1/beacon/deposit_snapshot", - meth: MethodGet.} - ## https://github.com/ethereum/EIPs/blob/master/EIPS/eip-4881.md diff --git a/beacon_chain/spec/eth2_apis/rest_config_calls.nim b/beacon_chain/spec/eth2_apis/rest_config_calls.nim index 2d6f5c472c..1e7e3c1a0c 100644 --- a/beacon_chain/spec/eth2_apis/rest_config_calls.nim +++ b/beacon_chain/spec/eth2_apis/rest_config_calls.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -20,7 +20,3 @@ proc getForkSchedulePlain*(): RestPlainResponse {. proc getSpecVC*(): RestResponse[GetSpecVCResponse] {. rest, endpoint: "/eth/v1/config/spec", meth: MethodGet.} ## https://ethereum.github.io/beacon-APIs/#/Config/getSpec - -proc getDepositContract*(): RestResponse[GetDepositContractResponse] {. 
- rest, endpoint: "/eth/v1/config/deposit_contract", meth: MethodGet.} - ## https://ethereum.github.io/beacon-APIs/#/Config/getDepositContract diff --git a/beacon_chain/spec/eth2_apis/rest_keymanager_calls.nim b/beacon_chain/spec/eth2_apis/rest_keymanager_calls.nim index a1fb047361..3f033e7fb1 100644 --- a/beacon_chain/spec/eth2_apis/rest_keymanager_calls.nim +++ b/beacon_chain/spec/eth2_apis/rest_keymanager_calls.nim @@ -16,9 +16,9 @@ import export client, rest_types, eth2_rest_serialization, rest_keymanager_types -UUID.serializesAsBaseIn RestJson -KeyPath.serializesAsBaseIn RestJson -WalletName.serializesAsBaseIn RestJson +UUID.serializesAsBase RestJson +KeyPath.serializesAsBase RestJson +WalletName.serializesAsBase RestJson proc raiseKeymanagerGenericError*(resp: RestPlainResponse) {. noreturn, raises: [RestError].} = diff --git a/beacon_chain/spec/eth2_apis/rest_nimbus_calls.nim b/beacon_chain/spec/eth2_apis/rest_nimbus_calls.nim index 3de4ceded1..883ec0560a 100644 --- a/beacon_chain/spec/eth2_apis/rest_nimbus_calls.nim +++ b/beacon_chain/spec/eth2_apis/rest_nimbus_calls.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -17,7 +17,7 @@ proc getValidatorsActivity*(epoch: Epoch, rest, endpoint: "/nimbus/v1/validator/activity/{epoch}", meth: MethodPost.} -proc getTimesyncInifo*(body: RestNimbusTimestamp1): RestPlainResponse {. +proc getTimesyncInfo(body: RestNimbusTimestamp1): RestPlainResponse {. 
rest, endpoint: "/nimbus/v1/timesync", meth: MethodPost.} proc getTimeOffset*(client: RestClientRef, @@ -26,7 +26,7 @@ proc getTimeOffset*(client: RestClientRef, let timestamp1 = getTimestamp() data = RestNimbusTimestamp1(timestamp1: timestamp1) - resp = await client.getTimesyncInifo(data) + resp = await client.getTimesyncInfo(data) timestamp4 = getTimestamp() case resp.status @@ -76,3 +76,118 @@ proc getTimeOffset*(client: RestClientRef, let msg = "Error response (" & $resp.status & ") [" & error.message & "]" raise (ref RestResponseError)( msg: msg, status: error.code, message: error.message) + +func decodeSszResponse( + T: type ForkedHistoricalSummariesWithProof, + data: openArray[byte], + historicalSummariesFork: HistoricalSummariesFork, + cfg: RuntimeConfig, +): T {.raises: [RestDecodingError].} = + case historicalSummariesFork + of HistoricalSummariesFork.Electra: + let summaries = + try: + SSZ.decode(data, GetHistoricalSummariesV1ResponseElectra) + except SerializationError as exc: + raise newException(RestDecodingError, exc.msg) + ForkedHistoricalSummariesWithProof.init(summaries) + of HistoricalSummariesFork.Capella: + let summaries = + try: + SSZ.decode(data, GetHistoricalSummariesV1Response) + except SerializationError as exc: + raise newException(RestDecodingError, exc.msg) + ForkedHistoricalSummariesWithProof.init(summaries) + +proc decodeJsonResponse( + T: type ForkedHistoricalSummariesWithProof, + data: openArray[byte], + historicalSummariesFork: HistoricalSummariesFork, + cfg: RuntimeConfig, +): T {.raises: [RestDecodingError].} = + case historicalSummariesFork + of HistoricalSummariesFork.Electra: + let summaries = decodeBytes( + GetHistoricalSummariesV1ResponseElectra, data, Opt.none(ContentTypeData) + ).valueOr: + raise newException(RestDecodingError, $error) + ForkedHistoricalSummariesWithProof.init(summaries) + of HistoricalSummariesFork.Capella: + let summaries = decodeBytes( + GetHistoricalSummariesV1Response, data, Opt.none(ContentTypeData) + ).valueOr: + raise newException(RestDecodingError, $error) + ForkedHistoricalSummariesWithProof.init(summaries) + +proc decodeHttpResponse( + T: type ForkedHistoricalSummariesWithProof, + data: openArray[byte], + mediaType: MediaType, + consensusFork: ConsensusFork, + cfg: RuntimeConfig, +): T {.raises: [RestDecodingError].} = + let historicalSummariesFork = historicalSummariesForkAtConsensusFork(consensusFork).valueOr: + raiseRestDecodingBytesError(cstring("Unsupported fork: " & $consensusFork)) + + if mediaType == OctetStreamMediaType: + ForkedHistoricalSummariesWithProof.decodeSszResponse(data, historicalSummariesFork, cfg) + elif mediaType == ApplicationJsonMediaType: + ForkedHistoricalSummariesWithProof.decodeJsonResponse(data, historicalSummariesFork, cfg) + else: + raise newException(RestDecodingError, "Unsupported content-type") + +proc getHistoricalSummariesV1Plain*( + state_id: StateIdent +): RestPlainResponse {. + rest, + endpoint: "/nimbus/v1/debug/beacon/states/{state_id}/historical_summaries", + accept: preferSSZ, + meth: MethodGet +.} + +proc getHistoricalSummariesV1*( + client: RestClientRef, state_id: StateIdent, cfg: RuntimeConfig, restAccept = "" +): Future[Opt[ForkedHistoricalSummariesWithProof]] {. 
+ async: ( + raises: [ + CancelledError, RestEncodingError, RestDnsResolveError, RestCommunicationError, + RestDecodingError, RestResponseError, + ] + ) +.} = + let resp = + if len(restAccept) > 0: + await client.getHistoricalSummariesV1Plain(state_id, restAcceptType = restAccept) + else: + await client.getHistoricalSummariesV1Plain(state_id) + + return + case resp.status + of 200: + if resp.contentType.isNone() or isWildCard(resp.contentType.get().mediaType): + raise newException(RestDecodingError, "Missing or incorrect Content-Type") + else: + let + consensusFork = ConsensusFork.decodeString( + resp.headers.getString("eth-consensus-version") + ).valueOr: + raiseRestDecodingBytesError(error) + mediaType = resp.contentType.value().mediaType + + Opt.some( + ForkedHistoricalSummariesWithProof.decodeHttpResponse( + resp.data, mediaType, consensusFork, cfg + ) + ) + of 404: + Opt.none(ForkedHistoricalSummariesWithProof) + of 400, 500: + let error = decodeBytes(RestErrorMessage, resp.data, resp.contentType).valueOr: + let msg = + "Incorrect response error format (" & $resp.status & ") [" & $error & "]" + raise (ref RestResponseError)(msg: msg, status: resp.status) + let msg = "Error response (" & $resp.status & ") [" & error.message & "]" + raise + (ref RestResponseError)(msg: msg, status: error.code, message: error.message) + else: + raiseRestResponseError(resp) diff --git a/beacon_chain/spec/eth2_apis/rest_types.nim b/beacon_chain/spec/eth2_apis/rest_types.nim index beba6a19d0..af6b62dc65 100644 --- a/beacon_chain/spec/eth2_apis/rest_types.nim +++ b/beacon_chain/spec/eth2_apis/rest_types.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
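# Editorial sketch (illustration only, not part of the diff): one way a caller
# might consume the getHistoricalSummariesV1 client call added above. It yields
# Opt.none on a 404 response and otherwise a ForkedHistoricalSummariesWithProof
# that is unpacked per fork. Import paths and the wrapper proc are assumptions.
import chronos
import beacon_chain/spec/eth2_apis/[rest_nimbus_calls, rest_types]

proc showHistoricalSummaries*(
    client: RestClientRef, cfg: RuntimeConfig) {.async.} =
  let summaries = await client.getHistoricalSummariesV1(
    StateIdent.init(StateIdentType.Head), cfg)
  if summaries.isSome():
    withForkyHistoricalSummariesWithProof(summaries.get()):
      # `historicalFork` and `forkySummaries` are injected by the template; the
      # proof array length differs between the Capella and Electra variants.
      echo historicalFork, ": ", forkySummaries.historical_summaries.len,
        " summaries at slot ", forkySummaries.slot
  else:
    echo "historical_summaries not available for this state (404)"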
-{.push raises: [].} +{.push raises: [], gcsafe.} # Types used by both client and server in the common REST API: # https://ethereum.github.io/beacon-APIs/ @@ -15,11 +15,11 @@ import std/[json, tables], - stew/base10, web3/primitives, httputils, - ".."/[deposit_snapshots, forks], - ".."/mev/[deneb_mev] + results, + stew/base10, httputils, stew/bitops2, + ../forks -export forks, phase0, altair, bellatrix, capella, deneb_mev, tables, httputils +export forks, tables, httputils, results const # https://github.com/ethereum/eth2.0-APIs/blob/master/apis/beacon/states/validator_balances.yaml#L17 @@ -57,7 +57,7 @@ type # https://github.com/ethereum/beacon-APIs/blob/v2.4.2/apis/eventstream/index.yaml EventTopic* {.pure.} = enum Head, Block, Attestation, BlockGossip, VoluntaryExit, BLSToExecutionChange, - ProposerSlashing, AttesterSlashing, BlobSidecar, SingleAttestation, + ProposerSlashing, AttesterSlashing, BlobSidecar, DataColumnSidecar, SingleAttestation, FinalizedCheckpoint, ChainReorg, ContributionAndProof, LightClientFinalityUpdate, LightClientOptimisticUpdate @@ -224,6 +224,11 @@ type status*: string validator*: Validator + RestValidatorIdentity* = object + index*: ValidatorIndex + pubkey*: ValidatorPubKey + activation_epoch*: Epoch + RestBlockHeader* = object slot*: Slot proposer_index*: ValidatorIndex @@ -256,8 +261,8 @@ type head_slot*: Slot sync_distance*: uint64 is_syncing*: bool - is_optimistic*: Option[bool] - el_offline*: Option[bool] + is_optimistic*: Opt[bool] + el_offline*: Opt[bool] RestPeerCount* = object disconnected*: uint64 @@ -312,7 +317,12 @@ type FuluSignedBlockContents* = object signed_block*: fulu.SignedBeaconBlock - kzg_proofs*: deneb.KzgProofs + kzg_proofs*: fulu.KzgProofs + blobs*: deneb.Blobs + + GloasSignedBlockContents* = object + signed_block*: gloas.SignedBeaconBlock + kzg_proofs*: fulu.KzgProofs blobs*: deneb.Blobs RestPublishedSignedBlockContents* = object @@ -324,15 +334,12 @@ type of ConsensusFork.Deneb: denebData*: DenebSignedBlockContents of ConsensusFork.Electra: electraData*: ElectraSignedBlockContents of ConsensusFork.Fulu: fuluData*: FuluSignedBlockContents + of ConsensusFork.Gloas: gloasData*: GloasSignedBlockContents ProduceBlockResponseV3* = ForkedMaybeBlindedBeaconBlock VCRuntimeConfig* = Table[string, string] - RestDepositContract* = object - chain_id*: string - address*: string - RestBlockInfo* = object slot*: Slot blck* {.serializedFieldName: "block".}: Eth2Digest @@ -358,16 +365,16 @@ type DataRootEnclosedObject*[T] = object dependent_root*: Eth2Digest data*: T - execution_optimistic*: Option[bool] + execution_optimistic*: Opt[bool] DataOptimisticObject*[T] = object data*: T - execution_optimistic*: Option[bool] + execution_optimistic*: Opt[bool] DataOptimisticAndFinalizedObject*[T] = object data*: T - execution_optimistic*: Option[bool] - finalized*: Option[bool] + execution_optimistic*: Opt[bool] + finalized*: Opt[bool] ForkedSignedBlockHeader* = object message*: uint32 # message offset @@ -410,10 +417,8 @@ type # https://consensys.github.io/web3signer/web3signer-eth2.html#operation/ETH2_SIGN Web3SignerValidatorRegistration* = object - feeRecipient* {. - serializedFieldName: "fee_recipient".}: string - gasLimit* {. 
- serializedFieldName: "gas_limit".}: uint64 + fee_recipient*: Eth1Address + gas_limit*: uint64 timestamp*: uint64 pubkey*: ValidatorPubKey @@ -421,11 +426,20 @@ type index*: GeneralizedIndex proof*: seq[Eth2Digest] + # https://github.com/ethereum/remote-signing-api/blob/87a392deb4e43209ca896dde6b4ec40bef7ee02c/signing/paths/sign.yaml#L37 Web3SignerRequestKind* {.pure.} = enum - AggregationSlot, AggregateAndProof, AggregateAndProofV2, Attestation, - BlockV2, Deposit, RandaoReveal, VoluntaryExit, SyncCommitteeMessage, - SyncCommitteeSelectionProof, SyncCommitteeContributionAndProof, - ValidatorRegistration + AggregationSlot = "AGGREGATION_SLOT" + AggregateAndProof = "AGGREGATE_AND_PROOF" + AggregateAndProofV2 = "AGGREGATE_AND_PROOF_V2" + Attestation = "ATTESTATION" + BlockV2 = "BLOCK_V2" + Deposit = "DEPOSIT" + RandaoReveal = "RANDAO_REVEAL" + VoluntaryExit = "VOLUNTARY_EXIT" + SyncCommitteeMessage = "SYNC_COMMITTEE_MESSAGE", + SyncCommitteeSelectionProof = "SYNC_COMMITTEE_SELECTION_PROOF" + SyncCommitteeContributionAndProof = "SYNC_COMMITTEE_CONTRIBUTION_AND_PROOF" + ValidatorRegistration = "VALIDATOR_REGISTRATION" Web3SignerRequest* = object signingRoot*: Opt[Eth2Digest] @@ -514,8 +528,6 @@ type GetBlockHeadersResponse* = DataEnclosedObject[seq[RestBlockHeaderInfo]] GetBlockRootResponse* = DataOptimisticObject[RestRoot] GetDebugChainHeadsV2Response* = DataEnclosedObject[seq[RestChainHeadV2]] - GetDepositContractResponse* = DataEnclosedObject[RestDepositContract] - GetDepositSnapshotResponse* = DataEnclosedObject[DepositTreeSnapshot] GetEpochCommitteesResponse* = DataEnclosedObject[seq[RestBeaconStatesCommittees]] GetForkScheduleResponse* = DataEnclosedObject[seq[Fork]] GetGenesisResponse* = DataEnclosedObject[RestGenesis] @@ -549,12 +561,9 @@ type SubmitBeaconCommitteeSelectionsResponse* = DataEnclosedObject[seq[RestBeaconCommitteeSelection]] SubmitSyncCommitteeSelectionsResponse* = DataEnclosedObject[seq[RestSyncCommitteeSelection]] - GetHeaderResponseDeneb* = DataVersionEnclosedObject[deneb_mev.SignedBuilderBid] GetHeaderResponseElectra* = DataVersionEnclosedObject[electra_mev.SignedBuilderBid] GetHeaderResponseFulu* = DataVersionEnclosedObject[fulu_mev.SignedBuilderBid] - SubmitBlindedBlockResponseDeneb* = DataVersionEnclosedObject[deneb_mev.ExecutionPayloadAndBlobsBundle] SubmitBlindedBlockResponseElectra* = DataVersionEnclosedObject[electra_mev.ExecutionPayloadAndBlobsBundle] - SubmitBlindedBlockResponseFulu* = DataVersionEnclosedObject[fulu_mev.ExecutionPayloadAndBlobsBundle] RestNodeValidity* {.pure.} = enum valid = "VALID", @@ -564,8 +573,8 @@ type RestNodeExtraData* = object justified_root*: Eth2Digest finalized_root*: Eth2Digest - u_justified_checkpoint*: Option[Checkpoint] - u_finalized_checkpoint*: Option[Checkpoint] + u_justified_checkpoint*: Opt[Checkpoint] + u_finalized_checkpoint*: Opt[Checkpoint] best_child*: Eth2Digest best_descendant*: Eth2Digest @@ -578,7 +587,7 @@ type weight*: uint64 validity*: RestNodeValidity execution_block_hash*: Eth2Digest - extra_data*: Option[RestNodeExtraData] + extra_data*: Opt[RestNodeExtraData] RestExtraData* = object discard @@ -589,6 +598,8 @@ type fork_choice_nodes*: seq[RestNode] extra_data*: RestExtraData + EmptyBody* = object + func isLowestScoreAggregatedAttestation*(a: phase0.Attestation): bool = (a.data.slot == GENESIS_SLOT) and (a.data.index == 0'u64) and @@ -607,38 +618,52 @@ func `==`*(a, b: RestValidatorIndex): bool {.borrow.} template withForkyBlck*( x: RestPublishedSignedBlockContents, body: untyped): untyped = case x.kind + 
of ConsensusFork.Gloas: + const consensusFork {.inject, used.} = ConsensusFork.Gloas + template forkyData: untyped {.inject, used.} = x.gloasData + template forkyBlck: untyped {.inject, used.} = x.gloasData.signed_block + template kzg_proofs: untyped {.inject, used.} = x.gloasData.kzg_proofs + template blobs: untyped {.inject, used.} = x.gloasData.blobs + body of ConsensusFork.Fulu: const consensusFork {.inject, used.} = ConsensusFork.Fulu + template forkyData: untyped {.inject, used.} = x.fuluData template forkyBlck: untyped {.inject, used.} = x.fuluData.signed_block template kzg_proofs: untyped {.inject, used.} = x.fuluData.kzg_proofs template blobs: untyped {.inject, used.} = x.fuluData.blobs body of ConsensusFork.Electra: const consensusFork {.inject, used.} = ConsensusFork.Electra + template forkyData: untyped {.inject, used.} = x.electraData template forkyBlck: untyped {.inject, used.} = x.electraData.signed_block template kzg_proofs: untyped {.inject, used.} = x.electraData.kzg_proofs template blobs: untyped {.inject, used.} = x.electraData.blobs body of ConsensusFork.Deneb: const consensusFork {.inject, used.} = ConsensusFork.Deneb + template forkyData: untyped {.inject, used.} = x.denebData template forkyBlck: untyped {.inject, used.} = x.denebData.signed_block template kzg_proofs: untyped {.inject, used.} = x.denebData.kzg_proofs template blobs: untyped {.inject, used.} = x.denebData.blobs body of ConsensusFork.Capella: const consensusFork {.inject, used.} = ConsensusFork.Capella + template forkyData: untyped {.inject, used.} = x.capellaData template forkyBlck: untyped {.inject, used.} = x.capellaData body of ConsensusFork.Bellatrix: const consensusFork {.inject, used.} = ConsensusFork.Bellatrix + template forkyData: untyped {.inject, used.} = x.bellatrixData template forkyBlck: untyped {.inject, used.} = x.bellatrixData body of ConsensusFork.Altair: const consensusFork {.inject, used.} = ConsensusFork.Altair + template forkyData: untyped {.inject, used.} = x.altairData template forkyBlck: untyped {.inject, used.} = x.altairData body of ConsensusFork.Phase0: const consensusFork {.inject, used.} = ConsensusFork.Phase0 + template forkyData: untyped {.inject, used.} = x.phase0Data template forkyBlck: untyped {.inject, used.} = x.phase0Data body @@ -660,6 +685,8 @@ func init*(T: type ForkedSignedBeaconBlock, ForkedSignedBeaconBlock.init(contents.electraData.signed_block) of ConsensusFork.Fulu: ForkedSignedBeaconBlock.init(contents.fuluData.signed_block) + of ConsensusFork.Gloas: + ForkedSignedBeaconBlock.init(contents.gloasData.signed_block) func init*(t: typedesc[RestPublishedSignedBlockContents], blck: phase0.BeaconBlock, root: Eth2Digest, @@ -749,6 +776,22 @@ func init*(t: typedesc[RestPublishedSignedBlockContents], ) ) +func init*(t: typedesc[RestPublishedSignedBlockContents], + contents: gloas.BlockContents, root: Eth2Digest, + signature: ValidatorSig): RestPublishedSignedBlockContents = + RestPublishedSignedBlockContents( + kind: ConsensusFork.Gloas, + gloasData: GloasSignedBlockContents( + signed_block: gloas.SignedBeaconBlock( + message: contents.`block`, + root: root, + signature: signature + ), + kzg_proofs: contents.kzg_proofs, + blobs: contents.blobs + ) + ) + func init*(t: typedesc[StateIdent], v: StateIdentType): StateIdent = StateIdent(kind: StateQueryKind.Named, value: v) @@ -784,6 +827,12 @@ func init*(t: typedesc[RestValidator], index: ValidatorIndex, RestValidator(index: index, balance: Base10.toString(balance), status: status, validator: validator) +func init*(t: 
typedesc[RestValidatorIdentity], index: ValidatorIndex, + pubkey: ValidatorPubKey, + activation_epoch: Epoch): RestValidatorIdentity = + RestValidatorIdentity(index: index, pubkey: pubkey, + activation_epoch: activation_epoch) + func init*(t: typedesc[RestValidatorBalance], index: ValidatorIndex, balance: Gwei): RestValidatorBalance = RestValidatorBalance(index: index, balance: Base10.toString(balance)) @@ -958,22 +1007,17 @@ func init*(t: typedesc[Web3SignerRequest], fork: Fork, syncCommitteeContributionAndProof: data ) -from stew/byteutils import to0xHex - -func init*(t: typedesc[Web3SignerRequest], fork: Fork, +func init*(t: typedesc[Web3SignerRequest], genesis_validators_root: Eth2Digest, data: ValidatorRegistrationV1, signingRoot: Opt[Eth2Digest] = Opt.none(Eth2Digest) ): Web3SignerRequest = Web3SignerRequest( kind: Web3SignerRequestKind.ValidatorRegistration, - forkInfo: Opt.some(Web3SignerForkInfo( - fork: fork, genesis_validators_root: genesis_validators_root - )), signingRoot: signingRoot, validatorRegistration: Web3SignerValidatorRegistration( - feeRecipient: data.fee_recipient.data.to0xHex, - gasLimit: data.gas_limit, + fee_recipient: data.fee_recipient, + gas_limit: data.gas_limit, timestamp: data.timestamp, pubkey: data.pubkey) ) @@ -1078,3 +1122,185 @@ func toValidatorIndex*(value: RestValidatorIndex): Result[ValidatorIndex, err(ValidatorIndexError.TooHighValue) else: doAssert(false, "ValidatorIndex type size is incorrect") + +## Types and helpers for historical_summaries + proof endpoint +const + # gIndex for historical_summaries field (27th field in BeaconState) + HISTORICAL_SUMMARIES_GINDEX* = GeneralizedIndex(59) # 32 + 27 = 59 + HISTORICAL_SUMMARIES_GINDEX_ELECTRA* = GeneralizedIndex(91) # 64 + 27 = 91 + +type + # Note: these could go in separate Capella/Electra spec files if they were + # part of the specification. 
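# Editorial note (illustration, not part of the diff): the generalized index of
# field i in a container with N fields is next_pow_of_two(N) + i. Capella's
# BeaconState has 28 fields, so historical_summaries (field index 27) lands at
# 32 + 27 = 59; Electra's BeaconState exceeds 32 fields, giving 64 + 27 = 91.
# The proof types below therefore carry log2trunc(59) = 5 and log2trunc(91) = 6
# sibling hashes respectively.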
+ HistoricalSummariesProof* = array[log2trunc(HISTORICAL_SUMMARIES_GINDEX), Eth2Digest] + HistoricalSummariesProofElectra* = + array[log2trunc(HISTORICAL_SUMMARIES_GINDEX_ELECTRA), Eth2Digest] + + # REST API types + GetHistoricalSummariesV1Response* = object + historical_summaries*: HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT] + proof*: HistoricalSummariesProof + slot*: Slot + + GetHistoricalSummariesV1ResponseElectra* = object + historical_summaries*: HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT] + proof*: HistoricalSummariesProofElectra + slot*: Slot + + ForkyGetHistoricalSummariesV1Response* = + GetHistoricalSummariesV1Response | + GetHistoricalSummariesV1ResponseElectra + + HistoricalSummariesFork* {.pure.} = enum + Capella = 0, + Electra = 1 + + # REST client response type + ForkedHistoricalSummariesWithProof* = object + case kind*: HistoricalSummariesFork + of HistoricalSummariesFork.Capella: capellaData*: GetHistoricalSummariesV1Response + of HistoricalSummariesFork.Electra: electraData*: GetHistoricalSummariesV1ResponseElectra + +template historical_summaries_gindex*( + kind: static HistoricalSummariesFork): GeneralizedIndex = + case kind + of HistoricalSummariesFork.Electra: + HISTORICAL_SUMMARIES_GINDEX_ELECTRA + of HistoricalSummariesFork.Capella: + HISTORICAL_SUMMARIES_GINDEX + +template getHistoricalSummariesResponse*( + kind: static HistoricalSummariesFork): auto = + when kind >= HistoricalSummariesFork.Electra: + GetHistoricalSummariesV1ResponseElectra + elif kind >= HistoricalSummariesFork.Capella: + GetHistoricalSummariesV1Response + +template init*( + T: type ForkedHistoricalSummariesWithProof, + historical_summaries: GetHistoricalSummariesV1Response, +): T = + ForkedHistoricalSummariesWithProof( + kind: HistoricalSummariesFork.Capella, capellaData: historical_summaries + ) + +template init*( + T: type ForkedHistoricalSummariesWithProof, + historical_summaries: GetHistoricalSummariesV1ResponseElectra, +): T = + ForkedHistoricalSummariesWithProof( + kind: HistoricalSummariesFork.Electra, electraData: historical_summaries + ) + +template withForkyHistoricalSummariesWithProof*( + x: ForkedHistoricalSummariesWithProof, body: untyped): untyped = + case x.kind + of HistoricalSummariesFork.Electra: + const historicalFork {.inject, used.} = HistoricalSummariesFork.Electra + template forkySummaries: untyped {.inject, used.} = x.electraData + body + of HistoricalSummariesFork.Capella: + const historicalFork {.inject, used.} = HistoricalSummariesFork.Capella + template forkySummaries: untyped {.inject, used.} = x.capellaData + body + +func historicalSummariesForkAtConsensusFork*(consensusFork: ConsensusFork): Opt[HistoricalSummariesFork] = + static: doAssert HistoricalSummariesFork.high == HistoricalSummariesFork.Electra + if consensusFork >= ConsensusFork.Electra: + Opt.some HistoricalSummariesFork.Electra + elif consensusFork >= ConsensusFork.Capella: + Opt.some HistoricalSummariesFork.Capella + else: + Opt.none HistoricalSummariesFork + +func parse*(_: type ValidatorIdent, value: string): Result[ValidatorIdent, cstring] = + # Either key or index depending on prefix + if len(value) > 2 and (value[0] == '0') and (value[1] == 'x'): + let res = ? ValidatorPubKey.fromHex(value) + ok(ValidatorIdent(kind: ValidatorQueryKind.Key, key: res)) + else: + let res = RestValidatorIndex(? 
Base10.decode(uint64, value)) + ok(ValidatorIdent(kind: ValidatorQueryKind.Index, index: res)) + +func parse*(_: type ValidatorFilter, value: string): Result[ValidatorFilter, cstring] = + case value + of "pending_initialized": + ok({ValidatorFilterKind.PendingInitialized}) + of "pending_queued": + ok({ValidatorFilterKind.PendingQueued}) + of "active_ongoing": + ok({ValidatorFilterKind.ActiveOngoing}) + of "active_exiting": + ok({ValidatorFilterKind.ActiveExiting}) + of "active_slashed": + ok({ValidatorFilterKind.ActiveSlashed}) + of "exited_unslashed": + ok({ValidatorFilterKind.ExitedUnslashed}) + of "exited_slashed": + ok({ValidatorFilterKind.ExitedSlashed}) + of "withdrawal_possible": + ok({ValidatorFilterKind.WithdrawalPossible}) + of "withdrawal_done": + ok({ValidatorFilterKind.WithdrawalDone}) + of "pending": + ok({ + ValidatorFilterKind.PendingInitialized, + ValidatorFilterKind.PendingQueued + }) + of "active": + ok({ + ValidatorFilterKind.ActiveOngoing, + ValidatorFilterKind.ActiveExiting, + ValidatorFilterKind.ActiveSlashed + }) + of "exited": + ok({ + ValidatorFilterKind.ExitedUnslashed, + ValidatorFilterKind.ExitedSlashed + }) + of "withdrawal": + ok({ + ValidatorFilterKind.WithdrawalPossible, + ValidatorFilterKind.WithdrawalDone + }) + else: + err("Incorrect validator state identifier value") + +func toList*(value: set[ValidatorFilterKind]): seq[string] = + const + pendingSet = {ValidatorFilterKind.PendingInitialized, + ValidatorFilterKind.PendingQueued} + activeSet = {ValidatorFilterKind.ActiveOngoing, + ValidatorFilterKind.ActiveExiting, + ValidatorFilterKind.ActiveSlashed} + exitedSet = {ValidatorFilterKind.ExitedUnslashed, + ValidatorFilterKind.ExitedSlashed} + withdrawSet = {ValidatorFilterKind.WithdrawalPossible, + ValidatorFilterKind.WithdrawalDone} + var + res: seq[string] + v = value + + template processSet(argSet, argName: untyped): untyped = + if argSet * v == argSet: + res.add(argName) + v.excl(argSet) + + template processSingle(argSingle, argName): untyped = + if argSingle in v: + res.add(argName) + + processSet(pendingSet, "pending") + processSet(activeSet, "active") + processSet(exitedSet, "exited") + processSet(withdrawSet, "withdrawal") + processSingle(ValidatorFilterKind.PendingInitialized, "pending_initialized") + processSingle(ValidatorFilterKind.PendingQueued, "pending_queued") + processSingle(ValidatorFilterKind.ActiveOngoing, "active_ongoing") + processSingle(ValidatorFilterKind.ActiveExiting, "active_exiting") + processSingle(ValidatorFilterKind.ActiveSlashed, "active_slashed") + processSingle(ValidatorFilterKind.ExitedUnslashed, "exited_unslashed") + processSingle(ValidatorFilterKind.ExitedSlashed, "exited_slashed") + processSingle(ValidatorFilterKind.WithdrawalPossible, "withdrawal_possible") + processSingle(ValidatorFilterKind.WithdrawalDone, "withdrawal_done") + res diff --git a/beacon_chain/spec/eth2_merkleization.nim b/beacon_chain/spec/eth2_merkleization.nim index 71618fce93..324eb48471 100644 --- a/beacon_chain/spec/eth2_merkleization.nim +++ b/beacon_chain/spec/eth2_merkleization.nim @@ -10,7 +10,6 @@ # Import this module to get access to `hash_tree_root` for spec types import - stew/endians2, std/sets, ssz_serialization/[merkleization, proofs], ./ssz_codec @@ -26,9 +25,6 @@ from ./datatypes/fulu import HashedBeaconState, SignedBeaconBlock export ssz_codec, merkleization, proofs -type - DepositsMerkleizer* = SszMerkleizer2[DEPOSIT_CONTRACT_TREE_DEPTH + 1] - # Can't use `ForkyHashedBeaconState`/`ForkyHashedSignedBeaconBlock` without # creating 
recursive module dependency through `forks`. func hash_tree_root*( @@ -45,34 +41,6 @@ func hash_tree_root*( fulu.SignedBeaconBlock) {. error: "SignedBeaconBlock should not be hashed".} -func depositCountBytes*(x: uint64): array[32, byte] = - doAssert(x <= 4294967295'u64) - var z = x - for i in 0..3: - result[31-i] = byte(int64(z) %% 256'i64) - z = z div 256 - -func depositCountU64*(xs: openArray[byte]): uint64 = - ## depositCountU64 considers just the first 4 bytes as - ## MAX_DEPOSIT_COUNT is defined as 2^32 - 1. - for i in 0 .. 27: - doAssert xs[i] == 0 - return uint64.fromBytesBE(xs[24..31]) - -func init*(T: type DepositsMerkleizer, s: DepositContractState): DepositsMerkleizer = - let count = depositCountU64(s.deposit_count) - DepositsMerkleizer.init(s.branch, count) - -func toDepositContractState*(merkleizer: DepositsMerkleizer): DepositContractState = - # TODO There is an off by one discrepancy in the size of the arrays here that - # need to be investigated. It shouldn't matter as long as the tree is - # not populated to its maximum size. - result.branch[0..31] = merkleizer.getCombinedChunks[0..31] - result.deposit_count[24..31] = merkleizer.getChunkCount().toBytesBE - -func getDepositsRoot*(m: var DepositsMerkleizer): Eth2Digest = - mixInLength(m.getFinalHash, int m.totalChunks) - func hash*(v: ref HashedValidatorPubKeyItem): Hash = if not isNil(v): hash(v[].key) diff --git a/beacon_chain/spec/eth2_ssz_serialization.nim b/beacon_chain/spec/eth2_ssz_serialization.nim index 4d7af53ca4..05c021b467 100644 --- a/beacon_chain/spec/eth2_ssz_serialization.nim +++ b/beacon_chain/spec/eth2_ssz_serialization.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -19,6 +19,7 @@ import from ./datatypes/deneb import SignedBeaconBlock, TrustedSignedBeaconBlock from ./datatypes/electra import SignedBeaconBlock, TrustedSignedBeaconBlock from ./datatypes/fulu import SignedBeaconBlock, TrustedSignedBeaconBlock +from ./datatypes/gloas import SignedBeaconBlock, TrustedSignedBeaconBlock export phase0, altair, ssz_codec, ssz_serialization, eth2_merkleization @@ -74,6 +75,12 @@ template readSszBytes*( template readSszBytes*( data: openArray[byte], val: var fulu.TrustedSignedBeaconBlock, updateRoot = true) = readAndUpdateRoot(data, val, updateRoot) +template readSszBytes*( + data: openArray[byte], val: var gloas.SignedBeaconBlock, updateRoot = true) = + readAndUpdateRoot(data, val, updateRoot) +template readSszBytes*( + data: openArray[byte], val: var gloas.TrustedSignedBeaconBlock, updateRoot = true) = + readAndUpdateRoot(data, val, updateRoot) template readSszBytes*( data: openArray[byte], val: var auto, updateRoot: bool) = diff --git a/beacon_chain/spec/forks.nim b/beacon_chain/spec/forks.nim index fe272d1f44..dd1b73e554 100644 --- a/beacon_chain/spec/forks.nim +++ b/beacon_chain/spec/forks.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
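# Editorial sketch (illustration only, not part of the diff): the forks.nim hunk
# below turns ConsensusFork into a string-valued enum whose spellings match the
# beacon API, so `$fork` and the existing ConsensusFork.init(string) round-trip
# directly. The import path is an assumption for illustration.
import beacon_chain/spec/forks

doAssert $ConsensusFork.Gloas == "gloas"
doAssert ConsensusFork.init("electra").get() == ConsensusFork.Electra
doAssert ConsensusFork.init("not-a-fork").isNone()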
-{.push raises: [].} +{.push raises: [], gcsafe.} import std/macros, @@ -16,13 +16,17 @@ import "."/[ block_id, eth2_merkleization, eth2_ssz_serialization, forks_light_client, presets], - ./datatypes/[phase0, altair, bellatrix, capella, deneb, electra, fulu], + ./datatypes/[phase0, altair, bellatrix, capella, deneb, electra, fulu, gloas], ./mev/[bellatrix_mev, capella_mev, deneb_mev, electra_mev, fulu_mev] +from std/sequtils import mapIt +from stew/staticfor import staticFor + export - extras, block_id, phase0, altair, bellatrix, capella, deneb, electra, - fulu, eth2_merkleization, eth2_ssz_serialization, forks_light_client, - presets, deneb_mev, electra_mev, fulu_mev + extras, block_id, eth2_merkleization, eth2_ssz_serialization, forks_light_client, + presets, + phase0, altair, bellatrix, capella, deneb, electra, fulu, gloas, + bellatrix_mev, capella_mev, deneb_mev, electra_mev, fulu_mev # This file contains helpers for dealing with forks - we have two ways we can # deal with forks: @@ -42,13 +46,15 @@ export type ConsensusFork* {.pure.} = enum - Phase0, - Altair, - Bellatrix, - Capella, - Deneb, - Electra, - Fulu + ## Fork names spelled as in beacon API spec + Phase0 = "phase0" + Altair = "altair" + Bellatrix = "bellatrix" + Capella = "capella" + Deneb = "deneb" + Electra = "electra" + Fulu = "fulu" + Gloas = "gloas" ForkyBeaconState* = phase0.BeaconState | @@ -57,7 +63,8 @@ type capella.BeaconState | deneb.BeaconState | electra.BeaconState | - fulu.BeaconState + fulu.BeaconState | + gloas.BeaconState ForkyHashedBeaconState* = phase0.HashedBeaconState | @@ -66,7 +73,8 @@ type capella.HashedBeaconState | deneb.HashedBeaconState | electra.HashedBeaconState | - fulu.HashedBeaconState + fulu.HashedBeaconState | + gloas.HashedBeaconState ForkedHashedBeaconState* = object case kind*: ConsensusFork @@ -77,20 +85,20 @@ type of ConsensusFork.Deneb: denebData*: deneb.HashedBeaconState of ConsensusFork.Electra: electraData*: electra.HashedBeaconState of ConsensusFork.Fulu: fuluData*: fulu.HashedBeaconState + of ConsensusFork.Gloas: gloasData*: gloas.HashedBeaconState ForkyExecutionPayload* = bellatrix.ExecutionPayload | capella.ExecutionPayload | - deneb.ExecutionPayload | - electra.ExecutionPayload | - fulu.ExecutionPayload + deneb.ExecutionPayload ForkyExecutionPayloadHeader* = bellatrix.ExecutionPayloadHeader | capella.ExecutionPayloadHeader | - deneb.ExecutionPayloadHeader | - electra.ExecutionPayloadHeader | - fulu.ExecutionPayloadHeader + deneb.ExecutionPayloadHeader + + ForkyExecutionPayloadOrHeader* = + ForkyExecutionPayload | ForkyExecutionPayloadHeader ForkyBeaconBlockBody* = phase0.BeaconBlockBody | @@ -99,7 +107,8 @@ type capella.BeaconBlockBody | deneb.BeaconBlockBody | electra.BeaconBlockBody | - fulu.BeaconBlockBody + fulu.BeaconBlockBody | + gloas.BeaconBlockBody ForkySigVerifiedBeaconBlockBody* = phase0.SigVerifiedBeaconBlockBody | @@ -108,7 +117,8 @@ type capella.SigVerifiedBeaconBlockBody | deneb.SigVerifiedBeaconBlockBody | electra.SigVerifiedBeaconBlockBody | - fulu.SigVerifiedBeaconBlockBody + fulu.SigVerifiedBeaconBlockBody | + gloas.SigVerifiedBeaconBlockBody ForkyTrustedBeaconBlockBody* = phase0.TrustedBeaconBlockBody | @@ -117,7 +127,8 @@ type capella.TrustedBeaconBlockBody | deneb.TrustedBeaconBlockBody | electra.TrustedBeaconBlockBody | - fulu.TrustedBeaconBlockBody + fulu.TrustedBeaconBlockBody | + gloas.TrustedBeaconBlockBody SomeForkyBeaconBlockBody* = ForkyBeaconBlockBody | @@ -131,7 +142,8 @@ type capella.BeaconBlock | deneb.BeaconBlock | electra.BeaconBlock | - 
fulu.BeaconBlock + fulu.BeaconBlock | + gloas.BeaconBlock ForkySigVerifiedBeaconBlock* = phase0.SigVerifiedBeaconBlock | @@ -140,7 +152,8 @@ type capella.SigVerifiedBeaconBlock | deneb.SigVerifiedBeaconBlock | electra.SigVerifiedBeaconBlock | - fulu.SigVerifiedBeaconBlock + fulu.SigVerifiedBeaconBlock | + gloas.SigVerifiedBeaconBlock ForkyTrustedBeaconBlock* = phase0.TrustedBeaconBlock | @@ -149,7 +162,8 @@ type capella.TrustedBeaconBlock | deneb.TrustedBeaconBlock | electra.TrustedBeaconBlock | - fulu.TrustedBeaconBlock + fulu.TrustedBeaconBlock | + gloas.TrustedBeaconBlock SomeForkyBeaconBlock* = ForkyBeaconBlock | @@ -161,13 +175,38 @@ type capella.ExecutionPayloadForSigning | deneb.ExecutionPayloadForSigning | electra.ExecutionPayloadForSigning | - fulu.ExecutionPayloadForSigning + fulu.ExecutionPayloadForSigning | + gloas.ExecutionPayloadForSigning ForkyBlindedBeaconBlock* = - deneb_mev.BlindedBeaconBlock | electra_mev.BlindedBeaconBlock | fulu_mev.BlindedBeaconBlock + SomeForkyBlindedBeaconBlock* = + ForkyBlindedBeaconBlock | + electra_mev.SigVerifiedBlindedBeaconBlock | + fulu_mev.SigVerifiedBlindedBeaconBlock + + SomeForkyBlindedBeaconBlockBody* = + electra_mev.BlindedBeaconBlockBody | + fulu_mev.BlindedBeaconBlockBody | + electra_mev.SigVerifiedBlindedBeaconBlockBody | + fulu_mev.SigVerifiedBlindedBeaconBlockBody + + ForkyBuilderBid* = + electra_mev.BuilderBid | + fulu_mev.BuilderBid + + ForkySignedBuilderBid* = + electra_mev.SignedBuilderBid | + fulu_mev.SignedBuilderBid + + ForkyBlockContents* = + deneb.BlockContents | + electra.BlockContents | + fulu.BlockContents | + gloas.BlockContents + ForkyAggregateAndProof* = phase0.AggregateAndProof | electra.AggregateAndProof @@ -189,6 +228,7 @@ type of ConsensusFork.Deneb: denebData*: phase0.Attestation of ConsensusFork.Electra: electraData*: electra.Attestation of ConsensusFork.Fulu: fuluData*: electra.Attestation + of ConsensusFork.Gloas: gloasData*: electra.Attestation ForkedAggregateAndProof* = object case kind*: ConsensusFork @@ -199,6 +239,7 @@ type of ConsensusFork.Deneb: denebData*: phase0.AggregateAndProof of ConsensusFork.Electra: electraData*: electra.AggregateAndProof of ConsensusFork.Fulu: fuluData*: electra.AggregateAndProof + of ConsensusFork.Gloas: gloasData*: electra.AggregateAndProof ForkedBeaconBlock* = object case kind*: ConsensusFork @@ -209,6 +250,7 @@ type of ConsensusFork.Deneb: denebData*: deneb.BeaconBlock of ConsensusFork.Electra: electraData*: electra.BeaconBlock of ConsensusFork.Fulu: fuluData*: fulu.BeaconBlock + of ConsensusFork.Gloas: gloasData*: gloas.BeaconBlock ForkedMaybeBlindedBeaconBlock* = object case kind*: ConsensusFork @@ -221,27 +263,20 @@ type of ConsensusFork.Capella: capellaData*: capella.BeaconBlock of ConsensusFork.Deneb: - denebData*: deneb_mev.MaybeBlindedBeaconBlock + denebData*: deneb.BlockContents of ConsensusFork.Electra: electraData*: electra_mev.MaybeBlindedBeaconBlock of ConsensusFork.Fulu: fuluData*: fulu_mev.MaybeBlindedBeaconBlock + of ConsensusFork.Gloas: + gloasData*: gloas.BlockContents consensusValue*: Opt[UInt256] executionValue*: Opt[UInt256] Web3SignerForkedBeaconBlock* = object kind*: ConsensusFork data*: BeaconBlockHeader - - ForkedBlindedBeaconBlock* = object - case kind*: ConsensusFork - of ConsensusFork.Phase0: phase0Data*: phase0.BeaconBlock - of ConsensusFork.Altair: altairData*: altair.BeaconBlock - of ConsensusFork.Bellatrix: bellatrixData*: bellatrix_mev.BlindedBeaconBlock - of ConsensusFork.Capella: capellaData*: capella_mev.BlindedBeaconBlock - of 
ConsensusFork.Deneb: denebData*: deneb_mev.BlindedBeaconBlock - of ConsensusFork.Electra: electraData*: electra_mev.BlindedBeaconBlock - of ConsensusFork.Fulu: fuluData*: fulu_mev.BlindedBeaconBlock + ## From Bellatrix onwards, a header is all that's needed ForkySignedBeaconBlock* = phase0.SignedBeaconBlock | @@ -250,7 +285,8 @@ type capella.SignedBeaconBlock | deneb.SignedBeaconBlock | electra.SignedBeaconBlock | - fulu.SignedBeaconBlock + fulu.SignedBeaconBlock | + gloas.SignedBeaconBlock ForkedSignedBeaconBlock* = object case kind*: ConsensusFork @@ -261,6 +297,7 @@ type of ConsensusFork.Deneb: denebData*: deneb.SignedBeaconBlock of ConsensusFork.Electra: electraData*: electra.SignedBeaconBlock of ConsensusFork.Fulu: fuluData*: fulu.SignedBeaconBlock + of ConsensusFork.Gloas: gloasData*: gloas.SignedBeaconBlock ForkySignedBlindedBeaconBlock* = phase0.SignedBeaconBlock | @@ -280,6 +317,7 @@ type of ConsensusFork.Deneb: denebData*: deneb_mev.SignedBlindedBeaconBlock of ConsensusFork.Electra: electraData*: electra_mev.SignedBlindedBeaconBlock of ConsensusFork.Fulu: fuluData*: fulu_mev.SignedBlindedBeaconBlock + of ConsensusFork.Gloas: gloasData*: fulu_mev.SignedBlindedBeaconBlock ForkySigVerifiedSignedBeaconBlock* = phase0.SigVerifiedSignedBeaconBlock | @@ -288,16 +326,8 @@ type capella.SigVerifiedSignedBeaconBlock | deneb.SigVerifiedSignedBeaconBlock | electra.SigVerifiedSignedBeaconBlock | - fulu.SigVerifiedSignedBeaconBlock - - ForkyMsgTrustedSignedBeaconBlock* = - phase0.MsgTrustedSignedBeaconBlock | - altair.MsgTrustedSignedBeaconBlock | - bellatrix.MsgTrustedSignedBeaconBlock | - capella.MsgTrustedSignedBeaconBlock | - deneb.MsgTrustedSignedBeaconBlock | - electra.MsgTrustedSignedBeaconBlock | - fulu.MsgTrustedSignedBeaconBlock + fulu.SigVerifiedSignedBeaconBlock | + gloas.SigVerifiedSignedBeaconBlock ForkyTrustedSignedBeaconBlock* = phase0.TrustedSignedBeaconBlock | @@ -306,17 +336,8 @@ type capella.TrustedSignedBeaconBlock | deneb.TrustedSignedBeaconBlock | electra.TrustedSignedBeaconBlock | - fulu.TrustedSignedBeaconBlock - - ForkedMsgTrustedSignedBeaconBlock* = object - case kind*: ConsensusFork - of ConsensusFork.Phase0: phase0Data*: phase0.MsgTrustedSignedBeaconBlock - of ConsensusFork.Altair: altairData*: altair.MsgTrustedSignedBeaconBlock - of ConsensusFork.Bellatrix: bellatrixData*: bellatrix.MsgTrustedSignedBeaconBlock - of ConsensusFork.Capella: capellaData*: capella.MsgTrustedSignedBeaconBlock - of ConsensusFork.Deneb: denebData*: deneb.MsgTrustedSignedBeaconBlock - of ConsensusFork.Electra: electraData*: electra.MsgTrustedSignedBeaconBlock - of ConsensusFork.Fulu: fuluData*: fulu.MsgTrustedSignedBeaconBlock + fulu.TrustedSignedBeaconBlock | + gloas.TrustedSignedBeaconBlock ForkedTrustedSignedBeaconBlock* = object case kind*: ConsensusFork @@ -327,11 +348,11 @@ type of ConsensusFork.Deneb: denebData*: deneb.TrustedSignedBeaconBlock of ConsensusFork.Electra: electraData*: electra.TrustedSignedBeaconBlock of ConsensusFork.Fulu: fuluData*: fulu.TrustedSignedBeaconBlock + of ConsensusFork.Gloas: gloasData*: gloas.TrustedSignedBeaconBlock SomeForkySignedBeaconBlock* = ForkySignedBeaconBlock | ForkySigVerifiedSignedBeaconBlock | - ForkyMsgTrustedSignedBeaconBlock | ForkyTrustedSignedBeaconBlock EpochInfoFork* {.pure.} = enum @@ -346,13 +367,14 @@ type ForkyEpochInfo* = phase0.EpochInfo | altair.EpochInfo ForkDigests* = object - phase0*: ForkDigest - altair*: ForkDigest - bellatrix*: ForkDigest - capella*: ForkDigest - deneb*: ForkDigest - electra*: ForkDigest - fulu*: 
ForkDigest + phase0*: ForkDigest + altair*: ForkDigest + bellatrix: ForkDigest + capella: ForkDigest + deneb: ForkDigest + electra: ForkDigest + fuluInt: ForkDigest + bpos: seq[(Epoch, ConsensusFork, ForkDigest)] template kind*( x: typedesc[ @@ -365,7 +387,6 @@ template kind*( phase0.SigVerifiedBeaconBlockBody | phase0.TrustedBeaconBlockBody | phase0.SigVerifiedSignedBeaconBlock | - phase0.MsgTrustedSignedBeaconBlock | phase0.TrustedSignedBeaconBlock | phase0.Attestation | phase0.AggregateAndProof | @@ -383,7 +404,6 @@ template kind*( altair.SigVerifiedBeaconBlockBody | altair.TrustedBeaconBlockBody | altair.SigVerifiedSignedBeaconBlock | - altair.MsgTrustedSignedBeaconBlock | altair.TrustedSignedBeaconBlock]): ConsensusFork = ConsensusFork.Altair @@ -391,9 +411,7 @@ template kind*( x: typedesc[ bellatrix.BeaconState | bellatrix.HashedBeaconState | - bellatrix.ExecutionPayload | bellatrix.ExecutionPayloadForSigning | - bellatrix.ExecutionPayloadHeader | bellatrix.BeaconBlock | bellatrix.SignedBeaconBlock | bellatrix.TrustedBeaconBlock | @@ -401,7 +419,6 @@ template kind*( bellatrix.SigVerifiedBeaconBlockBody | bellatrix.TrustedBeaconBlockBody | bellatrix.SigVerifiedSignedBeaconBlock | - bellatrix.MsgTrustedSignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock] | bellatrix_mev.SignedBlindedBeaconBlock): ConsensusFork = ConsensusFork.Bellatrix @@ -410,9 +427,7 @@ template kind*( x: typedesc[ capella.BeaconState | capella.HashedBeaconState | - capella.ExecutionPayload | capella.ExecutionPayloadForSigning | - capella.ExecutionPayloadHeader | capella.BeaconBlock | capella.SignedBeaconBlock | capella.TrustedBeaconBlock | @@ -420,7 +435,6 @@ template kind*( capella.SigVerifiedBeaconBlockBody | capella.TrustedBeaconBlockBody | capella.SigVerifiedSignedBeaconBlock | - capella.MsgTrustedSignedBeaconBlock | capella.TrustedSignedBeaconBlock | capella_mev.SignedBlindedBeaconBlock]): ConsensusFork = ConsensusFork.Capella @@ -429,9 +443,7 @@ template kind*( x: typedesc[ deneb.BeaconState | deneb.HashedBeaconState | - deneb.ExecutionPayload | deneb.ExecutionPayloadForSigning | - deneb.ExecutionPayloadHeader | deneb.BeaconBlock | deneb.SignedBeaconBlock | deneb.TrustedBeaconBlock | @@ -439,20 +451,15 @@ template kind*( deneb.SigVerifiedBeaconBlockBody | deneb.TrustedBeaconBlockBody | deneb.SigVerifiedSignedBeaconBlock | - deneb.MsgTrustedSignedBeaconBlock | deneb.TrustedSignedBeaconBlock | - deneb_mev.SignedBlindedBeaconBlock | - deneb_mev.SignedBuilderBid | - deneb_mev.ExecutionPayloadAndBlobsBundle]): ConsensusFork = + deneb_mev.SignedBlindedBeaconBlock]): ConsensusFork = ConsensusFork.Deneb template kind*( x: typedesc[ electra.BeaconState | electra.HashedBeaconState | - electra.ExecutionPayload | electra.ExecutionPayloadForSigning | - electra.ExecutionPayloadHeader | electra.BeaconBlock | electra.SignedBeaconBlock | electra.TrustedBeaconBlock | @@ -460,12 +467,12 @@ template kind*( electra.SigVerifiedBeaconBlockBody | electra.TrustedBeaconBlockBody | electra.SigVerifiedSignedBeaconBlock | - electra.MsgTrustedSignedBeaconBlock | electra.TrustedSignedBeaconBlock | electra.Attestation | electra.SingleAttestation | electra.AggregateAndProof | electra.SignedAggregateAndProof | + electra_mev.BlindedBeaconBlock | electra_mev.SignedBlindedBeaconBlock | electra_mev.SignedBuilderBid | electra_mev.ExecutionPayloadAndBlobsBundle]): ConsensusFork = @@ -475,9 +482,7 @@ template kind*( x: typedesc[ fulu.BeaconState | fulu.HashedBeaconState | - fulu.ExecutionPayload | fulu.ExecutionPayloadForSigning | - 
fulu.ExecutionPayloadHeader | fulu.BeaconBlock | fulu.SignedBeaconBlock | fulu.TrustedBeaconBlock | @@ -485,186 +490,206 @@ template kind*( fulu.SigVerifiedBeaconBlockBody | fulu.TrustedBeaconBlockBody | fulu.SigVerifiedSignedBeaconBlock | - fulu.MsgTrustedSignedBeaconBlock | fulu.TrustedSignedBeaconBlock | + fulu_mev.BlindedBeaconBlock | fulu_mev.SignedBlindedBeaconBlock | - fulu_mev.SignedBuilderBid | - fulu_mev.ExecutionPayloadAndBlobsBundle]): ConsensusFork = + fulu_mev.SignedBuilderBid]): ConsensusFork = ConsensusFork.Fulu -template BeaconState*(kind: static ConsensusFork): auto = - when kind == ConsensusFork.Fulu: - typedesc[fulu.BeaconState] +template kind*( + x: typedesc[ + gloas.BeaconState | + gloas.HashedBeaconState | + gloas.ExecutionPayloadForSigning | + gloas.BeaconBlock | + gloas.SignedBeaconBlock | + gloas.TrustedBeaconBlock | + gloas.BeaconBlockBody | + gloas.SigVerifiedBeaconBlockBody | + gloas.TrustedBeaconBlockBody | + gloas.SigVerifiedSignedBeaconBlock | + gloas.TrustedSignedBeaconBlock]): ConsensusFork = + ConsensusFork.Gloas + +template BeaconState*(kind: static ConsensusFork): typedesc = + when kind == ConsensusFork.Gloas: + gloas.BeaconState + elif kind == ConsensusFork.Fulu: + fulu.BeaconState elif kind == ConsensusFork.Electra: - typedesc[electra.BeaconState] + electra.BeaconState elif kind == ConsensusFork.Deneb: - typedesc[deneb.BeaconState] + deneb.BeaconState elif kind == ConsensusFork.Capella: - typedesc[capella.BeaconState] + capella.BeaconState elif kind == ConsensusFork.Bellatrix: - typedesc[bellatrix.BeaconState] + bellatrix.BeaconState elif kind == ConsensusFork.Altair: - typedesc[altair.BeaconState] + altair.BeaconState elif kind == ConsensusFork.Phase0: - typedesc[phase0.BeaconState] + phase0.BeaconState else: - static: raiseAssert "Unreachable" + {.error: "BeaconState unsupported in " & $kind.} -template BeaconBlock*(kind: static ConsensusFork): auto = - when kind == ConsensusFork.Fulu: - typedesc[fulu.BeaconBlock] +template BeaconBlock*(kind: static ConsensusFork): typedesc = + when kind == ConsensusFork.Gloas: + gloas.BeaconBlock + elif kind == ConsensusFork.Fulu: + fulu.BeaconBlock elif kind == ConsensusFork.Electra: - typedesc[electra.BeaconBlock] + electra.BeaconBlock elif kind == ConsensusFork.Deneb: - typedesc[deneb.BeaconBlock] + deneb.BeaconBlock elif kind == ConsensusFork.Capella: - typedesc[capella.BeaconBlock] + capella.BeaconBlock elif kind == ConsensusFork.Bellatrix: - typedesc[bellatrix.BeaconBlock] + bellatrix.BeaconBlock elif kind == ConsensusFork.Altair: - typedesc[altair.BeaconBlock] + altair.BeaconBlock elif kind == ConsensusFork.Phase0: - typedesc[phase0.BeaconBlock] + phase0.BeaconBlock else: - static: raiseAssert "Unreachable" + {.error: "BeaconBlock unsupported in " & $kind.} -template BeaconBlockBody*(kind: static ConsensusFork): auto = - when kind == ConsensusFork.Fulu: - typedesc[fulu.BeaconBlockBody] +template BeaconBlockBody*(kind: static ConsensusFork): typedesc = + when kind == ConsensusFork.Gloas: + gloas.BeaconBlockBody + elif kind == ConsensusFork.Fulu: + fulu.BeaconBlockBody elif kind == ConsensusFork.Electra: - typedesc[electra.BeaconBlockBody] + electra.BeaconBlockBody elif kind == ConsensusFork.Deneb: - typedesc[deneb.BeaconBlockBody] + deneb.BeaconBlockBody elif kind == ConsensusFork.Capella: - typedesc[capella.BeaconBlockBody] + capella.BeaconBlockBody elif kind == ConsensusFork.Bellatrix: - typedesc[bellatrix.BeaconBlockBody] + bellatrix.BeaconBlockBody elif kind == ConsensusFork.Altair: - 
typedesc[altair.BeaconBlockBody] + altair.BeaconBlockBody elif kind == ConsensusFork.Phase0: - typedesc[phase0.BeaconBlockBody] + phase0.BeaconBlockBody else: - static: raiseAssert "Unreachable" + {.error: "BeaconBlockBody unsupported in " & $kind.} -template SignedBeaconBlock*(kind: static ConsensusFork): auto = - when kind == ConsensusFork.Fulu: - typedesc[fulu.SignedBeaconBlock] +template SignedBeaconBlock*(kind: static ConsensusFork): typedesc = + when kind == ConsensusFork.Gloas: + gloas.SignedBeaconBlock + elif kind == ConsensusFork.Fulu: + fulu.SignedBeaconBlock elif kind == ConsensusFork.Electra: - typedesc[electra.SignedBeaconBlock] + electra.SignedBeaconBlock elif kind == ConsensusFork.Deneb: - typedesc[deneb.SignedBeaconBlock] + deneb.SignedBeaconBlock elif kind == ConsensusFork.Capella: - typedesc[capella.SignedBeaconBlock] + capella.SignedBeaconBlock elif kind == ConsensusFork.Bellatrix: - typedesc[bellatrix.SignedBeaconBlock] + bellatrix.SignedBeaconBlock elif kind == ConsensusFork.Altair: - typedesc[altair.SignedBeaconBlock] + altair.SignedBeaconBlock elif kind == ConsensusFork.Phase0: - typedesc[phase0.SignedBeaconBlock] + phase0.SignedBeaconBlock else: - static: raiseAssert "Unreachable" + {.error: "SignedBeaconBlock unsupported in " & $kind.} -template TrustedSignedBeaconBlock*(kind: static ConsensusFork): auto = - when kind == ConsensusFork.Fulu: - typedesc[fulu.TrustedSignedBeaconBlock] +template TrustedSignedBeaconBlock*(kind: static ConsensusFork): typedesc = + when kind == ConsensusFork.Gloas: + gloas.TrustedSignedBeaconBlock + elif kind == ConsensusFork.Fulu: + fulu.TrustedSignedBeaconBlock elif kind == ConsensusFork.Electra: - typedesc[electra.TrustedSignedBeaconBlock] + electra.TrustedSignedBeaconBlock elif kind == ConsensusFork.Deneb: - typedesc[deneb.TrustedSignedBeaconBlock] + deneb.TrustedSignedBeaconBlock elif kind == ConsensusFork.Capella: - typedesc[capella.TrustedSignedBeaconBlock] + capella.TrustedSignedBeaconBlock elif kind == ConsensusFork.Bellatrix: - typedesc[bellatrix.TrustedSignedBeaconBlock] + bellatrix.TrustedSignedBeaconBlock elif kind == ConsensusFork.Altair: - typedesc[altair.TrustedSignedBeaconBlock] + altair.TrustedSignedBeaconBlock elif kind == ConsensusFork.Phase0: - typedesc[phase0.TrustedSignedBeaconBlock] + phase0.TrustedSignedBeaconBlock else: - static: raiseAssert "Unreachable" + {.error: "TrustedSignedBeaconBlock unsupported in " & $kind.} -template ExecutionPayloadForSigning*(kind: static ConsensusFork): auto = - when kind == ConsensusFork.Fulu: - typedesc[fulu.ExecutionPayloadForSigning] +template ExecutionPayloadHeader*(kind: static ConsensusFork): typedesc = + when kind in [ + ConsensusFork.Gloas, ConsensusFork.Fulu, ConsensusFork.Electra, + ConsensusFork.Deneb]: + deneb.ExecutionPayloadHeader + elif kind == ConsensusFork.Capella: + capella.ExecutionPayloadHeader + elif kind == ConsensusFork.Bellatrix: + bellatrix.ExecutionPayloadHeader + else: + {.error: "ExecutionPayloadHeader unsupported in " & $kind.} + +template ExecutionPayloadForSigning*(kind: static ConsensusFork): typedesc = + when kind == ConsensusFork.Gloas: + gloas.ExecutionPayloadForSigning + elif kind == ConsensusFork.Fulu: + fulu.ExecutionPayloadForSigning elif kind == ConsensusFork.Electra: - typedesc[electra.ExecutionPayloadForSigning] + electra.ExecutionPayloadForSigning elif kind == ConsensusFork.Deneb: - typedesc[deneb.ExecutionPayloadForSigning] + deneb.ExecutionPayloadForSigning elif kind == ConsensusFork.Capella: - 
typedesc[capella.ExecutionPayloadForSigning] + capella.ExecutionPayloadForSigning elif kind == ConsensusFork.Bellatrix: - typedesc[bellatrix.ExecutionPayloadForSigning] + bellatrix.ExecutionPayloadForSigning else: - static: raiseAssert "Unreachable" + {.error: "ExecutionPayloadForSigning unsupported in " & $kind.} template BlindedBeaconBlock*(kind: static ConsensusFork): auto = when kind == ConsensusFork.Fulu: - typedesc[fulu_mev.BlindedBeaconBlock] + fulu_mev.BlindedBeaconBlock elif kind == ConsensusFork.Electra: - typedesc[electra_mev.BlindedBeaconBlock] - elif kind == ConsensusFork.Deneb: - typedesc[deneb_mev.BlindedBeaconBlock] - elif kind == ConsensusFork.Capella or kind == ConsensusFork.Bellatrix: - static: raiseAssert "Unsupported" + electra_mev.BlindedBeaconBlock else: - static: raiseAssert "Unreachable" + {.error: "BlindedBeaconBlock unsupported in " & $kind.} template MaybeBlindedBeaconBlock*(kind: static ConsensusFork): auto = when kind == ConsensusFork.Fulu: - typedesc[fulu_mev.MaybeBlindedBeaconBlock] + fulu_mev.MaybeBlindedBeaconBlock elif kind == ConsensusFork.Electra: - typedesc[electra_mev.MaybeBlindedBeaconBlock] - elif kind == ConsensusFork.Deneb: - typedesc[deneb_mev.MaybeBlindedBeaconBlock] - elif kind == ConsensusFork.Capella or kind == ConsensusFork.Bellatrix: - static: raiseAssert "Unsupported" + electra_mev.MaybeBlindedBeaconBlock else: - static: raiseAssert "Unreachable" + {.error: "MaybeBlindedBeaconBlock unsupported in " & $kind.} -template SignedBlindedBeaconBlock*(kind: static ConsensusFork): auto = +template SignedBlindedBeaconBlock*(kind: static ConsensusFork): typedesc = when kind == ConsensusFork.Fulu: - typedesc[fulu_mev.SignedBlindedBeaconBlock] + fulu_mev.SignedBlindedBeaconBlock elif kind == ConsensusFork.Electra: - typedesc[electra_mev.SignedBlindedBeaconBlock] - elif kind == ConsensusFork.Deneb: - typedesc[deneb_mev.SignedBlindedBeaconBlock] - elif kind == ConsensusFork.Capella or kind == ConsensusFork.Bellatrix: - static: raiseAssert "Unsupported" + electra_mev.SignedBlindedBeaconBlock + else: + {.error: "SignedBlindedBeaconBlock unsupported in " & $kind.} + +template BuilderBid*(kind: static ConsensusFork): typedesc = + when kind == ConsensusFork.Fulu: + fulu_mev.BuilderBid + elif kind == ConsensusFork.Electra: + electra_mev.BuilderBid + else: + {.error: "BuilderBid unsupported in " & $kind.} + +template SignedBuilderBid*(kind: static ConsensusFork): typedesc = + when kind == ConsensusFork.Fulu: + fulu_mev.SignedBuilderBid + elif kind == ConsensusFork.Electra: + electra_mev.SignedBuilderBid else: - static: raiseAssert "Unreachable" + {.error: "SignedBuilderBid unsupported in " & $kind.} template Forky*( x: typedesc[ForkedSignedBeaconBlock], kind: static ConsensusFork): auto = kind.SignedBeaconBlock -# Workaround method used for tests that involve walking through -# `nim-eth2-scnarios`fork dirs, to be removed once Fulu is -# included in new release. 
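# The per-fork typedesc selectors above allow generic code to materialize the
# concrete type for a statically known fork. A minimal sketch of the pattern
# (illustrative only, not part of this change):
func emptySignedBlock(consensusFork: static ConsensusFork): auto =
  ## Resolves to gloas.SignedBeaconBlock for ConsensusFork.Gloas, and so on.
  default(consensusFork.SignedBeaconBlock)

discard emptySignedBlock(ConsensusFork.Gloas)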
-template withAllButFulu*( +template withAll*( x: typedesc[ConsensusFork], body: untyped): untyped = - static: doAssert ConsensusFork.high == ConsensusFork.Fulu - block: - const consensusFork {.inject, used.} = ConsensusFork.Electra - body + static: doAssert ConsensusFork.high == ConsensusFork.Gloas block: - const consensusFork {.inject, used.} = ConsensusFork.Deneb + const consensusFork {.inject, used.} = ConsensusFork.Gloas body - block: - const consensusFork {.inject, used.} = ConsensusFork.Capella - body - block: - const consensusFork {.inject, used.} = ConsensusFork.Bellatrix - body - block: - const consensusFork {.inject, used.} = ConsensusFork.Altair - body - block: - const consensusFork {.inject, used.} = ConsensusFork.Phase0 - body - -template withAll*( - x: typedesc[ConsensusFork], body: untyped): untyped = - static: doAssert ConsensusFork.high == ConsensusFork.Fulu block: const consensusFork {.inject, used.} = ConsensusFork.Fulu body @@ -690,6 +715,9 @@ template withAll*( template withConsensusFork*( x: ConsensusFork, body: untyped): untyped = case x + of ConsensusFork.Gloas: + const consensusFork {.inject, used.} = ConsensusFork.Gloas + body of ConsensusFork.Fulu: const consensusFork {.inject, used.} = ConsensusFork.Fulu body @@ -712,55 +740,55 @@ template withConsensusFork*( const consensusFork {.inject, used.} = ConsensusFork.Phase0 body -template BlockContents*( - kind: static ConsensusFork): auto = - when kind == ConsensusFork.Fulu: - typedesc[fulu.BlockContents] +template BlockContents*(kind: static ConsensusFork): typedesc = + when kind == ConsensusFork.Gloas: + gloas.BlockContents + elif kind == ConsensusFork.Fulu: + fulu.BlockContents elif kind == ConsensusFork.Electra: - typedesc[electra.BlockContents] + electra.BlockContents elif kind == ConsensusFork.Deneb: - typedesc[deneb.BlockContents] + deneb.BlockContents elif kind == ConsensusFork.Capella: - typedesc[capella.BeaconBlock] + capella.BeaconBlock elif kind == ConsensusFork.Bellatrix: - typedesc[bellatrix.BeaconBlock] + bellatrix.BeaconBlock elif kind == ConsensusFork.Altair: - typedesc[altair.BeaconBlock] + altair.BeaconBlock elif kind == ConsensusFork.Phase0: - typedesc[phase0.BeaconBlock] + phase0.BeaconBlock else: - {.error: "BlockContents does not support " & $kind.} + {.error: "BlockContents unsupported in " & $kind.} template BlindedBlockContents*( kind: static ConsensusFork): auto = when kind == ConsensusFork.Fulu: - typedesc[fulu_mev.BlindedBeaconBlock] + fulu_mev.BlindedBeaconBlock elif kind == ConsensusFork.Electra: - typedesc[electra_mev.BlindedBeaconBlock] - elif kind == ConsensusFork.Deneb: - typedesc[deneb_mev.BlindedBeaconBlock] + electra_mev.BlindedBeaconBlock else: - {.error: "BlindedBlockContents does not support " & $kind.} + {.error: "BlindedBlockContents unsupported in " & $kind.} template PayloadAttributes*( - kind: static ConsensusFork): auto = + kind: static ConsensusFork): typedesc = # This also determines what `engine_forkchoiceUpdated` version will be used. 
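  # Concretely, per the branches below: Deneb, Electra, Fulu and Gloas select
  # PayloadAttributesV3 (engine_forkchoiceUpdatedV3), Capella selects
  # PayloadAttributesV2, Bellatrix PayloadAttributesV1; earlier forks are a
  # compile-time error.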
when kind >= ConsensusFork.Deneb: - typedesc[PayloadAttributesV3] + PayloadAttributesV3 elif kind >= ConsensusFork.Capella: # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/shanghai.md#specification-1 # Consensus layer client MUST call this method instead of # `engine_forkchoiceUpdatedV1` under any of the following conditions: # `headBlockHash` references a block which `timestamp` is greater or # equal to the Shanghai timestamp - typedesc[PayloadAttributesV2] + PayloadAttributesV2 elif kind >= ConsensusFork.Bellatrix: - typedesc[PayloadAttributesV1] + PayloadAttributesV1 else: - {.error: "PayloadAttributes does not support " & $kind.} + {.error: "PayloadAttributes unsupported in " & $kind.} # `eth2_merkleization` cannot import `forks` (circular), so the check is here -static: doAssert ConsensusFork.high == ConsensusFork.Fulu, +debugGloasComment "actually verify this" +static: doAssert ConsensusFork.high == ConsensusFork.Gloas, "eth2_merkleization has been checked and `hash_tree_root` is up to date" # TODO when https://github.com/nim-lang/Nim/issues/21086 fixed, use return type @@ -793,6 +821,10 @@ func new*(T: type ForkedHashedBeaconState, data: fulu.BeaconState): ref ForkedHashedBeaconState = (ref T)(kind: ConsensusFork.Fulu, fuluData: fulu.HashedBeaconState( data: data, root: hash_tree_root(data))) +func new*(T: type ForkedHashedBeaconState, data: gloas.BeaconState): + ref ForkedHashedBeaconState = + (ref T)(kind: ConsensusFork.Gloas, gloasData: gloas.HashedBeaconState( + data: data, root: hash_tree_root(data))) template init*(T: type ForkedBeaconBlock, blck: phase0.BeaconBlock): T = T(kind: ConsensusFork.Phase0, phase0Data: blck) @@ -808,6 +840,8 @@ template init*(T: type ForkedBeaconBlock, blck: electra.BeaconBlock): T = T(kind: ConsensusFork.Electra, electraData: blck) template init*(T: type ForkedBeaconBlock, blck: fulu.BeaconBlock): T = T(kind: ConsensusFork.Fulu, fuluData: blck) +template init*(T: type ForkedBeaconBlock, blck: gloas.BeaconBlock): T = + T(kind: ConsensusFork.Gloas, gloasData: blck) template init*(T: type ForkedSignedBeaconBlock, blck: phase0.SignedBeaconBlock): T = T(kind: ConsensusFork.Phase0, phase0Data: blck) @@ -823,6 +857,8 @@ template init*(T: type ForkedSignedBeaconBlock, blck: electra.SignedBeaconBlock) T(kind: ConsensusFork.Electra, electraData: blck) template init*(T: type ForkedSignedBeaconBlock, blck: fulu.SignedBeaconBlock): T = T(kind: ConsensusFork.Fulu, fuluData: blck) +template init*(T: type ForkedSignedBeaconBlock, blck: gloas.SignedBeaconBlock): T = + T(kind: ConsensusFork.Gloas, gloasData: blck) func init*(T: type ForkedSignedBeaconBlock, forked: ForkedBeaconBlock, blockRoot: Eth2Digest, signature: ValidatorSig): T = @@ -862,40 +898,11 @@ func init*(T: type ForkedSignedBeaconBlock, forked: ForkedBeaconBlock, fuluData: fulu.SignedBeaconBlock(message: forked.fuluData, root: blockRoot, signature: signature)) - -func init*(T: type ForkedSignedBlindedBeaconBlock, - forked: ForkedBlindedBeaconBlock, blockRoot: Eth2Digest, - signature: ValidatorSig): T = - case forked.kind - of ConsensusFork.Phase0: - T(kind: ConsensusFork.Phase0, - phase0Data: phase0.SignedBeaconBlock(message: forked.phase0Data, - root: blockRoot, - signature: signature)) - of ConsensusFork.Altair: - T(kind: ConsensusFork.Altair, - altairData: altair.SignedBeaconBlock(message: forked.altairData, - root: blockRoot, - signature: signature)) - of ConsensusFork.Bellatrix: - T(kind: ConsensusFork.Bellatrix, - bellatrixData: 
default(bellatrix_mev.SignedBlindedBeaconBlock)) - of ConsensusFork.Capella: - T(kind: ConsensusFork.Capella, - capellaData: capella_mev.SignedBlindedBeaconBlock(message: forked.capellaData, - signature: signature)) - of ConsensusFork.Deneb: - T(kind: ConsensusFork.Deneb, - denebData: deneb_mev.SignedBlindedBeaconBlock(message: forked.denebData, - signature: signature)) - of ConsensusFork.Electra: - T(kind: ConsensusFork.Electra, - electraData: electra_mev.SignedBlindedBeaconBlock(message: forked.electraData, - signature: signature)) - of ConsensusFork.Fulu: - T(kind: ConsensusFork.Fulu, - fuluData: fulu_mev.SignedBlindedBeaconBlock(message: forked.fuluData, - signature: signature)) + of ConsensusFork.Gloas: + T(kind: ConsensusFork.Gloas, + gloasData: gloas.SignedBeaconBlock(message: forked.gloasData, + root: blockRoot, + signature: signature)) template init*(T: type ForkedSignedBlindedBeaconBlock, blck: capella_mev.BlindedBeaconBlock, blockRoot: Eth2Digest, @@ -925,17 +932,6 @@ template init*(T: type ForkedSignedBlindedBeaconBlock, fuluData: fulu_mev.SignedBlindedBeaconBlock( message: blck, signature: signature)) -template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: phase0.MsgTrustedSignedBeaconBlock): T = - T(kind: ConsensusFork.Phase0, phase0Data: blck) -template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: altair.MsgTrustedSignedBeaconBlock): T = - T(kind: ConsensusFork.Altair, altairData: blck) -template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: bellatrix.MsgTrustedSignedBeaconBlock): T = - T(kind: ConsensusFork.Bellatrix, bellatrixData: blck) -template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: capella.MsgTrustedSignedBeaconBlock): T = - T(kind: ConsensusFork.Capella, capellaData: blck) -template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: deneb.MsgTrustedSignedBeaconBlock): T = - T(kind: ConsensusFork.Deneb, denebData: blck) - template init*(T: type ForkedTrustedSignedBeaconBlock, blck: phase0.TrustedSignedBeaconBlock): T = T(kind: ConsensusFork.Phase0, phase0Data: blck) template init*(T: type ForkedTrustedSignedBeaconBlock, blck: altair.TrustedSignedBeaconBlock): T = @@ -950,26 +946,16 @@ template init*(T: type ForkedTrustedSignedBeaconBlock, blck: electra.TrustedSign T(kind: ConsensusFork.Electra, electraData: blck) template init*(T: type ForkedTrustedSignedBeaconBlock, blck: fulu.TrustedSignedBeaconBlock): T = T(kind: ConsensusFork.Fulu, fuluData: blck) +template init*(T: type ForkedTrustedSignedBeaconBlock, blck: gloas.TrustedSignedBeaconBlock): T = + T(kind: ConsensusFork.Gloas, gloasData: blck) template toString*(kind: ConsensusFork): string = - case kind - of ConsensusFork.Phase0: - "phase0" - of ConsensusFork.Altair: - "altair" - of ConsensusFork.Bellatrix: - "bellatrix" - of ConsensusFork.Capella: - "capella" - of ConsensusFork.Deneb: - "deneb" - of ConsensusFork.Electra: - "electra" - of ConsensusFork.Fulu: - "fulu" + $kind template init*(T: typedesc[ConsensusFork], value: string): Opt[ConsensusFork] = case value + of "gloas": + Opt.some ConsensusFork.Gloas of "fulu": Opt.some ConsensusFork.Fulu of "electra": @@ -998,6 +984,10 @@ template init*(T: type ForkedEpochInfo, info: altair.EpochInfo): T = template withState*(x: ForkedHashedBeaconState, body: untyped): untyped = case x.kind + of ConsensusFork.Gloas: + const consensusFork {.inject, used.} = ConsensusFork.Gloas + template forkyState: untyped {.inject, used.} = x.gloasData + body of ConsensusFork.Fulu: const consensusFork {.inject, used.} = ConsensusFork.Fulu 
template forkyState: untyped {.inject, used.} = x.fuluData @@ -1030,9 +1020,12 @@ template withState*(x: ForkedHashedBeaconState, body: untyped): untyped = template forky*( x: ForkedBeaconBlock | + ForkedSignedBeaconBlock | ForkedHashedBeaconState, kind: static ConsensusFork): untyped = - when kind == ConsensusFork.Fulu: + when kind == ConsensusFork.Gloas: + x.gloasData + elif kind == ConsensusFork.Fulu: x.fuluData elif kind == ConsensusFork.Electra: x.electraData @@ -1047,7 +1040,7 @@ template forky*( elif kind == ConsensusFork.Phase0: x.phase0Data else: - static: raiseAssert "Unreachable" + {.error: "Unreachable".} template withEpochInfo*(x: ForkedEpochInfo, body: untyped): untyped = case x.kind @@ -1070,7 +1063,8 @@ template withEpochInfo*( template withEpochInfo*( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState, + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState, x: var ForkedEpochInfo, body: untyped): untyped = if x.kind != EpochInfoFork.Altair: # Rare, so efficiency not critical @@ -1105,9 +1099,21 @@ func setStateRoot*(x: var ForkedHashedBeaconState, root: Eth2Digest) = withState(x): forkyState.root = root {.pop.} +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/fulu/beacon-chain.md#new-get_blob_parameters +func get_blob_parameters*(cfg: RuntimeConfig, epoch: Epoch): BlobParameters = + ## Return the blob parameters at a given epoch. + for entry in cfg.BLOB_SCHEDULE: + if epoch >= entry.EPOCH: + return entry + BlobParameters( + EPOCH: cfg.ELECTRA_FORK_EPOCH, + MAX_BLOBS_PER_BLOCK: cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + func consensusForkEpoch*( cfg: RuntimeConfig, consensusFork: ConsensusFork): Epoch = case consensusFork + of ConsensusFork.Gloas: + cfg.GLOAS_FORK_EPOCH of ConsensusFork.Fulu: cfg.FULU_FORK_EPOCH of ConsensusFork.Electra: @@ -1126,7 +1132,8 @@ func consensusForkEpoch*( func consensusForkAtEpoch*(cfg: RuntimeConfig, epoch: Epoch): ConsensusFork = ## Return the current fork for the given epoch. 
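  ## Example: with ELECTRA_FORK_EPOCH = 100 and every later fork epoch still at
  ## FAR_FUTURE_EPOCH, an epoch of 250 maps to ConsensusFork.Electra.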
static: - doAssert high(ConsensusFork) == ConsensusFork.Fulu + doAssert high(ConsensusFork) == ConsensusFork.Gloas + doAssert ConsensusFork.Gloas > ConsensusFork.Fulu doAssert ConsensusFork.Fulu > ConsensusFork.Electra doAssert ConsensusFork.Electra > ConsensusFork.Deneb doAssert ConsensusFork.Deneb > ConsensusFork.Capella @@ -1135,7 +1142,8 @@ func consensusForkAtEpoch*(cfg: RuntimeConfig, epoch: Epoch): ConsensusFork = doAssert ConsensusFork.Altair > ConsensusFork.Phase0 doAssert GENESIS_EPOCH == 0 - if epoch >= cfg.FULU_FORK_EPOCH: ConsensusFork.Fulu + if epoch >= cfg.GLOAS_FORK_EPOCH: ConsensusFork.Gloas + elif epoch >= cfg.FULU_FORK_EPOCH: ConsensusFork.Fulu elif epoch >= cfg.ELECTRA_FORK_EPOCH: ConsensusFork.Electra elif epoch >= cfg.DENEB_FORK_EPOCH: ConsensusFork.Deneb elif epoch >= cfg.CAPELLA_FORK_EPOCH: ConsensusFork.Capella @@ -1145,10 +1153,12 @@ func consensusForkAtEpoch*(cfg: RuntimeConfig, epoch: Epoch): ConsensusFork = func consensusForkForDigest*( forkDigests: ForkDigests, forkDigest: ForkDigest): Opt[ConsensusFork] = - static: doAssert high(ConsensusFork) == ConsensusFork.Fulu - if forkDigest == forkDigests.fulu: + static: doAssert high(ConsensusFork) == ConsensusFork.Gloas + # Past Fulu, this reverse lookup doesn't work anyway in a good way, needs to + # be refactored + if forkDigest == forkDigests.fuluInt: ok ConsensusFork.Fulu - elif forkDigest == forkDigests.electra: + elif forkDigest == forkDigests.electra: ok ConsensusFork.Electra elif forkDigest == forkDigests.deneb: ok ConsensusFork.Deneb @@ -1161,13 +1171,19 @@ func consensusForkForDigest*( elif forkDigest == forkDigests.phase0: ok ConsensusFork.Phase0 else: + for (epoch, consensusFork, bpoForkDigest) in forkDigests.bpos: + if forkDigest == bpoForkDigest: + return ok consensusFork err() func atConsensusFork*( forkDigests: ForkDigests, consensusFork: ConsensusFork): ForkDigest = + debugGloasComment "atConsensusFork is deprecated anyway, should be gone before we need it for gloas, otherwise look at again" case consensusFork + of ConsensusFork.Gloas: + forkDigests.fuluInt of ConsensusFork.Fulu: - forkDigests.fulu + forkDigests.fuluInt of ConsensusFork.Electra: forkDigests.electra of ConsensusFork.Deneb: @@ -1183,49 +1199,45 @@ func atConsensusFork*( template atEpoch*( forkDigests: ForkDigests, epoch: Epoch, cfg: RuntimeConfig): ForkDigest = - forkDigests.atConsensusFork(cfg.consensusForkAtEpoch(epoch)) + if epoch >= cfg.FULU_FORK_EPOCH: + var res: Opt[ForkDigest] + for (bpoEpoch, _, forkDigest) in forkDigests.bpos: + if epoch >= bpoEpoch: + res = Opt[ForkDigest].ok(forkDigest) + break + res.valueOr: + # In BPO-compatible fork, without BPOs + forkDigests.atConsensusFork(cfg.consensusForkAtEpoch(epoch)) + else: + forkDigests.atConsensusFork(cfg.consensusForkAtEpoch(epoch)) + +iterator forkDigests*(consensusFork: ConsensusFork, forkDigests: ForkDigests): ForkDigest = + yield forkDigests.atConsensusFork(consensusFork) + + if consensusFork >= ConsensusFork.Fulu: + for (_, bpoConsensusFork, forkDigest) in forkDigests.bpos: + if bpoConsensusFork == consensusFork: + yield forkDigest template asSigned*( - x: ForkedMsgTrustedSignedBeaconBlock | - ForkedTrustedSignedBeaconBlock -): ForkedSignedBeaconBlock = + x: ForkedTrustedSignedBeaconBlock): ForkedSignedBeaconBlock = isomorphicCast[ForkedSignedBeaconBlock](x) template asSigned*( - x: ref ForkedMsgTrustedSignedBeaconBlock | - ref ForkedTrustedSignedBeaconBlock -): ref ForkedSignedBeaconBlock = + x: ref ForkedTrustedSignedBeaconBlock): ref ForkedSignedBeaconBlock = 
isomorphicCast[ref ForkedSignedBeaconBlock](x) -template asMsgTrusted*( - x: ForkedSignedBeaconBlock | - ForkedTrustedSignedBeaconBlock -): ForkedMsgTrustedSignedBeaconBlock = - isomorphicCast[ForkedMsgTrustedSignedBeaconBlock](x) - -template asMsgTrusted*( - x: ref ForkedSignedBeaconBlock | - ref ForkedTrustedSignedBeaconBlock -): ref ForkedMsgTrustedSignedBeaconBlock = - isomorphicCast[ref ForkedMsgTrustedSignedBeaconBlock](x) - template asTrusted*( - x: ForkedSignedBeaconBlock | - ForkedMsgTrustedSignedBeaconBlock -): ForkedTrustedSignedBeaconBlock = + x: ForkedSignedBeaconBlock): ForkedTrustedSignedBeaconBlock = isomorphicCast[ForkedTrustedSignedBeaconBlock](x) template asTrusted*( - x: ref ForkedSignedBeaconBlock | - ref ForkedMsgTrustedSignedBeaconBlock -): ref ForkedTrustedSignedBeaconBlock = + x: ref ForkedSignedBeaconBlock): ref ForkedTrustedSignedBeaconBlock = isomorphicCast[ref ForkedTrustedSignedBeaconBlock](x) template withBlck*( - x: ForkedBeaconBlock | - ForkedSignedBeaconBlock | ForkedMsgTrustedSignedBeaconBlock | - ForkedTrustedSignedBeaconBlock | ForkedBlindedBeaconBlock | - ForkedSignedBlindedBeaconBlock, + x: ForkedBeaconBlock | ForkedSignedBeaconBlock | + ForkedTrustedSignedBeaconBlock | ForkedSignedBlindedBeaconBlock, body: untyped): untyped = case x.kind of ConsensusFork.Phase0: @@ -1256,6 +1268,10 @@ template withBlck*( const consensusFork {.inject, used.} = ConsensusFork.Fulu template forkyBlck: untyped {.inject, used.} = x.fuluData body + of ConsensusFork.Gloas: + const consensusFork {.inject, used.} = ConsensusFork.Gloas + template forkyBlck: untyped {.inject, used.} = x.gloasData + body func proposer_index*(x: ForkedBeaconBlock): uint64 = withBlck(x): forkyBlck.proposer_index @@ -1269,9 +1285,7 @@ func hash_tree_root*(x: Web3SignerForkedBeaconBlock): Eth2Digest = func hash_tree_root*(_: Opt[auto]) {.error.} template getForkedBlockField*( - x: ForkedSignedBeaconBlock | - ForkedMsgTrustedSignedBeaconBlock | - ForkedTrustedSignedBeaconBlock, + x: ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock, y: untyped): untyped = # unsafeAddr avoids a copy of the field in some cases (case x.kind @@ -1281,10 +1295,10 @@ template getForkedBlockField*( of ConsensusFork.Capella: unsafeAddr x.capellaData.message.y of ConsensusFork.Deneb: unsafeAddr x.denebData.message.y of ConsensusFork.Electra: unsafeAddr x.electraData.message.y - of ConsensusFork.Fulu: unsafeAddr x.fuluData.message.y)[] + of ConsensusFork.Fulu: unsafeAddr x.fuluData.message.y + of ConsensusFork.Gloas: unsafeAddr x.gloasData.message.y)[] template signature*(x: ForkedSignedBeaconBlock | - ForkedMsgTrustedSignedBeaconBlock | ForkedSignedBlindedBeaconBlock): ValidatorSig = withBlck(x): forkyBlck.signature @@ -1292,33 +1306,36 @@ template signature*(x: ForkedTrustedSignedBeaconBlock): TrustedSig = withBlck(x): forkyBlck.signature template root*(x: ForkedSignedBeaconBlock | - ForkedMsgTrustedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock): Eth2Digest = withBlck(x): forkyBlck.root template slot*(x: ForkedSignedBeaconBlock | - ForkedMsgTrustedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock): Slot = withBlck(x): forkyBlck.message.slot -template shortLog*(x: ForkedBeaconBlock | ForkedBlindedBeaconBlock): auto = +template shortLog*(x: ForkedBeaconBlock): auto = withBlck(x): shortLog(forkyBlck) template shortLog*(x: ForkedSignedBeaconBlock | - ForkedMsgTrustedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock | ForkedSignedBlindedBeaconBlock): auto = withBlck(x): shortLog(forkyBlck) chronicles.formatIt 
ForkedBeaconBlock: it.shortLog chronicles.formatIt ForkedSignedBeaconBlock: it.shortLog -chronicles.formatIt ForkedMsgTrustedSignedBeaconBlock: it.shortLog chronicles.formatIt ForkedTrustedSignedBeaconBlock: it.shortLog template withForkyMaybeBlindedBlck*( b: ForkedMaybeBlindedBeaconBlock, body: untyped): untyped = + debugGloasComment "re-add mev to gloas" case b.kind + of ConsensusFork.Gloas: + const + consensusFork {.inject, used.} = ConsensusFork.Gloas + isBlinded {.inject, used.} = false + template forkyMaybeBlindedBlck: untyped {.inject, used.} = b.gloasData + body of ConsensusFork.Fulu: const consensusFork {.inject, used.} = ConsensusFork.Fulu template d: untyped = b.fuluData @@ -1344,17 +1361,11 @@ template withForkyMaybeBlindedBlck*( template forkyMaybeBlindedBlck: untyped {.inject, used.} = d.data body of ConsensusFork.Deneb: - const consensusFork {.inject, used.} = ConsensusFork.Deneb - template d: untyped = b.denebData - case d.isBlinded: - of true: - const isBlinded {.inject, used.} = true - template forkyMaybeBlindedBlck: untyped {.inject, used.} = d.blindedData - body - of false: - const isBlinded {.inject, used.} = false - template forkyMaybeBlindedBlck: untyped {.inject, used.} = d.data - body + const + consensusFork {.inject, used.} = ConsensusFork.Deneb + isBlinded {.inject, used.} = false + template forkyMaybeBlindedBlck: untyped {.inject, used.} = b.denebData + body of ConsensusFork.Capella: const consensusFork {.inject, used.} = ConsensusFork.Capella @@ -1393,10 +1404,14 @@ template shortLog*(x: ForkedMaybeBlindedBeaconBlock): auto = template withStateAndBlck*( s: ForkedHashedBeaconState, b: ForkedBeaconBlock | ForkedSignedBeaconBlock | - ForkedMsgTrustedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock, body: untyped): untyped = case s.kind + of ConsensusFork.Gloas: + const consensusFork {.inject, used.} = ConsensusFork.Gloas + template forkyState: untyped {.inject.} = s.gloasData + template forkyBlck: untyped {.inject.} = b.gloasData + body of ConsensusFork.Fulu: const consensusFork {.inject, used.} = ConsensusFork.Fulu template forkyState: untyped {.inject.} = s.fuluData @@ -1435,6 +1450,10 @@ template withStateAndBlck*( template withAttestation*(a: ForkedAttestation, body: untyped): untyped = case a.kind + of ConsensusFork.Gloas: + const consensusFork {.inject, used.} = ConsensusFork.Gloas + template forkyAttestation: untyped {.inject.} = a.gloasData + body of ConsensusFork.Fulu: const consensusFork {.inject, used.} = ConsensusFork.Fulu template forkyAttestation: untyped {.inject.} = a.fuluData @@ -1467,6 +1486,10 @@ template withAttestation*(a: ForkedAttestation, body: untyped): untyped = template withAggregateAndProof*(a: ForkedAggregateAndProof, body: untyped): untyped = case a.kind + of ConsensusFork.Gloas: + const consensusFork {.inject, used.} = ConsensusFork.Gloas + template forkyProof: untyped {.inject.} = a.gloasData + body of ConsensusFork.Fulu: const consensusFork {.inject, used.} = ConsensusFork.Fulu template forkyProof: untyped {.inject.} = a.fuluData @@ -1497,8 +1520,7 @@ template withAggregateAndProof*(a: ForkedAggregateAndProof, body func toBeaconBlockHeader*( - blck: SomeForkyBeaconBlock | deneb_mev.BlindedBeaconBlock | - electra_mev.BlindedBeaconBlock | fulu_mev.BlindedBeaconBlock): + blck: SomeForkyBeaconBlock | ForkyBlindedBeaconBlock): BeaconBlockHeader = ## Reduce a given `BeaconBlock` to its `BeaconBlockHeader`. 
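  ## slot, proposer_index, parent_root and state_root are copied as-is; the
  ## full body is replaced by its hash_tree_root as body_root.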
BeaconBlockHeader( @@ -1514,8 +1536,7 @@ template toBeaconBlockHeader*( blck.message.toBeaconBlockHeader() template toBeaconBlockHeader*( - blckParam: ForkedMsgTrustedSignedBeaconBlock | - ForkedTrustedSignedBeaconBlock): BeaconBlockHeader = + blckParam: ForkedTrustedSignedBeaconBlock): BeaconBlockHeader = ## Reduce a given signed beacon block to its `BeaconBlockHeader`. withBlck(blckParam): forkyBlck.toBeaconBlockHeader() @@ -1569,8 +1590,15 @@ func fuluFork*(cfg: RuntimeConfig): Fork = current_version: cfg.FULU_FORK_VERSION, epoch: cfg.FULU_FORK_EPOCH) +func gloasFork*(cfg: RuntimeConfig): Fork = + Fork( + previous_version: cfg.FULU_FORK_VERSION, + current_version: cfg.GLOAS_FORK_VERSION, + epoch: cfg.GLOAS_FORK_EPOCH) + func forkAtEpoch*(cfg: RuntimeConfig, epoch: Epoch): Fork = case cfg.consensusForkAtEpoch(epoch) + of ConsensusFork.Gloas: cfg.gloasFork of ConsensusFork.Fulu: cfg.fuluFork of ConsensusFork.Electra: cfg.electraFork of ConsensusFork.Deneb: cfg.denebFork @@ -1581,6 +1609,7 @@ func forkAtEpoch*(cfg: RuntimeConfig, epoch: Epoch): Fork = func forkVersionAtEpoch*(cfg: RuntimeConfig, epoch: Epoch): Version = case cfg.consensusForkAtEpoch(epoch) + of ConsensusFork.Gloas: cfg.GLOAS_FORK_VERSION of ConsensusFork.Fulu: cfg.FULU_FORK_VERSION of ConsensusFork.Electra: cfg.ELECTRA_FORK_VERSION of ConsensusFork.Deneb: cfg.DENEB_FORK_VERSION @@ -1590,8 +1619,23 @@ func forkVersionAtEpoch*(cfg: RuntimeConfig, epoch: Epoch): Version = of ConsensusFork.Phase0: cfg.GENESIS_FORK_VERSION func nextForkEpochAtEpoch*(cfg: RuntimeConfig, epoch: Epoch): Epoch = + ## Used to construct the eth2 field of ENRs + debugGloasComment "probably wrong, definitely look at again, and if right, refactor" case cfg.consensusForkAtEpoch(epoch) - of ConsensusFork.Fulu: FAR_FUTURE_EPOCH + of ConsensusFork.Gloas: + var res = FAR_FUTURE_EPOCH + for entry in cfg.BLOB_SCHEDULE: + if epoch >= entry.EPOCH: + break + res = entry.EPOCH + res + of ConsensusFork.Fulu: + var res = FAR_FUTURE_EPOCH + for entry in cfg.BLOB_SCHEDULE: + if epoch >= entry.EPOCH: + break + res = entry.EPOCH + res of ConsensusFork.Electra: cfg.FULU_FORK_EPOCH of ConsensusFork.Deneb: cfg.ELECTRA_FORK_EPOCH of ConsensusFork.Capella: cfg.DENEB_FORK_EPOCH @@ -1601,13 +1645,14 @@ func nextForkEpochAtEpoch*(cfg: RuntimeConfig, epoch: Epoch): Epoch = func forkVersion*(cfg: RuntimeConfig, consensusFork: ConsensusFork): Version = case consensusFork - of ConsensusFork.Phase0: cfg.GENESIS_FORK_VERSION - of ConsensusFork.Altair: cfg.ALTAIR_FORK_VERSION - of ConsensusFork.Bellatrix: cfg.BELLATRIX_FORK_VERSION - of ConsensusFork.Capella: cfg.CAPELLA_FORK_VERSION - of ConsensusFork.Deneb: cfg.DENEB_FORK_VERSION - of ConsensusFork.Electra: cfg.ELECTRA_FORK_VERSION - of ConsensusFork.Fulu: cfg.FULU_FORK_VERSION + of ConsensusFork.Phase0: cfg.GENESIS_FORK_VERSION + of ConsensusFork.Altair: cfg.ALTAIR_FORK_VERSION + of ConsensusFork.Bellatrix: cfg.BELLATRIX_FORK_VERSION + of ConsensusFork.Capella: cfg.CAPELLA_FORK_VERSION + of ConsensusFork.Deneb: cfg.DENEB_FORK_VERSION + of ConsensusFork.Electra: cfg.ELECTRA_FORK_VERSION + of ConsensusFork.Fulu: cfg.FULU_FORK_VERSION + of ConsensusFork.Gloas: cfg.GLOAS_FORK_VERSION func lcDataForkAtConsensusFork*( consensusFork: ConsensusFork): LightClientDataFork = @@ -1629,7 +1674,8 @@ func getForkSchedule*(cfg: RuntimeConfig): array[7, Fork] = ## This procedure is used by HTTP REST framework and validator client. ## ## NOTE: Update this procedure when new fork will be scheduled. 
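  ## The schedule intentionally remains at 7 entries (phase0 through fulu);
  ## gloas is deliberately not included here yet (see the debugGloasComment
  ## below).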
- static: doAssert high(ConsensusFork) == ConsensusFork.Fulu + static: doAssert high(ConsensusFork) == ConsensusFork.Gloas + debugGloasComment "deliberately don't expose this to REST yet" [cfg.genesisFork(), cfg.altairFork(), cfg.bellatrixFork(), cfg.capellaFork(), cfg.denebFork(), cfg.electraFork(), cfg.fuluFork()] @@ -1715,10 +1761,33 @@ func compute_fork_digest*(current_version: Version, compute_fork_data_root( current_version, genesis_validators_root).data.toOpenArray(0, 3) +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/fulu/beacon-chain.md#modified-compute_fork_digest +func compute_fork_digest_fulu*( + cfg: RuntimeConfig, genesis_validators_root: Eth2Digest, epoch: Epoch): + ForkDigest = + ## Return the 4-byte fork digest for the ``version`` and + ## ``genesis_validators_root`` XOR'd with the hash of the blob parameters for + ## ``epoch``. + ## + ## This is a digest primarily used for domain separation on the p2p layer. + ## 4-bytes suffices for practical separation of forks/chains. + let + fork_version = forkVersionAtEpoch(cfg, epoch) + base_digest = compute_fork_data_root(fork_version, genesis_validators_root) + blob_parameters = get_blob_parameters(cfg, epoch) + + var bpo_buf: array[16, byte] + bpo_buf[0 .. 7] = toBytesLE(distinctBase(blob_parameters.EPOCH)) + bpo_buf[8 .. 15] = toBytesLE(blob_parameters.MAX_BLOBS_PER_BLOCK) + let bpo_digest = eth2digest(bpo_buf) + var res: array[4, byte] + staticFor i, 0 ..< len(res): + res[i] = base_digest.data[i] xor bpo_digest.data[i] + ForkDigest(res) + func init*(T: type ForkDigests, cfg: RuntimeConfig, genesis_validators_root: Eth2Digest): T = - static: doAssert high(ConsensusFork) == ConsensusFork.Fulu T( phase0: compute_fork_digest(cfg.GENESIS_FORK_VERSION, genesis_validators_root), @@ -1732,8 +1801,15 @@ func init*(T: type ForkDigests, compute_fork_digest(cfg.DENEB_FORK_VERSION, genesis_validators_root), electra: compute_fork_digest(cfg.ELECTRA_FORK_VERSION, genesis_validators_root), - fulu: - compute_fork_digest(cfg.FULU_FORK_VERSION, genesis_validators_root) + fuluInt: + compute_fork_digest_fulu( + cfg, genesis_validators_root, cfg.FULU_FORK_EPOCH), + bpos: mapIt( + cfg.BLOB_SCHEDULE, + ( + it.EPOCH, + consensusForkAtEpoch(cfg, it.EPOCH), + compute_fork_digest_fulu(cfg, genesis_validators_root, it.EPOCH))) ) func toBlockId*(header: BeaconBlockHeader): BlockId = @@ -1743,7 +1819,6 @@ func toBlockId*(blck: SomeForkySignedBeaconBlock): BlockId = BlockId(root: blck.root, slot: blck.message.slot) func toBlockId*(blck: ForkedSignedBeaconBlock | - ForkedMsgTrustedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock): BlockId = withBlck(blck): BlockId(root: forkyBlck.root, slot: forkyBlck.message.slot) @@ -1790,20 +1865,7 @@ template init*(T: type ForkedMaybeBlindedBeaconBlock, evalue: Opt[UInt256], cvalue: Opt[UInt256]): T = ForkedMaybeBlindedBeaconBlock( kind: ConsensusFork.Deneb, - denebData: deneb_mev.MaybeBlindedBeaconBlock( - isBlinded: false, - data: blck), - consensusValue: cvalue, - executionValue: evalue) - -template init*(T: type ForkedMaybeBlindedBeaconBlock, - blck: deneb_mev.BlindedBeaconBlock, - evalue: Opt[UInt256], cvalue: Opt[UInt256]): T = - ForkedMaybeBlindedBeaconBlock( - kind: ConsensusFork.Deneb, - denebData: deneb_mev.MaybeBlindedBeaconBlock( - isBlinded: true, - blindedData: blck), + denebData: blck, consensusValue: cvalue, executionValue: evalue) @@ -1840,6 +1902,12 @@ template init*(T: type ForkedMaybeBlindedBeaconBlock, consensusValue: cvalue, executionValue: evalue) +template init*(T: type 
ForkedMaybeBlindedBeaconBlock, + blck: gloas.BlockContents): T = + ForkedMaybeBlindedBeaconBlock( + kind: ConsensusFork.Gloas, + gloasData: blck) + template init*(T: type ForkedMaybeBlindedBeaconBlock, blck: fulu_mev.BlindedBeaconBlock, evalue: Opt[UInt256], cvalue: Opt[UInt256]): T = @@ -1879,9 +1947,7 @@ template init*(T: type ForkedAttestation, ForkedAttestation(kind: ConsensusFork.Capella, capellaData: attestation) of ConsensusFork.Deneb: ForkedAttestation(kind: ConsensusFork.Deneb, denebData: attestation) - of ConsensusFork.Electra: - raiseAssert $fork & " fork should not be used for this type of attestation" - of ConsensusFork.Fulu: + of ConsensusFork.Electra .. ConsensusFork.Gloas: raiseAssert $fork & " fork should not be used for this type of attestation" template init*(T: type ForkedAttestation, @@ -1894,6 +1960,8 @@ template init*(T: type ForkedAttestation, ForkedAttestation(kind: ConsensusFork.Electra, electraData: attestation) of ConsensusFork.Fulu: ForkedAttestation(kind: ConsensusFork.Fulu, fuluData: attestation) + of ConsensusFork.Gloas: + ForkedAttestation(kind: ConsensusFork.Gloas, gloasData: attestation) template init*(T: type ForkedAggregateAndProof, proof: phase0.AggregateAndProof, @@ -1909,10 +1977,7 @@ template init*(T: type ForkedAggregateAndProof, ForkedAggregateAndProof(kind: ConsensusFork.Capella, capellaData: proof) of ConsensusFork.Deneb: ForkedAggregateAndProof(kind: ConsensusFork.Deneb, denebData: proof) - of ConsensusFork.Electra: - raiseAssert $fork & - " fork should not be used for this type of aggregate and proof" - of ConsensusFork.Fulu: + of ConsensusFork.Electra .. ConsensusFork.Gloas: raiseAssert $fork & " fork should not be used for this type of aggregate and proof" @@ -1927,3 +1992,151 @@ template init*(T: type ForkedAggregateAndProof, ForkedAggregateAndProof(kind: ConsensusFork.Electra, electraData: proof) of ConsensusFork.Fulu: ForkedAggregateAndProof(kind: ConsensusFork.Fulu, fuluData: proof) + of ConsensusFork.Gloas: + ForkedAggregateAndProof(kind: ConsensusFork.Gloas, gloasData: proof) + +func kzg_commitments*(eps: ForkyExecutionPayloadForSigning): KzgCommitments = + when typeof(eps).kind >= ConsensusFork.Deneb: + eps.blobsBundle.commitments + else: + default(KzgCommitments) + +# These need access to eth_merkleization indirectly +func toSignedBlindedBeaconBlock*( + blck: bellatrix.SignedBeaconBlock +): bellatrix_mev.SignedBlindedBeaconBlock = + SignedBlindedBeaconBlock( + message: bellatrix_mev.BlindedBeaconBlock( + slot: blck.message.slot, + proposer_index: blck.message.proposer_index, + parent_root: blck.message.parent_root, + state_root: blck.message.state_root, + body: bellatrix_mev.BlindedBeaconBlockBody( + randao_reveal: blck.message.body.randao_reveal, + eth1_data: blck.message.body.eth1_data, + graffiti: blck.message.body.graffiti, + proposer_slashings: blck.message.body.proposer_slashings, + attester_slashings: blck.message.body.attester_slashings, + attestations: blck.message.body.attestations, + deposits: blck.message.body.deposits, + voluntary_exits: blck.message.body.voluntary_exits, + sync_aggregate: blck.message.body.sync_aggregate, + execution_payload_header: + blck.message.body.execution_payload.toExecutionPayloadHeader(), + ), + ), + signature: blck.signature, + ) + +func toSignedBlindedBeaconBlock*( + blck: capella.SignedBeaconBlock +): capella_mev.SignedBlindedBeaconBlock = + capella_mev.SignedBlindedBeaconBlock( + message: capella_mev.BlindedBeaconBlock( + slot: blck.message.slot, + proposer_index: 
blck.message.proposer_index, + parent_root: blck.message.parent_root, + state_root: blck.message.state_root, + body: capella_mev.BlindedBeaconBlockBody( + randao_reveal: blck.message.body.randao_reveal, + eth1_data: blck.message.body.eth1_data, + graffiti: blck.message.body.graffiti, + proposer_slashings: blck.message.body.proposer_slashings, + attester_slashings: blck.message.body.attester_slashings, + attestations: blck.message.body.attestations, + deposits: blck.message.body.deposits, + voluntary_exits: blck.message.body.voluntary_exits, + sync_aggregate: blck.message.body.sync_aggregate, + execution_payload_header: + blck.message.body.execution_payload.toExecutionPayloadHeader(), + bls_to_execution_changes: blck.message.body.bls_to_execution_changes, + ), + ), + signature: blck.signature, + ) + +func toSignedBlindedBeaconBlock*( + blck: deneb.SignedBeaconBlock +): deneb_mev.SignedBlindedBeaconBlock = + deneb_mev.SignedBlindedBeaconBlock( + message: deneb_mev.BlindedBeaconBlock( + slot: blck.message.slot, + proposer_index: blck.message.proposer_index, + parent_root: blck.message.parent_root, + state_root: blck.message.state_root, + body: deneb_mev.BlindedBeaconBlockBody( + randao_reveal: blck.message.body.randao_reveal, + eth1_data: blck.message.body.eth1_data, + graffiti: blck.message.body.graffiti, + proposer_slashings: blck.message.body.proposer_slashings, + attester_slashings: blck.message.body.attester_slashings, + attestations: blck.message.body.attestations, + deposits: blck.message.body.deposits, + voluntary_exits: blck.message.body.voluntary_exits, + sync_aggregate: blck.message.body.sync_aggregate, + execution_payload_header: + blck.message.body.execution_payload.toExecutionPayloadHeader(), + bls_to_execution_changes: blck.message.body.bls_to_execution_changes, + blob_kzg_commitments: blck.message.body.blob_kzg_commitments, + ), + ), + signature: blck.signature, + ) + +func toSignedBlindedBeaconBlock*( + blck: electra.SignedBeaconBlock +): electra_mev.SignedBlindedBeaconBlock = + electra_mev.SignedBlindedBeaconBlock( + message: electra_mev.BlindedBeaconBlock( + slot: blck.message.slot, + proposer_index: blck.message.proposer_index, + parent_root: blck.message.parent_root, + state_root: blck.message.state_root, + body: electra_mev.BlindedBeaconBlockBody( + randao_reveal: blck.message.body.randao_reveal, + eth1_data: blck.message.body.eth1_data, + graffiti: blck.message.body.graffiti, + proposer_slashings: blck.message.body.proposer_slashings, + attester_slashings: blck.message.body.attester_slashings, + attestations: blck.message.body.attestations, + deposits: blck.message.body.deposits, + voluntary_exits: blck.message.body.voluntary_exits, + sync_aggregate: blck.message.body.sync_aggregate, + execution_payload_header: + blck.message.body.execution_payload.toExecutionPayloadHeader(), + bls_to_execution_changes: blck.message.body.bls_to_execution_changes, + blob_kzg_commitments: blck.message.body.blob_kzg_commitments, + execution_requests: blck.message.body.execution_requests, + ), + ), + signature: blck.signature, + ) + +func toSignedBlindedBeaconBlock*( + blck: fulu.SignedBeaconBlock +): fulu_mev.SignedBlindedBeaconBlock = + fulu_mev.SignedBlindedBeaconBlock( + message: fulu_mev.BlindedBeaconBlock( + slot: blck.message.slot, + proposer_index: blck.message.proposer_index, + parent_root: blck.message.parent_root, + state_root: blck.message.state_root, + body: fulu_mev.BlindedBeaconBlockBody( + randao_reveal: blck.message.body.randao_reveal, + eth1_data: 
blck.message.body.eth1_data, + graffiti: blck.message.body.graffiti, + proposer_slashings: blck.message.body.proposer_slashings, + attester_slashings: blck.message.body.attester_slashings, + attestations: blck.message.body.attestations, + deposits: blck.message.body.deposits, + voluntary_exits: blck.message.body.voluntary_exits, + sync_aggregate: blck.message.body.sync_aggregate, + execution_payload_header: + blck.message.body.execution_payload.toExecutionPayloadHeader(), + bls_to_execution_changes: blck.message.body.bls_to_execution_changes, + blob_kzg_commitments: blck.message.body.blob_kzg_commitments, + execution_requests: blck.message.body.execution_requests, + ), + ), + signature: blck.signature, + ) diff --git a/beacon_chain/spec/forks_light_client.nim b/beacon_chain/spec/forks_light_client.nim index 5b3a2c3062..ee61fcedcf 100644 --- a/beacon_chain/spec/forks_light_client.nim +++ b/beacon_chain/spec/forks_light_client.nim @@ -8,7 +8,8 @@ {.push raises: [].} import - ./datatypes/[phase0, altair, bellatrix, capella, deneb, electra, fulu], + ./datatypes/[ + phase0, altair, bellatrix, capella, deneb, electra, fulu, gloas], ./eth2_merkleization type @@ -222,13 +223,13 @@ template finalized_root_gindex*( else: static: raiseAssert "Unreachable" -template FinalityBranch*(kind: static LightClientDataFork): auto = +template FinalityBranch*(kind: static LightClientDataFork): typedesc = when kind >= LightClientDataFork.Electra: - typedesc[electra.FinalityBranch] + electra.FinalityBranch elif kind >= LightClientDataFork.Altair: - typedesc[altair.FinalityBranch] + altair.FinalityBranch else: - static: raiseAssert "Unreachable" + {.error: "FinalityBranch unsupported in " & $kind.} template current_sync_committee_gindex*( kind: static LightClientDataFork): GeneralizedIndex = @@ -239,11 +240,11 @@ template current_sync_committee_gindex*( else: static: raiseAssert "Unreachable" -template CurrentSyncCommitteeBranch*(kind: static LightClientDataFork): auto = +template CurrentSyncCommitteeBranch*(kind: static LightClientDataFork): typedesc = when kind >= LightClientDataFork.Electra: - typedesc[electra.CurrentSyncCommitteeBranch] + electra.CurrentSyncCommitteeBranch elif kind >= LightClientDataFork.Altair: - typedesc[altair.CurrentSyncCommitteeBranch] + altair.CurrentSyncCommitteeBranch else: static: raiseAssert "Unreachable" @@ -256,133 +257,133 @@ template next_sync_committee_gindex*( else: static: raiseAssert "Unreachable" -template NextSyncCommitteeBranch*(kind: static LightClientDataFork): auto = +template NextSyncCommitteeBranch*(kind: static LightClientDataFork): typedesc = when kind >= LightClientDataFork.Electra: - typedesc[electra.NextSyncCommitteeBranch] + electra.NextSyncCommitteeBranch elif kind >= LightClientDataFork.Altair: - typedesc[altair.NextSyncCommitteeBranch] + altair.NextSyncCommitteeBranch else: static: raiseAssert "Unreachable" -template LightClientHeader*(kind: static LightClientDataFork): auto = +template LightClientHeader*(kind: static LightClientDataFork): typedesc = when kind == LightClientDataFork.Electra: - typedesc[electra.LightClientHeader] + electra.LightClientHeader elif kind == LightClientDataFork.Deneb: - typedesc[deneb.LightClientHeader] + deneb.LightClientHeader elif kind == LightClientDataFork.Capella: - typedesc[capella.LightClientHeader] + capella.LightClientHeader elif kind == LightClientDataFork.Altair: - typedesc[altair.LightClientHeader] + altair.LightClientHeader else: static: raiseAssert "Unreachable" -template LightClientBootstrap*(kind: static 
LightClientDataFork): auto = +template LightClientBootstrap*(kind: static LightClientDataFork): typedesc = when kind == LightClientDataFork.Electra: - typedesc[electra.LightClientBootstrap] + electra.LightClientBootstrap elif kind == LightClientDataFork.Deneb: - typedesc[deneb.LightClientBootstrap] + deneb.LightClientBootstrap elif kind == LightClientDataFork.Capella: - typedesc[capella.LightClientBootstrap] + capella.LightClientBootstrap elif kind == LightClientDataFork.Altair: - typedesc[altair.LightClientBootstrap] + altair.LightClientBootstrap else: static: raiseAssert "Unreachable" -template LightClientUpdate*(kind: static LightClientDataFork): auto = +template LightClientUpdate*(kind: static LightClientDataFork): typedesc = when kind == LightClientDataFork.Electra: - typedesc[electra.LightClientUpdate] + electra.LightClientUpdate elif kind == LightClientDataFork.Deneb: - typedesc[deneb.LightClientUpdate] + deneb.LightClientUpdate elif kind == LightClientDataFork.Capella: - typedesc[capella.LightClientUpdate] + capella.LightClientUpdate elif kind == LightClientDataFork.Altair: - typedesc[altair.LightClientUpdate] + altair.LightClientUpdate else: static: raiseAssert "Unreachable" -template LightClientFinalityUpdate*(kind: static LightClientDataFork): auto = +template LightClientFinalityUpdate*(kind: static LightClientDataFork): typedesc = when kind == LightClientDataFork.Electra: - typedesc[electra.LightClientFinalityUpdate] + electra.LightClientFinalityUpdate elif kind == LightClientDataFork.Deneb: - typedesc[deneb.LightClientFinalityUpdate] + deneb.LightClientFinalityUpdate elif kind == LightClientDataFork.Capella: - typedesc[capella.LightClientFinalityUpdate] + capella.LightClientFinalityUpdate elif kind == LightClientDataFork.Altair: - typedesc[altair.LightClientFinalityUpdate] + altair.LightClientFinalityUpdate else: static: raiseAssert "Unreachable" -template LightClientOptimisticUpdate*(kind: static LightClientDataFork): auto = +template LightClientOptimisticUpdate*(kind: static LightClientDataFork): typedesc = when kind == LightClientDataFork.Electra: - typedesc[electra.LightClientOptimisticUpdate] + electra.LightClientOptimisticUpdate elif kind == LightClientDataFork.Deneb: - typedesc[deneb.LightClientOptimisticUpdate] + deneb.LightClientOptimisticUpdate elif kind == LightClientDataFork.Capella: - typedesc[capella.LightClientOptimisticUpdate] + capella.LightClientOptimisticUpdate elif kind == LightClientDataFork.Altair: - typedesc[altair.LightClientOptimisticUpdate] + altair.LightClientOptimisticUpdate else: static: raiseAssert "Unreachable" -template LightClientStore*(kind: static LightClientDataFork): auto = +template LightClientStore*(kind: static LightClientDataFork): typedesc = when kind == LightClientDataFork.Electra: - typedesc[electra.LightClientStore] + electra.LightClientStore elif kind == LightClientDataFork.Deneb: - typedesc[deneb.LightClientStore] + deneb.LightClientStore elif kind == LightClientDataFork.Capella: - typedesc[capella.LightClientStore] + capella.LightClientStore elif kind == LightClientDataFork.Altair: - typedesc[altair.LightClientStore] + altair.LightClientStore else: static: raiseAssert "Unreachable" template Forky*( x: typedesc[ForkedLightClientHeader], - kind: static LightClientDataFork): auto = + kind: static LightClientDataFork): typedesc = kind.LightClientHeader template Forky*( x: typedesc[ForkedLightClientBootstrap], - kind: static LightClientDataFork): auto = + kind: static LightClientDataFork): typedesc = kind.LightClientBootstrap template 
Forky*( x: typedesc[ForkedLightClientUpdate], - kind: static LightClientDataFork): auto = + kind: static LightClientDataFork): typedesc = kind.LightClientUpdate template Forky*( x: typedesc[ForkedLightClientFinalityUpdate], - kind: static LightClientDataFork): auto = + kind: static LightClientDataFork): typedesc = kind.LightClientFinalityUpdate template Forky*( x: typedesc[ForkedLightClientOptimisticUpdate], - kind: static LightClientDataFork): auto = + kind: static LightClientDataFork): typedesc = kind.LightClientOptimisticUpdate template Forky*( x: typedesc[ForkedLightClientStore], - kind: static LightClientDataFork): auto = + kind: static LightClientDataFork): typedesc = kind.LightClientStore -template Forked*(x: typedesc[ForkyLightClientHeader]): auto = - typedesc[ForkedLightClientHeader] +template Forked*(x: typedesc[ForkyLightClientHeader]): typedesc = + ForkedLightClientHeader -template Forked*(x: typedesc[ForkyLightClientBootstrap]): auto = - typedesc[ForkedLightClientBootstrap] +template Forked*(x: typedesc[ForkyLightClientBootstrap]): typedesc = + ForkedLightClientBootstrap -template Forked*(x: typedesc[ForkyLightClientUpdate]): auto = - typedesc[ForkedLightClientUpdate] +template Forked*(x: typedesc[ForkyLightClientUpdate]): typedesc = + ForkedLightClientUpdate -template Forked*(x: typedesc[ForkyLightClientFinalityUpdate]): auto = - typedesc[ForkedLightClientFinalityUpdate] +template Forked*(x: typedesc[ForkyLightClientFinalityUpdate]): typedesc = + ForkedLightClientFinalityUpdate -template Forked*(x: typedesc[ForkyLightClientOptimisticUpdate]): auto = - typedesc[ForkedLightClientOptimisticUpdate] +template Forked*(x: typedesc[ForkyLightClientOptimisticUpdate]): typedesc = + ForkedLightClientOptimisticUpdate -template Forked*(x: typedesc[ForkyLightClientStore]): auto = - typedesc[ForkedLightClientStore] +template Forked*(x: typedesc[ForkyLightClientStore]): typedesc = + ForkedLightClientStore template withAll*( x: typedesc[LightClientDataFork], body: untyped): untyped = @@ -1003,6 +1004,76 @@ func migratingToDataFork*[ upgradedObject.migrateToDataFork(newKind) upgradedObject +# Convenience-based location for toExecutionPayloadHeader because this is the +# first time we have access to `hash_tree_root` in a universally available +# module +# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/beacon-chain.md#process_execution_payload +proc toExecutionPayloadHeader*( + payload: bellatrix.ExecutionPayload +): bellatrix.ExecutionPayloadHeader = + bellatrix.ExecutionPayloadHeader( + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + extra_data: payload.extra_data, + transactions_root: hash_tree_root(payload.transactions), + ) + +# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#modified-process_execution_payload +proc toExecutionPayloadHeader*( + payload: capella.ExecutionPayload +): capella.ExecutionPayloadHeader = + capella.ExecutionPayloadHeader( + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: 
payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + extra_data: payload.extra_data, + transactions_root: hash_tree_root(payload.transactions), + withdrawals_root: hash_tree_root(payload.withdrawals), # [New in Capella] + ) + +# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#process_execution_payload +proc toExecutionPayloadHeader*( + payload: deneb.ExecutionPayload +): deneb.ExecutionPayloadHeader = + deneb.ExecutionPayloadHeader( + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + extra_data: payload.extra_data, + transactions_root: hash_tree_root(payload.transactions), + withdrawals_root: hash_tree_root(payload.withdrawals), + blob_gas_used: payload.blob_gas_used, # [New in Deneb] + excess_blob_gas: payload.excess_blob_gas, # [New in Deneb] + ) + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/full-node.md#block_to_light_client_header func toAltairLightClientHeader( # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095 @@ -1036,25 +1107,9 @@ func toCapellaLightClientHeader( blck: capella.SignedBeaconBlock | capella.TrustedSignedBeaconBlock ): capella.LightClientHeader = - template payload: untyped = blck.message.body.execution_payload capella.LightClientHeader( beacon: blck.message.toBeaconBlockHeader(), - execution: capella.ExecutionPayloadHeader( - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - extra_data: payload.extra_data, - base_fee_per_gas: payload.base_fee_per_gas, - block_hash: payload.block_hash, - transactions_root: hash_tree_root(payload.transactions), - withdrawals_root: hash_tree_root(payload.withdrawals)), + execution: blck.message.body.execution_payload.toExecutionPayloadHeader(), execution_branch: blck.message.body.build_proof( capella.EXECUTION_PAYLOAD_GINDEX).get) @@ -1107,27 +1162,9 @@ func toDenebLightClientHeader( blck: deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock ): deneb.LightClientHeader = - template payload: untyped = blck.message.body.execution_payload deneb.LightClientHeader( beacon: blck.message.toBeaconBlockHeader(), - execution: deneb.ExecutionPayloadHeader( - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - extra_data: payload.extra_data, - base_fee_per_gas: payload.base_fee_per_gas, - block_hash: payload.block_hash, - transactions_root: hash_tree_root(payload.transactions), - withdrawals_root: 
hash_tree_root(payload.withdrawals), - blob_gas_used: payload.blob_gas_used, - excess_blob_gas: payload.excess_blob_gas), + execution: blck.message.body.execution_payload.toExecutionPayloadHeader(), execution_branch: blck.message.body.build_proof( capella.EXECUTION_PAYLOAD_GINDEX).get) @@ -1156,7 +1193,7 @@ func toElectraLightClientHeader( template payload: untyped = blck.message.body.execution_payload electra.LightClientHeader( beacon: blck.message.toBeaconBlockHeader(), - execution: electra.ExecutionPayloadHeader( + execution: deneb.ExecutionPayloadHeader( parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -1178,59 +1215,13 @@ func toElectraLightClientHeader( func toElectraLightClientHeader( # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095 blck: - deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock -): electra.LightClientHeader = - template payload: untyped = blck.message.body.execution_payload - electra.LightClientHeader( - beacon: blck.message.toBeaconBlockHeader(), - execution: electra.ExecutionPayloadHeader( - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - extra_data: payload.extra_data, - base_fee_per_gas: payload.base_fee_per_gas, - block_hash: payload.block_hash, - transactions_root: hash_tree_root(payload.transactions), - withdrawals_root: hash_tree_root(payload.withdrawals), - blob_gas_used: payload.blob_gas_used, - excess_blob_gas: payload.excess_blob_gas), - execution_branch: blck.message.body.build_proof( - capella.EXECUTION_PAYLOAD_GINDEX).get) - -func toElectraLightClientHeader( - # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095 - blck: + deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock | electra.SignedBeaconBlock | electra.TrustedSignedBeaconBlock | fulu.SignedBeaconBlock | fulu.TrustedSignedBeaconBlock ): electra.LightClientHeader = - template payload: untyped = blck.message.body.execution_payload electra.LightClientHeader( beacon: blck.message.toBeaconBlockHeader(), - execution: electra.ExecutionPayloadHeader( - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - extra_data: payload.extra_data, - base_fee_per_gas: payload.base_fee_per_gas, - block_hash: payload.block_hash, - transactions_root: hash_tree_root(payload.transactions), - withdrawals_root: hash_tree_root(payload.withdrawals), - blob_gas_used: payload.blob_gas_used, - excess_blob_gas: payload.excess_blob_gas), + execution: blck.message.body.execution_payload.toExecutionPayloadHeader(), execution_branch: blck.message.body.build_proof( capella.EXECUTION_PAYLOAD_GINDEX).get) diff --git a/beacon_chain/spec/helpers.nim b/beacon_chain/spec/helpers.nim index 2340844420..138b9234ee 100644 --- a/beacon_chain/spec/helpers.nim +++ b/beacon_chain/spec/helpers.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. 
This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # Uncategorized helper functions from the spec @@ -26,7 +26,7 @@ import export eth2_merkleization, forks, ssz_codec, rlp, eth_types_rlp.append -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/phase0/weak-subjectivity.md#constants +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/weak-subjectivity.md#constants const ETH_TO_GWEI = 1_000_000_000.Gwei func toEther*(gwei: Gwei): Ether = @@ -107,7 +107,7 @@ func get_current_epoch*(state: ForkyBeaconState): Epoch = ## Return the current epoch. state.slot.epoch -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#get_current_epoch +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#get_current_epoch func get_current_epoch*(state: ForkedHashedBeaconState): Epoch = ## Return the current epoch. withState(state): get_current_epoch(forkyState.data) @@ -208,7 +208,7 @@ func add_flag*(flags: ParticipationFlags, flag_index: TimelyFlag): Participation let flag = ParticipationFlags(1'u8 shl ord(flag_index)) flags or flag -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#has_flag +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/beacon-chain.md#has_flag func has_flag*(flags: ParticipationFlags, flag_index: TimelyFlag): bool = let flag = ParticipationFlags(1'u8 shl ord(flag_index)) (flags and flag) == flag @@ -227,9 +227,8 @@ func verify_blob_sidecar_inclusion_proof*( ok() func create_blob_sidecars*( - forkyBlck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock | - fulu.SignedBeaconBlock, - kzg_proofs: KzgProofs, + forkyBlck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock, + kzg_proofs: deneb.KzgProofs, blobs: Blobs): seq[BlobSidecar] = template kzg_commitments: untyped = forkyBlck.message.body.blob_kzg_commitments @@ -380,7 +379,7 @@ func contextEpoch*(bootstrap: ForkyLightClientBootstrap): Epoch = # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate func contextEpoch*(update: SomeForkyLightClientUpdate): Epoch = update.attested_header.beacon.slot.epoch @@ -392,11 +391,20 @@ func is_merge_transition_complete*( default(typeof(state.latest_execution_payload_header)) state.latest_execution_payload_header != defaultExecutionPayloadHeader +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#modified-is_merge_transition_complete +func is_merge_transition_complete*(state: gloas.BeaconState): bool = + var bid = default(gloas.ExecutionPayloadBid) + const kzgs = default(KzgCommitments) + bid.blob_kzg_commitments_root = kzgs.hash_tree_root() + state.latest_execution_payload_bid != bid + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/sync/optimistic.md#helpers func is_execution_block*(body: SomeForkyBeaconBlockBody): bool = - when typeof(body).kind >= ConsensusFork.Bellatrix: - 
const defaultExecutionPayload = - default(typeof(body.execution_payload)) + when typeof(body).kind == ConsensusFork.Gloas: + debugGloasComment "" + false + elif typeof(body).kind >= ConsensusFork.Bellatrix: + const defaultExecutionPayload = default(typeof(body.execution_payload)) body.execution_payload != defaultExecutionPayload else: false @@ -408,34 +416,23 @@ func is_execution_block*(blck: SomeForkyBeaconBlock): bool = func is_merge_transition_block( state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | fulu.BeaconState, - body: bellatrix.BeaconBlockBody | bellatrix.TrustedBeaconBlockBody | - bellatrix.SigVerifiedBeaconBlockBody | - capella.BeaconBlockBody | capella.TrustedBeaconBlockBody | - capella.SigVerifiedBeaconBlockBody | - deneb.BeaconBlockBody | deneb.TrustedBeaconBlockBody | - deneb.SigVerifiedBeaconBlockBody | - electra.BeaconBlockBody | electra.TrustedBeaconBlockBody | - electra.SigVerifiedBeaconBlockBody | - fulu.BeaconBlockBody | fulu.TrustedBeaconBlockBody | - fulu.SigVerifiedBeaconBlockBody): bool = - const defaultExecutionPayload = default(typeof(body.execution_payload)) - not is_merge_transition_complete(state) and - body.execution_payload != defaultExecutionPayload + body: SomeForkyBeaconBlockBody | SomeForkyBlindedBeaconBlockBody): bool = + when body is SomeForkyBlindedBeaconBlockBody: + const defaultExecutionPayload = default(typeof(body.execution_payload_header)) + not is_merge_transition_complete(state) and + body.execution_payload_header != defaultExecutionPayload + elif typeof(body).kind >= ConsensusFork.Bellatrix: + const defaultExecutionPayload = default(typeof(body.execution_payload)) + not is_merge_transition_complete(state) and + body.execution_payload != defaultExecutionPayload + else: + false -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/bellatrix/beacon-chain.md#is_execution_enabled +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/bellatrix/beacon-chain.md#is_execution_enabled func is_execution_enabled*( state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | fulu.BeaconState, - body: bellatrix.BeaconBlockBody | bellatrix.TrustedBeaconBlockBody | - bellatrix.SigVerifiedBeaconBlockBody | - capella.BeaconBlockBody | capella.TrustedBeaconBlockBody | - capella.SigVerifiedBeaconBlockBody | - deneb.BeaconBlockBody | deneb.TrustedBeaconBlockBody | - deneb.SigVerifiedBeaconBlockBody | - electra.BeaconBlockBody | electra.TrustedBeaconBlockBody | - electra.SigVerifiedBeaconBlockBody | - fulu.BeaconBlockBody | fulu.TrustedBeaconBlockBody | - fulu.SigVerifiedBeaconBlockBody): bool = + body: SomeForkyBeaconBlockBody | SomeForkyBlindedBeaconBlockBody): bool = is_merge_transition_block(state, body) or is_merge_transition_complete(state) # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot @@ -454,8 +451,7 @@ template append*(w: var RlpWriter, withdrawal: capella.Withdrawal) = address: EthAddress withdrawal.address.data, amount: distinctBase(withdrawal.amount))) -proc computeTransactionsTrieRoot( - payload: ForkyExecutionPayload): EthHash32 = +func computeTransactionsTrieRoot(payload: ForkyExecutionPayload): EthHash32 = orderedTrieRoot(payload.transactions.asSeq) # https://eips.ethereum.org/EIPS/eip-7685 @@ -482,9 +478,9 @@ func computeRequestsHash( requestsHash.to(EthHash32) -proc toExecutionBlockHeader( +func toExecutionBlockHeader( payload: ForkyExecutionPayload, - 
parentRoot: Eth2Digest, + parentRoot: Opt[Eth2Digest], requestsHash = Opt.none(EthHash32)): EthHeader = static: # `GasInt` is signed. We only use it for hashing. doAssert sizeof(GasInt) == sizeof(payload.gas_limit) @@ -493,23 +489,23 @@ proc toExecutionBlockHeader( let txRoot = payload.computeTransactionsTrieRoot() withdrawalsRoot = - when typeof(payload).kind >= ConsensusFork.Capella: + when compiles(payload.withdrawals): Opt.some orderedTrieRoot(payload.withdrawals.asSeq) else: Opt.none(EthHash32) blobGasUsed = - when typeof(payload).kind >= ConsensusFork.Deneb: + when compiles(payload.blob_gas_used): Opt.some payload.blob_gas_used else: Opt.none(uint64) excessBlobGas = - when typeof(payload).kind >= ConsensusFork.Deneb: + when compiles(payload.excess_blob_gas): Opt.some payload.excess_blob_gas else: Opt.none(uint64) parentBeaconBlockRoot = - when typeof(payload).kind >= ConsensusFork.Deneb: - Opt.some EthHash32(parentRoot.data) + if parentRoot.isSome(): + Opt.some(parentRoot.get().to(EthHash32)) else: Opt.none(EthHash32) @@ -536,39 +532,35 @@ proc toExecutionBlockHeader( parentBeaconBlockRoot : parentBeaconBlockRoot, # EIP-4788 requestsHash : requestsHash) # EIP-7685 -proc compute_execution_block_hash*( +func compute_execution_block_hash*( body: ForkyBeaconBlockBody, parentRoot: Eth2Digest): Eth2Digest = when typeof(body).kind >= ConsensusFork.Electra: body.execution_payload.toExecutionBlockHeader( - parentRoot, Opt.some body.execution_requests.computeRequestsHash()) - .rlpHash().to(Eth2Digest) + Opt.some parentRoot, Opt.some body.execution_requests.computeRequestsHash()) + .computeRlpHash().to(Eth2Digest) + elif typeof(body).kind >= ConsensusFork.Deneb: + body.execution_payload.toExecutionBlockHeader( + Opt.some parentRoot) + .computeRlpHash().to(Eth2Digest) else: - body.execution_payload.toExecutionBlockHeader(parentRoot) - .rlpHash().to(Eth2Digest) + body.execution_payload.toExecutionBlockHeader(Opt.none(Eth2Digest)) + .computeRlpHash().to(Eth2Digest) -proc compute_execution_block_hash*(blck: ForkyBeaconBlock): Eth2Digest = +func compute_execution_block_hash*(blck: ForkyBeaconBlock): Eth2Digest = blck.body.compute_execution_block_hash(blck.parent_root) -from std/math import exp, ln -from std/sequtils import foldl - -func ln_binomial(n, k: int): float64 = - if k > n: - low(float64) - else: - template ln_factorial(n: int): float64 = - (2 .. n).foldl(a + ln(b.float64), 0.0) - ln_factorial(n) - ln_factorial(k) - ln_factorial(n - k) - -func hypergeom_cdf*(k: int, population: int, successes: int, draws: int): - float64 = - if k < draws + successes - population: - 0.0 - elif k >= min(successes, draws): - 1.0 - else: - let ln_denom = ln_binomial(population, draws) - (0 .. k).foldl(a + exp( - ln_binomial(successes, b) + - ln_binomial(population - successes, draws - b) - ln_denom), 0.0) +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#new-is_builder_payment_withdrawable +func is_builder_payment_withdrawable*( + state: gloas.BeaconState, + withdrawal: BuilderPendingWithdrawal): bool = + ## Check if the builder is slashed and not yet withdrawable. 
+ let + builder = state.validators[withdrawal.builder_index] + current_epoch = state.slot.epoch + + builder.withdrawable_epoch >= current_epoch or not builder.slashed + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#new-is_parent_block_full +func is_parent_block_full*(state: gloas.BeaconState): bool = + state.latest_execution_payload_bid.block_hash == state.latest_block_hash diff --git a/beacon_chain/spec/helpers_el.nim b/beacon_chain/spec/helpers_el.nim index 9b91cc5e95..cf85c97c69 100644 --- a/beacon_chain/spec/helpers_el.nim +++ b/beacon_chain/spec/helpers_el.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -33,7 +33,7 @@ func is_valid_versioned_hashes*(blck: ForkyBeaconBlock): Result[void, string] = for vHash in tx.versionedHashes: if commitments.len <= i: return err("Extra blobs without matching `blob_kzg_commitments`") - if vHash.data != kzg_commitment_to_versioned_hash(commitments[i]): + if vHash != kzg_commitment_to_versioned_hash(commitments[i]): return err("Invalid `blob_versioned_hash` at index " & $i) inc i if i != commitments.len: diff --git a/beacon_chain/spec/keystore.nim b/beacon_chain/spec/keystore.nim index 5b422a5835..beee403234 100644 --- a/beacon_chain/spec/keystore.nim +++ b/beacon_chain/spec/keystore.nim @@ -5,12 +5,11 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import # Standard library - std/[algorithm, math, parseutils, strformat, strutils, typetraits, unicode, - uri, hashes], + std/[strformat, strutils, typetraits, unicode, uri, hashes], # Third-party libraries normalize, # Status libraries @@ -24,6 +23,10 @@ import libp2p/crypto/crypto as lcrypto, ./datatypes/base, ./signatures +from std/algorithm import binarySearch +from std/math import `^` +from std/parseutils import parseBiggestUInt + export base, uri, io2, options # We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures. @@ -150,10 +153,9 @@ type ProvenProperty* = object path*: string description*: Option[string] - capellaIndex*: Option[GeneralizedIndex] - denebIndex*: Option[GeneralizedIndex] - electraIndex*: Option[GeneralizedIndex] - fuluIndex*: Option[GeneralizedIndex] + electraIndex*: GeneralizedIndex + fuluIndex*: GeneralizedIndex + gloasIndex*: GeneralizedIndex KeystoreData* = object version*: uint64 @@ -262,9 +264,9 @@ const KeystoreCachePruningTime* = 5.minutes -UUID.serializesAsBaseIn Json -KeyPath.serializesAsBaseIn Json -WalletName.serializesAsBaseIn Json +UUID.serializesAsBase Json +KeyPath.serializesAsBase Json +WalletName.serializesAsBase Json ChecksumFunctionKind.serializesAsTextInJson CipherFunctionKind.serializesAsTextInJson @@ -727,20 +729,19 @@ template writeValue*(w: var JsonWriter, func parseProvenBlockProperty*(propertyPath: string): Result[ProvenProperty, string] = if propertyPath == ".execution_payload.fee_recipient": - debugFuluComment "We don't know yet if `GeneralizedIndex` will stay same in Fulu yet." 
+ debugGloasComment "almost certainly not correct anymore, execution payload position changes substantially" ok ProvenProperty( path: propertyPath, - capellaIndex: some GeneralizedIndex(401), - denebIndex: some GeneralizedIndex(801), - electraIndex: some GeneralizedIndex(801), - fuluIndex: some GeneralizedIndex(801)) + electraIndex: GeneralizedIndex(801), + fuluIndex: GeneralizedIndex(801), + gloasIndex: GeneralizedIndex(801)) elif propertyPath == ".graffiti": + debugGloasComment "check if graffiti is still generalizedindex 18" ok ProvenProperty( path: propertyPath, - capellaIndex: some GeneralizedIndex(18), - denebIndex: some GeneralizedIndex(18), - electraIndex: some GeneralizedIndex(18), - fuluIndex: some GeneralizedIndex(18)) + electraIndex: GeneralizedIndex(18), + fuluIndex: GeneralizedIndex(18), + gloasIndex: GeneralizedIndex(18)) else: err("Keystores with proven properties different than " & "`.execution_payload.fee_recipient` and `.graffiti` " & @@ -847,15 +848,15 @@ proc readValue*(reader: var JsonReader, value: var RemoteKeystore) var provenProperties = reader.readValue(seq[ProvenProperty]) for prop in provenProperties.mitems: if prop.path == ".execution_payload.fee_recipient": - prop.capellaIndex = some GeneralizedIndex(401) - prop.denebIndex = some GeneralizedIndex(801) - prop.electraIndex = some GeneralizedIndex(801) - prop.fuluIndex = some GeneralizedIndex(801) + debugGloasComment "nearly certainly incorrect fee recipient generalizedindex" + prop.electraIndex = GeneralizedIndex(801) + prop.fuluIndex = GeneralizedIndex(801) + prop.gloasIndex = GeneralizedIndex(801) elif prop.path == ".graffiti": - prop.capellaIndex = some GeneralizedIndex(18) - prop.denebIndex = some GeneralizedIndex(18) - prop.electraIndex = some GeneralizedIndex(18) - prop.fuluIndex = some GeneralizedIndex(18) + debugGloasComment "check if graffiti is still generalizedindex 18" + prop.electraIndex = GeneralizedIndex(18) + prop.fuluIndex = GeneralizedIndex(18) + prop.gloasIndex = GeneralizedIndex(18) else: reader.raiseUnexpectedValue("Keystores with proven properties different than " & "`.execution_payload.fee_recipient` and `.graffiti` " & @@ -1406,4 +1407,4 @@ func prepareDeposit*(cfg: RuntimeConfig, withdrawal_credentials: makeWithdrawalCredentials(withdrawalPubKey)) res.signature = get_deposit_signature(cfg, res, signingKey).toValidatorSig() - return res \ No newline at end of file + return res diff --git a/beacon_chain/spec/light_client_sync.nim b/beacon_chain/spec/light_client_sync.nim index 00f17b101f..eba6da2679 100644 --- a/beacon_chain/spec/light_client_sync.nim +++ b/beacon_chain/spec/light_client_sync.nim @@ -12,8 +12,8 @@ import datatypes/altair, ./helpers -from ../consensus_object_pools/block_pools_types import VerifierError -export block_pools_types.VerifierError +from ../consensus_object_pools/block_pools_types_light_client import LightClientVerifierError +export LightClientVerifierError # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/sync-protocol.md#is_valid_normalized_merkle_branch func is_valid_normalized_merkle_branch[N]( @@ -37,12 +37,12 @@ func initialize_light_client_store*( cfg: RuntimeConfig ): auto = type ResultType = - Result[typeof(bootstrap).kind.LightClientStore, VerifierError] + Result[typeof(bootstrap).kind.LightClientStore, LightClientVerifierError] if not is_valid_light_client_header(bootstrap.header, cfg): - return ResultType.err(VerifierError.Invalid) + return ResultType.err(LightClientVerifierError.Invalid) if 
hash_tree_root(bootstrap.header.beacon) != trusted_block_root: - return ResultType.err(VerifierError.Invalid) + return ResultType.err(LightClientVerifierError.Invalid) withLcDataFork(lcDataForkAtConsensusFork( cfg.consensusForkAtEpoch(bootstrap.header.beacon.slot.epoch))): @@ -52,47 +52,47 @@ func initialize_light_client_store*( bootstrap.current_sync_committee_branch, lcDataFork.current_sync_committee_gindex, bootstrap.header.beacon.state_root): - return ResultType.err(VerifierError.Invalid) + return ResultType.err(LightClientVerifierError.Invalid) return ResultType.ok(typeof(bootstrap).kind.LightClientStore( finalized_header: bootstrap.header, current_sync_committee: bootstrap.current_sync_committee, optimistic_header: bootstrap.header)) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/light-client/sync-protocol.md#validate_light_client_update +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/light-client/sync-protocol.md#validate_light_client_update proc validate_light_client_update*( store: ForkyLightClientStore, update: SomeForkyLightClientUpdate, current_slot: Slot, cfg: RuntimeConfig, - genesis_validators_root: Eth2Digest): Result[void, VerifierError] = + genesis_validators_root: Eth2Digest): Result[void, LightClientVerifierError] = # Verify sync committee has sufficient participants template sync_aggregate(): auto = update.sync_aggregate template sync_committee_bits(): auto = sync_aggregate.sync_committee_bits let num_active_participants = countOnes(sync_committee_bits).uint64 if num_active_participants < MIN_SYNC_COMMITTEE_PARTICIPANTS: - return err(VerifierError.Invalid) + return err(LightClientVerifierError.Invalid) # Verify update does not skip a sync committee period if not is_valid_light_client_header(update.attested_header, cfg): - return err(VerifierError.Invalid) + return err(LightClientVerifierError.Invalid) when update is SomeForkyLightClientUpdateWithFinality: if update.attested_header.beacon.slot < update.finalized_header.beacon.slot: - return err(VerifierError.Invalid) + return err(LightClientVerifierError.Invalid) if update.signature_slot <= update.attested_header.beacon.slot: - return err(VerifierError.Invalid) + return err(LightClientVerifierError.Invalid) if current_slot < update.signature_slot: - return err(VerifierError.UnviableFork) + return err(LightClientVerifierError.UnviableFork) let store_period = store.finalized_header.beacon.slot.sync_committee_period signature_period = update.signature_slot.sync_committee_period is_next_sync_committee_known = store.is_next_sync_committee_known if is_next_sync_committee_known: if signature_period notin [store_period, store_period + 1]: - return err(VerifierError.MissingParent) + return err(LightClientVerifierError.MissingParent) else: if signature_period != store_period: - return err(VerifierError.MissingParent) + return err(LightClientVerifierError.MissingParent) # Verify update is relevant when update is SomeForkyLightClientUpdateWithSyncCommittee: @@ -102,11 +102,11 @@ proc validate_light_client_update*( if update.attested_header.beacon.slot <= store.finalized_header.beacon.slot: when update is SomeForkyLightClientUpdateWithSyncCommittee: if is_next_sync_committee_known: - return err(VerifierError.Duplicate) + return err(LightClientVerifierError.Duplicate) if attested_period != store_period or not is_sync_committee_update: - return err(VerifierError.Duplicate) + return err(LightClientVerifierError.Duplicate) else: - return err(VerifierError.Duplicate) + 
return err(LightClientVerifierError.Duplicate) # Verify that the `finality_branch`, if present, confirms `finalized_header` # to match the finalized checkpoint root saved in the state of @@ -115,17 +115,17 @@ proc validate_light_client_update*( when update is SomeForkyLightClientUpdateWithFinality: if not update.is_finality_update: if update.finalized_header != default(typeof(update.finalized_header)): - return err(VerifierError.Invalid) + return err(LightClientVerifierError.Invalid) else: var finalized_root {.noinit.}: Eth2Digest if update.finalized_header.beacon.slot != GENESIS_SLOT: if not is_valid_light_client_header(update.finalized_header, cfg): - return err(VerifierError.Invalid) + return err(LightClientVerifierError.Invalid) finalized_root = hash_tree_root(update.finalized_header.beacon) elif update.finalized_header == default(typeof(update.finalized_header)): finalized_root.reset() else: - return err(VerifierError.Invalid) + return err(LightClientVerifierError.Invalid) withLcDataFork(lcDataForkAtConsensusFork( cfg.consensusForkAtEpoch(update.attested_header.beacon.slot.epoch))): when lcDataFork > LightClientDataFork.None: @@ -134,7 +134,7 @@ proc validate_light_client_update*( update.finality_branch, lcDataFork.finalized_root_gindex, update.attested_header.beacon.state_root): - return err(VerifierError.Invalid) + return err(LightClientVerifierError.Invalid) # Verify that the `next_sync_committee`, if present, actually is the # next sync committee saved in the state of the `attested_header` @@ -142,11 +142,11 @@ proc validate_light_client_update*( if not is_sync_committee_update: if update.next_sync_committee != default(typeof(update.next_sync_committee)): - return err(VerifierError.Invalid) + return err(LightClientVerifierError.Invalid) else: if attested_period == store_period and is_next_sync_committee_known: if update.next_sync_committee != store.next_sync_committee: - return err(VerifierError.UnviableFork) + return err(LightClientVerifierError.UnviableFork) withLcDataFork(lcDataForkAtConsensusFork( cfg.consensusForkAtEpoch(update.attested_header.beacon.slot.epoch))): when lcDataFork > LightClientDataFork.None: @@ -155,7 +155,7 @@ proc validate_light_client_update*( update.next_sync_committee_branch, lcDataFork.next_sync_committee_gindex, update.attested_header.beacon.state_root): - return err(VerifierError.Invalid) + return err(LightClientVerifierError.Invalid) # Verify sync committee aggregate signature let sync_committee = @@ -176,7 +176,7 @@ proc validate_light_client_update*( bitseqs.BitArray[maxParticipants]( bytes: sync_aggregate.sync_committee_bits.bytes), signing_root.data, sync_aggregate.sync_committee_signature): - return err(VerifierError.UnviableFork) + return err(LightClientVerifierError.UnviableFork) ok() @@ -250,7 +250,7 @@ proc process_light_client_update*( update: SomeForkyLightClientUpdate, current_slot: Slot, cfg: RuntimeConfig, - genesis_validators_root: Eth2Digest): Result[void, VerifierError] = + genesis_validators_root: Eth2Digest): Result[void, LightClientVerifierError] = ? 
validate_light_client_update( store, update, current_slot, cfg, genesis_validators_root) @@ -294,5 +294,5 @@ proc process_light_client_update*( store.best_valid_update.reset() if not didProgress: - return err(VerifierError.Duplicate) + return err(LightClientVerifierError.Duplicate) ok() diff --git a/beacon_chain/spec/mev/bellatrix_mev.nim b/beacon_chain/spec/mev/bellatrix_mev.nim index 029f156c58..f8db11ede7 100644 --- a/beacon_chain/spec/mev/bellatrix_mev.nim +++ b/beacon_chain/spec/mev/bellatrix_mev.nim @@ -1,11 +1,11 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import ".."/datatypes/altair from ".."/datatypes/phase0 import Attestation, AttesterSlashing @@ -13,7 +13,7 @@ from ".."/datatypes/bellatrix import ExecutionPayloadHeader from ".."/eth2_merkleization import hash_tree_root type - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#blindedbeaconblockbody + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/bellatrix/builder.md#blindedbeaconblockbody BlindedBeaconBlockBody* = object randao_reveal*: ValidatorSig eth1_data*: Eth1Data @@ -26,7 +26,7 @@ type sync_aggregate*: SyncAggregate execution_payload_header*: bellatrix.ExecutionPayloadHeader - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#blindedbeaconblock + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/bellatrix/builder.md#blindedbeaconblock BlindedBeaconBlock* = object slot*: Slot proposer_index*: uint64 @@ -34,7 +34,7 @@ type state_root*: Eth2Digest body*: BlindedBeaconBlockBody - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#signedblindedbeaconblock + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/bellatrix/builder.md#signedblindedbeaconblock SignedBlindedBeaconBlock* = object message*: BlindedBeaconBlock signature*: ValidatorSig @@ -66,40 +66,3 @@ func shortLog*(v: SignedBlindedBeaconBlock): auto = blck: shortLog(default(BlindedBeaconBlock)), signature: "" ) - -func toSignedBlindedBeaconBlock*(blck: bellatrix.SignedBeaconBlock): - SignedBlindedBeaconBlock = - SignedBlindedBeaconBlock( - message: BlindedBeaconBlock( - slot: blck.message.slot, - proposer_index: blck.message.proposer_index, - parent_root: blck.message.parent_root, - state_root: blck.message.state_root, - body: BlindedBeaconBlockBody( - randao_reveal: blck.message.body.randao_reveal, - eth1_data: blck.message.body.eth1_data, - graffiti: blck.message.body.graffiti, - proposer_slashings: blck.message.body.proposer_slashings, - attester_slashings: blck.message.body.attester_slashings, - attestations: blck.message.body.attestations, - deposits: blck.message.body.deposits, - voluntary_exits: blck.message.body.voluntary_exits, - sync_aggregate: blck.message.body.sync_aggregate, - execution_payload_header: ExecutionPayloadHeader( - parent_hash: blck.message.body.execution_payload.parent_hash, - fee_recipient: blck.message.body.execution_payload.fee_recipient, - state_root: blck.message.body.execution_payload.state_root, - 
receipts_root: blck.message.body.execution_payload.receipts_root, - logs_bloom: blck.message.body.execution_payload.logs_bloom, - prev_randao: blck.message.body.execution_payload.prev_randao, - block_number: blck.message.body.execution_payload.block_number, - gas_limit: blck.message.body.execution_payload.gas_limit, - gas_used: blck.message.body.execution_payload.gas_used, - timestamp: blck.message.body.execution_payload.timestamp, - extra_data: blck.message.body.execution_payload.extra_data, - base_fee_per_gas: - blck.message.body.execution_payload.base_fee_per_gas, - block_hash: blck.message.body.execution_payload.block_hash, - transactions_root: - hash_tree_root(blck.message.body.execution_payload.transactions)))), - signature: blck.signature) diff --git a/beacon_chain/spec/mev/capella_mev.nim b/beacon_chain/spec/mev/capella_mev.nim index 4a5dfa98d4..1f062c16d3 100644 --- a/beacon_chain/spec/mev/capella_mev.nim +++ b/beacon_chain/spec/mev/capella_mev.nim @@ -1,11 +1,11 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import ".."/datatypes/[altair, capella] from ".."/datatypes/phase0 import Attestation, AttesterSlashing @@ -14,7 +14,7 @@ from stew/byteutils import to0xHex from ../eth2_merkleization import fromSszBytes, hash_tree_root, toSszType type - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/capella/builder.md#blindedbeaconblockbody + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/capella/builder.md#blindedbeaconblockbody BlindedBeaconBlockBody* = object randao_reveal*: ValidatorSig eth1_data*: Eth1Data @@ -31,8 +31,8 @@ type List[SignedBLSToExecutionChange, Limit MAX_BLS_TO_EXECUTION_CHANGES] # [New in Capella] - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#blindedbeaconblock - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/capella/builder.md#blindedbeaconblockbody + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/bellatrix/builder.md#blindedbeaconblock + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/capella/builder.md#blindedbeaconblockbody BlindedBeaconBlock* = object slot*: Slot proposer_index*: uint64 @@ -40,8 +40,8 @@ type state_root*: Eth2Digest body*: BlindedBeaconBlockBody # [Modified in Capella] - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#signedblindedbeaconblock - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/capella/builder.md#blindedbeaconblockbody + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/bellatrix/builder.md#signedblindedbeaconblock + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/capella/builder.md#blindedbeaconblockbody SignedBlindedBeaconBlock* = object message*: BlindedBeaconBlock signature*: ValidatorSig @@ -74,43 +74,3 @@ func shortLog*(v: SignedBlindedBeaconBlock): auto = blck: shortLog(v.message), signature: shortLog(v.signature) ) - -func toSignedBlindedBeaconBlock*(blck: capella.SignedBeaconBlock): - SignedBlindedBeaconBlock = - SignedBlindedBeaconBlock( - message: 
BlindedBeaconBlock( - slot: blck.message.slot, - proposer_index: blck.message.proposer_index, - parent_root: blck.message.parent_root, - state_root: blck.message.state_root, - body: BlindedBeaconBlockBody( - randao_reveal: blck.message.body.randao_reveal, - eth1_data: blck.message.body.eth1_data, - graffiti: blck.message.body.graffiti, - proposer_slashings: blck.message.body.proposer_slashings, - attester_slashings: blck.message.body.attester_slashings, - attestations: blck.message.body.attestations, - deposits: blck.message.body.deposits, - voluntary_exits: blck.message.body.voluntary_exits, - sync_aggregate: blck.message.body.sync_aggregate, - execution_payload_header: ExecutionPayloadHeader( - parent_hash: blck.message.body.execution_payload.parent_hash, - fee_recipient: blck.message.body.execution_payload.fee_recipient, - state_root: blck.message.body.execution_payload.state_root, - receipts_root: blck.message.body.execution_payload.receipts_root, - logs_bloom: blck.message.body.execution_payload.logs_bloom, - prev_randao: blck.message.body.execution_payload.prev_randao, - block_number: blck.message.body.execution_payload.block_number, - gas_limit: blck.message.body.execution_payload.gas_limit, - gas_used: blck.message.body.execution_payload.gas_used, - timestamp: blck.message.body.execution_payload.timestamp, - extra_data: blck.message.body.execution_payload.extra_data, - base_fee_per_gas: - blck.message.body.execution_payload.base_fee_per_gas, - block_hash: blck.message.body.execution_payload.block_hash, - transactions_root: - hash_tree_root(blck.message.body.execution_payload.transactions), - withdrawals_root: - hash_tree_root(blck.message.body.execution_payload.withdrawals)), - bls_to_execution_changes: blck.message.body.bls_to_execution_changes)), - signature: blck.signature) diff --git a/beacon_chain/spec/mev/deneb_mev.nim b/beacon_chain/spec/mev/deneb_mev.nim index 79cf6a1f7a..3feb654ed4 100644 --- a/beacon_chain/spec/mev/deneb_mev.nim +++ b/beacon_chain/spec/mev/deneb_mev.nim @@ -1,47 +1,21 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} -import ".."/datatypes/[altair, deneb] +import ".."/datatypes/[altair, bellatrix, deneb] from stew/byteutils import to0xHex from ".."/datatypes/phase0 import Attestation, AttesterSlashing -from ../datatypes/bellatrix import ExecutionAddress from ".."/datatypes/capella import SignedBLSToExecutionChange from ".."/eth2_merkleization import hash_tree_root type - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#validatorregistrationv1 - ValidatorRegistrationV1* = object - fee_recipient*: ExecutionAddress - gas_limit*: uint64 - timestamp*: uint64 - pubkey*: ValidatorPubKey - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#signedvalidatorregistrationv1 - SignedValidatorRegistrationV1* = object - message*: ValidatorRegistrationV1 - signature*: ValidatorSig - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/deneb/builder.md#builderbid - BuilderBid* = object - header*: deneb.ExecutionPayloadHeader # [Modified in Deneb] - blob_kzg_commitments*: KzgCommitments # [New in Deneb] - value*: UInt256 - pubkey*: ValidatorPubKey - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#signedbuilderbid - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/deneb/builder.md#executionpayloadheader - SignedBuilderBid* = object - message*: BuilderBid - signature*: ValidatorSig - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/deneb/builder.md#blindedbeaconblockbody + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/deneb/builder.md#blindedbeaconblockbody BlindedBeaconBlockBody* = object randao_reveal*: ValidatorSig eth1_data*: Eth1Data @@ -58,8 +32,8 @@ type Limit MAX_BLS_TO_EXECUTION_CHANGES] blob_kzg_commitments*: KzgCommitments # [New in Deneb] - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#blindedbeaconblock - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/deneb/builder.md#blindedbeaconblockbody + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/bellatrix/builder.md#blindedbeaconblock + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/deneb/builder.md#blindedbeaconblockbody BlindedBeaconBlock* = object slot*: Slot proposer_index*: uint64 @@ -67,40 +41,12 @@ type state_root*: Eth2Digest body*: BlindedBeaconBlockBody # [Modified in Deneb] - MaybeBlindedBeaconBlock* = object - case isBlinded*: bool - of false: - data*: deneb.BlockContents - of true: - blindedData*: BlindedBeaconBlock - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#signedblindedbeaconblock - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/capella/builder.md#blindedbeaconblockbody + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/bellatrix/builder.md#signedblindedbeaconblock + # https://github.com/ethereum/builder-specs/blob/v0.6.0/specs/deneb/builder.md#blindedbeaconblockbody SignedBlindedBeaconBlock* = object message*: BlindedBeaconBlock signature*: ValidatorSig - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/deneb/builder.md#executionpayloadandblobsbundle - ExecutionPayloadAndBlobsBundle* = object - execution_payload*: deneb.ExecutionPayload - blobs_bundle*: BlobsBundle - - # Not spec, but suggested by spec - BlindedExecutionPayloadAndBlobsBundle* = object - execution_payload_header*: deneb.ExecutionPayloadHeader - blob_kzg_commitments*: KzgCommitments # [New in Deneb] - -const - # 
https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#domain-types - DOMAIN_APPLICATION_BUILDER* = DomainType([byte 0x00, 0x00, 0x00, 0x01]) - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#constants - EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION* = 1 - - # Spec is 1 second, but mev-boost indirection can induce delay when the relay - # itself has already consumed the entire second. - BUILDER_PROPOSAL_DELAY_TOLERANCE* = 1500.milliseconds - func shortLog*(v: BlindedBeaconBlock): auto = ( slot: shortLog(v.slot), @@ -129,46 +75,3 @@ func shortLog*(v: SignedBlindedBeaconBlock): auto = blck: shortLog(v.message), signature: shortLog(v.signature) ) - -func toSignedBlindedBeaconBlock*(blck: deneb.SignedBeaconBlock): - SignedBlindedBeaconBlock = - SignedBlindedBeaconBlock( - message: BlindedBeaconBlock( - slot: blck.message.slot, - proposer_index: blck.message.proposer_index, - parent_root: blck.message.parent_root, - state_root: blck.message.state_root, - body: BlindedBeaconBlockBody( - randao_reveal: blck.message.body.randao_reveal, - eth1_data: blck.message.body.eth1_data, - graffiti: blck.message.body.graffiti, - proposer_slashings: blck.message.body.proposer_slashings, - attester_slashings: blck.message.body.attester_slashings, - attestations: blck.message.body.attestations, - deposits: blck.message.body.deposits, - voluntary_exits: blck.message.body.voluntary_exits, - sync_aggregate: blck.message.body.sync_aggregate, - execution_payload_header: ExecutionPayloadHeader( - parent_hash: blck.message.body.execution_payload.parent_hash, - fee_recipient: blck.message.body.execution_payload.fee_recipient, - state_root: blck.message.body.execution_payload.state_root, - receipts_root: blck.message.body.execution_payload.receipts_root, - logs_bloom: blck.message.body.execution_payload.logs_bloom, - prev_randao: blck.message.body.execution_payload.prev_randao, - block_number: blck.message.body.execution_payload.block_number, - gas_limit: blck.message.body.execution_payload.gas_limit, - gas_used: blck.message.body.execution_payload.gas_used, - timestamp: blck.message.body.execution_payload.timestamp, - extra_data: blck.message.body.execution_payload.extra_data, - base_fee_per_gas: - blck.message.body.execution_payload.base_fee_per_gas, - block_hash: blck.message.body.execution_payload.block_hash, - transactions_root: - hash_tree_root(blck.message.body.execution_payload.transactions), - withdrawals_root: - hash_tree_root(blck.message.body.execution_payload.withdrawals), - blob_gas_used: blck.message.body.execution_payload.blob_gas_used, - excess_blob_gas: blck.message.body.execution_payload.excess_blob_gas), - bls_to_execution_changes: blck.message.body.bls_to_execution_changes, - blob_kzg_commitments: blck.message.body.blob_kzg_commitments)), - signature: blck.signature) diff --git a/beacon_chain/spec/mev/electra_mev.nim b/beacon_chain/spec/mev/electra_mev.nim index ca7650075e..c95d78b9d4 100644 --- a/beacon_chain/spec/mev/electra_mev.nim +++ b/beacon_chain/spec/mev/electra_mev.nim @@ -5,30 +5,43 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} -import ".."/datatypes/[altair, electra] +import ".."/datatypes/[altair, bellatrix, electra] from stew/byteutils import to0xHex from ".."/datatypes/phase0 import AttesterSlashing -from ../datatypes/bellatrix import ExecutionAddress from ".."/datatypes/capella import SignedBLSToExecutionChange -from ".."/datatypes/deneb import BlobsBundle, KzgCommitments +from ".."/datatypes/deneb import BlobsBundle, ExecutionPayloadHeader, KzgCommitments from ".."/eth2_merkleization import hash_tree_root type + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/builder.md#validatorregistrationv1 + ValidatorRegistrationV1* = object + fee_recipient*: ExecutionAddress + gas_limit*: uint64 + timestamp*: uint64 + pubkey*: ValidatorPubKey + + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/builder.md#signedvalidatorregistrationv1 + SignedValidatorRegistrationV1* = object + message*: ValidatorRegistrationV1 + signature*: ValidatorSig + + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/electra/builder.md#builderbid BuilderBid* = object - header*: electra.ExecutionPayloadHeader + header*: deneb.ExecutionPayloadHeader blob_kzg_commitments*: KzgCommitments execution_requests*: ExecutionRequests # [New in Electra] value*: UInt256 pubkey*: ValidatorPubKey - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#signedbuilderbid + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/builder.md#signedbuilderbid SignedBuilderBid* = object message*: BuilderBid signature*: ValidatorSig + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/electra/builder.md#blindedbeaconblockbody BlindedBeaconBlockBody* = object randao_reveal*: ValidatorSig eth1_data*: Eth1Data @@ -40,14 +53,32 @@ type deposits*: List[Deposit, Limit MAX_DEPOSITS] voluntary_exits*: List[SignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS] sync_aggregate*: SyncAggregate - execution_payload_header*: electra.ExecutionPayloadHeader + execution_payload_header*: deneb.ExecutionPayloadHeader + bls_to_execution_changes*: + List[SignedBLSToExecutionChange, + Limit MAX_BLS_TO_EXECUTION_CHANGES] + blob_kzg_commitments*: KzgCommitments + execution_requests*: ExecutionRequests # [New in Electra] + + SigVerifiedBlindedBeaconBlockBody* = object + randao_reveal*: TrustedSig + eth1_data*: Eth1Data + graffiti*: GraffitiBytes + proposer_slashings*: List[TrustedProposerSlashing, Limit MAX_PROPOSER_SLASHINGS] + attester_slashings*: + List[electra.TrustedAttesterSlashing, Limit MAX_ATTESTER_SLASHINGS_ELECTRA] + attestations*: List[electra.TrustedAttestation, Limit MAX_ATTESTATIONS_ELECTRA] + deposits*: List[Deposit, Limit MAX_DEPOSITS] + voluntary_exits*: List[TrustedSignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS] + sync_aggregate*: TrustedSyncAggregate + execution_payload_header*: deneb.ExecutionPayloadHeader bls_to_execution_changes*: List[SignedBLSToExecutionChange, Limit MAX_BLS_TO_EXECUTION_CHANGES] - blob_kzg_commitments*: KzgCommitments # [New in Deneb] + blob_kzg_commitments*: KzgCommitments execution_requests*: ExecutionRequests # [New in Electra] - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#blindedbeaconblock + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/builder.md#blindedbeaconblock BlindedBeaconBlock* = object slot*: Slot proposer_index*: uint64 @@ -55,6 +86,14 @@ type state_root*: Eth2Digest body*: BlindedBeaconBlockBody # [Modified in Deneb] 
+ # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/builder.md#blindedbeaconblock + SigVerifiedBlindedBeaconBlock* = object + slot*: Slot + proposer_index*: uint64 + parent_root*: Eth2Digest + state_root*: Eth2Digest + body*: SigVerifiedBlindedBeaconBlockBody # [Modified in Deneb] + MaybeBlindedBeaconBlock* = object case isBlinded*: bool of false: @@ -62,20 +101,26 @@ type of true: blindedData*: BlindedBeaconBlock - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#signedblindedbeaconblock - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/capella/builder.md#blindedbeaconblockbody + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/builder.md#signedblindedbeaconblock SignedBlindedBeaconBlock* = object message*: BlindedBeaconBlock signature*: ValidatorSig + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/deneb/builder.md#executionpayloadandblobsbundle ExecutionPayloadAndBlobsBundle* = object - execution_payload*: electra.ExecutionPayload + execution_payload*: deneb.ExecutionPayload blobs_bundle*: BlobsBundle - # Not spec, but suggested by spec - BlindedExecutionPayloadAndBlobsBundle* = object - execution_payload_header*: electra.ExecutionPayloadHeader - blob_kzg_commitments*: KzgCommitments # [New in Deneb] +const + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/builder.md#domain-types + DOMAIN_APPLICATION_BUILDER* = DomainType([byte 0x00, 0x00, 0x00, 0x01]) + + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/validator.md#constants + EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION* = 1 + + # Spec is 1 second, but mev-boost indirection can induce delay when the relay + # itself has already consumed the entire second. 
+ BUILDER_PROPOSAL_DELAY_TOLERANCE* = 1500.milliseconds func shortLog*(v: BlindedBeaconBlock): auto = ( @@ -106,46 +151,6 @@ func shortLog*(v: SignedBlindedBeaconBlock): auto = signature: shortLog(v.signature) ) -func toSignedBlindedBeaconBlock*(blck: electra.SignedBeaconBlock): - SignedBlindedBeaconBlock = - SignedBlindedBeaconBlock( - message: BlindedBeaconBlock( - slot: blck.message.slot, - proposer_index: blck.message.proposer_index, - parent_root: blck.message.parent_root, - state_root: blck.message.state_root, - body: BlindedBeaconBlockBody( - randao_reveal: blck.message.body.randao_reveal, - eth1_data: blck.message.body.eth1_data, - graffiti: blck.message.body.graffiti, - proposer_slashings: blck.message.body.proposer_slashings, - attester_slashings: blck.message.body.attester_slashings, - attestations: blck.message.body.attestations, - deposits: blck.message.body.deposits, - voluntary_exits: blck.message.body.voluntary_exits, - sync_aggregate: blck.message.body.sync_aggregate, - execution_payload_header: ExecutionPayloadHeader( - parent_hash: blck.message.body.execution_payload.parent_hash, - fee_recipient: blck.message.body.execution_payload.fee_recipient, - state_root: blck.message.body.execution_payload.state_root, - receipts_root: blck.message.body.execution_payload.receipts_root, - logs_bloom: blck.message.body.execution_payload.logs_bloom, - prev_randao: blck.message.body.execution_payload.prev_randao, - block_number: blck.message.body.execution_payload.block_number, - gas_limit: blck.message.body.execution_payload.gas_limit, - gas_used: blck.message.body.execution_payload.gas_used, - timestamp: blck.message.body.execution_payload.timestamp, - extra_data: blck.message.body.execution_payload.extra_data, - base_fee_per_gas: - blck.message.body.execution_payload.base_fee_per_gas, - block_hash: blck.message.body.execution_payload.block_hash, - transactions_root: - hash_tree_root(blck.message.body.execution_payload.transactions), - withdrawals_root: - hash_tree_root(blck.message.body.execution_payload.withdrawals), - blob_gas_used: blck.message.body.execution_payload.blob_gas_used, - excess_blob_gas: blck.message.body.execution_payload.excess_blob_gas), - bls_to_execution_changes: blck.message.body.bls_to_execution_changes, - blob_kzg_commitments: blck.message.body.blob_kzg_commitments, - execution_requests: blck.message.body.execution_requests)), - signature: blck.signature) +template asSigVerified*( + x: BlindedBeaconBlock): SigVerifiedBlindedBeaconBlock = + isomorphicCast[SigVerifiedBlindedBeaconBlock](x) diff --git a/beacon_chain/spec/mev/fulu_mev.nim b/beacon_chain/spec/mev/fulu_mev.nim index eee40a391c..a3dfdfdb3b 100644 --- a/beacon_chain/spec/mev/fulu_mev.nim +++ b/beacon_chain/spec/mev/fulu_mev.nim @@ -5,28 +5,27 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
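Illustrative aside (not part of the diff): the asSigVerified template added above for Electra, and mirrored for Fulu below, only works because each SigVerified*/Trusted* variant shares the exact binary layout of its untrusted counterpart, so the conversion can be a zero-copy reinterpretation instead of a field-by-field copy. A minimal, self-contained Nim sketch of that pattern, using hypothetical stand-in types and a hypothetical helper rather than the actual Nimbus isomorphicCast:

type
  Demo = object          # hypothetical stand-in for BlindedBeaconBlock
    slot: uint64
    root: array[32, byte]
  TrustedDemo = object   # hypothetical stand-in for SigVerifiedBlindedBeaconBlock
    slot: uint64
    root: array[32, byte]

template isoCast(T: typedesc, x: typed): untyped =
  # Sound only when both types are layout-identical; the size check
  # catches accidental divergence at compile time.
  static: doAssert sizeof(T) == sizeof(typeof(x))
  cast[ptr T](unsafeAddr x)[]

let blck = Demo(slot: 42)
let trusted = isoCast(TrustedDemo, blck)
doAssert trusted.slot == 42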
-{.push raises: [].} +{.push raises: [], gcsafe.} -import ".."/datatypes/[altair, fulu] +import ".."/datatypes/[altair, bellatrix, fulu] from stew/byteutils import to0xHex from ".."/datatypes/phase0 import AttesterSlashing -from ../datatypes/bellatrix import ExecutionAddress from ".."/datatypes/capella import SignedBLSToExecutionChange -from ".."/datatypes/deneb import BlobsBundle, KzgCommitments -from ".."/datatypes/electra import Attestation, AttesterSlashing, - ExecutionRequests +from ".."/datatypes/deneb import BlobsBundle, ExecutionPayloadHeader, KzgCommitments +from ".."/datatypes/electra import + Attestation, AttesterSlashing, ExecutionRequests from ".."/eth2_merkleization import hash_tree_root type BuilderBid* = object - header*: ExecutionPayloadHeader + header*: deneb.ExecutionPayloadHeader blob_kzg_commitments*: KzgCommitments execution_requests*: ExecutionRequests # [New in Electra] value*: UInt256 pubkey*: ValidatorPubKey - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#signedbuilderbid + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/builder.md#signedbuilderbid SignedBuilderBid* = object message*: BuilderBid signature*: ValidatorSig @@ -42,20 +41,45 @@ type deposits*: List[Deposit, Limit MAX_DEPOSITS] voluntary_exits*: List[SignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS] sync_aggregate*: SyncAggregate - execution_payload_header*: ExecutionPayloadHeader + execution_payload_header*: deneb.ExecutionPayloadHeader bls_to_execution_changes*: List[SignedBLSToExecutionChange, Limit MAX_BLS_TO_EXECUTION_CHANGES] blob_kzg_commitments*: KzgCommitments # [New in Deneb] execution_requests*: ExecutionRequests # [New in Electra] - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#blindedbeaconblock + SigVerifiedBlindedBeaconBlockBody* = object + randao_reveal*: TrustedSig + eth1_data*: Eth1Data + graffiti*: GraffitiBytes + proposer_slashings*: List[TrustedProposerSlashing, Limit MAX_PROPOSER_SLASHINGS] + attester_slashings*: + List[electra.TrustedAttesterSlashing, Limit MAX_ATTESTER_SLASHINGS_ELECTRA] + attestations*: List[electra.TrustedAttestation, Limit MAX_ATTESTATIONS_ELECTRA] + deposits*: List[Deposit, Limit MAX_DEPOSITS] + voluntary_exits*: List[TrustedSignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS] + sync_aggregate*: TrustedSyncAggregate + execution_payload_header*: deneb.ExecutionPayloadHeader + bls_to_execution_changes*: + List[SignedBLSToExecutionChange, + Limit MAX_BLS_TO_EXECUTION_CHANGES] + blob_kzg_commitments*: KzgCommitments # [New in Deneb] + execution_requests*: ExecutionRequests # [New in Electra] + + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/builder.md#blindedbeaconblock BlindedBeaconBlock* = object slot*: Slot proposer_index*: uint64 parent_root*: Eth2Digest state_root*: Eth2Digest - body*: BlindedBeaconBlockBody # [Modified in Deneb] + body*: BlindedBeaconBlockBody + + SigVerifiedBlindedBeaconBlock* = object + slot*: Slot + proposer_index*: uint64 + parent_root*: Eth2Digest + state_root*: Eth2Digest + body*: SigVerifiedBlindedBeaconBlockBody MaybeBlindedBeaconBlock* = object case isBlinded*: bool @@ -64,21 +88,12 @@ type of true: blindedData*: BlindedBeaconBlock - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#signedblindedbeaconblock - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/capella/builder.md#blindedbeaconblockbody + # 
https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/builder.md#signedblindedbeaconblock + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/capella/builder.md#blindedbeaconblockbody SignedBlindedBeaconBlock* = object message*: BlindedBeaconBlock signature*: ValidatorSig - ExecutionPayloadAndBlobsBundle* = object - execution_payload*: ExecutionPayload - blobs_bundle*: BlobsBundle - - # Not spec, but suggested by spec - BlindedExecutionPayloadAndBlobsBundle* = object - execution_payload_header*: ExecutionPayloadHeader - blob_kzg_commitments*: KzgCommitments # [New in Deneb] - func shortLog*(v: BlindedBeaconBlock): auto = ( slot: shortLog(v.slot), @@ -108,46 +123,6 @@ func shortLog*(v: SignedBlindedBeaconBlock): auto = signature: shortLog(v.signature) ) -func toSignedBlindedBeaconBlock*(blck: fulu.SignedBeaconBlock): - SignedBlindedBeaconBlock = - SignedBlindedBeaconBlock( - message: BlindedBeaconBlock( - slot: blck.message.slot, - proposer_index: blck.message.proposer_index, - parent_root: blck.message.parent_root, - state_root: blck.message.state_root, - body: BlindedBeaconBlockBody( - randao_reveal: blck.message.body.randao_reveal, - eth1_data: blck.message.body.eth1_data, - graffiti: blck.message.body.graffiti, - proposer_slashings: blck.message.body.proposer_slashings, - attester_slashings: blck.message.body.attester_slashings, - attestations: blck.message.body.attestations, - deposits: blck.message.body.deposits, - voluntary_exits: blck.message.body.voluntary_exits, - sync_aggregate: blck.message.body.sync_aggregate, - execution_payload_header: ExecutionPayloadHeader( - parent_hash: blck.message.body.execution_payload.parent_hash, - fee_recipient: blck.message.body.execution_payload.fee_recipient, - state_root: blck.message.body.execution_payload.state_root, - receipts_root: blck.message.body.execution_payload.receipts_root, - logs_bloom: blck.message.body.execution_payload.logs_bloom, - prev_randao: blck.message.body.execution_payload.prev_randao, - block_number: blck.message.body.execution_payload.block_number, - gas_limit: blck.message.body.execution_payload.gas_limit, - gas_used: blck.message.body.execution_payload.gas_used, - timestamp: blck.message.body.execution_payload.timestamp, - extra_data: blck.message.body.execution_payload.extra_data, - base_fee_per_gas: - blck.message.body.execution_payload.base_fee_per_gas, - block_hash: blck.message.body.execution_payload.block_hash, - transactions_root: - hash_tree_root(blck.message.body.execution_payload.transactions), - withdrawals_root: - hash_tree_root(blck.message.body.execution_payload.withdrawals), - blob_gas_used: blck.message.body.execution_payload.blob_gas_used, - excess_blob_gas: blck.message.body.execution_payload.excess_blob_gas), - bls_to_execution_changes: blck.message.body.bls_to_execution_changes, - blob_kzg_commitments: blck.message.body.blob_kzg_commitments, - execution_requests: blck.message.body.execution_requests)), - signature: blck.signature) \ No newline at end of file +template asSigVerified*( + x: BlindedBeaconBlock): SigVerifiedBlindedBeaconBlock = + isomorphicCast[SigVerifiedBlindedBeaconBlock](x) diff --git a/beacon_chain/spec/mev/rest_electra_mev_calls.nim b/beacon_chain/spec/mev/rest_electra_mev_calls.nim deleted file mode 100644 index 6984c5571c..0000000000 --- a/beacon_chain/spec/mev/rest_electra_mev_calls.nim +++ /dev/null @@ -1,56 +0,0 @@ -# beacon_chain -# Copyright (c) 2024-2025 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT 
license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. - -{.push raises: [].} - -import - chronos, presto/client, - ".."/eth2_apis/[rest_types, eth2_rest_serialization] - -export chronos, client, rest_types, eth2_rest_serialization - -proc getHeaderElectraPlain*( - slot: Slot, - parent_hash: Eth2Digest, - pubkey: ValidatorPubKey -): RestPlainResponse {. - rest, endpoint: "/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}", - meth: MethodGet, connection: {Dedicated, Close}.} - ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/header.yaml - -proc getHeaderElectra*( - client: RestClientRef, - slot: Slot, - parent_hash: Eth2Digest, - pubkey: ValidatorPubKey -): Future[RestPlainResponse] {. - async: (raises: [CancelledError, RestEncodingError, RestDnsResolveError, - RestCommunicationError], raw: true).} = - client.getHeaderElectraPlain( - slot, parent_hash, pubkey, - restAcceptType = "application/octet-stream,application/json;q=0.5", - ) - -proc submitBlindedBlockPlain*( - body: electra_mev.SignedBlindedBeaconBlock -): RestPlainResponse {. - rest, endpoint: "/eth/v1/builder/blinded_blocks", - meth: MethodPost, connection: {Dedicated, Close}.} - ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/blinded_blocks.yaml - -proc submitBlindedBlock*( - client: RestClientRef, - body: electra_mev.SignedBlindedBeaconBlock -): Future[RestPlainResponse] {. - async: (raises: [CancelledError, RestEncodingError, RestDnsResolveError, - RestCommunicationError], raw: true).} = - ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/blinded_blocks.yaml - client.submitBlindedBlockPlain( - body, - restAcceptType = "application/octet-stream,application/json;q=0.5", - extraHeaders = @[("eth-consensus-version", toString(ConsensusFork.Electra))] - ) diff --git a/beacon_chain/spec/mev/rest_deneb_mev_calls.nim b/beacon_chain/spec/mev/rest_mev_calls.nim similarity index 65% rename from beacon_chain/spec/mev/rest_deneb_mev_calls.nim rename to beacon_chain/spec/mev/rest_mev_calls.nim index 8074bef942..fbb88980e0 100644 --- a/beacon_chain/spec/mev/rest_deneb_mev_calls.nim +++ b/beacon_chain/spec/mev/rest_mev_calls.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2023-2025 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -25,7 +25,7 @@ proc registerValidator*(body: seq[SignedValidatorRegistrationV1] ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/validators.yaml ## https://github.com/ethereum/beacon-APIs/blob/v2.3.0/apis/validator/register_validator.yaml -proc getHeaderDenebPlain*( +proc getHeaderPlain*( slot: Slot, parent_hash: Eth2Digest, pubkey: ValidatorPubKey @@ -34,7 +34,7 @@ proc getHeaderDenebPlain*( meth: MethodGet, connection: {Dedicated, Close}.} ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/header.yaml -proc getHeaderDeneb*( +proc getHeader*( client: RestClientRef, slot: Slot, parent_hash: Eth2Digest, @@ -42,27 +42,46 @@ proc getHeaderDeneb*( ): Future[RestPlainResponse] {. async: (raises: [CancelledError, RestEncodingError, RestDnsResolveError, RestCommunicationError], raw: true).} = - client.getHeaderDenebPlain( + client.getHeaderPlain( slot, parent_hash, pubkey, restAcceptType = "application/octet-stream,application/json;q=0.5", ) proc submitBlindedBlockPlain*( - body: deneb_mev.SignedBlindedBeaconBlock + body: electra_mev.SignedBlindedBeaconBlock ): RestPlainResponse {. rest, endpoint: "/eth/v1/builder/blinded_blocks", meth: MethodPost, connection: {Dedicated, Close}.} - ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/blinded_blocks.yaml + ## https://github.com/ethereum/builder-specs/blob/v0.5.0/apis/builder/blinded_blocks.yaml + +proc submitBlindedBlockV2Plain*( + body: fulu_mev.SignedBlindedBeaconBlock +): RestPlainResponse {. + rest, endpoint: "/eth/v2/builder/blinded_blocks", + meth: MethodPost, connection: {Dedicated, Close}.} + ## https://github.com/ethereum/builder-specs/blob/ae1d97d080a12bfb7ca248b58fb1fc6b10aed02e/apis/builder/blinded_blocks_v2.yaml proc submitBlindedBlock*( - client: RestClientRef, - body: deneb_mev.SignedBlindedBeaconBlock + client: RestClientRef, + body: electra_mev.SignedBlindedBeaconBlock ): Future[RestPlainResponse] {. async: (raises: [CancelledError, RestEncodingError, RestDnsResolveError, RestCommunicationError], raw: true).} = - ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/blinded_blocks.yaml client.submitBlindedBlockPlain( body, restAcceptType = "application/octet-stream,application/json;q=0.5", - extraHeaders = @[("eth-consensus-version", toString(ConsensusFork.Deneb))] + extraHeaders = @[("eth-consensus-version", toString(typeof(body).kind))] + ) + +proc submitBlindedBlock*( + client: RestClientRef, + body: fulu_mev.SignedBlindedBeaconBlock +): Future[RestPlainResponse] {. + async: (raises: [CancelledError, RestEncodingError, RestDnsResolveError, + RestCommunicationError], raw: true).} = + # Everyone should have upgraded by the time of fulu + client.submitBlindedBlockV2Plain( + body, + restAcceptType = "application/octet-stream,application/json;q=0.5", + extraHeaders = @[("eth-consensus-version", toString(typeof(body).kind))] ) diff --git a/beacon_chain/spec/network.nim b/beacon_chain/spec/network.nim index 3a89bfda5d..7dda9c1b2c 100644 --- a/beacon_chain/spec/network.nim +++ b/beacon_chain/spec/network.nim @@ -5,23 +5,29 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} import "."/[helpers, forks], "."/datatypes/base +from std/algorithm import sort, upperBound + export base const # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#topics-and-messages - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/p2p-interface.md#topics-and-messages + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/capella/p2p-interface.md#topics-and-messages + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/p2p-interface.md#topics-and-messages topicBeaconBlocksSuffix = "beacon_block/ssz_snappy" topicVoluntaryExitsSuffix = "voluntary_exit/ssz_snappy" topicProposerSlashingsSuffix = "proposer_slashing/ssz_snappy" topicAttesterSlashingsSuffix = "attester_slashing/ssz_snappy" topicAggregateAndProofsSuffix = "beacon_aggregate_and_proof/ssz_snappy" topicBlsToExecutionChangeSuffix = "bls_to_execution_change/ssz_snappy" + topicExecutionPayloadBidSuffix = "execution_payload_bid/ssz_snappy" + topicExecutionPayloadSuffix = "execution_payload/ssz_snappy" + topicPayloadAttestationMessageSuffix = "payload_attestation_message/ssz_snappy" const # The spec now includes this as a bare uint64 as `RESP_TIMEOUT` @@ -44,6 +50,7 @@ const enrAttestationSubnetsField* = "attnets" enrSyncSubnetsField* = "syncnets" enrCustodySubnetCountField* = "cgc" + enrNextForkDigestField* = "nfd" enrForkIdField* = "eth2" template eth2Prefix(forkDigest: ForkDigest): string = @@ -68,6 +75,18 @@ func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string = func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string = eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/p2p-interface.md#execution_payload_bid +func getExecutionPayloadBidTopic*(forkDigest: ForkDigest): string = + eth2Prefix(forkDigest) & topicExecutionPayloadBidSuffix + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/p2p-interface.md#execution_payload +func getExecutionPayloadTopic*(forkDigest: ForkDigest): string = + eth2Prefix(forkDigest) & topicExecutionPayloadSuffix + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/p2p-interface.md#payload_attestation_message +func getPayloadAttestationMessageTopic*(forkDigest: ForkDigest): string = + eth2Prefix(forkDigest) & topicPayloadAttestationMessageSuffix + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#broadcast-attestation func compute_subnet_for_attestation*( committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex): @@ -90,7 +109,7 @@ func getAttestationTopic*(forkDigest: ForkDigest, ## For subscribing and unsubscribing to/from a subnet. eth2Prefix(forkDigest) & "beacon_attestation_" & $(subnetId) & "/ssz_snappy" -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/p2p-interface.md#topics-and-messages +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/p2p-interface.md#topics-and-messages func getSyncCommitteeTopic*(forkDigest: ForkDigest, subcommitteeIdx: SyncSubcommitteeIndex): string = ## For subscribing and unsubscribing to/from a subnet. 
@@ -117,11 +136,14 @@ func compute_subnet_for_blob_sidecar*( cfg.BLOB_SIDECAR_SUBNET_COUNT BlobId(blob_index mod subnetCount) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar func compute_subnet_for_data_column_sidecar*(column_index: ColumnIndex): uint64 = - uint64(column_index mod DATA_COLUMN_SIDECAR_SUBNET_COUNT) + # Parts of Nimbus use the subnet number and column ID semi-interchangeably + static: doAssert DATA_COLUMN_SIDECAR_SUBNET_COUNT == NUMBER_OF_COLUMNS + + column_index mod DATA_COLUMN_SIDECAR_SUBNET_COUNT -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_finality_update +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/altair/light-client/p2p-interface.md#light_client_finality_update func getLightClientFinalityUpdateTopic*(forkDigest: ForkDigest): string = ## For broadcasting or obtaining the latest `LightClientFinalityUpdate`. eth2Prefix(forkDigest) & "light_client_finality_update/ssz_snappy" @@ -131,6 +153,14 @@ func getLightClientOptimisticUpdateTopic*(forkDigest: ForkDigest): string = ## For broadcasting or obtaining the latest `LightClientOptimisticUpdate`. eth2Prefix(forkDigest) & "light_client_optimistic_update/ssz_snappy" +func getForkDigest( + cfg: RuntimeConfig, genesis_validators_root: Eth2Digest, + current_fork_version: Version, epoch: Epoch): ForkDigest = + if epoch >= cfg.FULU_FORK_EPOCH: + compute_fork_digest_fulu(cfg, genesis_validators_root, epoch) + else: + compute_fork_digest(current_fork_version, genesis_validators_root) + func getENRForkID*(cfg: RuntimeConfig, epoch: Epoch, genesis_validators_root: Eth2Digest): ENRForkID = @@ -140,8 +170,8 @@ func getENRForkID*(cfg: RuntimeConfig, current_fork_version else: cfg.forkVersionAtEpoch(cfg.nextForkEpochAtEpoch(epoch)) - fork_digest = compute_fork_digest(current_fork_version, - genesis_validators_root) + fork_digest = cfg.getForkDigest( + genesis_validators_root, current_fork_version, epoch) ENRForkID( fork_digest: fork_digest, next_fork_version: next_fork_version, @@ -156,61 +186,64 @@ func getDiscoveryForkID*(cfg: RuntimeConfig, else: let current_fork_version = cfg.forkVersionAtEpoch(epoch) - fork_digest = compute_fork_digest(current_fork_version, - genesis_validators_root) + fork_digest = cfg.getForkDigest( + genesis_validators_root, current_fork_version, epoch) ENRForkID( fork_digest: fork_digest, next_fork_version: current_fork_version, - next_fork_epoch: FAR_FUTURE_EPOCH) + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/phase0/p2p-interface.md#eth2-field + # "`next_fork_epoch` is the epoch at which the next fork is planned and + # the `current_fork_version` will be updated. If no future fork is + # planned, set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact" + # + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/fulu/p2p-interface.md#eth2-field + # "`next_fork_epoch` is the epoch at which the next fork (whether a + # regular fork *or a BPO fork*) is planned. If no future fork is planned, + # set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact." 
+ next_fork_epoch: cfg.nextForkEpochAtEpoch(epoch)) # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#transitioning-the-gossip -type GossipState* = set[ConsensusFork] -func getTargetGossipState*( - epoch, ALTAIR_FORK_EPOCH, BELLATRIX_FORK_EPOCH, CAPELLA_FORK_EPOCH, - DENEB_FORK_EPOCH, ELECTRA_FORK_EPOCH, FULU_FORK_EPOCH: Epoch, - isBehind: bool): +type GossipState* = HashSet[Epoch] +func getTargetGossipState*(epoch: Epoch, cfg: RuntimeConfig, isBehind: bool): GossipState = if isBehind: - return {} - - doAssert BELLATRIX_FORK_EPOCH >= ALTAIR_FORK_EPOCH - doAssert CAPELLA_FORK_EPOCH >= BELLATRIX_FORK_EPOCH - doAssert DENEB_FORK_EPOCH >= CAPELLA_FORK_EPOCH - doAssert ELECTRA_FORK_EPOCH >= DENEB_FORK_EPOCH - doAssert FULU_FORK_EPOCH >= ELECTRA_FORK_EPOCH - - # https://github.com/ethereum/consensus-specs/issues/2902 - # Don't care whether ALTAIR_FORK_EPOCH == BELLATRIX_FORK_EPOCH or - # BELLATRIX_FORK_EPOCH == CAPELLA_FORK_EPOCH works, because those - # theoretically possible networks are ill-defined regardless, and - # consequently prohibited by checkForkConsistency(). Therefore, a - # transitional epoch always exists, for every fork. - var targetForks: GossipState - - template maybeIncludeFork( - targetFork: ConsensusFork, targetForkEpoch: Epoch, - successiveForkEpoch: Epoch) = - # Subscribe one epoch ahead - if epoch + 1 >= targetForkEpoch and epoch < successiveForkEpoch: - targetForks.incl targetFork - - maybeIncludeFork( - ConsensusFork.Phase0, GENESIS_EPOCH, ALTAIR_FORK_EPOCH) - maybeIncludeFork( - ConsensusFork.Altair, ALTAIR_FORK_EPOCH, BELLATRIX_FORK_EPOCH) - maybeIncludeFork( - ConsensusFork.Bellatrix, BELLATRIX_FORK_EPOCH, CAPELLA_FORK_EPOCH) - maybeIncludeFork( - ConsensusFork.Capella, CAPELLA_FORK_EPOCH, DENEB_FORK_EPOCH) - maybeIncludeFork( - ConsensusFork.Deneb, DENEB_FORK_EPOCH, ELECTRA_FORK_EPOCH) - maybeIncludeFork( - ConsensusFork.Electra, ELECTRA_FORK_EPOCH, FULU_FORK_EPOCH) - maybeIncludeFork( - ConsensusFork.Fulu, FULU_FORK_EPOCH, FAR_FUTURE_EPOCH) - - doAssert len(targetForks) <= 2 - targetForks + return static(HashSet[Epoch]()) + + static: doAssert high(ConsensusFork) == ConsensusFork.Gloas + var epochs = newSeqOfCap[Epoch]( + int(high(ConsensusFork)) + 1 + len(cfg.BLOB_SCHEDULE)) + for bpo in cfg.BLOB_SCHEDULE: + epochs.add bpo.EPOCH + epochs.add GENESIS_EPOCH + epochs.add cfg.ALTAIR_FORK_EPOCH + epochs.add cfg.BELLATRIX_FORK_EPOCH + epochs.add cfg.CAPELLA_FORK_EPOCH + epochs.add cfg.DENEB_FORK_EPOCH + epochs.add cfg.ELECTRA_FORK_EPOCH + epochs.add cfg.FULU_FORK_EPOCH + epochs.add cfg.GLOAS_FORK_EPOCH + + # Fusaka, Glamsterdam, and further forks' BPOs epochs interleave with fork + # epochs; ensure they're treated uniformly. + epochs.sort() + + # Either the next epoch marker: + # (a) doesn't exist, because the chain is past all the known transitions; + # (b) points to a next epoch 1 epoch away; or + # (c) points to a next epoch more than 1 epoch away. + # + # (a) and (c) result in a single-epoch return, (b) in both current and next + # epoch boundaries. + + # Because GENESIS_EPOCH is always present, should never be 0. 
+ let nextEpochIdx = upperBound(epochs, epoch) + doAssert nextEpochIdx > 0 + + let curEpochBoundary = epochs[nextEpochIdx - 1] + if nextEpochIdx == len(epochs) or epochs[nextEpochIdx] > epoch + 1: # (a)/(c) + toHashSet([curEpochBoundary]) + else: # (b) + toHashSet([curEpochBoundary, epochs[nextEpochIdx]]) func nearSyncCommitteePeriod*(epoch: Epoch): Opt[uint64] = # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#sync-committee-subnet-stability @@ -232,7 +265,7 @@ func getSyncSubnets*( if not nodeHasPubkey(pubkey): continue - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#broadcast-sync-committee-message + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/validator.md#broadcast-sync-committee-message # The first quarter of the pubkeys map to subnet 0, the second quarter to # subnet 1, the third quarter to subnet 2 and the final quarter to subnet # 3. diff --git a/beacon_chain/spec/peerdas_helpers.nim b/beacon_chain/spec/peerdas_helpers.nim index b4e60f1f4d..cf43fd386d 100644 --- a/beacon_chain/spec/peerdas_helpers.nim +++ b/beacon_chain/spec/peerdas_helpers.nim @@ -1,39 +1,44 @@ # beacon_chain -# Copyright (c) 2018-2025 Status Research & Development GmbH +# Copyright (c) 2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # Uncategorized helper functions from the spec import - std/[algorithm, sequtils], - results, - eth/p2p/discoveryv5/[node], - kzg4844/[kzg], + chronos, chronicles, results, taskpools, + eth/p2p/discoveryv5/node, + kzg4844/kzg, ssz_serialization/[ proofs, types], + stew/assign2, ./crypto, ./[helpers, digest], - ./datatypes/[fulu] + ./datatypes/fulu + +from std/algorithm import sort +from std/sequtils import toSeq +from stew/staticfor import staticFor type CellBytes = array[fulu.CELLS_PER_EXT_BLOB, Cell] ProofBytes = array[fulu.CELLS_PER_EXT_BLOB, KzgProof] -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#compute_columns_for_custody_group -iterator compute_columns_for_custody_group*(custody_group: CustodyIndex): +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/das-core.md#compute_columns_for_custody_group +iterator compute_columns_for_custody_group*(cfg: RuntimeConfig, + custody_group: CustodyIndex): ColumnIndex = - for i in 0'u64 ..< COLUMNS_PER_GROUP: - yield ColumnIndex(NUMBER_OF_CUSTODY_GROUPS * i + custody_group) + let columns_per_group = NUMBER_OF_COLUMNS div cfg.NUMBER_OF_CUSTODY_GROUPS + for i in 0'u64 ..< columns_per_group: + yield ColumnIndex(cfg.NUMBER_OF_CUSTODY_GROUPS * i + custody_group) -func handle_custody_groups(node_id: NodeId, +func handle_custody_groups(cfg: RuntimeConfig, node_id: NodeId, custody_group_count: CustodyIndex): HashSet[CustodyIndex] = - # Decouples the custody group computation from # `get_custody_groups`, in order to later use this custody # group list across various types of output types @@ -42,7 +47,8 @@ func handle_custody_groups(node_id: NodeId, custody_groups: HashSet[CustodyIndex] current_id = node_id - while custody_groups.lenu64 < custody_group_count: + let 
safe_count = min(custody_group_count, cfg.NUMBER_OF_CUSTODY_GROUPS) + while custody_groups.lenu64 < safe_count: var hashed_bytes: array[8, byte] let @@ -51,7 +57,7 @@ func handle_custody_groups(node_id: NodeId, hashed_bytes[0..7] = hashed_current_id.data.toOpenArray(0,7) let custody_group = bytes_to_uint64(hashed_bytes) mod - NUMBER_OF_CUSTODY_GROUPS + cfg.NUMBER_OF_CUSTODY_GROUPS custody_groups.incl custody_group @@ -60,35 +66,26 @@ func handle_custody_groups(node_id: NodeId, custody_groups # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#get_custody_groups -func get_custody_groups*(node_id: NodeId, - custody_group_count: CustodyIndex): - seq[CustodyIndex] = +func get_custody_groups*(cfg: RuntimeConfig, node_id: NodeId, + custody_group_count: CustodyIndex): + seq[CustodyIndex] = let custody_groups = - node_id.handle_custody_groups(custody_group_count) + cfg.handle_custody_groups(node_id, custody_group_count) var groups = custody_groups.toSeq() groups.sort() groups -func resolve_columns_from_custody_groups*(node_id: NodeId, +func resolve_columns_from_custody_groups*(cfg: RuntimeConfig, node_id: NodeId, custody_group_count: CustodyIndex): - seq[ColumnIndex] = - - let - custody_groups = node_id.get_custody_groups(custody_group_count) - - var flattened = - newSeqOfCap[ColumnIndex](COLUMNS_PER_GROUP * custody_groups.len) + HashSet[ColumnIndex] = + ## Returns a set of unique columns for the custody groups of a node. + let custody_groups = cfg.get_custody_groups(node_id, custody_group_count) + var columns: HashSet[ColumnIndex] for group in custody_groups: - for index in compute_columns_for_custody_group(group): - flattened.add index - flattened - -func resolve_column_sets_from_custody_groups*(node_id: NodeId, - custody_group_count: CustodyIndex): - HashSet[ColumnIndex] = - - node_id.resolve_columns_from_custody_groups(custody_group_count).toHashSet() + for index in compute_columns_for_custody_group(cfg, group): + columns.incl index + columns # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/fulu/das-core.md#compute_matrix proc compute_matrix*(blobs: seq[KzgBlob]): Result[seq[MatrixEntry], cstring] = @@ -111,7 +108,7 @@ proc compute_matrix*(blobs: seq[KzgBlob]): Result[seq[MatrixEntry], cstring] = ok(extended_matrix) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#recover_matrix +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/fulu/das-core.md#recover_matrix proc recover_matrix*(partial_matrix: seq[MatrixEntry], blobCount: int): Result[seq[MatrixEntry], cstring] = @@ -146,155 +143,152 @@ proc recover_matrix*(partial_matrix: seq[MatrixEntry], ok(extended_matrix) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#get_data_column_sidecars -proc get_data_column_sidecars*(signed_beacon_block: electra.TrustedSignedBeaconBlock, - cellsAndProofs: seq[CellsAndProofs]): - seq[DataColumnSidecar] = - ## Given a trusted signed beacon block and the cells/proofs associated - ## with each data column (thereby blob as well) corresponding to the block, - ## this function assembles the sidecars which can be distributed to - ## the peers post data column reconstruction at every slot start. - ## - ## Note: this function only accepts `TrustedSignedBeaconBlock` as - ## during practice we would be computing cells and proofs from - ## data columns only after retrieving them from the database, where - ## they we were already verified and persisted. 
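Illustrative aside (not part of the diff): with the mainnet values this patch moves into RuntimeConfig (NUMBER_OF_COLUMNS = 128, NUMBER_OF_CUSTODY_GROUPS = 128), each custody group maps to exactly one column; a smaller group count, now expressible through cfg, spreads each group across several columns at a stride of NUMBER_OF_CUSTODY_GROUPS. A self-contained sketch of the same arithmetic as compute_columns_for_custody_group, with a hypothetical helper name:

const NUMBER_OF_COLUMNS = 128'u64

func columnsForGroup(numberOfCustodyGroups, group: uint64): seq[uint64] =
  # column = numberOfCustodyGroups * i + group, for each i in the group
  let columnsPerGroup = NUMBER_OF_COLUMNS div numberOfCustodyGroups
  for i in 0'u64 ..< columnsPerGroup:
    result.add numberOfCustodyGroups * i + group

doAssert columnsForGroup(128, 5) == @[5'u64]           # mainnet: one column per group
doAssert columnsForGroup(64, 5) == @[5'u64, 69'u64]    # hypothetical 64-group config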
- template blck(): auto = signed_beacon_block.message +proc recoverCellsAndKzgProofsTask(cellIndices: seq[CellIndex], + cells: seq[Cell]): Result[CellsAndProofs, void] = + recoverCellsAndKzgProofs(cellIndices, cells).mapErr( + proc (x: string) = + discard) + +proc recover_cells_and_proofs_parallel*( + tp: Taskpool, + dataColumns: seq[ref fulu.DataColumnSidecar]): + Result[seq[CellsAndProofs], cstring] = + ## This helper recovers blobs from the data column sidecars in parallel + if dataColumns.len == 0: + return err("DataColumnSidecar: Length should not be 0") + let - beacon_block_header = - BeaconBlockHeader( - slot: blck.slot, - proposer_index: blck.proposer_index, - parent_root: blck.parent_root, - state_root: blck.state_root, - body_root: hash_tree_root(blck.body)) - - signed_beacon_block_header = - SignedBeaconBlockHeader( - message: beacon_block_header, - signature: signed_beacon_block.signature.toValidatorSig) + columnCount = dataColumns.len + blobCount = dataColumns[0].column.len + + for column in dataColumns: + if not (blobCount == column.column.len): + return err("DataColumns do not have the same length") var - sidecars = - newSeqOfCap[DataColumnSidecar](CELLS_PER_EXT_BLOB) + pendingFuts: seq[Flowvar[Result[CellsAndProofs, void]]] + res = newSeq[CellsAndProofs](blobCount) + + let startTime = Moment.now() + const reconstructionTimeout = 2.seconds + + # ---- Spawn phase with time limit ---- + for blobIdx in 0 ..< blobCount: + let now = Moment.now() + if (now - startTime) > reconstructionTimeout: + debug "PeerDAS reconstruction timed out while preparing columns", + spawned = pendingFuts.len, total = blobCount + break # Stop spawning new tasks - for column_index in 0.. reconstructionTimeout: + debug "PeerDAS reconstruction timed out", + completed = i, totalSpawned = pendingFuts.len + return err("Data column reconstruction timed out") - for i in 0..= NUMBER_OF_COLUMNS: + return err("Data column sidecar index exceeds the NUMBER_OF_COLUMNS") + + if sidecar.kzg_commitments.len == 0: + return err("Data column contains zero blob") + + if sidecar.column.len != sidecar.kzg_commitments.len or + sidecar.column.len != sidecar.kzg_proofs.len: + return err("Data column length must be equal to the number of commitments/proofs") + + ok() + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof +func verify_data_column_sidecar_inclusion_proof*(sidecar: fulu.DataColumnSidecar): Result[void, cstring] = - ## Verify if the given KZG Commitments are in included - ## in the beacon block or not + ## Verify if the given KZG commitments are included in the given beacon block. let gindex = KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH_GINDEX.GeneralizedIndex if not is_valid_merkle_branch( @@ -308,47 +302,31 @@ proc verify_data_column_sidecar_inclusion_proof*(sidecar: DataColumnSidecar): ok() -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs -proc verify_data_column_sidecar_kzg_proofs*(sidecar: DataColumnSidecar): +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs +proc verify_data_column_sidecar_kzg_proofs*(sidecar: fulu.DataColumnSidecar | + gloas.DataColumnSidecar): Result[void, cstring] = - ## Verify if the KZG Proofs consisting in the `DataColumnSidecar` - ## is valid or not. 
- - # Check if the data column sidecar index < NUMBER_OF_COLUMNS - if not (sidecar.index < NUMBER_OF_COLUMNS): - return err("Data column sidecar index exceeds the NUMBER_OF_COLUMNS") - - # Check is the sidecar column length = sidecar.kzg_commitments length - # and sidecar.kzg_commitments length = sidecar.kzg_proofs length - if not (sidecar.column.len == sidecar.kzg_commitments.len): - return err("Data column sidecar length is not equal to the kzg_commitments length") - - if not (sidecar.kzg_commitments.len == sidecar.kzg_proofs.len): - return err("Sidecar kzg_commitments length is not equal to the kzg_proofs length") + ## Verify if the KZG proofs are correct. # Iterate through the cell indices - var cellIndices = - newSeq[CellIndex](MAX_BLOB_COMMITMENTS_PER_BLOCK) + var cellIndices = newSeqOfCap[CellIndex](sidecar.column.len) for _ in 0..9 MAX_SUPPORTED_REQUEST_BLOB_SIDECARS*: uint64 = 1152 + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/phase0/validator.md#time-parameters + ATTESTATION_DUE_BPS: uint64 = 3333 + AGGREGATE_DUE_BPS: uint64 = 6667 + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/altair/validator.md#time-parameters + SYNC_MESSAGE_DUE_BPS: uint64 = 3333 + CONTRIBUTION_DUE_BPS: uint64 = 6667 + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/phase0/fork-choice.md#time-parameters + PROPOSER_REORG_CUTOFF_BPS: uint64 = 1667 + type + TimeConfig* = object + SECONDS_PER_SLOT*: uint64 + Version* = distinct array[4, byte] - Eth1Address* = web3types.Address + + Eth1Address* = eth.Address + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/beacon-chain.md#new-blobparameters + BlobParameters* = object + EPOCH*: Epoch + MAX_BLOBS_PER_BLOCK*: uint64 RuntimeConfig* = object ## https://github.com/ethereum/consensus-specs/tree/v1.5.0-beta.2/configs @@ -68,9 +89,11 @@ type ELECTRA_FORK_EPOCH*: Epoch FULU_FORK_VERSION*: Version FULU_FORK_EPOCH*: Epoch + GLOAS_FORK_VERSION*: Version + GLOAS_FORK_EPOCH*: Epoch # Time parameters - # TODO SECONDS_PER_SLOT*: uint64 + time*: TimeConfig SECONDS_PER_ETH1_BLOCK*: uint64 MIN_VALIDATOR_WITHDRAWABILITY_DELAY*: uint64 SHARD_COMMITTEE_PERIOD*: uint64 @@ -86,9 +109,9 @@ type # Fork choice # TODO PROPOSER_SCORE_BOOST*: uint64 - # TODO REORG_HEAD_WEIGHT_THRESHOLD*: uint64 + REORG_HEAD_WEIGHT_THRESHOLD*: uint64 # TODO REORG_PARENT_WEIGHT_THRESHOLD*: uint64 - # TODO REORG_MAX_EPOCHS_SINCE_FINALIZATION*: uint64 + REORG_MAX_EPOCHS_SINCE_FINALIZATION*: uint64 # Deposit contract DEPOSIT_CHAIN_ID*: uint64 @@ -100,8 +123,6 @@ type # TODO MAX_REQUEST_BLOCKS*: uint64 # TODO EPOCHS_PER_SUBNET_SUBSCRIPTION*: uint64 MIN_EPOCHS_FOR_BLOCK_REQUESTS*: uint64 - # TODO TTFB_TIMEOUT*: uint64 - # TODO RESP_TIMEOUT*: uint64 # TODO ATTESTATION_PROPAGATION_SLOT_RANGE*: uint64 # TODO MAXIMUM_GOSSIP_CLOCK_DISPARITY*: uint64 # TODO MESSAGE_DOMAIN_INVALID_SNAPPY*: array[4, byte] @@ -126,16 +147,16 @@ type MAX_REQUEST_BLOB_SIDECARS_ELECTRA*: uint64 # Fulu - # TODO NUMBER_OF_COLUMNS*: uint64 - # TODO NUMBER_OF_CUSTODY_GROUPS*: uint64 - # TODO DATA_COLUMN_SIDECAR_SUBNET_COUNT*: uint64 - # TODO MAX_REQUEST_DATA_COLUMN_SIDECARS*: uint64 - # TODO SAMPLES_PER_SLOT*: uint64 - # TODO CUSTODY_REQUIREMENT*: uint64 - # TODO VALIDATOR_CUSTODY_REQUIREMENT*: uint64 - # TODO BALANCE_PER_ADDITIONAL_CUSTODY_GROUP*: uint64 - # TODO MAX_BLOBS_PER_BLOCK_FULU*: uint64 - # TODO MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS*: uint64 + NUMBER_OF_COLUMNS*: uint64 + NUMBER_OF_CUSTODY_GROUPS*: uint64 + 
DATA_COLUMN_SIDECAR_SUBNET_COUNT*: uint64 + MAX_REQUEST_DATA_COLUMN_SIDECARS*: uint64 + SAMPLES_PER_SLOT*: uint64 + CUSTODY_REQUIREMENT*: uint64 + VALIDATOR_CUSTODY_REQUIREMENT*: uint64 + BALANCE_PER_ADDITIONAL_CUSTODY_GROUP*: uint64 + MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS*: uint64 + BLOB_SCHEDULE*: seq[BlobParameters] PresetFile* = object values*: Table[string, string] @@ -147,11 +168,12 @@ type const const_preset* {.strdefine.} = "mainnet" - # No-longer used values from legacy config files + # No-longer used values from legacy config files, or quirks of BPO parsing ignoredValues = [ - "TRANSITION_TOTAL_DIFFICULTY", # Name that appears in some altair alphas, obsolete, remove when no more testnets - "MIN_ANCHOR_POW_BLOCK_DIFFICULTY", # Name that appears in some altair alphas, obsolete, remove when no more testnets - "RANDOM_SUBNETS_PER_VALIDATOR", # Removed in consensus-specs v1.4.0 + "TTFB_TIMEOUT", # https://github.com/ethereum/consensus-specs/pull/4532 + "RESP_TIMEOUT", # https://github.com/ethereum/consensus-specs/pull/4532 + " MAX_BLOBS_PER_BLOCK", # parsed separately + " - EPOCH", # parsed separately ] when const_preset == "mainnet": @@ -188,8 +210,8 @@ when const_preset == "mainnet": TERMINAL_TOTAL_DIFFICULTY: u256"115792089237316195423570985008687907853269984665640564039457584007913129638912", # By default, don't use these params - TERMINAL_BLOCK_HASH: Hash32.fromHex( - "0x0000000000000000000000000000000000000000000000000000000000000000"), + TERMINAL_BLOCK_HASH: + hash32"0x0000000000000000000000000000000000000000000000000000000000000000", # Genesis # --------------------------------------------------------------- @@ -226,11 +248,15 @@ when const_preset == "mainnet": # Fulu FULU_FORK_VERSION: Version [byte 0x06, 0x00, 0x00, 0x00], FULU_FORK_EPOCH: FAR_FUTURE_EPOCH, + # Gloas + GLOAS_FORK_VERSION: Version [byte 0x07, 0x00, 0x00, 0x00], + GLOAS_FORK_EPOCH: FAR_FUTURE_EPOCH, # Time parameters # --------------------------------------------------------------- - # 12 seconds - # TODO SECONDS_PER_SLOT: 12, + time: TimeConfig( + # 12 seconds + SECONDS_PER_SLOT: 12), # 14 (estimate from Eth1 mainnet) SECONDS_PER_ETH1_BLOCK: 14, # 2**8 (= 256) epochs ~27 hours @@ -240,7 +266,6 @@ when const_preset == "mainnet": # 2**11 (= 2,048) Eth1 blocks ~8 hours ETH1_FOLLOW_DISTANCE: 2048, - # Validator cycle # --------------------------------------------------------------- # 2**2 (= 4) @@ -256,6 +281,11 @@ when const_preset == "mainnet": # [New in Deneb:EIP7514] 2**3 (= 8) MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8, + # Fork choice + # --------------------------------------------------------------- + REORG_HEAD_WEIGHT_THRESHOLD: 20, + REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2, + # Deposit contract # --------------------------------------------------------------- # Ethereum PoW Mainnet @@ -273,10 +303,6 @@ when const_preset == "mainnet": # TODO EPOCHS_PER_SUBNET_SUBSCRIPTION: 256, # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024, - # 5s - # TODO TTFB_TIMEOUT: 5, - # 10s - # TODO RESP_TIMEOUT: 10, # TODO ATTESTATION_PROPAGATION_SLOT_RANGE: 32, # 500ms # TODO MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500, @@ -315,16 +341,15 @@ when const_preset == "mainnet": MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152, # Fulu - # TODO NUMBER_OF_COLUMNS: 128, - # TODO NUMBER_OF_CUSTODY_GROUPS: 128, - # TODO DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128, - # TODO MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384, - # TODO SAMPLES_PER_SLOT: 8, - # TODO CUSTODY_REQUIREMENT: 4, - # 
TODO VALIDATOR_CUSTODY_REQUIREMENT: 8, - # TODO BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000, - # TODO MAX_BLOBS_PER_BLOCK_FULU: 12, - # TODO MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 + NUMBER_OF_COLUMNS: 128, + NUMBER_OF_CUSTODY_GROUPS: 128, + DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128, + MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384, + SAMPLES_PER_SLOT: 8, + CUSTODY_REQUIREMENT: 4, + VALIDATOR_CUSTODY_REQUIREMENT: 8, + BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000'u64, + MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096, ) elif const_preset == "gnosis": @@ -356,8 +381,8 @@ elif const_preset == "gnosis": TERMINAL_TOTAL_DIFFICULTY: u256"115792089237316195423570985008687907853269984665640564039457584007913129638912", # By default, don't use these params - TERMINAL_BLOCK_HASH: BlockHash.fromHex( - "0x0000000000000000000000000000000000000000000000000000000000000000"), + TERMINAL_BLOCK_HASH: + hash32"0x0000000000000000000000000000000000000000000000000000000000000000", # Genesis # --------------------------------------------------------------- @@ -392,13 +417,17 @@ elif const_preset == "gnosis": ELECTRA_FORK_VERSION: Version [byte 0x05, 0x00, 0x00, 0x64], ELECTRA_FORK_EPOCH: FAR_FUTURE_EPOCH, # Fulu - FULU_FORK_VERSION: Version [byte 0x06, 0x00, 0x00, 0x00], + FULU_FORK_VERSION: Version [byte 0x06, 0x00, 0x00, 0x64], FULU_FORK_EPOCH: FAR_FUTURE_EPOCH, + # Gloas + GLOAS_FORK_VERSION: Version [byte 0x07, 0x00, 0x00, 0x64], + GLOAS_FORK_EPOCH: FAR_FUTURE_EPOCH, # Time parameters # --------------------------------------------------------------- - # 5 seconds - # TODO SECONDS_PER_SLOT: 5, + time: TimeConfig( + # 5 seconds + SECONDS_PER_SLOT: 5), # 14 (estimate from Eth1 mainnet) SECONDS_PER_ETH1_BLOCK: 5, # 2**8 (= 256) epochs ~27 hours @@ -424,6 +453,11 @@ elif const_preset == "gnosis": # [New in Deneb:EIP7514] 2**3 (= 8) MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8, + # Fork choice + # --------------------------------------------------------------- + REORG_HEAD_WEIGHT_THRESHOLD: 20, + REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2, + # Deposit contract # --------------------------------------------------------------- # Gnosis PoW Mainnet @@ -441,10 +475,6 @@ elif const_preset == "gnosis": # TODO EPOCHS_PER_SUBNET_SUBSCRIPTION: 256, # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024, - # 5s - # TODO TTFB_TIMEOUT: 5, - # 10s - # TODO RESP_TIMEOUT: 10, # TODO ATTESTATION_PROPAGATION_SLOT_RANGE: 32, # 500ms # TODO MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500, @@ -483,16 +513,15 @@ elif const_preset == "gnosis": MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 256, # Fulu - # TODO NUMBER_OF_COLUMNS: 128, - # TODO NUMBER_OF_CUSTODY_GROUPS: 128, - # TODO DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128, - # TODO MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384, - # TODO SAMPLES_PER_SLOT: 8, - # TODO CUSTODY_REQUIREMENT: 4, - # TODO VALIDATOR_CUSTODY_REQUIREMENT: 8, - # TODO BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000, - # TODO MAX_BLOBS_PER_BLOCK_FULU: 12, - # TODO MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 + NUMBER_OF_COLUMNS: 128, + NUMBER_OF_CUSTODY_GROUPS: 128, + DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128, + MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384, + SAMPLES_PER_SLOT: 8, + CUSTODY_REQUIREMENT: 4, + VALIDATOR_CUSTODY_REQUIREMENT: 8, + BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000'u64, + MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 ) elif const_preset == "minimal": @@ -520,8 +549,8 @@ elif const_preset == "minimal": TERMINAL_TOTAL_DIFFICULTY: 
u256"115792089237316195423570985008687907853269984665640564039457584007913129638912", # By default, don't use these params - TERMINAL_BLOCK_HASH: Hash32.fromHex( - "0x0000000000000000000000000000000000000000000000000000000000000000"), + TERMINAL_BLOCK_HASH: + hash32"0x0000000000000000000000000000000000000000000000000000000000000000", # Genesis @@ -559,11 +588,15 @@ elif const_preset == "minimal": # Fulu FULU_FORK_VERSION: Version [byte 0x06, 0x00, 0x00, 0x01], FULU_FORK_EPOCH: Epoch(uint64.high), + # Gloas + GLOAS_FORK_VERSION: Version [byte 0x07, 0x00, 0x00, 0x01], + GLOAS_FORK_EPOCH: Epoch(uint64.high), # Time parameters # --------------------------------------------------------------- - # [customized] Faster for testing purposes - # TODO SECONDS_PER_SLOT: 6, + time: TimeConfig( + # [customized] Faster for testing purposes + SECONDS_PER_SLOT: 6), # 14 (estimate from Eth1 mainnet) SECONDS_PER_ETH1_BLOCK: 14, # 2**8 (= 256) epochs @@ -573,7 +606,6 @@ elif const_preset == "minimal": # [customized] process deposits more quickly, but insecure ETH1_FOLLOW_DISTANCE: 16, - # Validator cycle # --------------------------------------------------------------- # 2**2 (= 4) @@ -589,6 +621,10 @@ elif const_preset == "minimal": # [New in Deneb:EIP7514] [customized] MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 4, + # Fork choice + # --------------------------------------------------------------- + REORG_HEAD_WEIGHT_THRESHOLD: 20, + REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2, # Deposit contract # --------------------------------------------------------------- @@ -608,10 +644,6 @@ elif const_preset == "minimal": # TODO EPOCHS_PER_SUBNET_SUBSCRIPTION: 256, # [customized] `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 272) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 272, - # 5s - # TODO TTFB_TIMEOUT: 5, - # 10s - # TODO RESP_TIMEOUT: 10, # TODO ATTESTATION_PROPAGATION_SLOT_RANGE: 32, # 500ms # TODO MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500, @@ -647,19 +679,18 @@ elif const_preset == "minimal": # `uint64(9)` MAX_BLOBS_PER_BLOCK_ELECTRA: 9, # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA - MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152, # Fulu - # TODO NUMBER_OF_COLUMNS: 128, - # TODO NUMBER_OF_CUSTODY_GROUPS: 128, - # TODO DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128, - # TODO MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384, - # TODO SAMPLES_PER_SLOT: 8, - # TODO CUSTODY_REQUIREMENT: 4, - # TODO VALIDATOR_CUSTODY_REQUIREMENT: 8, - # TODO BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000, - # TODO MAX_BLOBS_PER_BLOCK_FULU: 12, - # TODO MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 + NUMBER_OF_COLUMNS: 128, + NUMBER_OF_CUSTODY_GROUPS: 128, + DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128, + MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384, + SAMPLES_PER_SLOT: 8, + CUSTODY_REQUIREMENT: 4, + VALIDATOR_CUSTODY_REQUIREMENT: 8, + BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000'u64, + MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096, ) else: @@ -687,6 +718,17 @@ else: # createConstantsFromPreset const_preset +const IsMainnetSupported*: bool = + const_preset == "mainnet" and SECONDS_PER_SLOT == 12 + +const IsGnosisSupported*: bool = + const_preset == "gnosis" and SECONDS_PER_SLOT == 5 + +const + MIN_SECONDS_PER_SLOT* = 1'u64 + MAX_SECONDS_PER_SLOT* = int64.high.uint64 div 1_000_000_000'u64 + SLOT_DURATION_MS = SECONDS_PER_SLOT * 1000 + const SLOTS_PER_SYNC_COMMITTEE_PERIOD* = SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD @@ -738,6 +780,13 @@ func parse(T: type DomainType, input: string): T {.raises: 
[ValueError].} = DomainType hexToByteArray(input, 4) +func parse(T: typedesc[TimeConfig], input: string): T {.raises: [ValueError].} = + raise (ref ValueError)(msg: "Unexpected TimeConfig value") + +func cmpBlobParameters*(x, y: BlobParameters): int = + # Don't care about ties and want reverse order. + cmp(y.EPOCH.distinctBase, x.EPOCH.distinctBase) + proc readRuntimeConfig*( fileContent: string, path: string): (RuntimeConfig, seq[string]) {. raises: [PresetFileError, PresetIncompatibleError].} = @@ -770,6 +819,71 @@ proc readRuntimeConfig*( values[lineParts[0]] = lineParts[1].strip + # Accumulate BLOB_SCHEDULE entries + var + blobScheduleEntries: seq[BlobParameters] + inBlobSchedule = false + currentBPO: BlobParameters + + for rawLine in splitLines(fileContent): + inc lineNum + # Skip blank lines or full-line comments + if rawLine.len == 0 or rawLine[0] == '#': + continue + + # Remove trailing comments but preserve leading whitespace for indentation + let noComment = rawLine.split("#")[0] + let clean = noComment.strip() + + # Enter the BLOB_SCHEDULE block + # Begin BLOB_SCHEDULE section + if clean == "BLOB_SCHEDULE:": + inBlobSchedule = true + continue + + if inBlobSchedule: + let entry = strip(noComment, leading=true, trailing=false) + if entry.startsWith("- EPOCH:"): + if currentBPO.EPOCH.uint64 != 0.uint64: + blobScheduleEntries.add(currentBPO) + currentBPO = BlobParameters() + let epochStr = entry.split(":")[1].strip() + try: + currentBPO.EPOCH = Epoch(parse(uint64, epochStr)) + except ValueError: + fail("Unable to parse EPOCH: " & epochStr) + continue + elif entry.startsWith("MAX_BLOBS_PER_BLOCK:"): + let maxStr = entry.split(":")[1].strip() + try: + currentBPO.MAX_BLOBS_PER_BLOCK = parse(uint64, maxStr) + except ValueError: + fail("Unable to parse MAX_BLOBS_PER_BLOCK: " & maxStr) + continue + # Exit section on non-indented line + elif noComment[0] notin {' ', '\t'}: + if currentBPO.EPOCH.uint64 != 0.uint64: + blobScheduleEntries.add(currentBPO) + inBlobSchedule = false + else: + continue + + # Key: Value parsing + if not inBlobSchedule: + let parts = clean.split(":") + if parts.len != 2: + fail("Invalid syntax: expected 'Key: Value'") + let key = parts[0] + if key notin ignoredValues: + values[key] = parts[1].strip() + + # Final BLOB_SCHEDULE entry + if inBlobSchedule and currentBPO.EPOCH.uint64 != 0.uint64: + blobScheduleEntries.add(currentBPO) + + # BPO entries must be sorted in reverse epoch order + blobScheduleEntries.sort(cmp = cmpBlobParameters) + # Certain config keys are baked into the binary at compile-time # and cannot be overridden via config. 
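Illustrative aside (not part of the diff): as the cmpBlobParameters comment above notes, BLOB_SCHEDULE entries are kept in reverse epoch order, so the parameters in force at a given epoch are simply the first entry whose EPOCH has already been reached. A self-contained sketch with hypothetical epochs and a hypothetical lookup helper (how the schedule is consumed is outside this hunk):

import std/algorithm

type
  BlobParameters = object   # local stand-in for the config type
    EPOCH: uint64
    MAX_BLOBS_PER_BLOCK: uint64

func cmpBlobParameters(x, y: BlobParameters): int =
  cmp(y.EPOCH, x.EPOCH)     # reverse order: highest EPOCH first

func maxBlobsAt(schedule: seq[BlobParameters], epoch: uint64): uint64 =
  # First entry whose EPOCH is not in the future wins; the default 0
  # signals "before the whole schedule" to the caller.
  for entry in schedule:
    if epoch >= entry.EPOCH:
      return entry.MAX_BLOBS_PER_BLOCK

var schedule = @[
  BlobParameters(EPOCH: 100, MAX_BLOBS_PER_BLOCK: 9),
  BlobParameters(EPOCH: 200, MAX_BLOBS_PER_BLOCK: 12)]
schedule.sort(cmp = cmpBlobParameters)

doAssert schedule[0].EPOCH == 200
doAssert maxBlobsAt(schedule, 150) == 9
doAssert maxBlobsAt(schedule, 250) == 12
doAssert maxBlobsAt(schedule, 50) == 0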
template checkCompatibility( @@ -782,13 +896,14 @@ proc readRuntimeConfig*( if not operator(distinctBase(value), distinctBase(constValue)): raise (ref PresetFileError)(msg: "Cannot override config" & - " (required: " & name & opDesc & $distinctBase(constValue) & + " (required: " & name & " " & + opDesc & " " & $distinctBase(constValue) & " - config: " & name & "=" & values[name] & ")") else: if not operator(value, constValue): raise (ref PresetFileError)(msg: "Cannot override config" & - " (required: " & name & opDesc & $constValue & + " (required: " & name & " " & opDesc & " " & $constValue & " - config: " & name & "=" & values[name] & ")") values.del name except ValueError: @@ -800,7 +915,9 @@ proc readRuntimeConfig*( const name = astToStr(constValue) checkCompatibility(constValue, name, operator) - checkCompatibility SECONDS_PER_SLOT + checkCompatibility SECONDS_PER_SLOT # Temporary, until removed from presets + # checkCompatibility MIN_SECONDS_PER_SLOT .. MAX_SECONDS_PER_SLOT, + # "SECONDS_PER_SLOT", `in` checkCompatibility BLS_WITHDRAWAL_PREFIX @@ -856,8 +973,6 @@ proc readRuntimeConfig*( checkCompatibility MAX_PAYLOAD_SIZE, "MAX_CHUNK_SIZE" checkCompatibility MAX_REQUEST_BLOCKS checkCompatibility EPOCHS_PER_SUBNET_SUBSCRIPTION - checkCompatibility TTFB_TIMEOUT - checkCompatibility RESP_TIMEOUT checkCompatibility ATTESTATION_PROPAGATION_SLOT_RANGE checkCompatibility MAXIMUM_GOSSIP_CLOCK_DISPARITY.milliseconds.uint64, "MAXIMUM_GOSSIP_CLOCK_DISPARITY" @@ -881,17 +996,33 @@ proc readRuntimeConfig*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/fork-choice.md#configuration # Isn't being used as a preset in the usual way: at any time, there's one correct value checkCompatibility PROPOSER_SCORE_BOOST - checkCompatibility REORG_HEAD_WEIGHT_THRESHOLD checkCompatibility REORG_PARENT_WEIGHT_THRESHOLD - checkCompatibility REORG_MAX_EPOCHS_SINCE_FINALIZATION + + checkCompatibility SLOT_DURATION_MS + checkCompatibility ATTESTATION_DUE_BPS + checkCompatibility AGGREGATE_DUE_BPS + checkCompatibility SYNC_MESSAGE_DUE_BPS + checkCompatibility CONTRIBUTION_DUE_BPS + checkCompatibility PROPOSER_REORG_CUTOFF_BPS + + template assignValue(name: static string, field: untyped): untyped = + if values.hasKey(name): + when field is seq[BlobParameters]: + field = blobScheduleEntries + else: + try: + field = parse(typeof(field), values[name]) + except ValueError: + fail("Unable to parse " & name) + values.del(name) + elif name == "BLOB_SCHEDULE": + when field is seq[BlobParameters]: + field = blobScheduleEntries for name, field in cfg.fieldPairs(): - if name in values: - try: - field = parse(typeof(field), values[name]) - values.del name - except ValueError: - raise (ref PresetFileError)(msg: "Unable to parse " & name) + assignValue(name, field) + for name, field in cfg.time.fieldPairs(): + assignValue(name, field) if cfg.PRESET_BASE != const_preset: raise (ref PresetIncompatibleError)( diff --git a/beacon_chain/spec/presets/gnosis.nim b/beacon_chain/spec/presets/gnosis.nim index 2540898ee0..130ecdda5c 100644 --- a/beacon_chain/spec/presets/gnosis.nim +++ b/beacon_chain/spec/presets/gnosis.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). 
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -10,8 +10,8 @@ import ./gnosis/[ phase0_preset, altair_preset, bellatrix_preset, capella_preset, - deneb_preset, electra_preset] + deneb_preset, electra_preset, gloas_preset] export phase0_preset, altair_preset, bellatrix_preset, capella_preset, - deneb_preset, electra_preset + deneb_preset, electra_preset, gloas_preset diff --git a/beacon_chain/spec/presets/gnosis/gloas_preset.nim b/beacon_chain/spec/presets/gnosis/gloas_preset.nim new file mode 100644 index 0000000000..e0088d4431 --- /dev/null +++ b/beacon_chain/spec/presets/gnosis/gloas_preset.nim @@ -0,0 +1,23 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} + +# Mainnet preset - Gloas (Gnosis version not available yet; EF mainnet for now) +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/presets/mainnet/gloas.yaml +const + # Networking + # --------------------------------------------------------------- + # floorlog2(get_generalized_index(BeaconBlockBody, "signed_execution_payload_header", "message", "blob_kzg_commitments_root")) (= 9) + KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH_GLOAS*: uint64 = 9 + + # Execution + # --------------------------------------------------------------- + # 2**9 (= 512) validators + PTC_SIZE*: uint64 = 512 + # 2**2 (= 4) attestations + MAX_PAYLOAD_ATTESTATIONS*: uint64 = 4 diff --git a/beacon_chain/spec/presets/mainnet.nim b/beacon_chain/spec/presets/mainnet.nim index fae046cf07..7555f49c57 100644 --- a/beacon_chain/spec/presets/mainnet.nim +++ b/beacon_chain/spec/presets/mainnet.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2019-2024 Status Research & Development GmbH +# Copyright (c) 2019-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -10,8 +10,9 @@ import ./mainnet/[ phase0_preset, altair_preset, bellatrix_preset, capella_preset, - deneb_preset, electra_preset] + deneb_preset, electra_preset, gloas_preset + ] export phase0_preset, altair_preset, bellatrix_preset, capella_preset, - deneb_preset, electra_preset + deneb_preset, electra_preset, gloas_preset diff --git a/beacon_chain/spec/presets/mainnet/gloas_preset.nim b/beacon_chain/spec/presets/mainnet/gloas_preset.nim new file mode 100644 index 0000000000..90c0e66f86 --- /dev/null +++ b/beacon_chain/spec/presets/mainnet/gloas_preset.nim @@ -0,0 +1,23 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
+ +{.push raises: [].} + +# Mainnet preset - Gloas +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/presets/mainnet/gloas.yaml +const + # Networking + # --------------------------------------------------------------- + # floorlog2(get_generalized_index(BeaconBlockBody, "signed_execution_payload_header", "message", "blob_kzg_commitments_root")) (= 9) + KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH_GLOAS*: uint64 = 9 + + # Execution + # --------------------------------------------------------------- + # 2**9 (= 512) validators + PTC_SIZE*: uint64 = 512 + # 2**2 (= 4) attestations + MAX_PAYLOAD_ATTESTATIONS*: uint64 = 4 diff --git a/beacon_chain/spec/presets/minimal.nim b/beacon_chain/spec/presets/minimal.nim index 94cec86ef1..6b31aebe01 100644 --- a/beacon_chain/spec/presets/minimal.nim +++ b/beacon_chain/spec/presets/minimal.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2019-2024 Status Research & Development GmbH +# Copyright (c) 2019-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -10,8 +10,9 @@ import ./minimal/[ phase0_preset, altair_preset, bellatrix_preset, capella_preset, - deneb_preset, electra_preset] + deneb_preset, electra_preset, gloas_preset + ] export phase0_preset, altair_preset, bellatrix_preset, capella_preset, - deneb_preset, electra_preset + deneb_preset, electra_preset, gloas_preset diff --git a/beacon_chain/spec/presets/minimal/deneb_preset.nim b/beacon_chain/spec/presets/minimal/deneb_preset.nim index 642eec610e..522841fccd 100644 --- a/beacon_chain/spec/presets/minimal/deneb_preset.nim +++ b/beacon_chain/spec/presets/minimal/deneb_preset.nim @@ -5,14 +5,14 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} # Minimal preset - Deneb -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/minimal/deneb.yaml +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.5/presets/minimal/deneb.yaml const # `uint64(4096)` FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096 - # [customized] - MAX_BLOB_COMMITMENTS_PER_BLOCK*: uint64 = 32 - # [customized] `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 5 = 10 - KZG_COMMITMENT_INCLUSION_PROOF_DEPTH* = 10 + # `uint64(4096)` + MAX_BLOB_COMMITMENTS_PER_BLOCK*: uint64 = 4096 + # `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17 + KZG_COMMITMENT_INCLUSION_PROOF_DEPTH* = 17 diff --git a/beacon_chain/spec/presets/minimal/electra_preset.nim b/beacon_chain/spec/presets/minimal/electra_preset.nim index 9d87342965..0535cb51d0 100644 --- a/beacon_chain/spec/presets/minimal/electra_preset.nim +++ b/beacon_chain/spec/presets/minimal/electra_preset.nim @@ -44,10 +44,10 @@ const # Execution # --------------------------------------------------------------- - # [customized] - MAX_DEPOSIT_REQUESTS_PER_PAYLOAD* = 4 - # [customized] 2**1 (= 2) withdrawal requests - MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 2 + # 2**13 (= 8,192) deposit requests + MAX_DEPOSIT_REQUESTS_PER_PAYLOAD* = 8192 + # 2**4 (= 16) withdrawal requests + MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 16 # Withdrawals processing # --------------------------------------------------------------- diff --git a/beacon_chain/spec/presets/minimal/gloas_preset.nim b/beacon_chain/spec/presets/minimal/gloas_preset.nim new file mode 100644 index 0000000000..a51103c97e --- /dev/null +++ b/beacon_chain/spec/presets/minimal/gloas_preset.nim @@ -0,0 +1,23 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
+ +{.push raises: [].} + +# Minimal preset - Gloas +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/presets/minimal/gloas.yaml +const + # Networking + # --------------------------------------------------------------- + # floorlog2(get_generalized_index(BeaconBlockBody, "signed_execution_payload_header", "message", "blob_kzg_commitments_root")) (= 9) + KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH_GLOAS*: uint64 = 9 + + # Execution + # --------------------------------------------------------------- + # [customized] 2**1 (= 2) validators + PTC_SIZE*: uint64 = 2 + # 2**2 (= 4) attestations + MAX_PAYLOAD_ATTESTATIONS*: uint64 = 4 diff --git a/beacon_chain/spec/signatures.nim b/beacon_chain/spec/signatures.nim index 726c1b42ee..9aab52a702 100644 --- a/beacon_chain/spec/signatures.nim +++ b/beacon_chain/spec/signatures.nim @@ -44,7 +44,7 @@ func compute_slot_signing_root*( fork, DOMAIN_SELECTION_PROOF, epoch, genesis_validators_root) compute_signing_root(slot, domain) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#aggregation-selection +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/validator.md#aggregation-selection func get_slot_signature*( fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot, privkey: ValidatorPrivKey): CookedSig = @@ -165,7 +165,7 @@ proc verify_attestation_signature*( blsFastAggregateVerify(pubkeys, signing_root.data, signature) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-is_valid_deposit_signature +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/electra/beacon-chain.md#new-is_valid_deposit_signature func compute_deposit_signing_root( version: Version, deposit_message: DepositMessage): Eth2Digest = @@ -337,7 +337,7 @@ proc verify_sync_committee_selection_proof*( blsVerify(pubkey, signing_root.data, signature) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#signature +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/validator.md#signature func compute_contribution_and_proof_signing_root*( fork: Fork, genesis_validators_root: Eth2Digest, msg: ContributionAndProof): Eth2Digest = @@ -373,30 +373,31 @@ proc verify_contribution_and_proof_signature*( # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#signing func compute_builder_signing_root( - fork: Fork, - msg: deneb_mev.BuilderBid | electra_mev.BuilderBid | fulu_mev.BuilderBid | + genesis_fork_version: Version, + msg: electra_mev.BuilderBid | fulu_mev.BuilderBid | ValidatorRegistrationV1): Eth2Digest = - # Uses genesis fork version regardless - doAssert fork.current_version == fork.previous_version - - let domain = get_domain( - fork, DOMAIN_APPLICATION_BUILDER, GENESIS_EPOCH, ZERO_HASH) + # Fork = none in spec which means GENESIS_FORK_VERSION: + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/phase0/beacon-chain.md#compute_domain + let domain = compute_domain( + DOMAIN_APPLICATION_BUILDER, genesis_fork_version, ZERO_HASH) compute_signing_root(msg, domain) proc get_builder_signature*( - fork: Fork, msg: ValidatorRegistrationV1, privkey: ValidatorPrivKey): + genesis_fork_version: Version, + msg: ValidatorRegistrationV1, privkey: ValidatorPrivKey): CookedSig = - let signing_root = compute_builder_signing_root(fork, msg) + let signing_root = compute_builder_signing_root(genesis_fork_version, msg) blsSign(privkey, signing_root.data) proc 
verify_builder_signature*( - fork: Fork, msg: deneb_mev.BuilderBid | electra_mev.BuilderBid | - fulu_mev.BuilderBid | ValidatorRegistrationV1, + genesis_fork_version: Version, + msg: electra_mev.BuilderBid | + fulu_mev.BuilderBid | ValidatorRegistrationV1, pubkey: ValidatorPubKey | CookedPubKey, signature: SomeSig): bool = - let signing_root = compute_builder_signing_root(fork, msg) + let signing_root = compute_builder_signing_root(genesis_fork_version, msg) blsVerify(pubkey, signing_root.data, signature) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#new-process_bls_to_execution_change +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/capella/beacon-chain.md#new-process_bls_to_execution_change func compute_bls_to_execution_change_signing_root*( genesisFork: Fork, genesis_validators_root: Eth2Digest, msg: BLSToExecutionChange): Eth2Digest = @@ -424,3 +425,84 @@ proc verify_bls_to_execution_change_signature*( let signing_root = compute_bls_to_execution_change_signing_root( genesisFork, genesis_validators_root, msg.message) blsVerify(pubkey, signing_root.data, signature) + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#new-verify_execution_payload_bid_signature +func compute_execution_payload_bid_signing_root*( + fork: Fork, genesis_validators_root: Eth2Digest, + msg: gloas.SignedExecutionPayloadBid, + state: gloas.BeaconState): Eth2Digest = + let + epoch = get_current_epoch(state) + domain = get_domain( + fork, DOMAIN_BEACON_BUILDER, epoch, genesis_validators_root) + compute_signing_root(msg.message, domain) + +func get_execution_payload_bid_signature*( + fork: Fork, genesis_validators_root: Eth2Digest, + msg: SignedExecutionPayloadBid, state: gloas.BeaconState, + privkey: ValidatorPrivKey): CookedSig = + let signing_root = compute_execution_payload_bid_signing_root( + fork, genesis_validators_root, msg, state) + blsSign(privkey, signing_root.data) + +proc verify_execution_payload_bid_signature*( + fork: Fork, genesis_validators_root: Eth2Digest, + msg: gloas.SignedExecutionPayloadBid, state: gloas.BeaconState, + pubkey: ValidatorPubKey | CookedPubKey, + signature: SomeSig): bool = + let signing_root = compute_execution_payload_bid_signing_root( + fork, genesis_validators_root, msg, state) + blsVerify(pubkey, signing_root.data, signature) + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#new-verify_execution_payload_envelope_signature +func compute_execution_payload_envelope_signing_root*( + fork: Fork, genesis_validators_root: Eth2Digest, + msg: gloas.SignedExecutionPayloadEnvelope, + state: gloas.BeaconState): Eth2Digest = + let + epoch = get_current_epoch(state) + domain = get_domain( + fork, DOMAIN_BEACON_BUILDER, epoch, genesis_validators_root) + compute_signing_root(msg.message, domain) + +func get_execution_payload_envelope_signature*( + fork: Fork, genesis_validators_root: Eth2Digest, + msg: SignedExecutionPayloadEnvelope, state: gloas.BeaconState, + privkey: ValidatorPrivKey): CookedSig = + let signing_root = compute_execution_payload_envelope_signing_root( + fork, genesis_validators_root, msg, state) + blsSign(privkey, signing_root.data) + +proc verify_execution_payload_envelope_signature*( + fork: Fork, genesis_validators_root: Eth2Digest, + msg: gloas.SignedExecutionPayloadEnvelope, state: gloas.BeaconState, + pubkey: ValidatorPubKey | CookedPubKey, signature: SomeSig): bool = + let signing_root = 
compute_execution_payload_envelope_signing_root( + fork, genesis_validators_root, msg, state) + blsVerify(pubkey, signing_root.data, signature) + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/validator.md#constructing-a-payload-attestation +func compute_payload_attestation_message_signing_root*( + fork: Fork, genesis_validators_root: Eth2Digest, + msg: PayloadAttestationMessage): Eth2Digest = + let + epoch = msg.data.slot.epoch + domain = get_domain( + fork, DOMAIN_PTC_ATTESTER, epoch, genesis_validators_root) + compute_signing_root(msg, domain) + +func get_payload_attestation_message_signature*( + fork: Fork, genesis_validators_root: Eth2Digest, + msg: PayloadAttestationMessage, + privkey: ValidatorPrivKey): CookedSig = + let signing_root = compute_payload_attestation_message_signing_root( + fork, genesis_validators_root, msg) + blsSign(privkey, signing_root.data) + +proc verify_payload_attestation_message_signature*( + fork: Fork, genesis_validators_root: Eth2Digest, + msg: PayloadAttestationMessage, + pubkey: ValidatorPubKey | CookedPubKey, signature: SomeSig): bool = + let signing_root = compute_payload_attestation_message_signing_root( + fork, genesis_validators_root, msg) + blsVerify(pubkey, signing_root.data, signature) diff --git a/beacon_chain/spec/signatures_batch.nim b/beacon_chain/spec/signatures_batch.nim index f62b9c0180..b17d3266c8 100644 --- a/beacon_chain/spec/signatures_batch.nim +++ b/beacon_chain/spec/signatures_batch.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} ## This module contains signature verification helpers corresponding to those ## in signatures.nim, for use with signature sets / batch signature verification diff --git a/beacon_chain/spec/ssz_codec.nim b/beacon_chain/spec/ssz_codec.nim index ce146df838..a434a1cadb 100644 --- a/beacon_chain/spec/ssz_codec.nim +++ b/beacon_chain/spec/ssz_codec.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -15,7 +15,7 @@ import from ./datatypes/altair import ParticipationFlags, EpochParticipationFlags -export codec, base, typetraits, EpochParticipationFlags +export codec, base, typetraits, ParticipationFlags, EpochParticipationFlags # Coding and decoding of SSZ to spec-specific types @@ -26,6 +26,7 @@ template toSszType*(v: ForkDigest|GraffitiBytes): auto = distinctBase(v) template toSszType*(v: Version): auto = distinctBase(v) template toSszType*(v: JustificationBits): auto = distinctBase(v) template toSszType*(v: EpochParticipationFlags): auto = asList v +template toSszType*(v: Eth1Address): auto = v.data() func fromSszBytes*( T: type GraffitiBytes, data: openArray[byte]): T {.raises: [SszError].} = @@ -71,4 +72,11 @@ func fromSszBytes*( let tmp = cast[ptr List[ParticipationFlags, Limit VALIDATOR_REGISTRY_LIMIT]](addr result) readSszValue(bytes, tmp[]) +func fromSszBytes*( + T: type Eth1Address, bytes: openArray[byte]): T {.raises: [SszError].} = + if bytes.len != sizeof(result.data()): + raiseIncorrectSize T + + copyMem(addr result.data()[0], unsafeAddr bytes[0], sizeof(result.data())) + template toSszType*(v: HashedValidatorPubKey): auto = toRaw(v.pubkey) diff --git a/beacon_chain/spec/state_transition.nim b/beacon_chain/spec/state_transition.nim index 9b8e2e14e0..a207ccb08b 100644 --- a/beacon_chain/spec/state_transition.nim +++ b/beacon_chain/spec/state_transition.nim @@ -5,10 +5,10 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # State transition, as described in -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function # # The entry point is `state_transition` which is at the bottom of the file! # @@ -90,9 +90,8 @@ func verifyStateRoot( type RollbackProc* = proc() {.gcsafe, noSideEffect, raises: [].} - RollbackHashedProc*[T] = - proc(state: var T) {.gcsafe, noSideEffect, raises: [].} - RollbackForkedHashedProc* = RollbackHashedProc[ForkedHashedBeaconState] + RollbackForkedHashedProc* = + proc(state: var ForkedHashedBeaconState) {.gcsafe, noSideEffect, raises: [].} func noRollback*() = trace "Skipping rollback of broken state" @@ -102,7 +101,29 @@ func noRollback*() = # https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function func process_slot*( - state: var ForkyBeaconState, pre_state_root: Eth2Digest) = + state: var (phase0.BeaconState | altair.BeaconState | + bellatrix.BeaconState | capella.BeaconState | + deneb.BeaconState | electra.BeaconState | fulu.BeaconState), + pre_state_root: Eth2Digest) = + # `process_slot` is the first stage of per-slot processing - it is run for + # every slot, including epoch slots - it does not however update the slot + # number! `pre_state_root` refers to the state root of the incoming + # state before any slot processing has been done. 
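  # (Editor's illustration, not part of this patch: the state_roots and
  #  block_roots caches written below are ring buffers of length
  #  SLOTS_PER_HISTORICAL_ROOT - 8192 slots on mainnet - so the root stored
  #  for slot N sits at index N mod 8192 and is overwritten
  #  SLOTS_PER_HISTORICAL_ROOT slots later.)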
+ + # Cache state root + state.state_roots[state.slot mod SLOTS_PER_HISTORICAL_ROOT] = pre_state_root + + # Cache latest block header state root + if state.latest_block_header.state_root == ZERO_HASH: + state.latest_block_header.state_root = pre_state_root + + # Cache block root + state.block_roots[state.slot mod SLOTS_PER_HISTORICAL_ROOT] = + hash_tree_root(state.latest_block_header) + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#modified-process_slot +func process_slot*( + state: var gloas.BeaconState, pre_state_root: Eth2Digest) = # `process_slot` is the first stage of per-slot processing - it is run for # every slot, including epoch slots - it does not however update the slot # number! `pre_state_root` refers to the state root of the incoming @@ -119,6 +140,12 @@ func process_slot*( state.block_roots[state.slot mod SLOTS_PER_HISTORICAL_ROOT] = hash_tree_root(state.latest_block_header) + # [New in Gloas:EIP7732] + # Unset the next payload availability + clearBit( + state.execution_payload_availability, + (state.slot + 1) mod SLOTS_PER_HISTORICAL_ROOT) + func clear_epoch_from_cache(cache: var StateCache, epoch: Epoch) = cache.total_active_balance.del epoch cache.shuffled_active_validator_indices.del epoch @@ -245,6 +272,18 @@ func maybeUpgradeStateToFulu( fuluData: fulu.HashedBeaconState( root: hash_tree_root(newState[]), data: newState[]))[] +func maybeUpgradeStateToGloas( + cfg: RuntimeConfig, state: var ForkedHashedBeaconState) = + # Both process_slots() and state_transition_block() call this, so only run it + # once by checking for existing fork. + if getStateField(state, slot).epoch == cfg.GLOAS_FORK_EPOCH and + state.kind == ConsensusFork.Fulu: + let newState = upgrade_to_gloas(cfg, state.fuluData.data) + state = (ref ForkedHashedBeaconState)( + kind: ConsensusFork.Gloas, + gloasData: gloas.HashedBeaconState( + root: hash_tree_root(newState[]), data: newState[]))[] + func maybeUpgradeState*( cfg: RuntimeConfig, state: var ForkedHashedBeaconState, cache: var StateCache) = @@ -254,6 +293,7 @@ func maybeUpgradeState*( cfg.maybeUpgradeStateToDeneb(state) cfg.maybeUpgradeStateToElectra(state, cache) cfg.maybeUpgradeStateToFulu(state, cache) + cfg.maybeUpgradeStateToGloas(state) proc process_slots*( cfg: RuntimeConfig, state: var ForkedHashedBeaconState, slot: Slot, @@ -283,7 +323,7 @@ proc process_slots*( proc state_transition_block_aux( cfg: RuntimeConfig, state: var ForkyHashedBeaconState, - signedBlock: SomeForkySignedBeaconBlock, + signedBlock: SomeForkySignedBeaconBlock | ForkySignedBlindedBeaconBlock, cache: var StateCache, flags: UpdateFlags): Result[BlockRewards, cstring] = # Block updates - these happen when there's a new block being suggested # by the block proposer. 
Every actor in the network will update its state @@ -316,7 +356,7 @@ func noRollback*(state: var ForkedHashedBeaconState) = proc state_transition_block*( cfg: RuntimeConfig, state: var ForkedHashedBeaconState, - signedBlock: SomeForkySignedBeaconBlock, + signedBlock: SomeForkySignedBeaconBlock | ForkySignedBlindedBeaconBlock, cache: var StateCache, flags: UpdateFlags, rollback: RollbackForkedHashedProc): Result[BlockRewards, cstring] = ## `rollback` is called if the transition fails and the given state has been @@ -340,7 +380,7 @@ proc state_transition_block*( proc state_transition*( cfg: RuntimeConfig, state: var ForkedHashedBeaconState, - signedBlock: SomeForkySignedBeaconBlock, + signedBlock: SomeForkySignedBeaconBlock | ForkySignedBlindedBeaconBlock, cache: var StateCache, info: var ForkedEpochInfo, flags: UpdateFlags, rollback: RollbackForkedHashedProc): Result[BlockRewards, cstring] = ## Apply a block to the state, advancing the slot counter as necessary. The @@ -362,100 +402,117 @@ proc state_transition*( cfg, state, signedBlock.message.slot, cache, info, flags + {skipLastStateRootCalculation}) - state_transition_block( - cfg, state, signedBlock, cache, flags, rollback) + state_transition_block(cfg, state, signedBlock, cache, flags, rollback) + +template toList[A](attestations: seq[A]): auto = + when A is phase0.Attestation: + List[phase0.Attestation, Limit MAX_ATTESTATIONS](attestations) + elif A is electra.Attestation: + List[electra.Attestation, Limit MAX_ATTESTATIONS_ELECTRA](attestations) + else: + {.error: "Unknown attestation type".} + +template attester_slashings(changes: BeaconBlockValidatorChanges, consensusFork): auto = + when consensusFork >= ConsensusFork.Electra: + changes.electra_attester_slashings + else: + changes.phase0_attester_slashings + +template BeaconBlock(fork: ConsensusFork, EPOH: type): type = + when EPOH is ForkyExecutionPayloadHeader: + fork.BlindedBeaconBlock + else: + fork.BeaconBlock -func partialBeaconBlock*( +proc makeBeaconBlockWithRewards*( cfg: RuntimeConfig, - state: var (phase0.HashedBeaconState | altair.HashedBeaconState | - bellatrix.HashedBeaconState | capella.HashedBeaconState | - deneb.HashedBeaconState), + consensusFork: static ConsensusFork, + state: var ForkyHashedBeaconState, + cache: var StateCache, proposer_index: ValidatorIndex, randao_reveal: ValidatorSig, eth1_data: Eth1Data, graffiti: GraffitiBytes, - attestations: seq[phase0.Attestation], + attestations: seq[phase0.Attestation] | seq[electra.Attestation], deposits: seq[Deposit], validator_changes: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - execution_payload: ForkyExecutionPayloadForSigning, - _: ExecutionRequests): auto = - const consensusFork = typeof(state).kind + execution_payload: ForkyExecutionPayloadOrHeader, + verificationFlags: UpdateFlags, + kzg_commitments: KzgCommitments, + execution_requests: ExecutionRequests, +): Result[ + tuple[ + blck: consensusFork.BeaconBlock(typeof(execution_payload)), rewards: BlockRewards + ], + cstring, +] = + ## Create a block for the given state. The latest block applied to it will + ## be used for the parent_root value, and the slot will be taken from + ## state.slot, meaning process_slots must be called up to the slot for which + ## the block is to be created. 
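  ## An illustrative call shape (an editor's sketch, not part of this patch),
  ## assuming a Deneb-era state that process_slots has already advanced to the
  ## proposal slot, and locals with the obvious meanings:
  ##   let res = makeBeaconBlockWithRewards(
  ##     cfg, ConsensusFork.Deneb, state.denebData, cache, proposer_index,
  ##     randao_reveal, eth1_data, graffiti, attestations, deposits,
  ##     validator_changes, sync_aggregate, payload, {}, kzg_commitments,
  ##     execution_requests)
  ## (state.denebData stands for whichever ForkyHashedBeaconState the caller holds.)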
+ type + MaybeBlindedBeaconBlock = consensusFork.BeaconBlock(type(execution_payload)) + MaybeBlindedBlockBody = typeof(default(MaybeBlindedBeaconBlock).body) # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#preparing-for-a-beaconblock - var res = consensusFork.BeaconBlock( + var blck = MaybeBlindedBeaconBlock( slot: state.data.slot, proposer_index: proposer_index.uint64, parent_root: state.latest_block_root, - body: consensusFork.BeaconBlockBody( + body: MaybeBlindedBlockBody( randao_reveal: randao_reveal, eth1_data: eth1_data, graffiti: graffiti, proposer_slashings: validator_changes.proposer_slashings, - attester_slashings: validator_changes.phase0_attester_slashings, - attestations: - List[phase0.Attestation, Limit MAX_ATTESTATIONS](attestations), + attester_slashings: validator_changes.attester_slashings(consensusFork), + attestations: attestations.toList(), deposits: List[Deposit, Limit MAX_DEPOSITS](deposits), - voluntary_exits: validator_changes.voluntary_exits)) + voluntary_exits: validator_changes.voluntary_exits, + ), + ) - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/altair/validator.md#preparing-a-beaconblock + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/validator.md#preparing-a-beaconblock when consensusFork >= ConsensusFork.Altair: - res.body.sync_aggregate = sync_aggregate - - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/validator.md#block-proposal - when consensusFork >= ConsensusFork.Bellatrix: - res.body.execution_payload = execution_payload.executionPayload + blck.body.sync_aggregate = sync_aggregate + + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/bellatrix/validator.md#block-proposal + when consensusFork >= ConsensusFork.Bellatrix and + consensusFork < ConsensusFork.Gloas: + debugGloasComment "handle correctly for gloas" + when execution_payload is ForkyExecutionPayloadHeader: + blck.body.execution_payload_header = execution_payload + else: + blck.body.execution_payload = execution_payload - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/capella/validator.md#block-proposal + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/capella/validator.md#block-proposal when consensusFork >= ConsensusFork.Capella: - res.body.bls_to_execution_changes = - validator_changes.bls_to_execution_changes + blck.body.bls_to_execution_changes = validator_changes.bls_to_execution_changes # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/validator.md#constructing-the-beaconblockbody - when consensusFork >= ConsensusFork.Deneb: - res.body.blob_kzg_commitments = execution_payload.blobsBundle.commitments + when consensusFork >= ConsensusFork.Deneb and + consensusFork < ConsensusFork.Gloas: + debugGloasComment "handle correctly for gloas" + blck.body.blob_kzg_commitments = kzg_commitments - res + when consensusFork >= ConsensusFork.Electra and + consensusFork < ConsensusFork.Gloas: + debugGloasComment "handle correctly for gloas" + blck.body.execution_requests = execution_requests -func partialBeaconBlock*( - cfg: RuntimeConfig, - state: var (electra.HashedBeaconState | fulu.HashedBeaconState), - proposer_index: ValidatorIndex, - randao_reveal: ValidatorSig, - eth1_data: Eth1Data, - graffiti: GraffitiBytes, - attestations: seq[electra.Attestation], - deposits: seq[Deposit], - validator_changes: BeaconBlockValidatorChanges, - sync_aggregate: SyncAggregate, - 
execution_payload: ForkyExecutionPayloadForSigning, - execution_requests: ExecutionRequests): auto = - const consensusFork = typeof(state).kind + let rewards = + ?process_block(cfg, state.data, blck.asSigVerified(), verificationFlags, cache) - # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#preparing-for-a-beaconblock - consensusFork.BeaconBlock( - slot: state.data.slot, - proposer_index: proposer_index.uint64, - parent_root: state.latest_block_root, - body: consensusFork.BeaconBlockBody( - randao_reveal: randao_reveal, - eth1_data: eth1_data, - graffiti: graffiti, - proposer_slashings: validator_changes.proposer_slashings, - attester_slashings: validator_changes.electra_attester_slashings, - attestations: - List[electra.Attestation, Limit MAX_ATTESTATIONS_ELECTRA](attestations), - deposits: List[Deposit, Limit MAX_DEPOSITS](deposits), - voluntary_exits: validator_changes.voluntary_exits, - sync_aggregate: sync_aggregate, - execution_payload: execution_payload.executionPayload, - bls_to_execution_changes: validator_changes.bls_to_execution_changes, - blob_kzg_commitments: execution_payload.blobsBundle.commitments, - execution_requests: execution_requests)) + state.root = hash_tree_root(state.data) + blck.state_root = state.root -proc makeBeaconBlockWithRewards*( + ok((blck: blck, rewards: rewards)) + +proc makeBeaconBlock*[EP: ForkyExecutionPayload | ForkyExecutionPayloadHeader]( cfg: RuntimeConfig, - state: var ForkedHashedBeaconState, + consensusFork: static ConsensusFork, + state: var ForkyHashedBeaconState, + cache: var StateCache, proposer_index: ValidatorIndex, randao_reveal: ValidatorSig, eth1_data: Eth1Data, @@ -464,221 +521,38 @@ proc makeBeaconBlockWithRewards*( deposits: seq[Deposit], validator_changes: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - executionPayload: ForkyExecutionPayloadForSigning, - rollback: RollbackForkedHashedProc, - cache: var StateCache, - # TODO: - # `verificationFlags` is needed only in tests and can be - # removed if we don't use invalid signatures there + execution_payload: EP, verificationFlags: UpdateFlags, - transactions_root: Opt[Eth2Digest], - execution_payload_root: Opt[Eth2Digest], - kzg_commitments: Opt[KzgCommitments], - execution_requests: ExecutionRequests): - Result[tuple[blck: ForkedBeaconBlock, rewards: BlockRewards], cstring] = - ## Create a block for the given state. The latest block applied to it will - ## be used for the parent_root value, and the slot will be take from - ## state.slot meaning process_slots must be called up to the slot for which - ## the block is to be created. - - template makeBeaconBlock( - kind: untyped - ): Result[tuple[blck: ForkedBeaconBlock, rewards: BlockRewards], cstring] = - # To create a block, we'll first apply a partial block to the state, skipping - # some validations. 
- - var blck = - ForkedBeaconBlock.init( - partialBeaconBlock( - cfg, state.`kind Data`, proposer_index, randao_reveal, eth1_data, - graffiti, attestations, deposits, validator_changes, sync_aggregate, - executionPayload, execution_requests)) - - let res = process_block( - cfg, state.`kind Data`.data, blck.`kind Data`.asSigVerified(), - verificationFlags, cache) - if res.isErr: - rollback(state) - return err(res.error()) - - # Override for Builder API - if transactions_root.isSome and execution_payload_root.isSome: - withState(state): - when consensusFork < ConsensusFork.Deneb: - # Nimbus doesn't support pre-Deneb builder API - discard - elif consensusFork == ConsensusFork.Deneb: - forkyState.data.latest_execution_payload_header.transactions_root = - transactions_root.get - - when executionPayload is deneb.ExecutionPayloadForSigning: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/deneb/beacon-chain.md#beaconblockbody - forkyState.data.latest_block_header.body_root = hash_tree_root( - [hash_tree_root(randao_reveal), - hash_tree_root(eth1_data), - hash_tree_root(graffiti), - hash_tree_root(validator_changes.proposer_slashings), - hash_tree_root(validator_changes.phase0_attester_slashings), - hash_tree_root( - List[phase0.Attestation, Limit MAX_ATTESTATIONS]( - attestations)), - hash_tree_root(List[Deposit, Limit MAX_DEPOSITS](deposits)), - hash_tree_root(validator_changes.voluntary_exits), - hash_tree_root(sync_aggregate), - execution_payload_root.get, - hash_tree_root(validator_changes.bls_to_execution_changes), - hash_tree_root(kzg_commitments.get) - ]) - else: - raiseAssert "Attempt to use non-Deneb payload with post-Deneb state" - elif consensusFork == ConsensusFork.Electra: - forkyState.data.latest_execution_payload_header.transactions_root = - transactions_root.get - - when executionPayload is electra.ExecutionPayloadForSigning: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#beaconblockbody - forkyState.data.latest_block_header.body_root = hash_tree_root( - [hash_tree_root(randao_reveal), - hash_tree_root(eth1_data), - hash_tree_root(graffiti), - hash_tree_root(validator_changes.proposer_slashings), - hash_tree_root(validator_changes.electra_attester_slashings), - hash_tree_root( - List[electra.Attestation, Limit MAX_ATTESTATIONS_ELECTRA]( - attestations)), - hash_tree_root(List[Deposit, Limit MAX_DEPOSITS](deposits)), - hash_tree_root(validator_changes.voluntary_exits), - hash_tree_root(sync_aggregate), - execution_payload_root.get, - hash_tree_root(validator_changes.bls_to_execution_changes), - hash_tree_root(kzg_commitments.get), - hash_tree_root(execution_requests) - ]) - else: - raiseAssert "Attempt to use non-Electra payload with post-Deneb state" - elif consensusFork == ConsensusFork.Fulu: - forkyState.data.latest_execution_payload_header.transactions_root = - transactions_root.get - - debugFuluComment "verify (again) that this is what builder API needs" - when executionPayload is fulu.ExecutionPayloadForSigning: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#beaconblockbody - forkyState.data.latest_block_header.body_root = hash_tree_root( - [hash_tree_root(randao_reveal), - hash_tree_root(eth1_data), - hash_tree_root(graffiti), - hash_tree_root(validator_changes.proposer_slashings), - hash_tree_root(validator_changes.electra_attester_slashings), - hash_tree_root( - List[electra.Attestation, Limit MAX_ATTESTATIONS]( - attestations)), - 
hash_tree_root(List[Deposit, Limit MAX_DEPOSITS](deposits)), - hash_tree_root(validator_changes.voluntary_exits), - hash_tree_root(sync_aggregate), - execution_payload_root.get, - hash_tree_root(validator_changes.bls_to_execution_changes), - hash_tree_root(kzg_commitments.get), - hash_tree_root(execution_requests) - ]) - else: - raiseAssert "Attempt to use non-Fulu payload with post-Electra state" - else: - static: raiseAssert "Unreachable" - - state.`kind Data`.root = hash_tree_root(state.`kind Data`.data) - blck.`kind Data`.state_root = state.`kind Data`.root - - ok((blck: blck, rewards: res.get)) - - const payloadFork = typeof(executionPayload).kind - when payloadFork == ConsensusFork.Bellatrix: - case state.kind - of ConsensusFork.Phase0: makeBeaconBlock(phase0) - of ConsensusFork.Altair: makeBeaconBlock(altair) - of ConsensusFork.Bellatrix: makeBeaconBlock(bellatrix) - else: raiseAssert "Attempt to use Bellatrix payload with post-Bellatrix state" - elif payloadFork == ConsensusFork.Capella: - case state.kind - of ConsensusFork.Capella: makeBeaconBlock(capella) - else: raiseAssert "Attempt to use Capella payload with non-Capella state" - elif payloadFork == ConsensusFork.Deneb: - case state.kind - of ConsensusFork.Deneb: makeBeaconBlock(deneb) - else: raiseAssert "Attempt to use Deneb payload with non-Deneb state" - elif payloadFork == ConsensusFork.Electra: - case state.kind - of ConsensusFork.Electra: makeBeaconBlock(electra) - else: raiseAssert "Attempt to use Electra payload with non-Electra state" - elif payloadFork == ConsensusFork.Fulu: - case state.kind - of ConsensusFork.Fulu: makeBeaconBlock(fulu) - else: raiseAssert "Attempt to use Electra payload with non-Fulu state" - else: - {.error: "Unsupported fork".} + kzg_commitments: KzgCommitments, + execution_requests: ExecutionRequests, +): Result[consensusFork.BeaconBlock, cstring] = + ok ( + ?makeBeaconBlockWithRewards( + cfg, consensusFork, state, cache, proposer_index, randao_reveal, eth1_data, + graffiti, attestations, deposits, validator_changes, sync_aggregate, + execution_payload, verificationFlags, kzg_commitments, execution_requests, + ) + ).blck proc makeBeaconBlock*( - cfg: RuntimeConfig, state: var ForkedHashedBeaconState, - proposer_index: ValidatorIndex, randao_reveal: ValidatorSig, - eth1_data: Eth1Data, graffiti: GraffitiBytes, + cfg: RuntimeConfig, + consensusFork: static ConsensusFork, + state: var ForkyHashedBeaconState, + cache: var StateCache, + proposer_index: ValidatorIndex, + randao_reveal: ValidatorSig, + eth1_data: Eth1Data, + graffiti: GraffitiBytes, attestations: seq[phase0.Attestation] | seq[electra.Attestation], deposits: seq[Deposit], validator_changes: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - executionPayload: ForkyExecutionPayloadForSigning, - rollback: RollbackForkedHashedProc, cache: var StateCache, + eps: ForkyExecutionPayloadForSigning, verificationFlags: UpdateFlags, - transactions_root: Opt[Eth2Digest], - execution_payload_root: Opt[Eth2Digest], - kzg_commitments: Opt[KzgCommitments], - execution_requests: ExecutionRequests): - Result[ForkedBeaconBlock, cstring] = - let blockAndRewards = - ? 
makeBeaconBlockWithRewards( - cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, - attestations, deposits, validator_changes, sync_aggregate, - executionPayload, rollback, cache, verificationFlags, - transactions_root, execution_payload_root, kzg_commitments, - execution_requests) - ok(blockAndRewards.blck) - -proc makeBeaconBlock*( - cfg: RuntimeConfig, state: var ForkedHashedBeaconState, - proposer_index: ValidatorIndex, randao_reveal: ValidatorSig, - eth1_data: Eth1Data, graffiti: GraffitiBytes, - attestations: seq[phase0.Attestation] | seq[electra.Attestation], - deposits: seq[Deposit], - validator_changes: BeaconBlockValidatorChanges, - sync_aggregate: SyncAggregate, - executionPayload: ForkyExecutionPayloadForSigning, - rollback: RollbackForkedHashedProc, cache: var StateCache): - Result[ForkedBeaconBlock, cstring] = - makeBeaconBlock( - cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, - attestations, deposits, validator_changes, sync_aggregate, - executionPayload, rollback, cache, - verificationFlags = {}, transactions_root = Opt.none Eth2Digest, - execution_payload_root = Opt.none Eth2Digest, - kzg_commitments = Opt.none KzgCommitments, - execution_requests = default(ExecutionRequests)) - -proc makeBeaconBlock*( - cfg: RuntimeConfig, state: var ForkedHashedBeaconState, - proposer_index: ValidatorIndex, randao_reveal: ValidatorSig, - eth1_data: Eth1Data, graffiti: GraffitiBytes, - attestations: seq[phase0.Attestation] | seq[electra.Attestation], - deposits: seq[Deposit], - validator_changes: BeaconBlockValidatorChanges, - sync_aggregate: SyncAggregate, - executionPayload: ForkyExecutionPayloadForSigning, - rollback: RollbackForkedHashedProc, - cache: var StateCache, verificationFlags: UpdateFlags): - Result[ForkedBeaconBlock, cstring] = + execution_requests: ExecutionRequests = default(ExecutionRequests), +): Result[consensusFork.BeaconBlock, cstring] = makeBeaconBlock( - cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, - attestations, deposits, validator_changes, sync_aggregate, - executionPayload, rollback, cache, - verificationFlags = verificationFlags, - transactions_root = Opt.none Eth2Digest, - execution_payload_root = Opt.none Eth2Digest, - kzg_commitments = Opt.none KzgCommitments, - execution_requests = default(ExecutionRequests)) + cfg, consensusFork, state, cache, proposer_index, randao_reveal, eth1_data, + graffiti, attestations, deposits, validator_changes, sync_aggregate, + eps.executionPayload, verificationFlags, eps.kzg_commitments, execution_requests, + ) diff --git a/beacon_chain/spec/state_transition_block.nim b/beacon_chain/spec/state_transition_block.nim index 5299179c4e..640a044e1a 100644 --- a/beacon_chain/spec/state_transition_block.nim +++ b/beacon_chain/spec/state_transition_block.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} # State transition - block processing as described in # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#block-processing @@ -27,22 +27,27 @@ import chronicles, metrics, ../extras, - ./datatypes/[phase0, altair, bellatrix, deneb], - "."/[beaconstate, eth2_merkleization, helpers, validator, signatures], + ./[beaconstate, eth2_merkleization, forks, helpers, validator, signatures], kzg4844/kzg_abi, kzg4844/kzg from std/algorithm import fill, sorted -from std/sequtils import count, filterIt, mapIt -from ./datatypes/capella import - BeaconState, MAX_WITHDRAWALS_PER_PAYLOAD, SignedBLSToExecutionChange, - Withdrawal -from ./datatypes/electra import PendingPartialWithdrawal +from std/sequtils import count, foldl, filterIt, mapIt export extras, phase0, altair +template payload(body: SomeForkyBeaconBlockBody | SomeForkyBlindedBeaconBlockBody): auto = + # Blinded blocks contain a payload header instead of the full execution + # payload - where relevant, we assume the blinded parts are valid and just + # process the consensus-relevant parts. + when body is SomeForkyBlindedBeaconBlockBody: + body.execution_payload_header + else: + body.execution_payload + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#block-header func process_block_header*( - state: var ForkyBeaconState, blck: SomeForkyBeaconBlock, + state: var ForkyBeaconState, + blck: SomeForkyBeaconBlock | SomeForkyBlindedBeaconBlock, flags: UpdateFlags, cache: var StateCache): Result[void, cstring] = # Verify that the slots match if not (blck.slot == state.slot): @@ -52,7 +57,6 @@ func process_block_header*( if not (blck.slot > state.latest_block_header.slot): return err("process_block_header: block not newer than latest block header") - # Verify that proposer index is the correct index let proposer_index = get_beacon_proposer_index(state, cache).valueOr: return err("process_block_header: proposer missing") @@ -84,7 +88,8 @@ func `xor`[T: array](a, b: T): T = # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#randao proc process_randao( - state: var ForkyBeaconState, body: SomeForkyBeaconBlockBody, + state: var ForkyBeaconState, + body: SomeForkyBeaconBlockBody | SomeForkyBlindedBeaconBlockBody, flags: UpdateFlags, cache: var StateCache): Result[void, cstring] = let proposer_index = get_beacon_proposer_index(state, cache).valueOr: @@ -118,7 +123,8 @@ proc process_randao( # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#eth1-data func process_eth1_data( state: var ForkyBeaconState, - body: SomeForkyBeaconBlockBody): Result[void, cstring] = + body: SomeForkyBeaconBlockBody | SomeForkyBlindedBeaconBlockBody +): Result[void, cstring] = if not state.eth1_data_votes.add body.eth1_data: # Count is reset in process_final_updates, so this should never happen return err("process_eth1_data: no more room for eth1 data") @@ -135,7 +141,7 @@ func is_slashable_validator(validator: Validator, epoch: Epoch): bool = (validator.activation_epoch <= epoch) and (epoch < validator.withdrawable_epoch) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#proposer-slashings +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#proposer-slashings proc check_proposer_slashing*( state: ForkyBeaconState, proposer_slashing: SomeProposerSlashing, flags: UpdateFlags): @@ 
-185,12 +191,31 @@ proc check_proposer_slashing*( check_proposer_slashing(forkyState.data, proposer_slashing, flags) # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#proposer-slashings +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#modified-process_proposer_slashing proc process_proposer_slashing*( cfg: RuntimeConfig, state: var ForkyBeaconState, proposer_slashing: SomeProposerSlashing, flags: UpdateFlags, exit_queue_info: ExitQueueInfo, cache: var StateCache): Result[(Gwei, ExitQueueInfo), cstring] = let proposer_index = ? check_proposer_slashing(state, proposer_slashing, flags) + + # [New in Gloas:EIP7732] + # Remove the BuilderPendingPayment corresponding to + # this proposal if it is still in the 2-epoch window. + when typeof(state).kind >= ConsensusFork.Gloas: + let + slot = proposer_slashing.signed_header_1.message.slot + proposal_epoch = slot.epoch() + current_epoch = get_current_epoch(state) + + if proposal_epoch == current_epoch: + let payment_index = SLOTS_PER_EPOCH + (slot mod SLOTS_PER_EPOCH) + state.builder_pending_payments[payment_index.int] = + BuilderPendingPayment() + elif proposal_epoch == get_previous_epoch(state): + let payment_index = slot mod SLOTS_PER_EPOCH + state.builder_pending_payments[payment_index.int] = + BuilderPendingPayment() slash_validator(cfg, state, proposer_index, exit_queue_info, cache) # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#is_slashable_attestation_data @@ -364,7 +389,8 @@ proc process_deposit*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#new-process_deposit_request func process_deposit_request*( - cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), + cfg: RuntimeConfig, + state: var (electra.BeaconState | fulu.BeaconState | gloas.BeaconState), deposit_request: DepositRequest, flags: UpdateFlags): Result[void, cstring] = # Set deposit request start index @@ -383,7 +409,7 @@ func process_deposit_request*( else: err("process_deposit_request: couldn't add deposit to pending_deposits") -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#voluntary-exits +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#voluntary-exits # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#modified-process_voluntary_exit # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#modified-process_voluntary_exit proc check_voluntary_exit*( @@ -418,9 +444,6 @@ proc check_voluntary_exit*( return err("Exit: not in validator set long enough") when typeof(state).kind >= ConsensusFork.Electra: - if voluntary_exit.validator_index >= state.validators.lenu64: - return err("Exit: validator index out of range") - # Only exit validator if it has no pending withdrawals in the queue if not (get_pending_balance_to_withdraw( state, voluntary_exit.validator_index.ValidatorIndex) == 0.Gwei): @@ -462,7 +485,7 @@ proc process_voluntary_exit*( proc process_bls_to_execution_change*( cfg: RuntimeConfig, state: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState | - fulu.BeaconState), + fulu.BeaconState | gloas.BeaconState), signed_address_change: SignedBLSToExecutionChange): Result[void, cstring] = ? 
check_bls_to_execution_change( cfg.genesisFork, state, signed_address_change, {}) @@ -481,7 +504,8 @@ proc process_bls_to_execution_change*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_withdrawal_request func process_withdrawal_request*( - cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), + cfg: RuntimeConfig, + state: var (electra.BeaconState | fulu.BeaconState | gloas.BeaconState), bucketSortedValidators: BucketSortedValidators, withdrawal_request: WithdrawalRequest, cache: var StateCache) = let @@ -504,7 +528,7 @@ func process_withdrawal_request*( # Verify withdrawal credentials let - has_correct_credential = has_execution_withdrawal_credential(validator) + has_correct_credential = has_execution_withdrawal_credential(type(state).kind, validator) is_correct_source_address = validator.withdrawal_credentials.data.toOpenArray(12, 31) == withdrawal_request.source_address.data @@ -531,9 +555,7 @@ func process_withdrawal_request*( if is_full_exit_request: # Only exit validator if it has no pending withdrawals in the queue if pending_balance_to_withdraw == 0.Gwei: - if initiate_validator_exit(cfg, state, index, default(ExitQueueInfo), - cache).isErr(): - return + discard initiate_validator_exit(cfg, state, index, ExitQueueInfo(), cache) return let @@ -543,7 +565,7 @@ func process_withdrawal_request*( static(MIN_ACTIVATION_BALANCE.Gwei) + pending_balance_to_withdraw # Only allow partial withdrawals with compounding withdrawal credentials - if has_compounding_withdrawal_credential(validator) and + if has_compounding_withdrawal_credential(type(state).kind, validator) and has_sufficient_effective_balance and has_excess_balance: let to_withdraw = min( @@ -565,7 +587,7 @@ func process_withdrawal_request*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#new-is_valid_switch_to_compounding_request func is_valid_switch_to_compounding_request( - state: electra.BeaconState | fulu.BeaconState, + state: electra.BeaconState | fulu.BeaconState | gloas.BeaconState, consolidation_request: ConsolidationRequest, source_validator: Validator): bool = # Switch to compounding requires source and target be equal @@ -596,7 +618,8 @@ func is_valid_switch_to_compounding_request( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/beacon-chain.md#new-process_consolidation_request func process_consolidation_request*( - cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), + cfg: RuntimeConfig, + state: var (electra.BeaconState | fulu.BeaconState | gloas.BeaconState), bucketSortedValidators: BucketSortedValidators, consolidation_request: ConsolidationRequest, cache: var StateCache) = @@ -642,7 +665,7 @@ func process_consolidation_request*( # Verify source withdrawal credentials let has_correct_credential = - has_execution_withdrawal_credential(source_validator[]) + has_execution_withdrawal_credential(type(state).kind, source_validator[]) is_correct_source_address = source_validator.withdrawal_credentials.data.toOpenArray(12, 31) == consolidation_request.source_address.data @@ -650,7 +673,7 @@ func process_consolidation_request*( return # Verify that target has compounding withdrawal credentials - if not has_compounding_withdrawal_credential(target_validator): + if not has_compounding_withdrawal_credential(type(state).kind, target_validator): return # Verify the source and the target are active @@ -684,6 +707,30 @@ func 
process_consolidation_request*( discard state.pending_consolidations.add(PendingConsolidation( source_index: source_index.uint64, target_index: target_index.uint64)) +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#payload-attestations +proc process_payload_attestation*( + state: var gloas.BeaconState, payload_attestation: PayloadAttestation, + cache: var StateCache): Result[void, cstring] = + # Check that the attestation is for the parent beacon block + template data: untyped = payload_attestation.data + + if data.beacon_block_root != state.latest_block_header.parent_root: + return err("process_payload_attestation: beacon block root mismatch") + + # Check that the attestation is for the previous slot + if data.slot + 1 != state.slot: + return err("process_payload_attestation: slot mismatch") + + # Verify signature + let indexed_payload_attestation = get_indexed_payload_attestation( + state, data.slot, payload_attestation, cache + ) + + if not is_valid_indexed_payload_attestation(state, indexed_payload_attestation): + return err("process_payload_attestation: invalid signature") + + ok() + type # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.5.0#/Rewards/getBlockRewards BlockRewards* = object @@ -694,14 +741,18 @@ type # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#operations # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#modified-process_operations -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/electra/beacon-chain.md#modified-process_operations +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/electra/beacon-chain.md#modified-process_operations +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#modified-process_operations proc process_operations( cfg: RuntimeConfig, state: var ForkyBeaconState, - body: SomeForkyBeaconBlockBody, base_reward_per_increment: Gwei, + body: SomeForkyBeaconBlockBody | SomeForkyBlindedBeaconBlockBody, + base_reward_per_increment: Gwei, flags: UpdateFlags, cache: var StateCache): Result[BlockRewards, cstring] = # Verify that outstanding deposits are processed up to the maximum number of # deposits - when typeof(body).kind >= ConsensusFork.Electra: + const consensusFork = typeof(state).kind + + when consensusFork >= ConsensusFork.Electra: # Disable former deposit mechanism once all prior deposits are processed let eth1_deposit_index_limit = @@ -740,9 +791,8 @@ proc process_operations( else: default(ExitQueueInfo) # not used bsv_use = - when typeof(body).kind >= ConsensusFork.Electra: - body.deposits.len + body.execution_requests.deposits.len + - body.execution_requests.withdrawals.len + + when consensusFork in ConsensusFork.Electra .. ConsensusFork.Fulu: + body.deposits.len + body.execution_requests.withdrawals.len + body.execution_requests.consolidations.len > 0 else: body.deposits.len > 0 @@ -770,11 +820,12 @@ proc process_operations( for op in body.voluntary_exits: exit_queue_info = ? process_voluntary_exit( cfg, state, op, flags, exit_queue_info, cache) - when typeof(body).kind >= ConsensusFork.Capella: + + when consensusFork >= ConsensusFork.Capella: for op in body.bls_to_execution_changes: ? process_bls_to_execution_change(cfg, state, op) - when typeof(body).kind >= ConsensusFork.Electra: + when consensusFork in ConsensusFork.Electra .. ConsensusFork.Fulu: for op in body.execution_requests.deposits: ? 
process_deposit_request(cfg, state, op, {}) for op in body.execution_requests.withdrawals: @@ -803,11 +854,10 @@ func get_participant_reward*(total_active_balance: Gwei): Gwei = func get_proposer_reward*(participant_reward: Gwei): Gwei = participant_reward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#sync-aggregate-processing proc process_sync_aggregate*( state: var (altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | - fulu.BeaconState), + fulu.BeaconState | gloas.BeaconState), sync_aggregate: SomeSyncAggregate, total_active_balance: Gwei, flags: UpdateFlags, cache: var StateCache): Result[Gwei, cstring] = if strictVerification in flags and state.slot > 1.Slot: @@ -891,21 +941,7 @@ proc process_execution_payload*( return err("process_execution_payload: execution payload invalid") # Cache execution payload header - state.latest_execution_payload_header = bellatrix.ExecutionPayloadHeader( - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - base_fee_per_gas: payload.base_fee_per_gas, - block_hash: payload.block_hash, - extra_data: payload.extra_data, - transactions_root: hash_tree_root(payload.transactions)) + state.latest_execution_payload_header = payload.toExecutionPayloadHeader() ok() @@ -987,24 +1023,7 @@ proc process_execution_payload*( return err("process_execution_payload: execution payload invalid") # Cache execution payload header - state.latest_execution_payload_header = deneb.ExecutionPayloadHeader( - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - base_fee_per_gas: payload.base_fee_per_gas, - block_hash: payload.block_hash, - extra_data: payload.extra_data, - transactions_root: hash_tree_root(payload.transactions), - withdrawals_root: hash_tree_root(payload.withdrawals), - blob_gas_used: payload.blob_gas_used, # [New in Deneb] - excess_blob_gas: payload.excess_blob_gas) # [New in Deneb] + state.latest_execution_payload_header = payload.toExecutionPayloadHeader() # [New in Deneb] ok() @@ -1017,9 +1036,9 @@ type SomeElectraBeaconBlockBody = # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-process_execution_payload proc process_execution_payload*( cfg: RuntimeConfig, state: var electra.BeaconState, - body: SomeElectraBeaconBlockBody, - notify_new_payload: electra.ExecutePayload): Result[void, cstring] = - template payload: auto = body.execution_payload + body: SomeElectraBeaconBlockBody | electra_mev.SigVerifiedBlindedBeaconBlockBody, + notify_new_payload: deneb.ExecutePayload): Result[void, cstring] = + template payload: auto = body.payload # Verify consistency of the parent hash with respect to the previous # execution payload header @@ -1039,43 +1058,31 @@ proc process_execution_payload*( if not (lenu64(body.blob_kzg_commitments) <= 
cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): return err("process_execution_payload: too many KZG commitments") - # Verify the execution payload is valid - if not notify_new_payload(payload): - return err("process_execution_payload: execution payload invalid") + when payload is ForkyExecutionPayloadHeader: + # Assume valid, when blinded + state.latest_execution_payload_header = payload + else: + # Verify the execution payload is valid + if not notify_new_payload(payload): + return err("process_execution_payload: execution payload invalid") - # Cache execution payload header - state.latest_execution_payload_header = electra.ExecutionPayloadHeader( - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - base_fee_per_gas: payload.base_fee_per_gas, - block_hash: payload.block_hash, - extra_data: payload.extra_data, - transactions_root: hash_tree_root(payload.transactions), - withdrawals_root: hash_tree_root(payload.withdrawals), - blob_gas_used: payload.blob_gas_used, - excess_blob_gas: payload.excess_blob_gas) + # Cache execution payload header + state.latest_execution_payload_header = payload.toExecutionPayloadHeader() ok() +# TODO workaround for https://github.com/nim-lang/Nim/issues/18095 # copy of datatypes/fulu.nim type SomeFuluBeaconBlockBody = fulu.BeaconBlockBody | fulu.SigVerifiedBeaconBlockBody | fulu.TrustedBeaconBlockBody -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-process_execution_payload +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/fulu/beacon-chain.md#modified-process_execution_payload proc process_execution_payload*( cfg: RuntimeConfig, state: var fulu.BeaconState, - body: SomeFuluBeaconBlockBody, - notify_new_payload: fulu.ExecutePayload): Result[void, cstring] = - template payload: auto = body.execution_payload + body: SomeFuluBeaconBlockBody | fulu_mev.SigVerifiedBlindedBeaconBlockBody, + notify_new_payload: deneb.ExecutePayload): Result[void, cstring] = + template payload: auto = body.payload() # Verify consistency of the parent hash with respect to the previous # execution payload header @@ -1091,45 +1098,238 @@ proc process_execution_payload*( if not (payload.timestamp == compute_timestamp_at_slot(state, state.slot)): return err("process_execution_payload: invalid timestamp") - # [New in Deneb] Verify commitments are under limit - if not (lenu64(body.blob_kzg_commitments) <= cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + # Verify commitments are under limit + let blob_params = + cfg.get_blob_parameters(get_current_epoch(state)) + if not (lenu64(body.blob_kzg_commitments) <= blob_params.MAX_BLOBS_PER_BLOCK): + return err("process_execution_payload: too many KZG commitments") + + when payload is ForkyExecutionPayloadHeader: + # Assume valid, when blinded + state.latest_execution_payload_header = payload + else: + # Verify the execution payload is valid + if not notify_new_payload(payload): + return err("process_execution_payload: execution payload invalid") + + # Cache execution payload header + state.latest_execution_payload_header = payload.toExecutionPayloadHeader() + + ok() + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#new-process_execution_payload +proc 
process_execution_payload*( + cfg: RuntimeConfig, state: var gloas.HashedBeaconState, + signed_envelope: SignedExecutionPayloadEnvelope, + notify_new_payload: deneb.ExecutePayload, cache: var StateCache, + verify: bool = true): Result[void, cstring] = + template envelope: untyped = signed_envelope.message + template payload: untyped = envelope.payload + + # Verify signature + if verify: + let + builder_index = ValidatorIndex.init(envelope.builder_index).valueOr: + return err("process_execution_payload: invalid builder index") + builder_pubkey = state.data.validators.item(builder_index).pubkey + if not verify_execution_payload_envelope_signature( + state.data.fork, state.data.genesis_validators_root, signed_envelope, + state.data, builder_pubkey, signed_envelope.signature): + return err("process_execution_payload: invalid envelope signature") + + # Cache latest block header state root + if state.data.latest_block_header.state_root.isZero: + state.data.latest_block_header.state_root = state.root + + # Verify consistency with the beacon block + if envelope.beacon_block_root != + hash_tree_root(state.data.latest_block_header): + return err("process_execution_payload: beacon block root mismatch") + if envelope.slot != state.data.slot: + return err("process_execution_payload: slot mismatch") + + # Verify consistency with the committed bid + template committed_bid: untyped = state.data.latest_execution_payload_bid + if envelope.builder_index != committed_bid.builder_index: + return err("process_execution_payload: builder index mismatch") + if committed_bid.blob_kzg_commitments_root != + hash_tree_root(envelope.blob_kzg_commitments): + return err("process_execution_payload: blob KZG commitments root mismatch") + + # Verify the withdrawals root + if hash_tree_root(payload.withdrawals) != state.data.latest_withdrawals_root: + return err("process_execution_payload: withdrawals root mismatch") + + # Verify the gas_limit + if committed_bid.gas_limit != payload.gas_limit: + return err("process_execution_payload: gas limit mismatch") + + # Verify the block hash + if committed_bid.block_hash != payload.block_hash: + return err("process_execution_payload: block hash mismatch") + + # Verify consistency of the parent hash with respect to the previous execution payload + if payload.parent_hash != state.data.latest_block_hash: + return err("process_execution_payload: parent hash mismatch") + + # Verify prev_randao + if payload.prev_randao != + get_randao_mix(state.data, get_current_epoch(state.data)): + return err("process_execution_payload: prev_randao mismatch") + + # Verify timestamp + if payload.timestamp != compute_timestamp_at_slot(state.data, state.data.slot): + return err("process_execution_payload: timestamp mismatch") + + # Verify commitments are under limit + let blob_params = cfg.get_blob_parameters(get_current_epoch(state.data)) + if lenu64(envelope.blob_kzg_commitments) > blob_params.MAX_BLOBS_PER_BLOCK: return err("process_execution_payload: too many KZG commitments") # Verify the execution payload is valid if not notify_new_payload(payload): return err("process_execution_payload: execution payload invalid") - # Cache execution payload header - state.latest_execution_payload_header = fulu.ExecutionPayloadHeader( - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: 
payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - base_fee_per_gas: payload.base_fee_per_gas, - block_hash: payload.block_hash, - extra_data: payload.extra_data, - transactions_root: hash_tree_root(payload.transactions), - withdrawals_root: hash_tree_root(payload.withdrawals), - blob_gas_used: payload.blob_gas_used, - excess_blob_gas: payload.excess_blob_gas) + let bsv = + if envelope.execution_requests.withdrawals.len + + envelope.execution_requests.consolidations.len > 0: + sortValidatorBuckets(state.data.validators.asSeq) + else: + nil + for op in envelope.execution_requests.deposits: + ? process_deposit_request(cfg, state.data, op, {}) + for op in envelope.execution_requests.withdrawals: + process_withdrawal_request(cfg, state.data, bsv[], op, cache) + for op in envelope.execution_requests.consolidations: + process_consolidation_request(cfg, state.data, bsv[], op, cache) + + # Queue the builder payment + let payment_index = (SLOTS_PER_EPOCH + (state.data.slot mod SLOTS_PER_EPOCH)).int + var payment = state.data.builder_pending_payments.mitem(payment_index) + let amount = payment.withdrawal.amount + if amount > 0.Gwei: + let exit_queue_epoch = + compute_exit_epoch_and_update_churn(cfg, state.data, amount, cache) + payment.withdrawal.withdrawable_epoch = + exit_queue_epoch + cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY + + if not state.data.builder_pending_withdrawals.add(payment.withdrawal): + return err("process_execution_payload: couldn't add builder withdrawal") + + state.data.builder_pending_payments[payment_index] = BuilderPendingPayment() + + # Cache the execution payload hash + state.data.execution_payload_availability[ + state.data.slot mod SLOTS_PER_HISTORICAL_ROOT] = true + state.data.latest_block_hash = payload.block_hash + + # Verify the state root + # TODO: Future optimization could cache intermediate Merkle tree nodes in the + # BeaconState and track which fields changed, allowing selective branch + # rebuilding instead of full recomputation. 
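  # Sketch of the builder-payment window indexing used above (the helper name
  # `builderPaymentIndex` is illustrative, not part of this change):
  # `builder_pending_payments` holds 2 * SLOTS_PER_EPOCH entries, with
  # previous-epoch slots mapped to [0, SLOTS_PER_EPOCH) and current-epoch
  # slots mapped to [SLOTS_PER_EPOCH, 2 * SLOTS_PER_EPOCH).
  #
  #   func builderPaymentIndex(state_slot, payment_slot: Slot): int =
  #     if payment_slot.epoch == state_slot.epoch:
  #       (SLOTS_PER_EPOCH + (payment_slot mod SLOTS_PER_EPOCH)).int
  #     else: # previous epoch
  #       (payment_slot mod SLOTS_PER_EPOCH).int
  #
  # With SLOTS_PER_EPOCH = 32, a payment queued at slot 70 (epoch 2) sits at
  # index 32 + 6 = 38 while epoch 2 is current, and is shifted down to index 6
  # when the window rotates at the next epoch transition.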
+ if verify: + state.root = hash_tree_root(state.data) + if envelope.state_root != state.root: + return err("process_execution_payload: state root mismatch") + + ok() +# copy of datatypes/gloas.nim +type SomeGloasBeaconBlock = + gloas.BeaconBlock | gloas.SigVerifiedBeaconBlock | gloas.TrustedBeaconBlock + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/beacon-chain.md#new-process_execution_payload_bid +proc process_execution_payload_bid*( + cfg: RuntimeConfig, state: var gloas.BeaconState, + blck: SomeGloasBeaconBlock): Result[void, cstring] = + template signed_bid: untyped = blck.body.signed_execution_payload_bid + template bid: untyped = signed_bid.message + let + builder_index = ValidatorIndex.init(bid.builder_index).valueOr: + return err("process_execution_payload_bid: invalid builder index") + builder = addr state.validators.item(builder_index) + amount = bid.value + + # For self-builds, amount must be zero regardless of withdrawal credential prefix + if builder_index == blck.proposer_index: + if amount != 0.Gwei: + return err("process_execution_payload_bid: self-build must have zero amount") + if signed_bid.signature != ValidatorSig.infinity(): + return err("process_execution_payload_bid: self-build signature must be infinity") + else: + # Non-self builds require builder withdrawal credential + if not has_builder_withdrawal_credential(builder[]): + return err("process_execution_payload_bid: builder missing withdrawal credential") + # Verify the bid signature for non-self builds + if not verify_execution_payload_bid_signature( + state.fork, state.genesis_validators_root, signed_bid, + state, builder[].pubkey, signed_bid.signature): + return err("payload_bid: invalid bid signature") + + if not is_active_validator(builder[], get_current_epoch(state)): + return err("process_execution_payload_bid: builder not active") + if builder[].slashed: + return err("process_execution_payload_bid: builder is slashed") + + # Check that the builder is active, non-slashed, and has funds to cover the bid + let + pending_payments = block: + var total: Gwei + for payment in state.builder_pending_payments: + if payment.withdrawal.builder_index == builder_index: + total += payment.withdrawal.amount + total + pending_withdrawals = block: + var total: Gwei + for withdrawal in state.builder_pending_withdrawals: + if withdrawal.builder_index == builder_index: + total += withdrawal.amount + total + required_balance = + amount + pending_payments + pending_withdrawals + + static(MIN_ACTIVATION_BALANCE.Gwei) + + if amount != 0.Gwei and + state.balances.item(builder_index) < required_balance: + return err("process_execution_payload_bid: insufficient builder balance") + + # Verify that the bid is for the current slot + if bid.slot != blck.slot: + return err("process_execution_payload_bid: bid slot mismatch") + + # Verify that the bid is for the right parent block + if bid.parent_block_hash != state.latest_block_hash: + return err("process_execution_payload_bid: parent block hash mismatch") + if bid.parent_block_root != blck.parent_root: + return err("process_execution_payload_bid: parent block root mismatch") + + # Record the pending payment + let + pending_payment = BuilderPendingPayment( + weight: 0.Gwei, + withdrawal: BuilderPendingWithdrawal( + fee_recipient: bid.fee_recipient, + amount: amount, + builder_index: builder_index.uint64 + ) + ) + state.builder_pending_payments.mitem( + SLOTS_PER_EPOCH + (bid.slot mod SLOTS_PER_EPOCH)) = pending_payment + + # Cache the signed execution 
payload bid + state.latest_execution_payload_bid = bid ok() # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#new-process_withdrawals # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-process_withdrawals func process_withdrawals*( state: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState | - fulu.BeaconState), - payload: capella.ExecutionPayload | deneb.ExecutionPayload | - electra.ExecutionPayload | fulu.ExecutionPayload): + fulu.BeaconState | gloas.BeaconState), + payload: ForkyExecutionPayloadOrHeader): Result[void, cstring] = - when typeof(state).kind >= ConsensusFork.Electra: + const consensusFork = typeof(state).kind + + when consensusFork >= ConsensusFork.Electra: let (expected_withdrawals, partial_withdrawals_count) = get_expected_withdrawals_with_partial_count(state) @@ -1141,17 +1341,20 @@ func process_withdrawals*( else: let expected_withdrawals = get_expected_withdrawals(state) - if not (len(payload.withdrawals) == len(expected_withdrawals)): - return err("process_withdrawals: different numbers of payload and expected withdrawals") + when payload is ForkyExecutionPayloadHeader: + if not (payload.withdrawals_root == hash_tree_root( + List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init( + expected_withdrawals))): + return err("process_withdrawals: withdrawals_root does not match expected withdrawals") + else: + if payload.withdrawals.asSeq() != expected_withdrawals: + return err("process_withdrawals: payload withdrawals don't match expected withdrawals") - for i in 0 ..< len(expected_withdrawals): - if expected_withdrawals[i] != payload.withdrawals[i]: - return err("process_withdrawals: mismatched expected and payload withdrawal") - let validator_index = - ValidatorIndex.init(expected_withdrawals[i].validator_index).valueOr: + for withdrawal in expected_withdrawals: + let validator_index = ValidatorIndex.init(withdrawal.validator_index).valueOr: return err("process_withdrawals: invalid validator index") decrease_balance( - state, validator_index, expected_withdrawals[i].amount) + state, validator_index, withdrawal.amount) # Update the next withdrawal index if this block contained withdrawals if len(expected_withdrawals) != 0: @@ -1176,15 +1379,83 @@ func process_withdrawals*( ok() +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#modified-process_withdrawals +func process_withdrawals*(state: var gloas.BeaconState): + Result[void, cstring] = + # return early if the parent block was empty + if not is_parent_block_full(state): + return ok() + + let (expected_withdrawals, processed_builder_withdrawals_count, processed_partial_withdrawals_count) = + get_expected_withdrawals(state) + + let withdrawals_list = + List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(expected_withdrawals) + state.latest_withdrawals_root = hash_tree_root(withdrawals_list) + + for withdrawal in expected_withdrawals: + let validator_index = ValidatorIndex.init(withdrawal.validator_index).valueOr: + return err("process_withdrawals: invalid validator index") + decrease_balance(state, validator_index, withdrawal.amount) + + # Update the pending builder withdrawals + var new_builder_withdrawals: seq[BuilderPendingWithdrawal] + + let processed_count = min( + processed_builder_withdrawals_count, + state.builder_pending_withdrawals.lenu64).int + + for i in 0 ..< processed_count: + let withdrawal = state.builder_pending_withdrawals.item(i) + if 
not is_builder_payment_withdrawable(state, withdrawal): + new_builder_withdrawals.add(withdrawal) + + for i in processed_count ..< state.builder_pending_withdrawals.len: + new_builder_withdrawals.add( + state.builder_pending_withdrawals.item(i)) + + state.builder_pending_withdrawals = + HashList[BuilderPendingWithdrawal, Limit BUILDER_PENDING_WITHDRAWALS_LIMIT] + .init(new_builder_withdrawals) + + # Update pending partial withdrawals + state.pending_partial_withdrawals = + HashList[PendingPartialWithdrawal, Limit PENDING_PARTIAL_WITHDRAWALS_LIMIT].init( + state.pending_partial_withdrawals.asSeq[processed_partial_withdrawals_count .. ^1]) + + # Update the next withdrawal index if this block contained withdrawals + if len(expected_withdrawals) != 0: + let latest_withdrawal = expected_withdrawals[^1] + state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1) + + # Update the next validator index to start the next withdrawal sweep + if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD: + # Next sweep starts after the latest withdrawal's validator index + let next_validator_index = + (expected_withdrawals[^1].validator_index + 1) mod + lenu64(state.validators) + state.next_withdrawal_validator_index = next_validator_index + else: + # Advance sweep by the max length of the sweep if there was not a full set of withdrawals + let + next_index = + state.next_withdrawal_validator_index + + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP + next_validator_index = next_index mod lenu64(state.validators) + state.next_withdrawal_validator_index = next_validator_index + + ok() + # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#kzg_commitment_to_versioned_hash func kzg_commitment_to_versioned_hash*( - kzg_commitment: KzgCommitment): VersionedHash = - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#blob + kzg_commitment: KzgCommitment): VersionedHash {.noinit.} = + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/deneb/beacon-chain.md#blob const VERSIONED_HASH_VERSION_KZG = 0x01'u8 - var res: VersionedHash - res[0] = VERSIONED_HASH_VERSION_KZG - res[1 .. 31] = eth2digest(kzg_commitment.bytes).data.toOpenArray(1, 31) + var res {.noinit.}: VersionedHash + static: assert res.data.len == 32 + res.data[0] = VERSIONED_HASH_VERSION_KZG + res.data[1 .. 31] = eth2digest(kzg_commitment.bytes).data.toOpenArray(1, 31) res proc validate_blobs*( @@ -1354,7 +1625,8 @@ type SomeElectraBlock = electra.BeaconBlock | electra.SigVerifiedBeaconBlock | electra.TrustedBeaconBlock proc process_block*( cfg: RuntimeConfig, - state: var electra.BeaconState, blck: SomeElectraBlock, + state: var electra.BeaconState, + blck: SomeElectraBlock | electra_mev.SigVerifiedBlindedBeaconBlock, flags: UpdateFlags, cache: var StateCache): Result[BlockRewards, cstring] = ## When there's a new block, we need to verify that the block is sane and ## update the state accordingly - the state is left in an unknown state when @@ -1365,10 +1637,10 @@ proc process_block*( # Consensus specs v1.4.0 unconditionally assume is_execution_enabled is # true, but intentionally keep such a check. if is_execution_enabled(state, blck.body): - ? process_withdrawals(state, blck.body.execution_payload) + ? process_withdrawals(state, blck.body.payload) ? process_execution_payload( cfg, state, blck.body, - func(_: electra.ExecutionPayload): bool = true) + func(_: deneb.ExecutionPayload): bool = true) ? process_randao(state, blck.body, flags, cache) ? 
process_eth1_data(state, blck.body) @@ -1387,7 +1659,8 @@ type SomeFuluBlock = fulu.BeaconBlock | fulu.SigVerifiedBeaconBlock | fulu.TrustedBeaconBlock proc process_block*( cfg: RuntimeConfig, - state: var fulu.BeaconState, blck: SomeFuluBlock, + state: var fulu.BeaconState, + blck: SomeFuluBlock | fulu_mev.SigVerifiedBlindedBeaconBlock, flags: UpdateFlags, cache: var StateCache): Result[BlockRewards, cstring] = ## When there's a new block, we need to verify that the block is sane and ## update the state accordingly - the state is left in an unknown state when @@ -1398,10 +1671,41 @@ proc process_block*( # Consensus specs v1.4.0 unconditionally assume is_execution_enabled is # true, but intentionally keep such a check. if is_execution_enabled(state, blck.body): - ? process_withdrawals(state, blck.body.execution_payload) + ? process_withdrawals(state, blck.body.payload) + ? process_execution_payload( cfg, state, blck.body, - func(_: fulu.ExecutionPayload): bool = true) + func(_: deneb.ExecutionPayload): bool = true) + ? process_randao(state, blck.body, flags, cache) + ? process_eth1_data(state, blck.body) + + let + total_active_balance = get_total_active_balance(state, cache) + base_reward_per_increment = + get_base_reward_per_increment(total_active_balance) + var operations_rewards = ? process_operations( + cfg, state, blck.body, base_reward_per_increment, flags, cache) + operations_rewards.sync_aggregate = ? process_sync_aggregate( + state, blck.body.sync_aggregate, total_active_balance, flags, cache) + + ok(operations_rewards) + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#block-processing +debugGloasComment "readd gloas_mev block and, well the rest too" +type SomeGloasBlock = + gloas.BeaconBlock | gloas.SigVerifiedBeaconBlock | gloas.TrustedBeaconBlock +proc process_block*( + cfg: RuntimeConfig, + state: var gloas.BeaconState, + blck: SomeGloasBlock, + flags: UpdateFlags, cache: var StateCache): Result[BlockRewards, cstring] = + ## When there's a new block, we need to verify that the block is sane and + ## update the state accordingly - the state is left in an unknown state when + ## block application fails (!) + + ? process_block_header(state, blck, flags, cache) + ? process_withdrawals(state) + ? process_execution_payload_bid(cfg, state, blck) ? process_randao(state, blck.body, flags, cache) ? process_eth1_data(state, blck.body) @@ -1414,4 +1718,4 @@ proc process_block*( operations_rewards.sync_aggregate = ? process_sync_aggregate( state, blck.body.sync_aggregate, total_active_balance, flags, cache) - ok(operations_rewards) \ No newline at end of file + ok(operations_rewards) diff --git a/beacon_chain/spec/state_transition_epoch.nim b/beacon_chain/spec/state_transition_epoch.nim index a8da3ab30a..3c52d3ac1d 100644 --- a/beacon_chain/spec/state_transition_epoch.nim +++ b/beacon_chain/spec/state_transition_epoch.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
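Before moving on to epoch processing: under Gloas (EIP-7732) the execution payload is no longer part of the beacon block, so the Gloas `process_block` above consumes only the signed execution payload bid, while the payload itself is applied later through the envelope-based `process_execution_payload`. A minimal caller-side sketch of that two-phase flow follows; the proc name `applyGloasBlockAndEnvelope` and the always-true `notify_new_payload` callback are assumptions for illustration, and state-root bookkeeping plus any slot processing between the two phases is elided.

proc applyGloasBlockAndEnvelope(
    cfg: RuntimeConfig, state: var gloas.HashedBeaconState,
    blck: gloas.BeaconBlock,
    signedEnvelope: SignedExecutionPayloadEnvelope,
    cache: var StateCache): Result[void, cstring] =
  # Phase 1: the consensus block, carrying only the signed bid.
  let rewards = ? process_block(cfg, state.data, blck, {}, cache)
  discard rewards # reward accounting elided in this sketch
  # Phase 2: once the builder reveals the payload, apply the envelope.
  ? process_execution_payload(
    cfg, state, signedEnvelope,
    func(_: deneb.ExecutionPayload): bool = true, # assume the EL said VALID
    cache)
  ok()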
-{.push raises: [].} +{.push raises: [], gcsafe.} # State transition - epoch processing, as described in # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#epoch-processing @@ -29,6 +29,7 @@ import from std/math import sum, `^` from stew/bitops2 import setBit +from stew/staticfor import staticFor from ./datatypes/capella import BeaconState, HistoricalSummary, Withdrawal, WithdrawalIndex @@ -177,7 +178,8 @@ from ./datatypes/deneb import BeaconState # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#get_unslashed_participating_indices func get_unslashed_participating_balances*( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState): + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState): UnslashedParticipatingBalances = let previous_epoch = get_previous_epoch(state) @@ -229,7 +231,8 @@ func get_unslashed_participating_balances*( func is_unslashed_participating_index( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState, + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState, flag_index: TimelyFlag, epoch: Epoch, validator_index: ValidatorIndex): bool = doAssert epoch in [get_previous_epoch(state), get_current_epoch(state)] # TODO hoist this conditional @@ -377,7 +380,7 @@ proc weigh_justification_and_finalization( res -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#justification-and-finalization +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#justification-and-finalization proc process_justification_and_finalization*( state: var phase0.BeaconState, balances: TotalBalances, flags: UpdateFlags = {}) = @@ -423,7 +426,7 @@ proc compute_unrealized_finality*( proc process_justification_and_finalization*( state: var (altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | - fulu.BeaconState), + fulu.BeaconState | gloas.BeaconState), balances: UnslashedParticipatingBalances, flags: UpdateFlags = {}) = # Initial FFG checkpoint values have a `0x00` stub for `root`. 
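  # The participation balances gathered above feed the 2/3 supermajority check
  # inside weigh_justification_and_finalization: for example, with a total
  # active balance of 30,000,000 ETH, the previous epoch's target checkpoint is
  # justified once unslashed TIMELY_TARGET participation reaches 20,000,000 ETH,
  # i.e. when `previous_target_balance * 3 >= total_active_balance * 2`.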
@@ -445,7 +448,8 @@ proc process_justification_and_finalization*( proc compute_unrealized_finality*( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState): FinalityCheckpoints = + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState): FinalityCheckpoints = if get_current_epoch(state) <= GENESIS_EPOCH + 1: return FinalityCheckpoints( justified: state.current_justified_checkpoint, @@ -638,10 +642,11 @@ func get_attestation_deltas( info.validators[proposer_index].delta.add( proposer_delta.get()[1]) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#get_base_reward +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/beacon-chain.md#get_base_reward func get_base_reward_increment*( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState, + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState, index: ValidatorIndex, base_reward_per_increment: Gwei): Gwei = ## Return the base reward for the validator defined by ``index`` with respect ## to the current ``state``. @@ -653,7 +658,8 @@ func get_base_reward_increment*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#get_flag_index_deltas func get_flag_index_reward*( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState, + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState, base_reward: Gwei, active_increments: uint64, unslashed_participating_increments: uint64, @@ -682,7 +688,8 @@ func get_active_increments*( # Combines get_flag_index_deltas() and get_inactivity_penalty_deltas() template get_flag_and_inactivity_delta( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState, + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState, base_reward_per_increment: Gwei, finality_delay: uint64, previous_epoch: Epoch, active_increments: uint64, penalty_denominator: uint64, @@ -737,7 +744,8 @@ template get_flag_and_inactivity_delta( iterator get_flag_and_inactivity_deltas*( cfg: RuntimeConfig, state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState, + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState, base_reward_per_increment: Gwei, info: var altair.EpochInfo, finality_delay: uint64): (ValidatorIndex, Gwei, Gwei, Gwei, Gwei, Gwei, Gwei) = @@ -781,45 +789,6 @@ iterator get_flag_and_inactivity_deltas*( active_increments, penalty_denominator, epoch_participation, participating_increments, info, vidx, state.inactivity_scores[vidx]) -func get_flag_and_inactivity_delta_for_validator( - cfg: RuntimeConfig, - state: deneb.BeaconState | electra.BeaconState | fulu.BeaconState, - base_reward_per_increment: Gwei, info: var altair.EpochInfo, - finality_delay: uint64, vidx: ValidatorIndex, inactivity_score: Gwei): - Opt[(ValidatorIndex, Gwei, Gwei, Gwei, Gwei, Gwei, Gwei)] = - ## Return the deltas for a given ``flag_index`` by scanning through the - ## participation flags. 
- const INACTIVITY_PENALTY_QUOTIENT = - when state is altair.BeaconState: - INACTIVITY_PENALTY_QUOTIENT_ALTAIR - else: - INACTIVITY_PENALTY_QUOTIENT_BELLATRIX - - static: doAssert ord(high(TimelyFlag)) == 2 - - let - previous_epoch = get_previous_epoch(state) - active_increments = get_active_increments(info) - penalty_denominator = - cfg.INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT - epoch_participation = - if previous_epoch == get_current_epoch(state): - unsafeAddr state.current_epoch_participation - else: - unsafeAddr state.previous_epoch_participation - participating_increments = [ - get_unslashed_participating_increment(info, TIMELY_SOURCE_FLAG_INDEX), - get_unslashed_participating_increment(info, TIMELY_TARGET_FLAG_INDEX), - get_unslashed_participating_increment(info, TIMELY_HEAD_FLAG_INDEX)] - - if not is_eligible_validator(info.validators[vidx]): - return Opt.none((ValidatorIndex, Gwei, Gwei, Gwei, Gwei, Gwei, Gwei)) - - Opt.some get_flag_and_inactivity_delta( - state, base_reward_per_increment, finality_delay, previous_epoch, - active_increments, penalty_denominator, epoch_participation, - participating_increments, info, vidx, inactivity_score.uint64) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#rewards-and-penalties-1 func process_rewards_and_penalties*( state: var phase0.BeaconState, info: var phase0.EpochInfo) = @@ -848,7 +817,7 @@ func process_rewards_and_penalties*( cfg: RuntimeConfig, state: var (altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | - fulu.BeaconState), + fulu.BeaconState | gloas.BeaconState), info: var altair.EpochInfo) = if get_current_epoch(state) == GENESIS_EPOCH: return @@ -951,7 +920,8 @@ func process_registry_updates*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#modified-process_registry_updates func process_registry_updates*( - cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), + cfg: RuntimeConfig, + state: var (electra.BeaconState | fulu.BeaconState | gloas.BeaconState), cache: var StateCache): Result[void, cstring] = # Process activation eligibility and ejections for index in 0 ..< state.validators.len: @@ -989,7 +959,7 @@ func get_adjusted_total_slashing_balance*( PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR elif state is bellatrix.BeaconState or state is capella.BeaconState or state is deneb.BeaconState or state is electra.BeaconState or - state is fulu.BeaconState: + state is fulu.BeaconState or state is gloas.BeaconState: PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX else: {.fatal: "process_slashings: incorrect BeaconState type".} @@ -1017,15 +987,8 @@ func get_slashing_penalty*( let penalty_numerator = validator.effective_balance div increment * adjusted_total_slashing_balance penalty_numerator div total_balance * increment - elif consensusFork == ConsensusFork.Electra: - let - effective_balance_increments = validator.effective_balance div increment - penalty_per_effective_balance_increment = - adjusted_total_slashing_balance div (total_balance div increment) - - # [Modified in Electra:EIP7251] - penalty_per_effective_balance_increment * effective_balance_increments - elif consensusFork == ConsensusFork.Fulu: + elif consensusFork in + [ConsensusFork.Electra, ConsensusFork.Fulu, ConsensusFork.Gloas]: let effective_balance_increments = validator.effective_balance div increment penalty_per_effective_balance_increment = @@ -1036,27 +999,6 @@ func get_slashing_penalty*( 
else: static: doAssert false -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#slashings -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/bellatrix/beacon-chain.md#slashings -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#modified-process_slashings -func get_slashing( - state: ForkyBeaconState, total_balance: Gwei, vidx: ValidatorIndex): Gwei = - # For efficiency reasons, it doesn't make sense to have process_slashings use - # this per-validator index version, but keep them parallel otherwise. - let - epoch = get_current_epoch(state) - adjusted_total_slashing_balance = get_adjusted_total_slashing_balance( - state, total_balance) - - let validator = unsafeAddr state.validators.item(vidx) - if slashing_penalty_applies(validator[], epoch): - get_slashing_penalty( - typeof(state).kind, validator[], adjusted_total_slashing_balance, - total_balance) - else: - 0.Gwei - func process_slashings*(state: var ForkyBeaconState, total_balance: Gwei) = let epoch = get_current_epoch(state) @@ -1144,7 +1086,8 @@ func process_participation_record_updates*(state: var phase0.BeaconState) = func process_participation_flag_updates*( state: var (altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | - electra.BeaconState | fulu.BeaconState)) = + electra.BeaconState | fulu.BeaconState | + gloas.BeaconState)) = state.previous_epoch_participation = state.current_epoch_participation const zero = 0.ParticipationFlags @@ -1159,7 +1102,8 @@ func process_participation_flag_updates*( func process_sync_committee_updates*( state: var (altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | - electra.BeaconState | fulu.BeaconState)) = + electra.BeaconState | fulu.BeaconState | + gloas.BeaconState)) = let next_epoch = get_current_epoch(state) + 1 if next_epoch.is_sync_committee_period(): state.current_sync_committee = state.next_sync_committee @@ -1169,7 +1113,8 @@ func process_sync_committee_updates*( template compute_inactivity_update( cfg: RuntimeConfig, state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState, + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState, info: altair.EpochInfo, pre_inactivity_score: uint64): uint64 = let previous_epoch = get_previous_epoch(state) # get_eligible_validator_indices() @@ -1195,7 +1140,7 @@ func process_inactivity_updates*( cfg: RuntimeConfig, state: var (altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | - fulu.BeaconState), + fulu.BeaconState | gloas.BeaconState), info: altair.EpochInfo) = # Score updates based on previous epoch participation, skip genesis epoch if get_current_epoch(state) == GENESIS_EPOCH: @@ -1222,7 +1167,7 @@ func process_inactivity_updates*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#historical-summaries-updates func process_historical_summaries_update*( state: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState | - fulu.BeaconState)): + fulu.BeaconState | gloas.BeaconState)): Result[void, cstring] = # Set historical block root accumulator. 
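  # A HistoricalSummary is appended only once every SLOTS_PER_HISTORICAL_ROOT
  # slots (8192 slots, roughly 27 hours on mainnet), pairing the block-root and
  # state-root accumulators that replaced the pre-Capella historical_roots list.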
let next_epoch = get_current_epoch(state) + 1 @@ -1242,7 +1187,8 @@ from ".."/validator_bucket_sort import # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#new-apply_pending_deposit func apply_pending_deposit( - cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), + cfg: RuntimeConfig, + state: var (electra.BeaconState | fulu.BeaconState | gloas.BeaconState), deposit: PendingDeposit, validator_index: Opt[ValidatorIndex]): Result[void, cstring] = ## Applies ``deposit`` to the ``state``. @@ -1263,7 +1209,8 @@ func apply_pending_deposit( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#new-process_pending_deposits func process_pending_deposits*( - cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState) , + cfg: RuntimeConfig, + state: var (electra.BeaconState | fulu.BeaconState | gloas.BeaconState), cache: var StateCache): Result[void, cstring] = let next_epoch = get_current_epoch(state) + 1 @@ -1356,7 +1303,8 @@ func process_pending_deposits*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#new-process_pending_consolidations func process_pending_consolidations*( - cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState) ): + cfg: RuntimeConfig, + state: var (electra.BeaconState | fulu.BeaconState | gloas.BeaconState)): Result[void, cstring] = let next_epoch = get_current_epoch(state) + 1 var next_pending_consolidation = 0 @@ -1393,6 +1341,62 @@ func process_pending_consolidations*( ok() +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.1/specs/fulu/beacon-chain.md#new-process_proposer_lookahead +func process_proposer_lookahead*( + state: var (fulu.BeaconState | gloas.BeaconState), cache: var StateCache): + Result[void, cstring] = + let + total_slots = state.proposer_lookahead.data.lenu64 + last_epoch_start = total_slots - SLOTS_PER_EPOCH + + for i in 0 ..< last_epoch_start: + mitem(state.proposer_lookahead, i) = + mitem(state.proposer_lookahead, i + SLOTS_PER_EPOCH) + + let + next_epoch = get_current_epoch(state) + MIN_SEED_LOOKAHEAD + 1 + new_proposers = + get_beacon_proposer_indices(state, next_epoch) + + for i in 0 ..< SLOTS_PER_EPOCH: + if new_proposers[i].isSome(): + mitem(state.proposer_lookahead, last_epoch_start + i) = new_proposers[i].get.uint64 + + ok() + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#new-get_builder_payment_quorum_threshold +func get_builder_payment_quorum_threshold(state: gloas.BeaconState, cache: var StateCache): uint64 = + ## Calculate the quorum threshold for builder payments. + let quorum = ( + get_total_active_balance(state, cache) div SLOTS_PER_EPOCH * BUILDER_PAYMENT_THRESHOLD_NUMERATOR) + uint64(quorum div BUILDER_PAYMENT_THRESHOLD_DENOMINATOR) + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#new-process_builder_pending_payments +func process_builder_pending_payments*( + cfg: RuntimeConfig, state: var gloas.BeaconState, cache: var StateCache): + Result[void, cstring] = + ## Processes the builder pending payments from the previous epoch. 
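  # Illustrative numbers: with a total active balance of 1,000,000 ETH and
  # SLOTS_PER_EPOCH = 32, get_builder_payment_quorum_threshold above yields a
  # per-slot balance of 31,250 ETH scaled by
  # BUILDER_PAYMENT_THRESHOLD_NUMERATOR / BUILDER_PAYMENT_THRESHOLD_DENOMINATOR.
  # A payment queued by process_execution_payload_bid is promoted to
  # builder_pending_withdrawals below only if the attestation weight
  # accumulated for its slot exceeds that quorum; otherwise it is dropped when
  # the two-epoch payment window rotates.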
+ let quorum = get_builder_payment_quorum_threshold(state, cache) + + for index in 0 ..< min( + state.builder_pending_payments.len, SLOTS_PER_EPOCH.int): + var payment = state.builder_pending_payments.mitem(index) + if payment.weight.distinctBase > quorum: + let exit_queue_epoch = compute_exit_epoch_and_update_churn( + cfg, state, payment.withdrawal.amount, cache) + payment.withdrawal.withdrawable_epoch = + exit_queue_epoch + cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY + if not state.builder_pending_withdrawals.add(payment.withdrawal): + return err("process_builder_pending_payments: couldn't add to builder_pending_withdrawals") + + staticFor i, 0 ..< SLOTS_PER_EPOCH.int: + assign( + state.builder_pending_payments.mitem(i), + state.builder_pending_payments.item(i + SLOTS_PER_EPOCH)) + state.builder_pending_payments.mitem(i + SLOTS_PER_EPOCH).reset() + + ok() + # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#epoch-processing proc process_epoch*( cfg: RuntimeConfig, state: var phase0.BeaconState, flags: UpdateFlags, @@ -1428,7 +1432,8 @@ proc process_epoch*( func init*( info: var altair.EpochInfo, state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState) = + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState) = # init participation, overwriting the full structure info.balances = get_unslashed_participating_balances(state) info.validators.setLen(state.validators.len()) @@ -1446,7 +1451,8 @@ func init*( func init*( T: type altair.EpochInfo, state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState): T = + deneb.BeaconState | electra.BeaconState | fulu.BeaconState | + gloas.BeaconState): T = init(result, state) # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#epoch-processing @@ -1458,8 +1464,6 @@ proc process_epoch*( let epoch = get_current_epoch(state) info.init(state) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#justification-and-finalization - # [Modified in Altair] process_justification_and_finalization(state, info.balances, flags) # state.slot hasn't been incremented yet. @@ -1473,16 +1477,9 @@ proc process_epoch*( doAssert state.finalized_checkpoint.epoch + 3 >= epoch process_inactivity_updates(cfg, state, info) # [New in Altair] - - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#rewards-and-penalties process_rewards_and_penalties(cfg, state, info) # [Modified in Altair] - - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#registry-updates ? process_registry_updates(cfg, state, cache) - - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#slashings process_slashings(state, info.balances.current_epoch) # [Modified in Altair] - process_eth1_data_reset(state) process_effective_balance_updates(state) process_slashings_reset(state) @@ -1502,7 +1499,6 @@ proc process_epoch*( let epoch = get_current_epoch(state) info.init(state) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#justification-and-finalization process_justification_and_finalization(state, info.balances, flags) # state.slot hasn't been incremented yet. 
@@ -1517,16 +1513,9 @@ proc process_epoch*( quit 1 process_inactivity_updates(cfg, state, info) - - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#rewards-and-penalties process_rewards_and_penalties(cfg, state, info) - - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#registry-updates ? process_registry_updates(cfg, state, cache) - - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#slashings process_slashings(state, info.balances.current_epoch) - process_eth1_data_reset(state) process_effective_balance_updates(state) process_slashings_reset(state) @@ -1537,15 +1526,14 @@ proc process_epoch*( ok() -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#epoch-processing +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/electra/beacon-chain.md#epoch-processing proc process_epoch*( - cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), + cfg: RuntimeConfig, state: var electra.BeaconState, flags: UpdateFlags, cache: var StateCache, info: var altair.EpochInfo): Result[void, cstring] = let epoch = get_current_epoch(state) info.init(state) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#justification-and-finalization process_justification_and_finalization(state, info.balances, flags) # state.slot hasn't been incremented yet. @@ -1560,16 +1548,46 @@ proc process_epoch*( quit 1 process_inactivity_updates(cfg, state, info) - - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#rewards-and-penalties process_rewards_and_penalties(cfg, state, info) - - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#registry-updates ? process_registry_updates(cfg, state, cache) # [Modified in Electra:EIP7251] - - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings process_slashings(state, info.balances.current_epoch) + process_eth1_data_reset(state) + ? process_pending_deposits(cfg, state, cache) # [New in Electra:EIP7251] + ? process_pending_consolidations(cfg, state) # [New in Electra:EIP7251] + process_effective_balance_updates(state) # [Modified in Electra:EIP7251] + process_slashings_reset(state) + process_randao_mixes_reset(state) + ? process_historical_summaries_update(state) # [Modified in Capella] + process_participation_flag_updates(state) + process_sync_committee_updates(state) + ok() + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.1/specs/fulu/beacon-chain.md#modified-process_epoch +proc process_epoch*( + cfg: RuntimeConfig, state: var fulu.BeaconState, + flags: UpdateFlags, cache: var StateCache, info: var altair.EpochInfo): + Result[void, cstring] = + let epoch = get_current_epoch(state) + info.init(state) + + process_justification_and_finalization(state, info.balances, flags) + + # state.slot hasn't been incremented yet. + if strictVerification in flags: + # Rule 2/3/4 finalization results in the most pessimal case. The other + # three finalization rules finalize more quickly as long as the any of + # the finalization rules triggered. 
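  # For example, at epoch 10 this check aborts if the current justified
  # checkpoint is at epoch 7 or earlier, or the finalized checkpoint is at
  # epoch 6 or earlier, i.e. if justification has lagged by more than 2 epochs
  # or finalization by more than 3.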
+ if (epoch >= 2 and state.current_justified_checkpoint.epoch + 2 < epoch) or + (epoch >= 3 and state.finalized_checkpoint.epoch + 3 < epoch): + fatal "The network did not finalize", + epoch, finalizedEpoch = state.finalized_checkpoint.epoch + quit 1 + + process_inactivity_updates(cfg, state, info) + process_rewards_and_penalties(cfg, state, info) + ? process_registry_updates(cfg, state, cache) # [Modified in Electra:EIP7251] + process_slashings(state, info.balances.current_epoch) process_eth1_data_reset(state) ? process_pending_deposits(cfg, state, cache) # [New in Electra:EIP7251] ? process_pending_consolidations(cfg, state) # [New in Electra:EIP7251] @@ -1579,110 +1597,45 @@ proc process_epoch*( ? process_historical_summaries_update(state) # [Modified in Capella] process_participation_flag_updates(state) process_sync_committee_updates(state) + ? process_proposer_lookahead(state, cache) # [New in Fulu:EIP7917] ok() -proc get_validator_balance_after_epoch*( - cfg: RuntimeConfig, state: deneb.BeaconState | electra.BeaconState | - fulu.BeaconState, - cache: var StateCache, info: var altair.EpochInfo, - index: ValidatorIndex): Gwei = - # Run a subset of process_epoch() which affects an individual validator, - # without modifying state itself - info.init(state) # TODO avoid quadratic aspects here - - # Can't use process_justification_and_finalization(), but use its helper - # function. Used to calculate inactivity_score. - let jf_info = - # process_justification_and_finalization() skips first two epochs - if get_current_epoch(state) <= GENESIS_EPOCH + 1: - JustificationAndFinalizationInfo( - previous_justified_checkpoint: state.previous_justified_checkpoint, - current_justified_checkpoint: state.current_justified_checkpoint, - finalized_checkpoint: state.finalized_checkpoint, - justification_bits: state.justification_bits) - else: - weigh_justification_and_finalization( - state, info.balances.current_epoch, - info.balances.previous_epoch[TIMELY_TARGET_FLAG_INDEX], - info.balances.current_epoch_TIMELY_TARGET, {}) - - # Used as part of process_rewards_and_penalties - let inactivity_score = - # process_inactivity_updates skips GENESIS_EPOCH and ineligible validators - if get_current_epoch(state) == GENESIS_EPOCH or - not is_eligible_validator(info.validators[index]): - 0.Gwei - else: - let - finality_delay = - get_previous_epoch(state) - jf_info.finalized_checkpoint.epoch - not_in_inactivity_leak = not is_in_inactivity_leak(finality_delay) - pre_inactivity_score = state.inactivity_scores.asSeq()[index] - - # This is a template which uses not_in_inactivity_leak and index - compute_inactivity_update(cfg, state, info, pre_inactivity_score).Gwei - - # process_rewards_and_penalties for a single validator - let reward_and_penalties_balance = block: - # process_rewards_and_penalties doesn't run at GENESIS_EPOCH - if get_current_epoch(state) == GENESIS_EPOCH: - state.balances.item(index) - else: - let - total_active_balance = info.balances.current_epoch - base_reward_per_increment = get_base_reward_per_increment( - total_active_balance) - finality_delay = get_finality_delay(state) - - var balance = state.balances.item(index) - let maybeDelta = get_flag_and_inactivity_delta_for_validator( - cfg, state, base_reward_per_increment, info, finality_delay, index, - inactivity_score) - if maybeDelta.isOk: - # Can't use isErrOr in generics - let (validator_index, reward0, reward1, reward2, penalty0, penalty1, penalty2) = - maybeDelta.get - info.validators[validator_index].delta.rewards += reward0 + reward1 + 
reward2 - info.validators[validator_index].delta.penalties += penalty0 + penalty1 + penalty2 - increase_balance(balance, info.validators[index].delta.rewards) - decrease_balance(balance, info.validators[index].delta.penalties) - balance - - # The two directly balance-changing operations, from Altair through Deneb, - # are these. The rest is necessary to look past a single epoch transition, - # but that's not the use case here. - var post_epoch_balance = reward_and_penalties_balance - decrease_balance( - post_epoch_balance, - get_slashing(state, info.balances.current_epoch, index)) - - # Electra adds apply_pending_deposit as a potential balance-changing epoch - # operations. This should probably be cached, so its 16+ invocations, each - # time, e.g., withdrawals are calculated don't repeat, if it's empirically - # too expensive. Limits exist on how large this structure can get though. - # - # TODO withdrawals and consolidation request processing can also affect this - when type(state).kind >= ConsensusFork.Electra: - for deposit in state.pending_deposits: - discard - - post_epoch_balance - -proc get_next_slot_expected_withdrawals*( - cfg: RuntimeConfig, state: deneb.BeaconState, cache: var StateCache, - info: var altair.EpochInfo): seq[Withdrawal] = - get_expected_withdrawals_aux(state, (state.slot + 1).epoch) do: - # validator_index is defined by an injected symbol within the template - get_validator_balance_after_epoch( - cfg, state, cache, info, validator_index.ValidatorIndex) - -proc get_next_slot_expected_withdrawals*( - cfg: RuntimeConfig, state: electra.BeaconState, cache: var StateCache, - info: var altair.EpochInfo): seq[Withdrawal] = - let (res, _) = get_expected_withdrawals_with_partial_count_aux( - state, (state.slot + 1).epoch) do: - # validator_index is defined by an injected symbol within the template - get_validator_balance_after_epoch( - cfg, state, cache, info, validator_index.ValidatorIndex) - res \ No newline at end of file +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#modified-process_epoch +proc process_epoch*( + cfg: RuntimeConfig, state: var gloas.BeaconState, + flags: UpdateFlags, cache: var StateCache, info: var altair.EpochInfo): + Result[void, cstring] = + let epoch = get_current_epoch(state) + info.init(state) + + process_justification_and_finalization(state, info.balances, flags) + + # state.slot hasn't been incremented yet. + if strictVerification in flags: + # Rule 2/3/4 finalization results in the most pessimal case. The other + # three finalization rules finalize more quickly as long as the any of + # the finalization rules triggered. + if (epoch >= 2 and state.current_justified_checkpoint.epoch + 2 < epoch) or + (epoch >= 3 and state.finalized_checkpoint.epoch + 3 < epoch): + fatal "The network did not finalize", + epoch, finalizedEpoch = state.finalized_checkpoint.epoch + quit 1 + + process_inactivity_updates(cfg, state, info) + process_rewards_and_penalties(cfg, state, info) + ? process_registry_updates(cfg, state, cache) # [Modified in Electra:EIP7251] + process_slashings(state, info.balances.current_epoch) + process_eth1_data_reset(state) + ? process_pending_deposits(cfg, state, cache) + ? process_pending_consolidations(cfg, state) + process_effective_balance_updates(state) + process_slashings_reset(state) + process_randao_mixes_reset(state) + ? process_historical_summaries_update(state) + process_participation_flag_updates(state) + process_sync_committee_updates(state) + ? 
process_proposer_lookahead(state, cache) + ? process_builder_pending_payments(cfg, state, cache) # [New in Gloas:EIP7732] + + ok() diff --git a/beacon_chain/spec/validator.nim b/beacon_chain/spec/validator.nim index 1039bcc19a..1a90f86d3e 100644 --- a/beacon_chain/spec/validator.nim +++ b/beacon_chain/spec/validator.nim @@ -4,13 +4,15 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # Helpers and functions pertaining to managing the validator set import std/algorithm, "."/[crypto, helpers] +from std/sequtils import mapIt +from std/math import `^` export helpers const @@ -158,13 +160,13 @@ func get_shuffled_active_validator_indices*( withState(state): cache.get_shuffled_active_validator_indices(forkyState.data, epoch) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#get_active_validator_indices +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#get_active_validator_indices func count_active_validators*(state: ForkyBeaconState, epoch: Epoch, cache: var StateCache): uint64 = cache.get_shuffled_active_validator_indices(state, epoch).lenu64 -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#get_committee_count_per_slot +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#get_committee_count_per_slot func get_committee_count_per_slot*(num_active_validators: uint64): uint64 = clamp( num_active_validators div SLOTS_PER_EPOCH div TARGET_COMMITTEE_SIZE, @@ -189,7 +191,7 @@ iterator get_committee_indices*(committee_count_per_slot: uint64): CommitteeInde let committee_index = CommitteeIndex.init(idx).expect("value clamped") yield committee_index -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#compute_committee +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#compute_committee func compute_committee_slice*( active_validators, index, count: uint64): Slice[int] = doAssert active_validators <= ValidatorIndex.high.uint64 @@ -299,7 +301,7 @@ func get_beacon_committee_len*( withState(state): get_beacon_committee_len(forkyState.data, slot, index, cache) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#compute_shuffled_index +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#compute_shuffled_index template compute_shuffled_index_aux( index: uint64, index_count: uint64, seed: Eth2Digest, iter: untyped): uint64 = @@ -391,7 +393,7 @@ template compute_proposer_index( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/beacon-chain.md#modified-compute_proposer_index template compute_proposer_index( - state: electra.BeaconState | fulu.BeaconState, + state: electra.BeaconState | fulu.BeaconState | gloas.BeaconState, indices: openArray[ValidatorIndex], seed: Eth2Digest, unshuffleTransform: untyped): Opt[ValidatorIndex] = ## Return from ``indices`` a random index sampled by effective balance. @@ -439,62 +441,194 @@ func compute_proposer_index(state: ForkyBeaconState, ## Return from ``indices`` a random index sampled by effective balance. 
compute_proposer_index(state, indices, seed, shuffled_index) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#get_beacon_proposer_index +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/fulu/beacon-chain.md#new-compute_proposer_indices +func compute_proposer_indices*( + state: ForkyBeaconState, + epoch: Epoch, seed: Eth2Digest, + indices: seq[ValidatorIndex] +): seq[Opt[ValidatorIndex]] = + var proposerIndices: seq[Opt[ValidatorIndex]] + + for epochSlot in epoch.slots(): + var buffer: array[32 + 8, byte] + buffer[0..31] = seed.data + buffer[32..39] = uint_to_bytes(epochSlot.asUInt64) + + let slotSeed = eth2digest(buffer) # Concatenate manually using buffer + let proposerIndex = compute_proposer_index(state, indices, slotSeed) + proposerIndices.add(proposerIndex) + + proposerIndices + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#new-compute_balance_weighted_acceptance +func compute_balance_weighted_acceptance( + state: gloas.BeaconState, index: ValidatorIndex, + seed: Eth2Digest, i: uint64): bool = + ## Return whether to accept the selection of the validator ``index``, with probability + ## proportional to its ``effective_balance``, and randomness given by ``seed`` and ``i``. + const MAX_RANDOM_VALUE = (2^16 - 1).uint64 + + var buffer {.noinit.}: array[40, byte] + buffer[0..31] = seed.data + buffer[32..39] = uint_to_bytes(i div 16) + + let + random_bytes = eth2digest(buffer) + offset = (i mod 16) * 2 + + var random_bytes_8: array[8, byte] + random_bytes_8[0..1] = random_bytes.data.toOpenArray(offset, offset + 1) + + let + random_value = bytes_to_uint64(random_bytes_8) + effective_balance = state.validators[index].effective_balance + + effective_balance.uint64 * MAX_RANDOM_VALUE >= + MAX_EFFECTIVE_BALANCE_ELECTRA.uint64 * random_value + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#new-compute_balance_weighted_selection +iterator compute_balance_weighted_selection*( + state: gloas.BeaconState, indices: seq[ValidatorIndex], + seed: Eth2Digest, size: uint64, + shuffle_indices: bool): ValidatorIndex = + ## Return ``size`` indices sampled by effective balance, using ``indices`` + ## as candidates. If ``shuffle_indices`` is ``True``, candidate indices + ## are themselves sampled from ``indices`` by shuffling it, otherwise + ## ``indices`` is traversed in order. + let total = indices.lenu64 + doAssert total > 0 + + var + i = 0'u64 + count = 0'u64 + + while count < size: + var next_index = i mod total + if shuffle_indices: + next_index = compute_shuffled_index(next_index, total, seed) + + let candidate_index = indices[next_index] + if compute_balance_weighted_acceptance(state, candidate_index, seed, i): + yield candidate_index + inc count + inc i + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#modified-compute_proposer_indices +func compute_proposer_indices*( + state: gloas.BeaconState, + epoch: Epoch, seed: Eth2Digest, + indices: seq[ValidatorIndex] +): seq[ValidatorIndex] = + ## Return the proposer indices for the given ``epoch`` using balance-weighted selection. 
+ var proposer_indices: seq[ValidatorIndex] + + for epochSlot in epoch.slots(): + var buffer: array[32 + 8, byte] + buffer[0..31] = seed.data + buffer[32..39] = uint_to_bytes(epochSlot.asUInt64) + let slotSeed = eth2digest(buffer) + + for proposer in compute_balance_weighted_selection( + state, indices, slotSeed, size=1, shuffle_indices=true): + proposer_indices.add(proposer) + break + + proposer_indices + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#get_beacon_proposer_index func get_beacon_proposer_index*( state: ForkyBeaconState, cache: var StateCache, slot: Slot): Opt[ValidatorIndex] = let epoch = get_current_epoch(state) - if slot.epoch() != epoch: # compute_proposer_index depends on `effective_balance`, therefore the # beacon proposer index can only be computed for the "current" epoch: # https://github.com/ethereum/consensus-specs/pull/772#issuecomment-475574357 return Opt.none(ValidatorIndex) + when typeof(state).kind >= ConsensusFork.Fulu: + let pi = Opt.some(ValidatorIndex item(state.proposer_lookahead, slot mod SLOTS_PER_EPOCH)) + cache.beacon_proposer_indices[slot] = pi + return pi + else: + cache.beacon_proposer_indices.withValue(slot, proposer) do: + return proposer[] + do: + ## Return the beacon proposer index at the current slot. + var buffer: array[32 + 8, byte] + buffer[0..31] = get_seed(state, epoch, DOMAIN_BEACON_PROPOSER).data + # There's exactly one beacon proposer per slot - the same validator may + # however propose several times in the same epoch (however unlikely) + let indices = get_active_validator_indices(state, epoch) + var res: Opt[ValidatorIndex] + for epoch_slot in epoch.slots(): + buffer[32..39] = uint_to_bytes(epoch_slot.asUInt64) + let seed = eth2digest(buffer) + let pi = compute_proposer_index(state, indices, seed) + if epoch_slot == slot: + res = pi + cache.beacon_proposer_indices[epoch_slot] = pi + return res + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/fulu/beacon-chain.md#new-get_beacon_proposer_indices +func get_beacon_proposer_indices*( + state: ForkyBeaconState, epoch: Epoch +): seq[Opt[ValidatorIndex]] = + ## Return the proposer indices for the given `epoch`. + let indices = get_active_validator_indices(state, epoch) + let seed = get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + + debugGloasComment "temporary workaround for Gloas" + when typeof(state).kind >= ConsensusFork.Gloas: + let proposers = compute_proposer_indices(state, epoch, seed, indices) + proposers.mapIt(Opt.some(it)) + else: + compute_proposer_indices(state, epoch, seed, indices) - cache.beacon_proposer_indices.withValue(slot, proposer) do: - return proposer[] - do: - ## Return the beacon proposer index at the current slot. 
- - var buffer: array[32 + 8, byte] - buffer[0..31] = get_seed(state, epoch, DOMAIN_BEACON_PROPOSER).data - - # There's exactly one beacon proposer per slot - the same validator may - # however propose several times in the same epoch (however unlikely) - let indices = get_active_validator_indices(state, epoch) - var res: Opt[ValidatorIndex] - - for epoch_slot in epoch.slots(): - buffer[32..39] = uint_to_bytes(epoch_slot.asUInt64) - let seed = eth2digest(buffer) - let pi = compute_proposer_index(state, indices, seed) - if epoch_slot == slot: - res = pi - cache.beacon_proposer_indices[epoch_slot] = pi - - return res - -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#get_beacon_proposer_index +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/beacon-chain.md#get_beacon_proposer_index func get_beacon_proposer_indices*( state: ForkyBeaconState, shuffled_indices: openArray[ValidatorIndex], epoch: Epoch): seq[Opt[ValidatorIndex]] = ## Return the beacon proposer indices at the current epoch, using shuffled ## rather than sorted active validator indices. - var - buffer {.noinit.}: array[32 + 8, byte] - res: seq[Opt[ValidatorIndex]] + when typeof(state).kind < ConsensusFork.Fulu: + var + buffer {.noinit.}: array[32 + 8, byte] + res: seq[Opt[ValidatorIndex]] - buffer[0..31] = get_seed(state, epoch, DOMAIN_BEACON_PROPOSER).data - let epoch_shuffle_seed = get_seed(state, epoch, DOMAIN_BEACON_ATTESTER) + buffer[0..31] = get_seed(state, epoch, DOMAIN_BEACON_PROPOSER).data + let epoch_shuffle_seed = get_seed(state, epoch, DOMAIN_BEACON_ATTESTER) - for epoch_slot in epoch.slots(): - buffer[32..39] = uint_to_bytes(epoch_slot.asUInt64) - res.add ( - compute_proposer_index(state, shuffled_indices, eth2digest(buffer)) do: - compute_inverted_shuffled_index( - shuffled_index, seq_len, epoch_shuffle_seed)) + for epoch_slot in epoch.slots(): + buffer[32..39] = uint_to_bytes(epoch_slot.asUInt64) + res.add ( + compute_proposer_index(state, shuffled_indices, eth2digest(buffer)) do: + compute_inverted_shuffled_index( + shuffled_index, seq_len, epoch_shuffle_seed)) - res + res + else: + # Not using shuffled indices here is not a bug, + # as the method of computing proposer in the below + # function does not require shuffled indices post Fulu + get_beacon_proposer_indices(state, epoch) + +func initialize_proposer_lookahead*(state: electra.BeaconState, + cache: var StateCache): + HashArray[Limit ((MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH), uint64] = + let current_epoch = state.slot.epoch() + var lookahead: HashArray[Limit ((MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH), uint64] + + for i in 0 ..< (MIN_SEED_LOOKAHEAD + 1): + let + epoch_i = current_epoch + i + proposers = + get_beacon_proposer_indices(state, epoch_i) + + for j in 0 ..< SLOTS_PER_EPOCH: + if proposers[j].isSome(): + mitem(lookahead, i * SLOTS_PER_EPOCH + j) = proposers[j].get.uint64 + + lookahead # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#get_beacon_proposer_index func get_beacon_proposer_index*(state: ForkyBeaconState, cache: var StateCache): diff --git a/beacon_chain/statediff.nim b/beacon_chain/statediff.nim index 4d3a2a776a..c83c9f3016 100644 --- a/beacon_chain/statediff.nim +++ b/beacon_chain/statediff.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2020-2024 Status Research & Development GmbH +# Copyright (c) 2020-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license 
terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -11,19 +11,20 @@ import stew/assign2, ./spec/forks -func diffModIncEpoch[maxLen, T](hl: HashArray[maxLen, T], startSlot: uint64): - array[SLOTS_PER_EPOCH, T] = - static: doAssert maxLen.uint64 mod SLOTS_PER_EPOCH == 0 +func diffModIncEpoch[maxLength, T]( + hl: HashArray[maxLength, T], startSlot: uint64): array[SLOTS_PER_EPOCH, T] = + static: doAssert maxLength.uint64 mod SLOTS_PER_EPOCH == 0 doAssert startSlot mod SLOTS_PER_EPOCH == 0 for i in startSlot ..< startSlot + SLOTS_PER_EPOCH: - result[i mod SLOTS_PER_EPOCH] = hl[i mod maxLen.uint64] + result[i mod SLOTS_PER_EPOCH] = hl[i mod maxLength.uint64] -func applyModIncrement[maxLen, T]( - ha: var HashArray[maxLen, T], hl: array[SLOTS_PER_EPOCH, T], slot: uint64) = +func applyModIncrement[maxLength, T]( + ha: var HashArray[maxLength, T], hl: array[SLOTS_PER_EPOCH, T], + slot: uint64) = var indexSlot = slot for item in hl: - ha[indexSlot mod maxLen.uint64] = item + ha[indexSlot mod maxLength.uint64] = item indexSlot += 1 func applyValidatorIdentities( @@ -51,9 +52,9 @@ func setValidatorStatusesNoWithdrawals( validator[].exit_epoch = hl[i].exit_epoch validator[].withdrawable_epoch = hl[i].withdrawable_epoch -func replaceOrAddEncodeEth1Votes[T, maxLen]( - votes0: openArray[T], votes0_len: int, votes1: HashList[T, maxLen]): - (bool, List[T, maxLen]) = +func replaceOrAddEncodeEth1Votes[T, maxLength]( + votes0: openArray[T], votes0_len: int, votes1: HashList[T, maxLength]): + (bool, List[T, maxLength]) = let num_votes0 = votes0.len lower_bound = @@ -67,17 +68,17 @@ func replaceOrAddEncodeEth1Votes[T, maxLen]( else: num_votes0 - var res = (lower_bound == 0, default(List[T, maxLen])) + var res = (lower_bound == 0, default(List[T, maxLength])) for i in lower_bound ..< votes1.len: if not result[1].add votes1[i]: raiseAssert "same limit" res -func replaceOrAddDecodeEth1Votes[T, maxLen]( - votes0: var HashList[T, maxLen], eth1_data_votes_replaced: bool, - votes1: List[T, maxLen]) = +func replaceOrAddDecodeEth1Votes[T, maxLength]( + votes0: var HashList[T, maxLength], eth1_data_votes_replaced: bool, + votes1: List[T, maxLength]) = if eth1_data_votes_replaced: - votes0 = HashList[T, maxLen]() + votes0 = HashList[T, maxLength]() for item in votes1: if not votes0.add item: @@ -209,8 +210,8 @@ func applyDiff*( state: var capella.BeaconState, immutableValidators: openArray[ImmutableValidatorData2], stateDiff: BeaconStateDiff) = - template assign[T, maxLen]( - tgt: var HashList[T, maxLen], src: List[T, maxLen]) = + template assign[T, maxLength]( + tgt: var HashList[T, maxLength], src: List[T, maxLength]) = assign(tgt.data, src) tgt.resetCache() diff --git a/beacon_chain/statusbar.nim b/beacon_chain/statusbar.nim index df4d5dc24e..83aa56baff 100644 --- a/beacon_chain/statusbar.nim +++ b/beacon_chain/statusbar.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -107,7 +107,7 @@ proc renderCells(cells: seq[StatusBarCell], sep: string) = stdout.write cell.content, " " stdout.resetAttributes() -proc render*(s: var StatusBarView) {.raises: [ValueError].} = +proc render*(s: var StatusBarView) = doAssert s.consumedLines == 0 let diff --git a/beacon_chain/sync/light_client_manager.nim b/beacon_chain/sync/light_client_manager.nim index a74f85e3ea..ed09e6c347 100644 --- a/beacon_chain/sync/light_client_manager.nim +++ b/beacon_chain/sync/light_client_manager.nim @@ -35,7 +35,7 @@ type Endpoint[Nothing, ForkedLightClientOptimisticUpdate] ValueVerifier[V] = - proc(v: V): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} + proc(v: V): Future[Result[void, LightClientVerifierError]] {.async: (raises: [CancelledError]).} BootstrapVerifier* = ValueVerifier[ForkedLightClientBootstrap] UpdateVerifier* = @@ -55,6 +55,7 @@ type LightClientManager* = object network: Eth2Node rng: ref HmacDrbgContext + timeConfig: TimeConfig getTrustedBlockRoot: GetTrustedBlockRootCallback bootstrapVerifier: BootstrapVerifier updateVerifier: UpdateVerifier @@ -72,6 +73,7 @@ func init*( T: type LightClientManager, network: Eth2Node, rng: ref HmacDrbgContext, + timeConfig: TimeConfig, getTrustedBlockRoot: GetTrustedBlockRootCallback, bootstrapVerifier: BootstrapVerifier, updateVerifier: UpdateVerifier, @@ -88,6 +90,7 @@ func init*( LightClientManager( network: network, rng: rng, + timeConfig: timeConfig, getTrustedBlockRoot: getTrustedBlockRoot, bootstrapVerifier: bootstrapVerifier, updateVerifier: updateVerifier, @@ -112,7 +115,7 @@ proc isGossipSupported*( finalizedPeriod = self.getFinalizedPeriod(), isNextSyncCommitteeKnown = self.isNextSyncCommitteeKnown()) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap proc doRequest( e: typedesc[Bootstrap], peer: Peer, @@ -120,7 +123,7 @@ proc doRequest( ): Future[NetRes[ForkedLightClientBootstrap]] {.async: (raises: [CancelledError], raw: true).} = peer.lightClientBootstrap(blockRoot) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange type LightClientUpdatesByRangeResponse = NetRes[List[ForkedLightClientUpdate, MAX_REQUEST_LIGHT_CLIENT_UPDATES]] proc doRequest( @@ -138,7 +141,7 @@ proc doRequest( raise newException(ResponseError, e.error) return response -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate proc doRequest( e: typedesc[FinalityUpdate], peer: Peer @@ -196,16 +199,16 @@ proc workerTask[E]( let res = await self.valueVerifier(E)(val) if res.isErr: case res.error - of VerifierError.MissingParent: + of LightClientVerifierError.MissingParent: # Stop, requires different request to progress return didProgress - of VerifierError.Duplicate: + of LightClientVerifierError.Duplicate: # Ignore, a concurrent request may have already fulfilled this when E.V is ForkedLightClientBootstrap: didProgress = true else: discard - of VerifierError.UnviableFork: + of 
LightClientVerifierError.UnviableFork: # Descore, peer is on an incompatible fork version withForkyObject(val): when lcDataFork > LightClientDataFork.None: @@ -217,7 +220,7 @@ proc workerTask[E]( endpoint = E.name, peer, peer_score = peer.getScore() peer.updateScore(PeerScoreUnviableFork) return didProgress - of VerifierError.Invalid: + of LightClientVerifierError.Invalid: # Descore, received data is malformed withForkyObject(val): when lcDataFork > LightClientDataFork.None: @@ -399,7 +402,8 @@ proc loop(self: LightClientManager) {.async: (raises: [CancelledError]).} = wallTime else: wallTime + self.rng.nextLcSyncTaskDelay( - wallTime, finalized, optimistic, isNextSyncCommitteeKnown, + self.timeConfig, wallTime, + finalized, optimistic, isNextSyncCommitteeKnown, didLatestSyncTaskProgress = didProgress) wasGossipSupported = isGossipSupported diff --git a/beacon_chain/sync/light_client_protocol.nim b/beacon_chain/sync/light_client_protocol.nim index 268f27d3f6..500461f4ab 100644 --- a/beacon_chain/sync/light_client_protocol.nim +++ b/beacon_chain/sync/light_client_protocol.nim @@ -46,8 +46,7 @@ proc readChunkPayload*( let res = await eth2_network.readChunkPayload( conn, peer, MsgType.Forky(lcDataFork)) if res.isOk: - if contextFork != - peer.network.cfg.consensusForkAtEpoch(res.get.contextEpoch): + if peer.network.forkDigestAtEpoch(res.get.contextEpoch) != contextBytes: return neterr InvalidContextBytes return ok MsgType.init(res.get) else: @@ -57,12 +56,8 @@ proc readChunkPayload*( {.pop.} -func forkDigestAtEpoch(state: LightClientNetworkState, - epoch: Epoch): ForkDigest = - state.dag.forkDigests[].atEpoch(epoch, state.dag.cfg) - -p2pProtocol LightClientSync(version = 1, - networkState = LightClientNetworkState): +p2pProtocol LightClientSync( + version = 1, networkState = LightClientNetworkState): # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap proc lightClientBootstrap( peer: Peer, @@ -78,7 +73,7 @@ p2pProtocol LightClientSync(version = 1, when lcDataFork > LightClientDataFork.None: let contextEpoch = forkyBootstrap.contextEpoch - contextBytes = peer.networkState.forkDigestAtEpoch(contextEpoch).data + contextBytes = dag.forkDigestAtEpoch(contextEpoch).data # TODO extract from libp2pProtocol peer.awaitQuota( @@ -120,8 +115,7 @@ p2pProtocol LightClientSync(version = 1, when lcDataFork > LightClientDataFork.None: let contextEpoch = forkyUpdate.contextEpoch - contextBytes = - peer.networkState.forkDigestAtEpoch(contextEpoch).data + contextBytes = dag.forkDigestAtEpoch(contextEpoch).data # TODO extract from libp2pProtocol peer.awaitQuota( @@ -148,7 +142,7 @@ p2pProtocol LightClientSync(version = 1, when lcDataFork > LightClientDataFork.None: let contextEpoch = forkyFinalityUpdate.contextEpoch - contextBytes = peer.networkState.forkDigestAtEpoch(contextEpoch).data + contextBytes = dag.forkDigestAtEpoch(contextEpoch).data # TODO extract from libp2pProtocol peer.awaitQuota( @@ -174,7 +168,7 @@ p2pProtocol LightClientSync(version = 1, when lcDataFork > LightClientDataFork.None: let contextEpoch = forkyOptimisticUpdate.contextEpoch - contextBytes = peer.networkState.forkDigestAtEpoch(contextEpoch).data + contextBytes = dag.forkDigestAtEpoch(contextEpoch).data # TODO extract from libp2pProtocol peer.awaitQuota( diff --git a/beacon_chain/sync/light_client_sync_helpers.nim b/beacon_chain/sync/light_client_sync_helpers.nim index 6defb98deb..882afb851b 100644 --- a/beacon_chain/sync/light_client_sync_helpers.nim +++ 
b/beacon_chain/sync/light_client_sync_helpers.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -110,6 +110,7 @@ func computeDelayWithJitter*( func nextLcSyncTaskDelay*( rng: ref HmacDrbgContext, + timeConfig: TimeConfig, wallTime: BeaconTime, finalized: SyncCommitteePeriod, optimistic: SyncCommitteePeriod, diff --git a/beacon_chain/sync/request_manager.nim b/beacon_chain/sync/request_manager.nim index 657ccfd7ff..a2080205e1 100644 --- a/beacon_chain/sync/request_manager.nim +++ b/beacon_chain/sync/request_manager.nim @@ -5,20 +5,19 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} -import chronos, chronicles +import std/[sets, sequtils], chronos, chronicles +import ssz_serialization/types import ../spec/[forks, network, peerdas_helpers], ../networking/eth2_network, ../consensus_object_pools/block_quarantine, ../consensus_object_pools/blob_quarantine, - ../consensus_object_pools/data_column_quarantine, "."/sync_protocol, "."/sync_manager, ../gossip_processing/block_processor from std/algorithm import binarySearch, sort -from std/sequtils import mapIt from std/strutils import join from ../beacon_clock import GetBeaconTimeFn export block_quarantine, sync_manager @@ -33,16 +32,20 @@ const PARALLEL_REQUESTS = 2 ## Number of peers we're using to resolve our request. - PARALLEL_REQUESTS_DATA_COLUMNS = 32 + PARALLEL_DATA_COLUMNS = 8 + + PARALLEL_DATA_COLUMNS_SUPER = 10 BLOB_GOSSIP_WAIT_TIME_NS = 2 * 1_000_000_000 ## How long to wait for blobs to arri ve over gossip before fetching. DATA_COLUMN_GOSSIP_WAIT_TIME_NS = 2 * 1_000_000_000 - ## How long to wait for blobs to arri ve over gossip before fetching. + ## How long to wait for data columns to arrive over gossip before fetching. 
POLL_INTERVAL = 1.seconds + POLL_INTERVAL_COLUMNS = 500.milliseconds + type BlockVerifierFn = proc( signedBlock: ForkedSignedBeaconBlock, @@ -58,10 +61,18 @@ type DataColumnLoaderFn = proc( columnId: DataColumnIdentifier): - Opt[ref DataColumnSidecar] {.gcsafe, raises: [].} + Opt[ref fulu.DataColumnSidecar] {.gcsafe, raises: [].} InhibitFn = proc: bool {.gcsafe, raises: [].} + BlobResponseRecord = object + block_root: Eth2Digest + sidecar: ref BlobSidecar + + DataColumnResponseRecord* = object + block_root*: Eth2Digest + sidecar*: ref fulu.DataColumnSidecar + RequestManager* = object network*: Eth2Node supernode*: bool @@ -70,7 +81,7 @@ type inhibit: InhibitFn quarantine: ref Quarantine blobQuarantine: ref BlobQuarantine - dataColumnQuarantine: ref DataColumnQuarantine + dataColumnQuarantine: ref ColumnQuarantine blockVerifier: BlockVerifierFn blockLoader: BlockLoaderFn blobLoader: BlobLoaderFn @@ -85,7 +96,7 @@ func shortLog*(x: seq[Eth2Digest]): string = func shortLog*(x: seq[FetchRecord]): string = "[" & x.mapIt(shortLog(it.root)).join(", ") & "]" -proc init*(T: type RequestManager, network: Eth2Node, +func init*(T: type RequestManager, network: Eth2Node, supernode: bool, custody_columns_set: HashSet[ColumnIndex], denebEpoch: Epoch, @@ -93,7 +104,7 @@ proc init*(T: type RequestManager, network: Eth2Node, inhibit: InhibitFn, quarantine: ref Quarantine, blobQuarantine: ref BlobQuarantine, - dataColumnQuarantine: ref DataColumnQuarantine, + dataColumnQuarantine: ref ColumnQuarantine, blockVerifier: BlockVerifierFn, blockLoader: BlockLoaderFn = nil, blobLoader: BlobLoaderFn = nil, @@ -126,73 +137,57 @@ func checkResponse(roots: openArray[Eth2Digest], checks.del(res) true -func cmpSidecarIdentifier(x: BlobIdentifier | DataColumnIdentifier, - y: ref BlobSidecar | ref DataColumnSidecar): int = - cmp(x.index, y[].index) +func cmpColumnIndex(x: ColumnIndex, y: ref fulu.DataColumnSidecar): int = + cmp(x, y[].index) -func checkResponseSanity(idList: seq[BlobIdentifier], - blobs: openArray[ref BlobSidecar]): bool = +func checkResponseSanity( + idents: openArray[BlobIdentifier], + blobs: openArray[ref BlobSidecar] +): Opt[seq[BlobResponseRecord]] = # Cannot respond more than what I have asked - if blobs.len > idList.len: - return false - var i = 0 - while i < blobs.len: - let - block_root = - hash_tree_root(blobs[i][].signed_block_header.message) - idListKey = binarySearch(idList, blobs[i], cmpSidecarIdentifier) - - # Verify the block root - if idList[idListKey].block_root != block_root: - return false - - # Verify inclusion proof - blobs[i][].verify_blob_sidecar_inclusion_proof().isOkOr: - return false - inc i - true + if len(blobs) > len(idents): + return Opt.none(seq[BlobResponseRecord]) -func checkResponseSubset(idList: seq[BlobIdentifier], - blobs: openArray[ref BlobSidecar]): bool = - ## Clients MUST respond with at least one sidecar, if they have it. - ## Clients MAY limit the number of blocks and sidecars in the response. 
- ## https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/deneb/p2p-interface.md#blobsidecarsbyroot-v1 - for blb in blobs: - if binarySearch(idList, blb, cmpSidecarIdentifier) == -1: - return false - true + var + checks = idents.toHashSet() + records: seq[BlobResponseRecord] -func checkResponseSanity(idList: seq[DataColumnIdentifier], - columns: openArray[ref DataColumnSidecar]): bool = - # Cannot respond more than what I have asked - if columns.len > idList.len: - return false - var i = 0 - while i < columns.len: + for sidecar in blobs.items(): let - block_root = - hash_tree_root(columns[i][].signed_block_header.message) - idListKey = binarySearch(idList, columns[i], cmpSidecarIdentifier) + block_root = hash_tree_root(sidecar[].signed_block_header.message) + sidecarIdent = + BlobIdentifier(block_root: block_root, index: sidecar[].index) - # Verify the block root - if idList[idListKey].block_root != block_root: - return false + if checks.missingOrExcl(sidecarIdent): + return Opt.none(seq[BlobResponseRecord]) # Verify inclusion proof - columns[i][].verify_data_column_sidecar_inclusion_proof().isOkOr: - return false - inc i - true - -func checkResponseSubset(idList: seq[DataColumnIdentifier], - columns: openArray[ref DataColumnSidecar]): bool = - ## Clients MUST respond with at least one sidecar, if they have it. - ## Clients MAY limit the number of blocks and sidecars in the response. - ## https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1 - for col in columns: - if binarySearch(idList, col, cmpSidecarIdentifier) == -1: - return false - true + sidecar[].verify_blob_sidecar_inclusion_proof().isOkOr: + return Opt.none(seq[BlobResponseRecord]) + + records.add(BlobResponseRecord(block_root: block_root, sidecar: sidecar)) + + Opt.some(records) + +func checkColumnResponse*(idList: seq[DataColumnsByRootIdentifier], + columns: openArray[ref fulu.DataColumnSidecar]): + Opt[seq[DataColumnResponseRecord]] = + var colRec: seq[DataColumnResponseRecord] + for colresp in columns: + let block_root = + hash_tree_root(colresp[].signed_block_header.message) + for id in idList: + if id.block_root == block_root: + if binarySearch(id.indices.asSeq, colresp, cmpColumnIndex) == -1: + # at the common block root level, the response + # is NOT a subset of the request ids + return Opt.none(seq[DataColumnResponseRecord]) + # verify the inclusion proof + colresp[].verify_data_column_sidecar_inclusion_proof().isOkOr: + return Opt.none(seq[DataColumnResponseRecord]) + colRec.add(DataColumnResponseRecord(block_root: block_root, + sidecar: colresp)) + Opt.some(colRec) proc requestBlocksByRoot(rman: RequestManager, items: seq[Eth2Digest]) {.async: (raises: [CancelledError]).} = var peer: Peer @@ -265,7 +260,7 @@ proc requestBlocksByRoot(rman: RequestManager, items: seq[Eth2Digest]) {.async: if not(isNil(peer)): rman.network.peerPool.release(peer) -func cmpSidecarIndexes(x, y: ref BlobSidecar | ref DataColumnSidecar): int = +func cmpSidecarIndexes(x, y: ref BlobSidecar | ref fulu.DataColumnSidecar): int = cmp(x[].index, y[].index) proc fetchBlobsFromNetwork(self: RequestManager, @@ -283,35 +278,28 @@ proc fetchBlobsFromNetwork(self: RequestManager, if blobs.isOk: var ublobs = blobs.get().asSeq() - ublobs.sort(cmpSidecarIndexes) - if not checkResponseSanity(idList, ublobs): - debug "Response to blobs by root have erroneous block root", - peer = peer, blobs = shortLog(idList), ublobs = len(ublobs) + let records = checkResponseSanity(idList, 
ublobs).valueOr: + debug "Response to blobs by root is incorrect", + peer = peer, blobs = shortLog(idList), ublobs = len(ublobs) peer.updateScore(PeerScoreBadResponse) return - if not checkResponseSubset(idList, ublobs): - debug "Response to blobs by root is not a subset", - peer = peer, blobs = shortLog(idList), ublobs = len(ublobs) - peer.updateScore(PeerScoreBadResponse) - return + for b in records: + self.blobQuarantine[].put(b.block_root, b.sidecar) - for b in ublobs: - self.blobQuarantine[].put(b) var curRoot: Eth2Digest - for b in ublobs: - let block_root = hash_tree_root(b.signed_block_header.message) - if block_root != curRoot: - curRoot = block_root - if (let o = self.quarantine[].popBlobless(curRoot); o.isSome): - let b = o.unsafeGet() - discard await self.blockVerifier(b, false) + for record in records: + if record.block_root != curRoot: + curRoot = record.block_root + if (let o = self.quarantine[].popSidecarless(curRoot); o.isSome): + let blck = o.unsafeGet() + discard await self.blockVerifier(blck, false) # TODO: # If appropriate, return a VerifierError.InvalidBlob from # verification, check for it here, and penalize the peer accordingly else: debug "Blobs by root request failed", - peer = peer, blobs = shortLog(idList), err = blobs.error() + peer = peer, blobs = shortLog(idList), err = blobs.error() peer.updateScore(PeerScoreNoValues) finally: @@ -319,92 +307,145 @@ proc fetchBlobsFromNetwork(self: RequestManager, self.network.peerPool.release(peer) proc checkPeerCustody(rman: RequestManager, - peer: Peer): - bool = - # Returns true if the peer custodies atleast - # ONE of the common custody columns, straight - # away returns true if the peer is a supernode. + peer: Peer): DataColumnIndices = + ## Returns the intersection of custody columns + ## with the peer. Also applies peer scoring. 
+ var intersection: DataColumnIndices if rman.supernode: - # For a supernode, it is always best/optimistic - # to filter other supernodes, rather than filter - # too many full nodes that have a subset of the custody - # columns if peer.lookupCgcFromPeer() == - NUMBER_OF_CUSTODY_GROUPS.uint64: - return true - + rman.network.cfg.NUMBER_OF_CUSTODY_GROUPS: + # full custody → return all columns + for col in 0 ..< rman.network.cfg.NUMBER_OF_CUSTODY_GROUPS: + discard intersection.add(ColumnIndex col) + peer.updateScore(PeerScoreSupernode) + debug "Peer is supernode", + peer = peer, score = peer.getScore(), + remote_custody = peer.lookupCgcFromPeer() + return intersection else: if peer.lookupCgcFromPeer() == - NUMBER_OF_CUSTODY_GROUPS.uint64: - return true - - elif peer.lookupCgcFromPeer() == - CUSTODY_REQUIREMENT.uint64: - - # Fetch the remote custody count - let remoteCustodyGroupCount = - peer.lookupCgcFromPeer() - - # Extract remote peer's nodeID from peerID - # Fetch custody columns from remote peer + rman.network.cfg.NUMBER_OF_CUSTODY_GROUPS: + # full custody → return all columns + for col in 0 ..< rman.network.cfg.NUMBER_OF_CUSTODY_GROUPS: + discard intersection.add(ColumnIndex col) + peer.updateScore(PeerScoreSupernode) + debug "Peer is supernode", + peer = peer, score = peer.getScore(), + remote_custody = peer.lookupCgcFromPeer() + return intersection + else: let + remoteCustodyGroupCount = peer.lookupCgcFromPeer() remoteNodeId = fetchNodeIdFromPeerId(peer) remoteCustodyColumns = - remoteNodeId.resolve_column_sets_from_custody_groups( - max(SAMPLES_PER_SLOT.uint64, + rman.network.cfg.resolve_columns_from_custody_groups( + remoteNodeId, + max(rman.network.cfg.SAMPLES_PER_SLOT, remoteCustodyGroupCount)) for local_column in rman.custody_columns_set: - if local_column notin remoteCustodyColumns: - return false + if local_column in remoteCustodyColumns: + discard intersection.add(local_column) + # Apply scoring logic + logs + if intersection.len == 0: + peer.updateScore(PeerScoreBadColumnIntersection) + debug "Peer has no custody overlap", + peer = peer, score = peer.getScore(), + remote_custody = remoteCustodyGroupCount + elif intersection.len < (rman.custody_columns_set.len div 2): + peer.updateScore(PeerScoreScantyColumnIntersection) + debug "Peer has scanty custody overlap", + peer = peer, score = peer.getScore(), + remote_custody = remoteCustodyGroupCount, + overlap = intersection.len, local = rman.custody_columns_set.len + else: + peer.updateScore(PeerScoreDecentColumnIntersection) + debug "Peer has decent custody overlap", + peer = peer, score = peer.getScore(), + remote_custody = remoteCustodyGroupCount, + overlap = intersection.len, local = rman.custody_columns_set.len - return true + return intersection + +func matchIntersection(rman: RequestManager): PeerCustomFilterCallback[Peer] = + return proc(peer: Peer): bool = + let + remoteCustodyGroupCount = peer.lookupCgcFromPeer() + remoteNodeId = fetchNodeIdFromPeerId(peer) + remoteCustodyColumns = + rman.network.cfg.resolve_columns_from_custody_groups( + remoteNodeId, + max(rman.network.cfg.SAMPLES_PER_SLOT, remoteCustodyGroupCount)) + overlap = rman.custody_columns_set.countIt(it in remoteCustodyColumns) + return overlap > (rman.custody_columns_set.len div 2) - else: - return false proc fetchDataColumnsFromNetwork(rman: RequestManager, - colIdList: seq[DataColumnIdentifier]) + colIdList: seq[DataColumnsByRootIdentifier]) {.async: (raises: [CancelledError]).} = - var peer = await rman.network.peerPool.acquire() + var peer: Peer + peer = 
await rman.network.peerPool.acquire( + filter = {Incoming, Outgoing}, + customFilter = matchIntersection(rman)) try: - if rman.checkPeerCustody(peer): - debug "Requesting data columns by root", peer = peer, columns = shortLog(colIdList), - peer_score = peer.getScore() - let columns = await dataColumnSidecarsByRoot(peer, DataColumnIdentifierList colIdList) - - if columns.isOk: - var ucolumns = columns.get().asSeq() - ucolumns.sort(cmpSidecarIndexes) - if not checkResponseSanity(colIdList, ucolumns): - debug "Response to columns by root have erroneous block root", - peer = peer, columns = shortLog(colIdList), ucolumns = len(ucolumns) - peer.updateScore(PeerScoreBadResponse) - return - - if not checkResponseSubset(colIdList, ucolumns): - debug "Response to columns by root is not a subset", - peer = peer, columns = shortLog(colIdList), ucolumns = len(ucolumns) - peer.updateScore(PeerScoreBadResponse) - return - - for col in ucolumns: - rman.dataColumnQuarantine[].put(col) - var curRoot: Eth2Digest - for col in ucolumns: - let block_root = hash_tree_root(col.signed_block_header.message) - if block_root != curRoot: - curRoot = block_root - if (let o = rman.quarantine[].popColumnless(curRoot); o.isSome): - let col = o.unsafeGet() - discard await rman.blockVerifier(col, false) - else: - debug "Data columns by root request not done, peer doesn't have custody column", - peer = peer, columns = shortLog(colIdList), err = columns.error() - peer.updateScore(PeerScoreNoValues) + let intersection = rman.checkPeerCustody(peer) + + debug "Acquired peer after custody check", + peer = peer, + peer_score = peer.getScore(), + overlap = intersection.len, + local = rman.custody_columns_set.len + if intersection.len == 0: + debug "Peer has no usable custody overlap", + peer = peer + return + let intColIdList = colIdList + .mapIt(DataColumnsByRootIdentifier( + block_root: it.block_root, + indices: DataColumnIndices( + filterIt(it.indices.asSeq, it in intersection)))) + .filterIt(it.indices.len > 0) + if intColIdList.len == 0: + debug "No intersecting custody columns to request", + peer = peer, + peer_score = peer.getScore() + return + debug "Requesting data columns by root", + peer = peer, + columns = shortLog(intColIdList), + peer_score = peer.getScore() + let columns = await dataColumnSidecarsByRoot(peer, DataColumnsByRootIdentifierList intColIdList) + if columns.isOk: + var ucolumns = columns.get().asSeq() + ucolumns.sort(cmpSidecarIndexes) + let records = checkColumnResponse(colIdList, ucolumns).valueOr: + debug "Response to columns by root is not a subset", + peer = peer, + columns = shortLog(colIdList), + ucolumns = len(ucolumns) + peer.updateScore(PeerScoreBadResponse) + return + for col in records: + debug "Received column responses", + peer = peer, + column_sidecars = shortLog(col.sidecar[]), + peer_score = peer.getScore() + rman.dataColumnQuarantine[].put(col.block_root, col.sidecar) + var curRoot: Eth2Digest + for col in records: + if col.block_root != curRoot: + curRoot = col.block_root + if (let o = rman.quarantine[].popSidecarless(curRoot); o.isSome): + let col = o.unsafeGet() + discard await rman.blockVerifier(col, false) + else: + debug "Data columns by root request failed or peer missing custody columns", + peer = peer, + err = columns.error() + peer.updateScore(PeerScoreNoValues) finally: - if not(isNil(peer)): + if not isNil(peer): rman.network.peerPool.release(peer) proc requestManagerBlockLoop( @@ -475,38 +516,42 @@ proc getMissingBlobs(rman: RequestManager): seq[BlobIdentifier] = waitDur = 
TimeDiff(nanoseconds: BLOB_GOSSIP_WAIT_TIME_NS) var - fetches: seq[BlobIdentifier] + idents: seq[BlobIdentifier] ready: seq[Eth2Digest] - for blobless in rman.quarantine[].peekBlobless(): + for blobless in rman.quarantine[].peekSidecarless(): withBlck(blobless): - when consensusFork >= ConsensusFork.Deneb: + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: # give blobs a chance to arrive over gossip if forkyBlck.message.slot == wallSlot and delay < waitDur: debug "Not handling missing blobs early in slot" continue - if not rman.blobQuarantine[].hasBlobs(forkyBlck): - let missing = rman.blobQuarantine[].blobFetchRecord(forkyBlck) - if len(missing.indices) == 0: - warn "quarantine missing blobs, but missing indices is empty", - blk=blobless.root, - commitments=len(forkyBlck.message.body.blob_kzg_commitments) - for idx in missing.indices: - let id = BlobIdentifier(block_root: blobless.root, index: idx) - if id notin fetches: - fetches.add(id) + let + commitmentsCount = len(forkyBlck.message.body.blob_kzg_commitments) + missing = + rman.blobQuarantine[].fetchMissingSidecars(blobless.root, forkyBlck) + + if len(missing) > 0: + for ident in missing: + idents.add(ident) else: - # this is a programming error should it occur. - warn "missing blob handler found blobless block with all blobs", - blk=blobless.root, - commitments=len(forkyBlck.message.body.blob_kzg_commitments) - ready.add(blobless.root) + if commitmentsCount == 0: + # this is a programming error should it occur. + warn "missing blob handler found blobless block with all blobs", + blk = blobless.root, + commitments = len(forkyBlck.message.body.blob_kzg_commitments) + ready.add(blobless.root) + else: + # This should not happen either... + warn "quarantine missing blobs, but missing indices is empty", + blk = blobless.root, + commitments = len(forkyBlck.message.body.blob_kzg_commitments) for root in ready: - let blobless = rman.quarantine[].popBlobless(root).valueOr: + let blobless = rman.quarantine[].popSidecarless(root).valueOr: continue discard rman.blockVerifier(blobless, false) - fetches + idents proc requestManagerBlobLoop( rman: RequestManager) {.async: (raises: [CancelledError]).} = @@ -543,12 +588,12 @@ proc requestManagerBlobLoop( discard blockRoots.pop() continue debug "Loaded orphaned blob from storage", blobId - rman.blobQuarantine[].put(blob_sidecar) + rman.blobQuarantine[].put(curRoot, blob_sidecar) var verifiers = newSeqOfCap[ Future[Result[void, VerifierError]] .Raising([CancelledError])](blockRoots.len) for blockRoot in blockRoots: - let blck = rman.quarantine[].popBlobless(blockRoot).valueOr: + let blck = rman.quarantine[].popSidecarless(blockRoot).valueOr: continue verifiers.add rman.blockVerifier(blck, maybeFinalized = false) try: @@ -575,7 +620,7 @@ proc requestManagerBlobLoop( blobs_count = len(blobIds), sync_speed = speed(start, finish) -proc getMissingDataColumns(rman: RequestManager): HashSet[DataColumnIdentifier] = +proc getMissingDataColumns(rman: RequestManager): seq[DataColumnsByRootIdentifier] = let wallTime = rman.getBeaconTime() wallSlot = wallTime.slotOrZero() @@ -584,37 +629,40 @@ proc getMissingDataColumns(rman: RequestManager): HashSet[DataColumnIdentifier] const waitDur = TimeDiff(nanoseconds: DATA_COLUMN_GOSSIP_WAIT_TIME_NS) var - fetches: HashSet[DataColumnIdentifier] + fetches: seq[DataColumnsByRootIdentifier] ready: seq[Eth2Digest] - for columnless in rman.quarantine[].peekColumnless(): + for columnless in rman.quarantine[].peekSidecarless(): withBlck(columnless): - when 
consensusFork >= ConsensusFork.Fulu: + when consensusFork >= ConsensusFork.Fulu and consensusFork < ConsensusFork.Gloas: + debugGloasComment "handle correctly for gloas" # granting data columns a chance to arrive over gossip if forkyBlck.message.slot == wallSlot and delay < waitDur: debug "Not handling missing data columns early in slot" continue - if not rman.dataColumnQuarantine[].hasMissingDataColumns(forkyBlck): - let missing = rman.dataColumnQuarantine[].dataColumnFetchRecord(forkyBlck) - if len(missing.indices) == 0: - warn "quarantine is missing data columns, but missing indices are empty", - blk = columnless.root, - commitments = len(forkyBlck.message.body.blob_kzg_commitments) - for idx in missing.indices: - let id = DataColumnIdentifier(block_root: columnless.root, index: idx) - if id.index in rman.custody_columns_set and id notin fetches and - len(forkyBlck.message.body.blob_kzg_commitments) != 0: - fetches.incl(id) + let + commitmentsCount = len(forkyBlck.message.body.blob_kzg_commitments) + ident = rman.dataColumnQuarantine[].fetchMissingSidecars( + columnless.root, forkyBlck) + + if len(ident.indices) > 0 and ident notin fetches: + fetches.add(ident) else: - # this is a programming error and it not should occur - warn "missing column handler found columnless block with all data columns", - blk = columnless.root, - commitments = len(forkyBlck.message.body.blob_kzg_commitments) - ready.add(columnless.root) + if commitmentsCount == 0: + # this is a programming error should it occur. + warn "missing column handler found columnless block with all data columns", + blk = columnless.root, + commitments = len(forkyBlck.message.body.blob_kzg_commitments) + ready.add(columnless.root) + else: + # This should not happen either... + warn "quarantine missing data columns, but missing indices is empty", + blk = columnless.root, + commitments = len(forkyBlck.message.body.blob_kzg_commitments) for root in ready: - let columnless = rman.quarantine[].popColumnless(root).valueOr: + let columnless = rman.quarantine[].popSidecarless(root).valueOr: continue discard rman.blockVerifier(columnless, false) fetches @@ -623,7 +671,7 @@ proc requestManagerDataColumnLoop( rman: RequestManager) {.async: (raises: [CancelledError]).} = while true: - await sleepAsync(POLL_INTERVAL) + await sleepAsync(POLL_INTERVAL_COLUMNS) if rman.inhibit(): continue @@ -631,10 +679,9 @@ proc requestManagerDataColumnLoop( if missingColumnIds.len == 0: continue - var columnIds: seq[DataColumnIdentifier] + var columnIds: seq[DataColumnsByRootIdentifier] if rman.dataColumnLoader == nil: - for item in missingColumnIds: - columnIds.add item + columnIds = missingColumnIds else: var blockRoots: seq[Eth2Digest] @@ -642,20 +689,26 @@ proc requestManagerDataColumnLoop( for columnId in missingColumnIds: if columnId.block_root != curRoot: curRoot = columnId.block_root - blockRoots.add curRoot - let data_column_sidecar = rman.dataColumnLoader(columnId).valueOr: - columnIds.add columnId - if blockRoots.len > 0 and blockRoots[^1] == curRoot: - # A data column is missing, remove from list of fully available data columns - discard blockRoots.pop() - continue - debug "Loaded orphaned data columns from storage", columnId - rman.dataColumnQuarantine[].put(data_column_sidecar) + if curRoot notin blockRoots: + blockRoots.add curRoot + for index in columnId.indices: + let loaderElem = DataColumnIdentifier( + block_root: columnId.block_root, + index: index) + let data_column_sidecar = rman.dataColumnLoader(loaderElem).valueOr: + if columnId notin 
columnIds: + columnIds.add columnId + if blockRoots.len > 0 and blockRoots[^1] == curRoot: + # A data column is missing, remove from list of fully available data columns + discard blockRoots.pop() + continue + debug "Loaded orphaned data columns from storage", columnId + rman.dataColumnQuarantine[].put(curRoot, data_column_sidecar) var verifiers = newSeqOfCap[ Future[Result[void, VerifierError]] .Raising([CancelledError])](blockRoots.len) for blockRoot in blockRoots: - let blck = rman.quarantine[].popColumnless(blockRoot).valueOr: + let blck = rman.quarantine[].popSidecarless(blockRoot).valueOr: continue verifiers.add rman.blockVerifier(blck, maybeFinalized = false) try: @@ -669,9 +722,15 @@ proc requestManagerDataColumnLoop( if columnIds.len > 0: debug "Requesting detected missing data columns", columns = shortLog(columnIds) let start = SyncMoment.now(0) - var workers: - array[PARALLEL_REQUESTS_DATA_COLUMNS, Future[void].Raising([CancelledError])] - for i in 0..= + rman.network.cfg.NUMBER_OF_CUSTODY_GROUPS: + PARALLEL_DATA_COLUMNS_SUPER + else: + PARALLEL_DATA_COLUMNS + var workers = + newSeq[Future[void].Raising([CancelledError])](workerCount) + for i in 0..= rman.network.cfg.FULU_FORK_EPOCH and + isNil(rman.dataColumnLoopFuture): + if not(isNil(rman.blobLoopFuture)): + rman.blobLoopFuture.cancelSoon() + + rman.dataColumnLoopFuture = + rman.requestManagerDataColumnLoop() proc stop*(rman: RequestManager) = ## Stop Request Manager's loop. diff --git a/beacon_chain/sync/sync_manager.nim b/beacon_chain/sync/sync_manager.nim index 5a6791ea4b..befbdea502 100644 --- a/beacon_chain/sync/sync_manager.nim +++ b/beacon_chain/sync/sync_manager.nim @@ -27,7 +27,7 @@ const StatusExpirationTime* = chronos.minutes(2) ## Time time it takes for the peer's status information to expire. 
- ConcurrentRequestsCount* = 3 + ConcurrentRequestsCount* = 1 # Higher values require reviewing `pending == 0` ## Number of requests performed by one peer in single syncing step RepeatingFailuresCount* = 2 @@ -43,7 +43,7 @@ const type SyncWorkerStatus* {.pure.} = enum Sleeping, WaitingPeer, UpdatingStatus, Requesting, Downloading, Queueing, - Processing + Processing, Paused SyncManagerFlag* {.pure.} = enum NoMonitor, NoGenesisSync @@ -69,12 +69,14 @@ type progressPivot: Slot workers: array[SyncWorkersCount, SyncWorker[A, B]] notInSyncEvent: AsyncEvent + resumeSyncEvent: AsyncEvent shutdownEvent: AsyncEvent rangeAge: uint64 chunkSize: uint64 queue: SyncQueue[A] syncFut: Future[void].Raising([CancelledError]) blockVerifier: BlockVerifier + forkAtEpoch: ForkAtEpochCallback inProgress*: bool insSyncSpeed*: float avgSyncSpeed*: float @@ -121,7 +123,7 @@ proc initQueue[A, B](man: SyncManager[A, B]) = man.concurrentRequestsCount, man.repeatingFailuresCount, man.getSafeSlot, man.blockVerifier, - man.ident) + man.forkAtEpoch, man.ident) of SyncQueueKind.Backward: let firstSlot = man.getFirstSlot() @@ -136,8 +138,8 @@ proc initQueue[A, B](man: SyncManager[A, B]) = man.chunkSize, man.concurrentRequestsCount, man.repeatingFailuresCount, - man.getSafeSlot, - man.blockVerifier, man.ident) + man.getSafeSlot, man.blockVerifier, + man.forkAtEpoch, man.ident) proc newSyncManager*[A, B]( pool: PeerPool[A, B], @@ -154,6 +156,7 @@ proc newSyncManager*[A, B]( weakSubjectivityPeriodCb: GetBoolCallback, progressPivot: Slot, blockVerifier: BlockVerifier, + forkAtEpochCb: ForkAtEpochCallback, shutdownEvent: AsyncEvent, maxHeadAge = uint64(SLOTS_PER_EPOCH * 1), chunkSize = uint64(SLOTS_PER_EPOCH), @@ -185,7 +188,9 @@ proc newSyncManager*[A, B]( maxHeadAge: maxHeadAge, chunkSize: chunkSize, blockVerifier: blockVerifier, + forkAtEpoch: forkAtEpochCb, notInSyncEvent: newAsyncEvent(), + resumeSyncEvent: newAsyncEvent(), direction: direction, shutdownEvent: shutdownEvent, ident: ident, @@ -263,7 +268,7 @@ func groupBlobs*( blob_cursor = 0 for block_idx, blck in blocks: withBlck(blck[]): - when consensusFork >= ConsensusFork.Deneb: + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: template kzgs: untyped = forkyBlck.message.body.blob_kzg_commitments if kzgs.len == 0: continue @@ -342,7 +347,7 @@ proc getSyncBlockData*[T]( let (shouldGetBlob, blobsCount) = withBlck(blocksRange[0][]): - when consensusFork >= ConsensusFork.Deneb: + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: let res = len(forkyBlck.message.body.blob_kzg_commitments) if res > 0: (true, res) @@ -433,7 +438,7 @@ proc getSyncBlockData[A, B]( var hasBlobs = false for blck in blocks: withBlck(blck[]): - when consensusFork >= ConsensusFork.Deneb: + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: if len(forkyBlck.message.body.blob_kzg_commitments) > 0: hasBlobs = true break @@ -638,7 +643,9 @@ proc syncStep[A, B]( proc processCallback() = man.workers[index].status = SyncWorkerStatus.Processing - var jobs: seq[Future[void].Raising([CancelledError])] + var + jobs: seq[Future[void].Raising([CancelledError])] + requests: seq[SyncRequest[Peer]] try: for rindex in 0 ..< man.concurrentRequestsCount: @@ -658,6 +665,7 @@ proc syncStep[A, B]( peer_score = peer.getScore(), peer_speed = peer.netKbps(), index = index, + request_index = rindex, local_head_slot = headSlot, remote_head_slot = peerSlot, queue_input_slot = man.queue.inpSlot, @@ -669,18 +677,22 @@ proc syncStep[A, B]( await sleepAsync(RESP_TIMEOUT_DUR) 
break + requests.add(request) man.workers[index].status = SyncWorkerStatus.Downloading + let data = (await man.getSyncBlockData(index, request)).valueOr: debug "Failed to get block data", peer = peer, peer_score = peer.getScore(), peer_speed = peer.netKbps(), index = index, + request_index = rindex, reason = error, direction = man.direction, sync_ident = man.ident, topics = "syncman" - man.queue.push(request) + # Mark all requests as failed + man.queue.push(requests) break # Scoring will happen in `syncUpdate`. @@ -700,7 +712,19 @@ proc syncStep[A, B]( await allFutures(jobs) except CancelledError as exc: + # Mark all requests as failed + man.queue.push(requests) + # Cancelling all verification jobs let pending = jobs.filterIt(not(it.finished)).mapIt(cancelAndWait(it)) + debug "Cancelling sync step", + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + index = index, + num_pending = pending.len, + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" await noCancel allFutures(pending) raise exc @@ -720,6 +744,11 @@ proc syncWorker[A, B]( try: while true: man.workers[index].status = SyncWorkerStatus.Sleeping + + if not(man.resumeSyncEvent.isSet()): + man.workers[index].status = SyncWorkerStatus.Paused + await man.resumeSyncEvent.wait() + # This event is going to be set until we are not in sync with network await man.notInSyncEvent.wait() man.workers[index].status = SyncWorkerStatus.WaitingPeer @@ -767,6 +796,9 @@ proc getWorkersStats[A, B](man: SyncManager[A, B]): tuple[map: string, of SyncWorkerStatus.Processing: ch = 'P' inc(pending) + of SyncWorkerStatus.Paused: + ch = 'p' + inc(sleeping) map[i] = ch (map, sleeping, waiting, pending) @@ -822,6 +854,8 @@ proc syncLoop[A, B]( ) {.async: (raises: [CancelledError]).} = mixin getKey, getScore + man.resumeSyncEvent.fire() + # Update SyncQueue parameters, because callbacks used to calculate parameters # could provide different values at moment when syncLoop() started. man.initQueue() @@ -841,6 +875,8 @@ proc syncLoop[A, B]( man.avgSyncSpeed = 0 man.insSyncSpeed = 0 + await man.resumeSyncEvent.wait() + await man.notInSyncEvent.wait() # Give the node time to connect to peers and get the sync process started @@ -944,11 +980,12 @@ proc syncLoop[A, B]( uint64(man.queue.outSlot) + 1'u64 ) - # Update status string - man.syncStatus = timeleft.toTimeLeftString() & " (" & - (done * 100).formatBiggestFloat(ffDecimal, 2) & "%) " & - man.avgSyncSpeed.formatBiggestFloat(ffDecimal, 4) & - "slots/s (" & map & ":" & currentSlot & ")" + if man.resumeSyncEvent.isSet(): + # Update status string + man.syncStatus = timeleft.toTimeLeftString() & " (" & + (done * 100).formatBiggestFloat(ffDecimal, 2) & "%) " & + man.avgSyncSpeed.formatBiggestFloat(ffDecimal, 4) & + "slots/s (" & map & ":" & currentSlot & ")" if (man.queue.kind == SyncQueueKind.Forward) and (SyncManagerFlag.NoGenesisSync in man.flags): @@ -1055,10 +1092,42 @@ proc start*[A, B](man: SyncManager[A, B]) = ## Starts SyncManager's main loop. 
man.syncFut = man.syncLoop() +proc pause*[A, B](man: SyncManager[A, B]) = + ## Pause all the workers + man.resumeSyncEvent.clear() + man.inProgress = false + +proc resume*[A, B](man: SyncManager[A, B]) = + ## Resume all workers + man.resumeSyncEvent.fire() + man.inProgress = true + +func isStarted*[A, B](man: SyncManager[A, B]): bool = + not(isNil(man.syncFut)) and not(man.syncFut.finished()) + +func isPaused*[A, B](man: SyncManager[A, B]): bool = + not(man.resumeSyncEvent.isSet()) + proc updatePivot*[A, B](man: SyncManager[A, B], pivot: Slot) = ## Update progress pivot slot. man.progressPivot = pivot +func getStatus*[A, B](man: SyncManager[A, B]): string = + var res: seq[string] + if man.isStarted(): + res.add("started") + if man.isPaused(): + res.add("paused") + else: + if man.inProgress: + res.add("running") + else: + res.add("stopped") + "(" & res.join(", ") & ")" + +func queueLen*[A, B](man: SyncManager[A, B]): uint64 = + len(man.queue) + proc join*[A, B]( man: SyncManager[A, B] ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = diff --git a/beacon_chain/sync/sync_overseer.nim b/beacon_chain/sync/sync_overseer.nim index 225e740ca1..90ed08c754 100644 --- a/beacon_chain/sync/sync_overseer.nim +++ b/beacon_chain/sync/sync_overseer.nim @@ -60,6 +60,15 @@ iterator chunks*(data: openArray[BlockData], yield BlockDataChunk.init(stateCallback, data.toOpenArray(i, min(i + maxCount, len(data)) - 1)) +proc syncDistance*( + overseer: SyncOverseerRef +): uint64 = + let + dag = overseer.consensusManager.dag + wallSlot = overseer.getBeaconTimeFn().slotOrZero() + headSlot = dag.head.slot + wallSlot - headSlot + proc getLatestBeaconHeader( overseer: SyncOverseerRef ): Future[BeaconBlockHeader] {.async: (raises: [CancelledError]).} = @@ -214,33 +223,6 @@ proc blockProcessingLoop(overseer: SyncOverseerRef): Future[void] {. attestationPool = consensusManager.attestationPool validatorMonitor = overseer.validatorMonitor - proc onBlockAdded( - blckRef: BlockRef, blck: ForkedTrustedSignedBeaconBlock, epochRef: EpochRef, - unrealized: FinalityCheckpoints) {.gcsafe, raises: [].} = - - let wallTime = overseer.getBeaconTimeFn() - withBlck(blck): - attestationPool[].addForkChoice( - epochRef, blckRef, unrealized, forkyBlck.message, wallTime) - - validatorMonitor[].registerBeaconBlock( - MsgSource.sync, wallTime, forkyBlck.message) - - for attestation in forkyBlck.message.body.attestations: - for validator_index in - dag.get_attesting_indices(attestation, true): - validatorMonitor[].registerAttestationInBlock( - attestation.data, validator_index, forkyBlck.message.slot) - - withState(dag[].clearanceState): - when (consensusFork >= ConsensusFork.Altair) and - (type(forkyBlck) isnot phase0.TrustedSignedBeaconBlock): - for i in forkyBlck.message.body.sync_aggregate. - sync_committee_bits.oneIndices(): - validatorMonitor[].registerSyncAggregateInBlock( - forkyBlck.message.slot, forkyBlck.root, - forkyState.data.current_sync_committee.pubkeys.data[i]) - block mainLoop: while true: let bchunk = await overseer.blocksQueue.popFirst() @@ -248,17 +230,27 @@ proc blockProcessingLoop(overseer: SyncOverseerRef): Future[void] {. 
       block innerLoop:
         for bdata in bchunk.blocks:
           block:
-            let res = addBackfillBlockData(dag, bdata, bchunk.onStateUpdatedCb,
-                                           onBlockAdded)
+            let res = withBlck(bdata.blck):
+              addBackfillBlockData(
+                dag,
+                consensusFork,
+                bdata,
+                bchunk.onStateUpdatedCb,
+                onBlockAdded(
+                  dag,
+                  consensusFork,
+                  MsgSource.sync,
+                  overseer.getBeaconTimeFn(),
+                  attestationPool,
+                  validatorMonitor,
+                ),
+              )
             if res.isErr():
-              let msg = "Unable to add block data to database [" &
-                        $res.error & "]"
+              let msg = "Unable to add block data to database [" & $res.error & "]"
               bchunk.resfut.complete(Result[void, string].err(msg))
               break innerLoop

-      consensusManager.updateHead(overseer.getBeaconTimeFn).isOkOr:
-        bchunk.resfut.complete(Result[void, string].err(error))
-        break innerLoop
+      consensusManager[].updateHead(overseer.getBeaconTimeFn().slotOrZero())

       bchunk.resfut.complete(Result[void, string].ok())

@@ -415,18 +407,43 @@ proc startBackfillTask(overseer: SyncOverseerRef): Future[void] {.
   async: (raises: []).} =
   # This procedure performs delayed start of backfilling process.
   while overseer.consensusManager.dag.needsBackfill:
-    if not(overseer.forwardSync.inProgress):
-      # Only start the backfiller if it's needed _and_ head sync has completed -
-      # if we lose sync after having synced head, we could stop the backfilller,
-      # but this should be a fringe case - might as well keep the logic simple
-      # for now.
-      overseer.backwardSync.start()
-      return
+    debug "Sync overseer backfill monitor status",
+      need_backfill = overseer.consensusManager.dag.needsBackfill,
+      sync_distance = overseer.syncDistance,
+      backward_status = overseer.backwardSync.getStatus(),
+      backward_queue = overseer.backwardSync.queueLen(),
+      forward_status = overseer.forwardSync.getStatus(),
+      forward_queue = overseer.forwardSync.queueLen()
+
+    if overseer.syncDistance() <= 1'u64:
+      # Only allow the backfiller to work if it's needed _and_ head sync has
+      # completed - if we lose sync after having synced head, we pause the
+      # backfiller.
+      #
+      # The 1-slot distance used here is an experimental value.
+      if not(overseer.backwardSync.isStarted()):
+        overseer.backwardSync.start()
+      else:
+        if overseer.backwardSync.isPaused():
+          overseer.backwardSync.resume()
+    else:
+      if overseer.backwardSync.isStarted():
+        if not(overseer.backwardSync.isPaused()):
+          overseer.backwardSync.pause()

     try:
       await sleepAsync(chronos.seconds(2))
     except CancelledError:
       return

+  debug "Backfill process finished",
+    need_backfill = overseer.consensusManager.dag.needsBackfill,
+    sync_distance = overseer.syncDistance,
+    backward_status = overseer.backwardSync.getStatus(),
+    backward_queue = overseer.backwardSync.queueLen(),
+    forward_status = overseer.forwardSync.getStatus(),
+    forward_queue = overseer.forwardSync.queueLen()
+
+  overseer.syncKind = SyncKind.ForwardSync
+
 proc mainLoop*(
     overseer: SyncOverseerRef
 ): Future[void] {.async: (raises: []).} =
@@ -446,9 +463,11 @@ proc mainLoop*(
     if overseer.isWithinWeakSubjectivityPeriod(currentSlot):
       # Starting forward sync manager/monitor.
+      overseer.syncKind = SyncKind.ForwardSync
       overseer.forwardSync.start()
       # Starting backfill/backward sync manager.
       if dag.needsBackfill():
+        overseer.syncKind = SyncKind.TrustedNodeSync
         asyncSpawn overseer.startBackfillTask()
       return
     else:
@@ -468,6 +487,7 @@ proc mainLoop*(
     if overseer.config.longRangeSync == LongRangeSyncMode.Lenient:
       # Starting forward sync manager/monitor only.
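
The pause()/resume() calls used by the backfill monitor above gate the sync workers on the new `resumeSyncEvent`: a paused SyncManager keeps its queue and peers, its workers simply park on the event. For illustration only, a minimal sketch of that chronos AsyncEvent gating pattern, with a hypothetical worker that does one unit of work per iteration:

import chronos

proc worker(resume: AsyncEvent, id: int) {.async.} =
  while true:
    if not resume.isSet():
      echo "worker ", id, ": paused"
      await resume.wait()          # blocks here until resume() fires the event
    # ... one unit of sync work would go here ...
    await sleepAsync(100.milliseconds)

proc demo() {.async.} =
  let resume = newAsyncEvent()
  resume.fire()                    # start in the running state
  asyncSpawn worker(resume, 1)
  await sleepAsync(300.milliseconds)
  resume.clear()                   # pause: the worker parks on wait()
  await sleepAsync(300.milliseconds)
  resume.fire()                    # resume: the worker continues
  await sleepAsync(300.milliseconds)

waitFor demo()
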
+ overseer.syncKind = SyncKind.ForwardSync overseer.forwardSync.start() return @@ -490,6 +510,7 @@ proc mainLoop*( overseer.untrustedInProgress = true try: + overseer.syncKind = SyncKind.UntrustedSyncInit await overseer.initUntrustedSync() except CancelledError: return @@ -497,6 +518,7 @@ proc mainLoop*( # We need to update pivot slot to enable timeleft calculation. overseer.untrustedSync.updatePivot(overseer.clist.tail.get().slot) # Note: We should not start forward sync manager! + overseer.syncKind = SyncKind.UntrustedSyncDownload overseer.untrustedSync.start() # Waiting until untrusted backfilling will not be complete @@ -511,6 +533,7 @@ proc mainLoop*( let blockProcessingFut = overseer.blockProcessingLoop() try: + overseer.syncKind = SyncKind.UntrustedSyncRebuild await overseer.rebuildState() except CancelledError: await cancelAndWait(blockProcessingFut) @@ -525,6 +548,7 @@ proc mainLoop*( # When we finished state rebuilding process - we could start forward # SyncManager which could perform finish sync. + overseer.syncKind = SyncKind.ForwardSync overseer.forwardSync.start() proc start*(overseer: SyncOverseerRef) = @@ -535,3 +559,62 @@ proc stop*(overseer: SyncOverseerRef) {.async: (raises: []).} = "SyncOverseer was not started yet") if not(overseer.loopFuture.finished()): await cancelAndWait(overseer.loopFuture) + +proc syncStatusMessage*( + overseer: SyncOverseerRef, +): string = + let + dag = overseer.consensusManager.dag + wallSlot = overseer.getBeaconTimeFn().slotOrZero() + optimistic = not(dag.head.executionValid) + optSuffix = + if not(dag.head.executionValid): + "/opt" + else: + "" + lcSuffix = + if overseer.consensusManager[].shouldSyncOptimistically(wallSlot): + " - lc: " & $shortLog(overseer.consensusManager[].optimisticHead) + else: + "" + res = + case overseer.syncKind + of SyncKind.ForwardSync: + if overseer.forwardSync.inProgress: + overseer.forwardSync.syncStatus & optSuffix & lcSuffix + else: + "" + of SyncKind.TrustedNodeSync: + if overseer.backwardSync.inProgress: + "backfill: " & overseer.backwardSync.syncStatus + else: + if overseer.forwardSync.inProgress: + overseer.forwardSync.syncStatus & optSuffix & lcSuffix + else: + "" + of SyncKind.UntrustedSyncInit: + if overseer.statusMsg.isSome(): + "untrusted: " & overseer.statusMsg.get() + else: + "" + of SyncKind.UntrustedSyncDownload: + "untrusted: " & overseer.untrustedSync.syncStatus + of SyncKind.UntrustedSyncRebuild: + if overseer.statusMsg.isSome(): + "untrusted: " & overseer.statusMsg.get() + else: + "" + + if len(res) == 0: + if overseer.syncDistance() <= 1: + if optimistic: + "synced/opt" + else: + "synced" + else: + if optimistic: + "almost synced/opt" + else: + "almost synced" + else: + res diff --git a/beacon_chain/sync/sync_protocol.nim b/beacon_chain/sync/sync_protocol.nim index 4b1450655e..106108ac00 100644 --- a/beacon_chain/sync/sync_protocol.nim +++ b/beacon_chain/sync/sync_protocol.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} import chronicles, chronos, snappy, snappy/codec, @@ -41,6 +41,8 @@ type BlobIdentifier, Limit MAX_SUPPORTED_REQUEST_BLOB_SIDECARS] DataColumnIdentifierList* = List[ DataColumnIdentifier, Limit (MAX_REQUEST_DATA_COLUMN_SIDECARS)] + DataColumnsByRootIdentifierList* = List[ + DataColumnsByRootIdentifier, Limit (MAX_REQUEST_BLOCKS_DENEB)] proc readChunkPayload*( conn: Connection, peer: Peer, MsgType: type (ref ForkedSignedBeaconBlock)): @@ -85,7 +87,7 @@ proc readChunkPayload*( return neterr InvalidContextBytes proc readChunkPayload*( - conn: Connection, peer: Peer, MsgType: type (ref DataColumnSidecar)): + conn: Connection, peer: Peer, MsgType: type (ref fulu.DataColumnSidecar)): Future[NetRes[MsgType]] {.async: (raises: [CancelledError]).} = var contextBytes: ForkDigest try: @@ -98,7 +100,7 @@ proc readChunkPayload*( withConsensusFork(contextFork): when consensusFork >= ConsensusFork.Fulu: - let res = await readChunkPayload(conn, peer, DataColumnSidecar) + let res = await readChunkPayload(conn, peer, fulu.DataColumnSidecar) if res.isOk: return ok newClone(res.get) else: @@ -173,32 +175,33 @@ template getBlobSidecarsByRange( dag.getBlockRange(startSlot, blockIds.toOpenArray(0, endIndex)) var - found = 0 + found = 0'u64 bytes: seq[byte] - for i in startIndex..endIndex: - for j in 0..= maxReqSidecars: + break outer debug "BlobSidecar v" & versionNumber & " range request done", peer, startSlot, count = reqCount, found @@ -254,13 +257,6 @@ p2pProtocol BeaconSync(version = 1, for i in startIndex..endIndex: if dag.getBlockSZ(blocks[i], bytes): - # In general, there is not much intermediate time between post-merge - # blocks all being optimistic and none of them being optimistic. The - # EL catches up, tells the CL the head is verified, and that's it. - if blocks[i].slot.epoch >= dag.cfg.BELLATRIX_FORK_EPOCH and - not dag.head.executionValid: - continue - let uncompressedLen = uncompressedLenFramed(bytes).valueOr: warn "Cannot read block size, database corrupt?", bytes = bytes.len(), blck = shortLog(blocks[i]) @@ -321,13 +317,6 @@ p2pProtocol BeaconSync(version = 1, continue if dag.getBlockSZ(blockRef.bid, bytes): - # In general, there is not much intermediate time between post-merge - # blocks all being optimistic and none of them being optimistic. The - # EL catches up, tells the CL the head is verified, and that's it. 
- if blockRef.slot.epoch >= dag.cfg.BELLATRIX_FORK_EPOCH and - not dag.head.executionValid: - continue - let uncompressedLen = uncompressedLenFramed(bytes).valueOr: warn "Cannot read block size, database corrupt?", bytes = bytes.len(), blck = shortLog(blockRef) @@ -390,19 +379,21 @@ p2pProtocol BeaconSync(version = 1, peer.networkState.dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, peer.networkState.dag.cfg.MAX_REQUEST_BLOB_SIDECARS_ELECTRA) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1 + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1 proc dataColumnSidecarsByRoot( peer: Peer, - colIds: DataColumnIdentifierList, + colIds: DataColumnsByRootIdentifierList, response: MultipleChunksResponse[ - ref DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMN_SIDECARS)]) + ref fulu.DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMN_SIDECARS)]) {.async, libp2pProtocol("data_column_sidecars_by_root", 1).} = trace "got data column root request", peer, len = colIds.len if colIds.len == 0: raise newException(InvalidInputsError, "No data columns request for root") - if colIds.lenu64 > MAX_REQUEST_DATA_COLUMN_SIDECARS: + static: doAssert MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS == + MAX_REQUEST_DATA_COLUMN_SIDECARS + if colIds.lenu64 > MAX_REQUEST_BLOCKS_DENEB: raise newException(InvalidInputsError, "Exceeding data column request limit") let @@ -414,28 +405,40 @@ p2pProtocol BeaconSync(version = 1, bytes: seq[byte] for i in 0..= dag.head.slot.epoch: + if dag.cfg.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS >= + dag.head.slot.epoch: GENESIS_EPOCH else: - dag.head.slot.epoch - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS + dag.head.slot.epoch - + dag.cfg.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS if startSlot.epoch < epochBoundary: raise newException(ResourceUnavailableError, DataColumnsOutOfRange) @@ -478,36 +480,32 @@ p2pProtocol BeaconSync(version = 1, dag.getBlockRange(startSlot, blockIds.toOpenArray(0, endIndex)) var - found = 0 + found = 0'u64 bytes: seq[byte] - for i in startIndex..endIndex: - for k in reqColumns: - if dag.db.getDataColumnSidecarSZ(blockIds[i].root, ColumnIndex k, bytes): - if blockIds[i].slot.epoch >= dag.cfg.DENEB_FORK_EPOCH and - not dag.head.executionValid: - continue - - let uncompressedLen = uncompressedLenFramed(bytes).valueOr: - warn "Cannot read data column sidecar size, database corrup?", - bytes = bytes.len, blck = shortLog(blockIds[i]) - continue - - peer.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_range/1") - peer.network.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_range/1") - - await response.writeBytesSZ( - uncompressedLen, bytes, - peer.network.forkDigestAtEpoch(blockIds[i].slot.epoch).data) - inc found - - var - respondedCols: seq[ColumnIndex] - respondedCols.add(k) - - # additional logging for devnets - debug "responded to data column sidecar range request", - peer, blck = shortLog(blockIds[i]), columns = respondedCols + block outer: + for i in startIndex..endIndex: + for k in reqColumns: + if dag.db.getDataColumnSidecarSZ(blockIds[i].root, ColumnIndex k, bytes): + let uncompressedLen = uncompressedLenFramed(bytes).valueOr: + warn "Cannot read data column sidecar size, database corrup?", + bytes = bytes.len, blck = shortLog(blockIds[i]) + continue + + peer.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_range/1") + peer.network.awaitQuota(dataColumnResponseCost, 
"data_column_sidecars_by_range/1") + + await response.writeBytesSZ( + uncompressedLen, bytes, + peer.network.forkDigestAtEpoch(blockIds[i].slot.epoch).data) + inc found + + # additional logging for devnets + trace "responded to data column sidecar range request", + peer, blck = shortLog(blockIds[i]), column = k + + if found >= MAX_REQUEST_DATA_COLUMN_SIDECARS: + break outer debug "Data column range request done", peer, startSlot, count = reqCount, columns = reqColumns, found diff --git a/beacon_chain/sync/sync_queue.nim b/beacon_chain/sync/sync_queue.nim index 272935f78c..5c78ab44e6 100644 --- a/beacon_chain/sync/sync_queue.nim +++ b/beacon_chain/sync/sync_queue.nim @@ -25,6 +25,8 @@ type BlockVerifier* = proc(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} + ForkAtEpochCallback* = + proc(epoch: Epoch): ConsensusFork {.gcsafe, raises: [].} SyncRange* = object slot*: Slot @@ -37,15 +39,20 @@ type SyncQueueKind* {.pure.} = enum Forward, Backward + SyncRequestFlag* {.pure.} = enum + Void + SyncRequest*[T] = object kind*: SyncQueueKind data*: SyncRange + flags*: set[SyncRequestFlag] item*: T SyncQueueItem[T] = object requests: seq[SyncRequest[T]] data: SyncRange failuresCount: Natural + voidsCount: Natural SyncWaiterItem[T] = ref object future: Future[void].Raising([CancelledError]) @@ -90,6 +97,7 @@ type requests: Deque[SyncQueueItem[T]] getSafeSlot: GetSlotCallback blockVerifier: BlockVerifier + forkAtEpoch: ForkAtEpochCallback waiters: seq[SyncWaiterItem[T]] gapList: seq[GapItem[T]] lock: AsyncLock @@ -267,7 +275,60 @@ func init[T](t: typedesc[SyncQueueItem], func init[T](t: typedesc[GapItem], req: SyncRequest[T]): GapItem[T] = GapItem[T](data: req.data, item: req.item) -func next(srange: SyncRange): SyncRange {.inline.} = +func last_slot*(epoch: Epoch): Slot = + ## Return the start slot of ``epoch``. 
+ const maxEpoch = Epoch(FAR_FUTURE_SLOT div SLOTS_PER_EPOCH) + if epoch >= maxEpoch: FAR_FUTURE_SLOT + else: Slot(epoch * SLOTS_PER_EPOCH + (SLOTS_PER_EPOCH - 1'u64)) + +func start_slot*(sr: SyncRange): Slot = + sr.slot + +func last_slot*(sr: SyncRange): Slot = + if sr.slot + (uint64(sr.count) - 1'u64) < sr.slot: + FAR_FUTURE_SLOT + else: + sr.slot + (uint64(sr.count) - 1'u64) + +proc epochFilter*[T](squeue: SyncQueue[T], srange: SyncRange): SyncRange = + case squeue.kind + of SyncQueueKind.Forward: + let + startEpoch = srange.slot.epoch() + startFork = squeue.forkAtEpoch(startEpoch) + + var currentEpoch = startEpoch + while (currentEpoch.start_slot() <= srange.last_slot()) and + (squeue.forkAtEpoch(currentEpoch) == startFork) and + (currentEpoch != FAR_FUTURE_EPOCH): + currentEpoch += 1 + + if (currentEpoch.start_slot() <= srange.last_slot()) and + (squeue.forkAtEpoch(currentEpoch) != startFork): + SyncRange( + slot: srange.start_slot(), + count: currentEpoch.start_slot() - srange.slot) + else: + srange + of SyncQueueKind.Backward: + let + startEpoch = srange.last_slot().epoch() + startFork = squeue.forkAtEpoch(startEpoch) + + var currentEpoch = startEpoch + while (currentEpoch.last_slot() >= srange.start_slot()) and + (squeue.forkAtEpoch(currentEpoch) == startFork) and + (currentEpoch != GENESIS_EPOCH): + currentEpoch -= 1 + + if (currentEpoch.last_slot() >= srange.start_slot()) and + (squeue.forkAtEpoch(currentEpoch) != startFork): + let ncount = srange.last_slot() - (currentEpoch + 1).start_slot() + 1'u64 + SyncRange(slot: (currentEpoch + 1).start_slot(), count: ncount) + else: + srange + +func next[T](sq: SyncQueue[T], srange: SyncRange): SyncRange {.inline.} = let slot = srange.slot + srange.count if slot == FAR_FUTURE_SLOT: # Finish range @@ -276,22 +337,22 @@ func next(srange: SyncRange): SyncRange {.inline.} = # Range that causes uint64 overflow, fixing. SyncRange.init(slot, uint64(FAR_FUTURE_SLOT - srange.count)) else: - if slot + srange.count < slot: - SyncRange.init(slot, uint64(FAR_FUTURE_SLOT - srange.count)) + if slot + sq.chunkSize < slot: + SyncRange.init(slot, uint64(FAR_FUTURE_SLOT - sq.chunkSize)) else: - SyncRange.init(slot, srange.count) + SyncRange.init(slot, sq.chunkSize) -func prev(srange: SyncRange): SyncRange {.inline.} = +func prev[T](sq: SyncQueue[T], srange: SyncRange): SyncRange {.inline.} = if srange.slot == GENESIS_SLOT: # Start range srange else: - let slot = srange.slot - srange.count + let slot = srange.slot - sq.chunkSize if slot > srange.slot: # Range that causes uint64 underflow, fixing. SyncRange.init(GENESIS_SLOT, uint64(srange.slot)) else: - SyncRange.init(slot, srange.count) + SyncRange.init(slot, sq.chunkSize) func contains(srange: SyncRange, slot: Slot): bool {.inline.} = ## Returns `true` if `slot` is in range of `srange`. 
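
Since `epochFilter` only ever shrinks a range, a request that would straddle a fork activation gets clamped so it ends just before the new fork's first epoch, and the remainder is fetched by a later request. A small self-contained sketch of the forward-sync case, using hypothetical values (32-slot epochs, a fork activating at epoch 2) and an illustrative `clampToFork` helper that mirrors the shape of `epochFilter`:

const SLOTS_PER_EPOCH = 32'u64

type Fork = enum ForkA, ForkB

func forkAtEpoch(epoch: uint64): Fork =
  # Hypothetical schedule: the new fork activates at epoch 2.
  if epoch >= 2: ForkB else: ForkA

func clampToFork(slot, count: uint64): tuple[slot, count: uint64] =
  ## Forward case: shrink the range so it never crosses into the next fork.
  let
    lastSlot = slot + count - 1
    startFork = forkAtEpoch(slot div SLOTS_PER_EPOCH)
  var epoch = slot div SLOTS_PER_EPOCH
  while epoch * SLOTS_PER_EPOCH <= lastSlot and forkAtEpoch(epoch) == startFork:
    inc epoch
  if epoch * SLOTS_PER_EPOCH <= lastSlot and forkAtEpoch(epoch) != startFork:
    (slot: slot, count: epoch * SLOTS_PER_EPOCH - slot)
  else:
    (slot: slot, count: count)

# Slots 60..99 span the epoch-2 fork boundary (slot 64), so the request is
# clamped to slots 60..63; the follow-up request starts inside the new fork.
doAssert clampToFork(60, 40) == (slot: 60'u64, count: 4'u64)
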
@@ -451,6 +512,7 @@ func init*[T](t1: typedesc[SyncQueue], t2: typedesc[T], failureResetThreshold: Natural, getSafeSlotCb: GetSlotCallback, blockVerifier: BlockVerifier, + forkAtEpoch: ForkAtEpochCallback, ident: string = "main"): SyncQueue[T] = doAssert(chunkSize > 0'u64, "Chunk size should not be zero") doAssert(requestsCount > 0, "Number of requests should not be zero") @@ -466,16 +528,17 @@ func init*[T](t1: typedesc[SyncQueue], t2: typedesc[T], inpSlot: start, outSlot: start, blockVerifier: blockVerifier, + forkAtEpoch: forkAtEpoch, requests: initDeque[SyncQueueItem[T]](), lock: newAsyncLock(), ident: ident ) -func contains[T](requests: openArray[SyncRequest[T]], source: T): bool = - for req in requests: - if req.item == source: - return true - false +func searchPeer[T](requests: openArray[SyncRequest[T]], source: T): int = + for index, request in requests.pairs(): + if request.item == source: + return index + -1 func find[T](sq: SyncQueue[T], req: SyncRequest[T]): Opt[SyncPosition] = if len(sq.requests) == 0: @@ -538,7 +601,8 @@ proc pop*[T](sq: SyncQueue[T], peerMaxSlot: Slot, item: T): SyncRequest[T] = var count = 0 for qitem in sq.requests.mitems(): if len(qitem.requests) < sq.requestsCount: - if item notin qitem.requests: + let sindex = qitem.requests.searchPeer(item) + if sindex < 0: return if qitem.data.slot > peerMaxSlot: # Peer could not satisfy our request, returning empty one. @@ -550,7 +614,9 @@ proc pop*[T](sq: SyncQueue[T], peerMaxSlot: Slot, item: T): SyncRequest[T] = qitem.requests.add(request) request else: - inc(count) + if SyncRequestFlag.Void notin qitem.requests[sindex].flags: + # We only count non-empty requests. + inc(count) doAssert(count < sq.requestsCount, "You should not pop so many requests for single peer") @@ -567,9 +633,9 @@ proc pop*[T](sq: SyncQueue[T], peerMaxSlot: Slot, item: T): SyncRequest[T] = case sq.kind of SyncQueueKind.Forward: - lastrange.next() + sq.next(lastrange) of SyncQueueKind.Backward: - lastrange.prev() + sq.prev(lastrange) else: case sq.kind of SyncQueueKind.Forward: @@ -581,7 +647,7 @@ proc pop*[T](sq: SyncQueue[T], peerMaxSlot: Slot, item: T): SyncRequest[T] = # Peer could not satisfy our request, returning empty one. SyncRequest.init(sq.kind, item) else: - let request = SyncRequest.init(sq.kind, newrange, item) + let request = SyncRequest.init(sq.kind, sq.epochFilter(newrange), item) sq.requests.addLast(SyncQueueItem.init(request)) request @@ -688,12 +754,17 @@ iterator blocks( for i in countdown(len(blcks) - 1, 0): yield (blcks[i], blobs.getOpt(i)) +proc push*[T](sq: SyncQueue[T], requests: openArray[SyncRequest[T]]) = + ## Push multiple failed requests back to queue. + for request in requests: + let pos = sq.find(request).valueOr: + debug "Request is not relevant anymore", request = request + continue + sq.del(pos) + proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T]) = - ## Push failed request back to queue. - let pos = sq.find(sr).valueOr: - debug "Request is not relevant anymore", request = sr - return - sq.del(pos) + ## Push single failed request back to queue. 
+ sq.push([sr]) proc process[T]( sq: SyncQueue[T], @@ -800,7 +871,12 @@ proc push*[T]( raise exc pos - await sq.lock.acquire() + try: + await sq.lock.acquire() + except CancelledError as exc: + sq.del(sr) + raise exc + try: position = sq.findPosition(sr) @@ -819,6 +895,8 @@ proc push*[T]( # Empty responses does not affect failures count debug "Received empty response", request = sr, + voids_count = sq.requests[position.qindex].voidsCount, + failures_count = sq.requests[position.qindex].failuresCount, blocks_count = len(data), blocks_map = getShortMap(sr, data), blobs_map = getShortMap(sr, blobs), @@ -826,13 +904,23 @@ proc push*[T]( topics = "syncman" sr.item.updateStats(SyncResponseKind.Empty, 1'u64) + inc(sq.requests[position.qindex].voidsCount) + # Mark empty request in queue, so this range will not be requested by + # the same peer. + sq.requests[position.qindex].requests[position.sindex].flags.incl( + SyncRequestFlag.Void) sq.gapList.add(GapItem.init(sr)) - sq.advanceQueue() + # With empty response - advance only when `requestsCount` of different + # peers returns empty response for the same range. + if sq.requests[position.qindex].voidsCount >= sq.requestsCount: + sq.advanceQueue() of SyncProcessError.Duplicate: # Duplicate responses does not affect failures count debug "Received duplicate response", request = sr, + voids_count = sq.requests[position.qindex].voidsCount, + failures_count = sq.requests[position.qindex].failuresCount, blocks_count = len(data), blocks_map = getShortMap(sr, data), blobs_map = getShortMap(sr, blobs), @@ -845,6 +933,7 @@ proc push*[T]( debug "Block pool rejected peer's response", request = sr, invalid_block = pres.blck, + voids_count = sq.requests[position.qindex].voidsCount, failures_count = sq.requests[position.qindex].failuresCount, blocks_count = len(data), blocks_map = getShortMap(sr, data), @@ -859,6 +948,7 @@ proc push*[T]( notice "Received blocks from an unviable fork", request = sr, unviable_block = pres.blck, + voids_count = sq.requests[position.qindex].voidsCount, failures_count = sq.requests[position.qindex].failuresCount, blocks_count = len(data), blocks_map = getShortMap(sr, data), @@ -874,6 +964,7 @@ proc push*[T]( debug "Unexpected missing parent", request = sr, missing_parent_block = pres.blck, + voids_count = sq.requests[position.qindex].voidsCount, failures_count = sq.requests[position.qindex].failuresCount, blocks_count = len(data), blocks_map = getShortMap(sr, data), @@ -895,6 +986,7 @@ proc push*[T]( request = sr, finalized_slot = sq.getSafeSlot(), missing_parent_block = pres.blck, + voids_count = sq.requests[position.qindex].voidsCount, failures_count = sq.requests[position.qindex].failuresCount, blocks_count = len(data), blocks_map = getShortMap(sr, data), @@ -920,6 +1012,7 @@ proc push*[T]( if sq.requests[position.qindex].failuresCount >= sq.failureResetThreshold: let point = sq.getRewindPoint(pres.blck.get().slot, sq.getSafeSlot()) debug "Multiple repeating errors occured, rewinding", + voids_count = sq.requests[position.qindex].voidsCount, failures_count = sq.requests[position.qindex].failuresCount, rewind_slot = point, sync_ident = sq.ident, diff --git a/beacon_chain/sync/sync_types.nim b/beacon_chain/sync/sync_types.nim index 1de01d5b1b..60402fe882 100644 --- a/beacon_chain/sync/sync_types.nim +++ b/beacon_chain/sync/sync_types.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # 
* MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -25,6 +25,12 @@ type onStateUpdatedCb*: OnStateUpdated blocks*: seq[BlockData] + SyncKind* {.pure.} = enum + ForwardSync, TrustedNodeSync, + UntrustedSyncInit, + UntrustedSyncDownload, + UntrustedSyncRebuild + SyncOverseer* = object statusMsg*: Opt[string] consensusManager*: ref ConsensusManager @@ -44,6 +50,7 @@ type avgSpeed*: float blocksQueue*: AsyncQueue[BlockDataChunk] untrustedInProgress*: bool + syncKind*: SyncKind SyncOverseerRef* = ref SyncOverseer diff --git a/beacon_chain/sync/validator_custody.nim b/beacon_chain/sync/validator_custody.nim new file mode 100644 index 0000000000..2e5ac58e99 --- /dev/null +++ b/beacon_chain/sync/validator_custody.nim @@ -0,0 +1,71 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} + +import std/[sets] +import chronicles +import ssz_serialization/[proofs, types] +import + ../validators/action_tracker, + ../spec/[beaconstate, forks, network, helpers, peerdas_helpers], + ../networking/eth2_network, + ../consensus_object_pools/blockchain_dag, + ../consensus_object_pools/block_dag, + ../consensus_object_pools/blob_quarantine + +from std/algorithm import sort +from std/sequtils import toSeq +from ../beacon_clock import GetBeaconTimeFn + +logScope: topics = "validator_custody" + + +type + ValidatorCustody* = object + network: Eth2Node + dag*: ChainDAGRef + older_column_set: HashSet[ColumnIndex] + newer_column_set*: HashSet[ColumnIndex] + diff_set*: seq[ColumnIndex] + dataColumnQuarantine: ref ColumnQuarantine + + ValidatorCustodyRef* = ref ValidatorCustody + +func init*(T: type ValidatorCustodyRef, network: Eth2Node, + dag: ChainDAGRef, + older_column_set: HashSet[ColumnIndex], + dataColumnQuarantine: ref ColumnQuarantine): ValidatorCustodyRef = + (ValidatorCustodyRef)( + network: network, + dag: dag, + older_column_set: older_column_set, + dataColumnQuarantine: dataColumnQuarantine) + +proc detectNewValidatorCustody*(vcus: ValidatorCustodyRef, + current_slot: Slot, + total_node_balance: Gwei) = + debug "Total node balance before applying validator custody", + total_node_balance = total_node_balance + let + vcustody = + vcus.dag.cfg.get_validators_custody_requirement(total_node_balance) + newer_columns = + vcus.dag.cfg.resolve_columns_from_custody_groups( + vcus.network.nodeId, + max(vcus.dag.cfg.CUSTODY_REQUIREMENT.uint64, + vcustody)) + + # update data column quarantine custody requirements + vcus.dataColumnQuarantine[].custodyColumns = newer_columns.toSeq() + sort(vcus.dataColumnQuarantine[].custodyColumns) + # check which custody set is larger + if newer_columns.len >= vcus.older_column_set.len: + vcus.diff_set = toSeq(newer_columns.difference(vcus.older_column_set)) + vcus.older_column_set = newer_columns + vcus.newer_column_set = newer_columns + vcus.dag.eaSlot = max(vcus.dag.eaSlot, current_slot) diff --git a/beacon_chain/trusted_node_sync.nim b/beacon_chain/trusted_node_sync.nim index c670c5b710..c413d164e9 100644 --- 
a/beacon_chain/trusted_node_sync.nim +++ b/beacon_chain/trusted_node_sync.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import stew/base10, @@ -14,8 +14,7 @@ import ./consensus_object_pools/[block_clearance, blockchain_dag], ./spec/eth2_apis/rest_beacon_client, ./spec/[beaconstate, eth2_merkleization, forks, light_client_sync, - network, presets, - state_transition, deposit_snapshots] + network, presets, state_transition] from presto import RestDecodingError from "."/beacon_clock import @@ -25,20 +24,6 @@ const largeRequestsTimeout = 3.minutes # Downloading large items such as states. smallRequestsTimeout = 30.seconds # Downloading smaller items such as blocks and deposit snapshots. -proc fetchDepositSnapshot( - client: RestClientRef -): Future[Result[DepositContractSnapshot, string]] {.async.} = - let resp = try: - awaitWithTimeout(client.getDepositSnapshot(), smallRequestsTimeout): - return err "Fetching /eth/v1/beacon/deposit_snapshot timed out" - except CatchableError as e: - return err("The trusted node likely does not support the /eth/v1/beacon/deposit_snapshot end-point:" & e.msg) - - let snapshot = DepositContractSnapshot.init(resp.data.data).valueOr: - return err "The obtained deposit snapshot contains self-contradictory data" - - ok snapshot - from ./spec/datatypes/deneb import asSigVerified, shortLog type @@ -78,7 +63,6 @@ proc doTrustedNodeSync*( syncTarget: TrustedNodeSyncTarget, backfill: bool, reindex: bool, - downloadDepositSnapshot: bool, genesisState: ref ForkedHashedBeaconState = nil) {.async.} = logScope: restUrl @@ -191,7 +175,7 @@ proc doTrustedNodeSync*( doAssert genesisState != nil, "Already checked for `TrustedBlockRoot`" let genesisTime = getStateField(genesisState[], genesis_time) - beaconClock = BeaconClock.init(genesisTime).valueOr: + beaconClock = BeaconClock.init(cfg.time, genesisTime).valueOr: error "Invalid genesis time in state", genesisTime quit 1 getBeaconTime = beaconClock.getBeaconTimeFn() @@ -386,20 +370,6 @@ proc doTrustedNodeSync*( else: ChainDAGRef.preInit(db, state[]) - if downloadDepositSnapshot: - # Fetch deposit snapshot. This API endpoint is still optional. 
- let depositSnapshot = await fetchDepositSnapshot(client) - if depositSnapshot.isOk: - if depositSnapshot.get.matches(getStateField(state[], eth1_data)): - info "Writing deposit contracts snapshot", - depositRoot = depositSnapshot.get.getDepositRoot(), - depositCount = depositSnapshot.get.getDepositCountU64 - db.putDepositContractSnapshot(depositSnapshot.get) - else: - warn "The downloaded deposit snapshot does not agree with the downloaded state" - else: - warn "Deposit tree snapshot was not imported", reason = depositSnapshot.error - else: notice "Skipping checkpoint download, database already exists (remove db directory to get a fresh snapshot)", databaseDir, head = shortLog(head.get()) @@ -407,7 +377,7 @@ proc doTrustedNodeSync*( # Coming this far, we've done what ChainDAGRef.preInit would normally do - # we can now load a ChainDAG to start backfilling it let - validatorMonitor = newClone(ValidatorMonitor.init(false, false)) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time, false, false)) dag = ChainDAGRef.init(cfg, db, validatorMonitor, {}, eraPath = eraDir) backfillSlot = max(dag.backfill.slot, 1.Slot) - 1 horizon = max(dag.horizon, dag.frontfill.valueOr(BlockId()).slot) @@ -555,5 +525,5 @@ when isMainModule: db = BeaconChainDB.new(databaseDir, cfg, inMemory = false) waitFor db.doTrustedNodeSync( cfg, databaseDir, os.paramStr(3), - os.paramStr(4), syncTarget, backfill, false, true) + os.paramStr(4), syncTarget, backfill, false) db.close() diff --git a/beacon_chain/validator_bucket_sort.nim b/beacon_chain/validator_bucket_sort.nim index 2659d98e5e..5027a1c865 100644 --- a/beacon_chain/validator_bucket_sort.nim +++ b/beacon_chain/validator_bucket_sort.nim @@ -1,14 +1,15 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} import std/typetraits import "."/spec/crypto +from stew/staticfor import staticFor from "."/spec/datatypes/base import Validator, ValidatorIndex, pubkey, `==` const @@ -16,11 +17,8 @@ const NUM_BUCKETS = 1 shl BUCKET_BITS type - # `newSeqUninitialized` requires its type to be SomeNumber - IntValidatorIndex = distinctBase ValidatorIndex - BucketSortedValidators* = object - bucketSorted*: seq[IntValidatorIndex] + bucketSorted*: seq[ValidatorIndex] bucketUpperBounds: array[NUM_BUCKETS, uint] # avoids over/underflow checks extraItems*: seq[ValidatorIndex] @@ -50,17 +48,17 @@ func sortValidatorBuckets*(validators: openArray[Validator]): bucketInsertPositions[i] = accum doAssert accum == validators.len.uint let res = (ref BucketSortedValidators)( - bucketSorted: newSeqUninitialized[IntValidatorIndex](validators.len), + bucketSorted: newSeqUninit[ValidatorIndex](validators.len), bucketUpperBounds: bucketInsertPositions) for i, validator in validators: let insertPos = addr bucketInsertPositions[getBucketNumber(validator.pubkey)] dec insertPos[] - res.bucketSorted[insertPos[]] = i.IntValidatorIndex + res.bucketSorted[insertPos[]] = i.ValidatorIndex doAssert bucketInsertPositions[0] == 0 - for i in 1 ..< NUM_BUCKETS: + staticFor i, 1 ..< NUM_BUCKETS: doAssert res.bucketUpperBounds[i - 1] == bucketInsertPositions[i] res @@ -85,6 +83,6 @@ func findValidatorIndex*( bsv.bucketUpperBounds[bucketNumber - 1] for i in lowerBounds ..< bsv.bucketUpperBounds[bucketNumber]: - if validators[bsv.bucketSorted[i]].pubkey == pubkey: - return Opt.some bsv.bucketSorted[i].ValidatorIndex + if validators[bsv.bucketSorted[i].distinctBase].pubkey == pubkey: + return Opt.some bsv.bucketSorted[i] Opt.none ValidatorIndex diff --git a/beacon_chain/validator_client/api.nim b/beacon_chain/validator_client/api.nim index e2416c7f61..4fbb50cc9c 100644 --- a/beacon_chain/validator_client/api.nim +++ b/beacon_chain/validator_client/api.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -49,6 +49,17 @@ type data*: ApiResponse[T] score*: X + DoubleTimeoutState {.pure.} = enum + Soft, Hard + + DoubleTimeout* = object + startTime: Moment + softTimeout: Duration + hardTimeout: Duration + betweenTimeout: Duration + timeoutFuture*: Future[void].Raising([CancelledError]) + state: DoubleTimeoutState + const ViableNodeStatus* = { RestBeaconNodeStatus.Compatible, @@ -57,6 +68,70 @@ const RestBeaconNodeStatus.Synced } +proc init( + t: typedesc[DoubleTimeout], + softTimeout, hardTimeout: Duration +): DoubleTimeout = + let + betweenTimeout = + if softTimeout == InfiniteDuration: + ZeroDuration + else: + if hardTimeout == InfiniteDuration: + ZeroDuration + else: + doAssert(hardTimeout >= softTimeout, + "Hard timeout should be bigger than soft timeout") + hardTimeout - softTimeout + future = + if softTimeout == InfiniteDuration: + nil + else: + sleepAsync(softTimeout) + + DoubleTimeout( + startTime: Moment.now(), + softTimeout: softTimeout, + hardTimeout: hardTimeout, + betweenTimeout: betweenTimeout, + timeoutFuture: future, + state: DoubleTimeoutState.Soft + ) + +func timedOut(dt: DoubleTimeout): bool = + if isNil(dt.timeoutFuture): + false + else: + dt.timeoutFuture.finished() + +func hardTimedOut(dt: DoubleTimeout): bool = + (dt.state == DoubleTimeoutState.Hard) and dt.timedOut() + +func softTimedOut(dt: DoubleTimeout): bool = + (dt.state == DoubleTimeoutState.Hard) or + ((dt.state == DoubleTimeoutState.Soft) and dt.timedOut()) + +proc switch(dt: var DoubleTimeout) = + if dt.state == DoubleTimeoutState.Hard: + # It's too late to switch, so doing nothing + return + if not(dt.timedOut()): + # Timeout is not exceeded yet, so doing nothing + return + dt.state = DoubleTimeoutState.Hard + dt.timeoutFuture = + if dt.hardTimeout == InfiniteDuration: + nil + else: + sleepAsync(dt.betweenTimeout) + +proc timePassed(dt: DoubleTimeout): Duration = + Moment.now() - dt.startTime + +proc close(dt: DoubleTimeout): Future[void] {.async: (raises: []).} = + if not(isNil(dt.timeoutFuture)): + await cancelAndWait(dt.timeoutFuture) + proc `$`*[T](s: ApiScore[T]): string = var res = Base10.toString(uint64(s.index)) res.add(": ") @@ -95,7 +170,7 @@ proc lazyWaiter( strategy: ApiStrategyKind ) {.async: (raises: []).} = try: - await allFutures(request) + await request.join() if request.failed(): let failure = ApiNodeFailure.init( ApiFailure.Communication, requestName, strategy, node, @@ -130,6 +205,42 @@ proc lazyWait( else: await allFutures(futures) +proc lazyWait( + nodes: seq[BeaconNodeServerRef], + requests: seq[FutureBase], + timeout: ref DoubleTimeout, + requestName: string, + strategy: ApiStrategyKind +) {.async: (raises: [CancelledError]).} = + doAssert(len(nodes) == len(requests)) + if len(nodes) == 0: + return + + var futures: seq[Future[void]] + for index in 0 ..< len(requests): + futures.add(lazyWaiter(nodes[index], requests[index], requestName, + strategy)) + + if isNil(timeout[].timeoutFuture): + await allFutures(futures) + return + + while true: + try: + await allFutures(futures).wait(timeout[].timeoutFuture) + # All pending jobs finished successfully, exiting + break + except AsyncTimeoutError: + if timeout[].hardTimedOut(): + # Hard timeout exceeded, terminating all the jobs. + let pending = + futures.filterIt(not(it.finished())).mapIt(it.cancelAndWait()) + await noCancel allFutures(pending) + break + else: + # Soft timeout exceeded, switching to hard timeout future. 
+ timeout[].switch() + proc apiResponseOr[T](future: FutureBase, timerFut: Future[void], message: string): ApiResponse[T] = if future.finished() and not(future.cancelled()): @@ -278,27 +389,22 @@ template firstSuccessParallel*( retRes template bestSuccess*( - vc: ValidatorClientRef, - responseType: typedesc, - handlerType: typedesc, - scoreType: typedesc, - timeout: Duration, - statuses: set[RestBeaconNodeStatus], - roles: set[BeaconNodeRole], - bodyRequest, - bodyScore, - bodyHandler: untyped): ApiResponse[handlerType] = + vc: ValidatorClientRef, + responseType: typedesc, + handlerType: typedesc, + scoreType: typedesc, + softTimeout: Duration, + hardTimeout: Duration, + statuses: set[RestBeaconNodeStatus], + roles: set[BeaconNodeRole], + bodyRequest, + bodyScore, + bodyHandler: untyped +): ApiResponse[handlerType] = var it {.inject.}: RestClientRef iterations = 0 - - var timerFut = - if timeout != InfiniteDuration: - sleepAsync(timeout) - else: - nil - - var + timeout = newClone(DoubleTimeout.init(softTimeout, hardTimeout)) retRes: ApiResponse[handlerType] scores: seq[ApiScore[scoreType]] bestResponse: Opt[BestNodeResponse[handlerType, scoreType]] @@ -309,26 +415,31 @@ template bestSuccess*( try: if iterations == 0: # We are not going to wait for BNs if there some available. - await vc.waitNodes(timerFut, statuses, roles, false) + await vc.waitNodes(timeout[].timeoutFuture, statuses, roles, false) else: - # We get here only, if all the requests are failed. To avoid requests - # spam we going to wait for changes in BNs statuses. - await vc.waitNodes(timerFut, statuses, roles, true) + # We get here only, if all the requests are failed. To avoid + # requests spam we going to wait for changes in BNs statuses. + await vc.waitNodes(timeout[].timeoutFuture, statuses, roles, true) vc.filterNodes(statuses, roles) except CancelledError as exc: - if not(isNil(timerFut)) and not(timerFut.finished()): - await timerFut.cancelAndWait() + await timeout[].close() raise exc if len(onlineNodes) == 0: - retRes = ApiResponse[handlerType].err("No online beacon node(s)") - break mainLoop + if timeout[].hardTimedOut(): + retRes = ApiResponse[handlerType].err("No online beacon node(s)") + break mainLoop + else: + debug "Soft timeout exceeded while waiting for beacon node(s)", + time_passed = timeout[].timePassed() + timeout[].switch() else: var (pendingRequests, pendingNodes) = block: - var requests: seq[FutureBase] - var nodes: seq[BeaconNodeServerRef] + var + requests: seq[FutureBase] + nodes: seq[BeaconNodeServerRef] for node {.inject.} in onlineNodes: it = node.client let fut = FutureBase(bodyRequest) @@ -342,24 +453,29 @@ template bestSuccess*( var finishedRequests: seq[FutureBase] finishedNodes: seq[BeaconNodeServerRef] - raceFut: Future[FutureBase].Raising([ValueError, CancelledError]) try: - raceFut = race(pendingRequests) - - if not(isNil(timerFut)): - discard await race(raceFut, timerFut) + if not(isNil(timeout.timeoutFuture)): + try: + discard await race(pendingRequests).wait( + timeout.timeoutFuture) + except ValueError: + raiseAssert "pendingRequests sequence must not be empty!" + except AsyncTimeoutError: + discard else: - await allFutures(raceFut) + try: + discard await race(pendingRequests) + except ValueError: + raiseAssert "pendingRequests sequence must not be empty!" 
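
The `DoubleTimeout` above encodes a two-phase deadline: keep collecting (and scoring) responses while the soft timeout runs, and only once the hard timeout is exhausted give up and cancel whatever is still pending. A minimal sketch of that idea, assuming chronos and a hypothetical `fetch()` future standing in for a beacon-node request:

import chronos

proc fetch(): Future[int] {.async.} =
  # Hypothetical slow request standing in for a beacon node call.
  await sleepAsync(3.seconds)
  return 42

proc demo() {.async.} =
  let
    fut = fetch()
    soft = 2.seconds   # ideally we have an answer by now
    hard = 6.seconds   # give up entirely after this
  await (fut or sleepAsync(soft))           # phase 1: wait up to the soft limit
  if not fut.finished():
    await (fut or sleepAsync(hard - soft))  # phase 2: spend the remaining budget
  if fut.finished():
    echo "got ", fut.read()
  else:
    await fut.cancelAndWait()               # hard deadline hit, drop the request
    echo "hard timeout exceeded"

waitFor demo()
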
for index, future in pendingRequests.pairs(): - if future.finished() or - (not(isNil(timerFut)) and timerFut.finished()): + if future.finished() or timeout[].hardTimedOut(): finishedRequests.add(future) finishedNodes.add(pendingNodes[index]) let node {.inject.} = pendingNodes[index] apiResponse {.inject.} = - apiResponseOr[responseType](future, timerFut, + apiResponseOr[responseType](future, timeout.timeoutFuture, "Timeout exceeded while awaiting for the response") handlerResponse = try: @@ -378,7 +494,7 @@ template bestSuccess*( scores.add(ApiScore.init(node, score)) if bestResponse.isNone() or - (score > bestResponse.get().score): + (score > bestResponse.get().score): bestResponse = Opt.some( BestNodeResponse.init(node, handlerResponse, score)) if perfectScore(score): @@ -387,13 +503,18 @@ template bestSuccess*( else: scores.add(ApiScore.init(node, scoreType)) + if timeout[].softTimedOut(): + timeout[].switch() + if bestResponse.isSome(): + perfectScoreFound = true + if perfectScoreFound: # lazyWait will cancel `pendingRequests` on timeout. - asyncSpawn lazyWait(pendingNodes, pendingRequests, timerFut, - RequestName, strategy) + asyncSpawn lazyWait( + pendingNodes, pendingRequests, timeout, RequestName, strategy) break innerLoop - if not(isNil(timerFut)) and timerFut.finished(): + if timeout[].hardTimedOut(): # If timeout is exceeded we need to cancel all the tasks which # are still running. var pendingCancel: seq[Future[void]] @@ -408,11 +529,9 @@ template bestSuccess*( except CancelledError as exc: var pendingCancel: seq[Future[void]] - # `or` operation does not cancelling Futures passed as arguments. - if not(isNil(raceFut)) and not(raceFut.finished()): - pendingCancel.add(raceFut.cancelAndWait()) - if not(isNil(timerFut)) and not(timerFut.finished()): - pendingCancel.add(timerFut.cancelAndWait()) + # `race` operation does not cancelling Futures passed as + # arguments. + pendingCancel.add(timeout[].close()) # We should cancel all the requests which are still pending. 
for future in pendingRequests.items(): if not(future.finished()): @@ -425,7 +544,7 @@ template bestSuccess*( retRes = bestResponse.get().data break mainLoop else: - if timerFut.finished(): + if timeout[].hardTimedOut(): retRes = ApiResponse[handlerType].err( "Timeout exceeded while awaiting for responses") break mainLoop @@ -439,8 +558,8 @@ template bestSuccess*( debug "Best score result selected", request = RequestName, available_scores = scores, best_score = shortScore(bestResponse.get().score), - best_node = bestResponse.get().node - + best_node = bestResponse.get().node, + time_passed = timeout[].timePassed() retRes template onceToAll*( @@ -1182,6 +1301,7 @@ proc getHeadBlockRoot*( RestPlainResponse, GetBlockRootResponse, float64, + SlotDurationSoft, SlotDuration, ViableNodeStatus, {BeaconNodeRole.SyncCommitteeData}, @@ -1417,6 +1537,7 @@ proc produceAttestationData*( RestPlainResponse, ProduceAttestationDataResponse, float64, + OneThirdDurationSoft, OneThirdDuration, ViableNodeStatus, {BeaconNodeRole.AttestationData}, @@ -1762,6 +1883,7 @@ proc getAggregatedAttestation*( RestPlainResponse, GetAggregatedAttestationResponse, float64, + OneThirdDurationSoft, OneThirdDuration, ViableNodeStatus, {BeaconNodeRole.AggregatedData}, @@ -1902,6 +2024,7 @@ proc getAggregatedAttestationV2*( RestPlainResponse, GetAggregatedAttestationV2Response, float64, + OneThirdDurationSoft, OneThirdDuration, ViableNodeStatus, {BeaconNodeRole.AggregatedData}, @@ -2039,6 +2162,7 @@ proc produceSyncCommitteeContribution*( RestPlainResponse, ProduceSyncCommitteeContributionResponse, float64, + OneThirdDurationSoft, OneThirdDuration, ViableNodeStatus, {BeaconNodeRole.SyncCommitteeData}, @@ -2337,6 +2461,7 @@ proc produceBlockV3*( RestPlainResponse, ProduceBlockResponseV3, UInt256, + SlotDurationSoft, SlotDuration, ViableNodeStatus, {BeaconNodeRole.BlockProposalData}, @@ -2513,6 +2638,8 @@ proc publishBlockV2*( publishBlockV2(it, some(broadcast_validation), data.electraData) of ConsensusFork.Fulu: publishBlockV2(it, some(broadcast_validation), data.fuluData) + of ConsensusFork.Gloas: + publishBlockV2(it, some(broadcast_validation), data.gloasData) do: if apiResponse.isErr(): handleCommunicationError() @@ -2566,6 +2693,8 @@ proc publishBlockV2*( publishBlockV2(it, some(broadcast_validation), data.electraData) of ConsensusFork.Fulu: publishBlockV2(it, some(broadcast_validation), data.fuluData) + of ConsensusFork.Gloas: + publishBlockV2(it, some(broadcast_validation), data.gloasData) do: if apiResponse.isErr(): @@ -2599,120 +2728,6 @@ proc publishBlockV2*( raise (ref ValidatorApiError)( msg: "Failed to publish block", data: failures) -proc publishBlock*( - vc: ValidatorClientRef, - data: RestPublishedSignedBlockContents, - strategy: ApiStrategyKind -): Future[bool] {.async: (raises: [CancelledError, ValidatorApiError]).} = - const - RequestName = "publishBlock" - BlockBroadcasted = "Block not passed validation, but still published" - - var failures: seq[ApiNodeFailure] - - case strategy - of ApiStrategyKind.First, ApiStrategyKind.Best: - let res = block: - vc.firstSuccessParallel(RestPlainResponse, - bool, - SlotDuration, - ViableNodeStatus, - {BeaconNodeRole.BlockProposalPublish}): - case data.kind - of ConsensusFork.Phase0: - publishBlock(it, data.phase0Data) - of ConsensusFork.Altair: - publishBlock(it, data.altairData) - of ConsensusFork.Bellatrix: - publishBlock(it, data.bellatrixData) - of ConsensusFork.Capella: - publishBlock(it, data.capellaData) - of ConsensusFork.Deneb: - publishBlock(it, data.denebData) - of 
ConsensusFork.Electra: - publishBlock(it, data.electraData) - of ConsensusFork.Fulu: - publishBlock(it, data.fuluData) - do: - if apiResponse.isErr(): - handleCommunicationError() - ApiResponse[bool].err(apiResponse.error) - else: - let response = apiResponse.get() - case response.status: - of 200: - ApiResponse[bool].ok(true) - of 202: - debug BlockBroadcasted, node = node, - blck = shortLog(ForkedSignedBeaconBlock.init(data)) - ApiResponse[bool].ok(true) - of 400: - handle400() - ApiResponse[bool].err(ResponseInvalidError) - of 500: - handle500() - ApiResponse[bool].err(ResponseInternalError) - of 503: - handle503() - ApiResponse[bool].err(ResponseNoSyncError) - else: - handleUnexpectedCode() - ApiResponse[bool].err(ResponseUnexpectedError) - - if res.isErr(): - raise (ref ValidatorApiError)(msg: res.error, data: failures) - return res.get() - - of ApiStrategyKind.Priority: - vc.firstSuccessSequential(RestPlainResponse, - SlotDuration, - ViableNodeStatus, - {BeaconNodeRole.BlockProposalPublish}): - case data.kind - of ConsensusFork.Phase0: - publishBlock(it, data.phase0Data) - of ConsensusFork.Altair: - publishBlock(it, data.altairData) - of ConsensusFork.Bellatrix: - publishBlock(it, data.bellatrixData) - of ConsensusFork.Capella: - publishBlock(it, data.capellaData) - of ConsensusFork.Deneb: - publishBlock(it, data.denebData) - of ConsensusFork.Electra: - publishBlock(it, data.electraData) - of ConsensusFork.Fulu: - publishBlock(it, data.fuluData) - - do: - if apiResponse.isErr(): - handleCommunicationError() - false - else: - let response = apiResponse.get() - case response.status: - of 200: - return true - of 202: - debug BlockBroadcasted, node = node, - blck = shortLog(ForkedSignedBeaconBlock.init(data)) - return true - of 400: - handle400() - false - of 500: - handle500() - false - of 503: - handle503() - false - else: - handleUnexpectedCode() - false - - raise (ref ValidatorApiError)( - msg: "Failed to publish block", data: failures) - proc publishBlindedBlockV2*( vc: ValidatorClientRef, data: ForkedSignedBlindedBeaconBlock, @@ -2734,27 +2749,17 @@ proc publishBlindedBlockV2*( ViableNodeStatus, {BeaconNodeRole.BlockProposalPublish}): case data.kind - of ConsensusFork.Phase0: - publishBlindedBlockV2(it, some(broadcast_validation), - data.phase0Data) - of ConsensusFork.Altair: - publishBlindedBlockV2(it, some(broadcast_validation), - data.altairData) - of ConsensusFork.Bellatrix: - publishBlindedBlockV2(it, some(broadcast_validation), - data.bellatrixData) - of ConsensusFork.Capella: - publishBlindedBlockV2(it, some(broadcast_validation), - data.capellaData) - of ConsensusFork.Deneb: - publishBlindedBlockV2(it, some(broadcast_validation), - data.denebData) + of ConsensusFork.Phase0 .. 
ConsensusFork.Deneb: + raiseAssert "Unable to publish block of that kind" of ConsensusFork.Electra: - publishBlindedBlockV2(it, some(broadcast_validation), + publishJsonBlindedBlockV2(it, some(broadcast_validation), data.electraData) of ConsensusFork.Fulu: - publishBlindedBlockV2(it, some(broadcast_validation), + publishJsonBlindedBlockV2(it, some(broadcast_validation), data.fuluData) + of ConsensusFork.Gloas: + debugGloasComment "" + return false do: if apiResponse.isErr(): handleCommunicationError() @@ -2793,27 +2798,17 @@ proc publishBlindedBlockV2*( ViableNodeStatus, {BeaconNodeRole.BlockProposalPublish}): case data.kind - of ConsensusFork.Phase0: - publishBlindedBlockV2(it, some(broadcast_validation), - data.phase0Data) - of ConsensusFork.Altair: - publishBlindedBlockV2(it, some(broadcast_validation), - data.altairData) - of ConsensusFork.Bellatrix: - publishBlindedBlockV2(it, some(broadcast_validation), - data.bellatrixData) - of ConsensusFork.Capella: - publishBlindedBlockV2(it, some(broadcast_validation), - data.capellaData) - of ConsensusFork.Deneb: - publishBlindedBlockV2(it, some(broadcast_validation), - data.denebData) + of ConsensusFork.Phase0 .. ConsensusFork.Deneb: + raiseAssert "Unable to publish block of that kind" of ConsensusFork.Electra: - publishBlindedBlockV2(it, some(broadcast_validation), + publishJsonBlindedBlockV2(it, some(broadcast_validation), data.electraData) of ConsensusFork.Fulu: - publishBlindedBlockV2(it, some(broadcast_validation), + publishJsonBlindedBlockV2(it, some(broadcast_validation), data.fuluData) + of ConsensusFork.Gloas: + debugGloasComment "" + return false do: if apiResponse.isErr(): handleCommunicationError() @@ -2879,6 +2874,8 @@ proc publishBlindedBlock*( publishBlindedBlock(it, data.electraData) of ConsensusFork.Fulu: publishBlindedBlock(it, data.fuluData) + of ConsensusFork.Gloas: + publishBlindedBlock(it, data.gloasData) do: if apiResponse.isErr(): handleCommunicationError() @@ -2928,6 +2925,8 @@ proc publishBlindedBlock*( publishBlindedBlock(it, data.electraData) of ConsensusFork.Fulu: publishBlindedBlock(it, data.fuluData) + of ConsensusFork.Gloas: + publishBlindedBlock(it, data.gloasData) do: if apiResponse.isErr(): handleCommunicationError() diff --git a/beacon_chain/validator_client/attestation_service.nim b/beacon_chain/validator_client/attestation_service.nim index d39df49c90..26d28f52dc 100644 --- a/beacon_chain/validator_client/attestation_service.nim +++ b/beacon_chain/validator_client/attestation_service.nim @@ -62,7 +62,7 @@ proc serveAttestation( raise exc logScope: - delay = vc.getDelay(attestationSlot.attestation_deadline()) + delay = vc.getDelay(attestationSlot.attestation_deadline(vc.timeConfig)) debug "Sending attestation" @@ -94,7 +94,7 @@ proc serveAttestation( submitAttestation(attestation) if res: - let delay = vc.getDelay(attestationSlot.attestation_deadline()) + let delay = vc.getDelay(attestationSlot.attestation_deadline(vc.timeConfig)) beacon_attestations_sent.inc() beacon_attestation_sent_delay.observe(delay.toFloatSeconds()) notice "Attestation published" @@ -136,7 +136,7 @@ proc serveAggregateAndProof*( let signedProof = phase0.SignedAggregateAndProof( message: proof, signature: signature) logScope: - delay = vc.getDelay(slot.aggregate_deadline()) + delay = vc.getDelay(slot.aggregate_deadline(vc.timeConfig)) debug "Sending aggregated attestation", fork = fork @@ -202,7 +202,7 @@ proc serveAggregateAndProofV2*( raiseAssert "Unsupported SignedAggregateAndProof" logScope: - delay = 
vc.getDelay(slot.aggregate_deadline()) + delay = vc.getDelay(slot.aggregate_deadline(vc.timeConfig)) debug "Sending aggregated attestation", fork = fork @@ -316,7 +316,7 @@ proc produceAndPublishAttestations*( inc(errored) (succeed, errored, failed) - let delay = vc.getDelay(slot.attestation_deadline()) + let delay = vc.getDelay(slot.attestation_deadline(vc.timeConfig)) debug "Attestation statistics", total = len(pendingAttestations), succeed = statistics[0], failed_to_deliver = statistics[1], not_accepted = statistics[2], delay = delay, slot = slot, @@ -414,7 +414,7 @@ proc produceAndPublishAggregates( inc(errored) (succeed, errored, failed) - let delay = vc.getDelay(slot.aggregate_deadline()) + let delay = vc.getDelay(slot.aggregate_deadline(vc.timeConfig)) debug "Aggregated attestation statistics", total = len(aggregates), succeed = statistics[0], failed_to_deliver = statistics[1], not_accepted = statistics[2], delay = delay, slot = slot, @@ -433,7 +433,7 @@ proc publishAttestationsAndAggregates( let vc = service.client block: - let delay = vc.getDelay(slot.attestation_deadline()) + let delay = vc.getDelay(slot.attestation_deadline(vc.timeConfig)) debug "Producing attestations", delay = delay, slot = slot, committee_index = committee_index, duties_count = len(duties) @@ -449,15 +449,13 @@ proc publishAttestationsAndAggregates( debug "Publish attestation request was interrupted" raise exc - let aggregateTime = - # chronos.Duration subtraction could not return negative value, in such - # case it will return `ZeroDuration`. - vc.beaconClock.durationToNextSlot() - OneThirdDuration - if aggregateTime != ZeroDuration: - await sleepAsync(aggregateTime) + let aggregateTime = vc.beaconClock.fromNow( + slot.aggregate_deadline(vc.timeConfig)) + if aggregateTime.inFuture: + await sleepAsync(aggregateTime.offset) block: - let delay = vc.getDelay(slot.aggregate_deadline()) + let delay = vc.getDelay(slot.aggregate_deadline(vc.timeConfig)) debug "Producing aggregate and proofs", delay = delay await service.produceAndPublishAggregates(ad, duties) @@ -551,7 +549,7 @@ proc produceAndPublishAttestationsV2*( inc(errored) (succeed, errored, failed) - delay = vc.getDelay(slot.attestation_deadline()) + delay = vc.getDelay(slot.attestation_deadline(vc.timeConfig)) debug "Attestation statistics", total = len(pendingAttestations), succeed = statistics[0], failed_to_deliver = statistics[1], @@ -664,7 +662,7 @@ proc produceAndPublishAggregatesV2( inc(errored) (succeed, errored, failed) - let delay = vc.getDelay(slot.aggregate_deadline()) + let delay = vc.getDelay(slot.aggregate_deadline(vc.timeConfig)) debug "Aggregated attestation statistics", total = len(aggregates), succeed = statistics[0], failed_to_deliver = statistics[1], not_accepted = statistics[2], delay = delay, slot = slot, @@ -679,7 +677,7 @@ proc publishAttestationsAndAggregatesV2( vc = service.client block: - let delay = vc.getDelay(slot.attestation_deadline()) + let delay = vc.getDelay(slot.attestation_deadline(vc.timeConfig)) debug "Producing attestations", delay = delay, slot = slot, duties_count = len(duties) @@ -694,16 +692,14 @@ proc publishAttestationsAndAggregatesV2( debug "Publish attestation request was interrupted" raise exc - let aggregateTime = - # chronos.Duration subtraction could not return negative value, in such - # case it will return `ZeroDuration`. 
- vc.beaconClock.durationToNextSlot() - OneThirdDuration - if aggregateTime != ZeroDuration: - await sleepAsync(aggregateTime) + let aggregateTime = vc.beaconClock.fromNow( + slot.aggregate_deadline(vc.timeConfig)) + if aggregateTime.inFuture: + await sleepAsync(aggregateTime.offset) block: let - delay = vc.getDelay(slot.aggregate_deadline()) + delay = vc.getDelay(slot.aggregate_deadline(vc.timeConfig)) dutiesByCommittee = getAttesterDutiesByCommittee(duties) debug "Producing aggregate and proofs", delay = delay var tasks: seq[Future[void].Raising([CancelledError])] @@ -733,7 +729,7 @@ proc spawnAttestationTasks( try: for index, duties in dutiesByCommittee: tasks.add(service.publishAttestationsAndAggregates(slot, index, duties)) - let timeout = vc.beaconClock.durationToNextSlot() + let timeout = vc.beaconClock.fromNow(slot + 1).durationOrZero() await allFutures(tasks).wait(timeout) except AsyncTimeoutError: # Cancelling all the pending tasks. @@ -757,7 +753,7 @@ proc spawnAttestationTasksV2( await vc.waitForBlock(slot, attestationSlotOffset) try: - let timeout = vc.beaconClock.durationToNextSlot() + let timeout = vc.beaconClock.fromNow(slot + 1).durationOrZero() await service.publishAttestationsAndAggregatesV2(slot, duties).wait(timeout) except AsyncTimeoutError: discard diff --git a/beacon_chain/validator_client/block_service.nim b/beacon_chain/validator_client/block_service.nim index 5cbb8cc082..cae5b756f3 100644 --- a/beacon_chain/validator_client/block_service.nim +++ b/beacon_chain/validator_client/block_service.nim @@ -160,8 +160,7 @@ proc publishBlockV3( let signature = try: let res = await validator.getBlockSignature(fork, genesisRoot, - slot, blockRoot, - maybeBlock) + blockRoot, maybeBlock) if res.isErr(): warn "Unable to sign blinded block proposal using remote signer", reason = res.error() @@ -194,7 +193,7 @@ proc publishBlockV3( raise exc if res: - let delay = vc.getDelay(slot.block_deadline()) + let delay = vc.getDelay(slot.block_deadline(vc.timeConfig)) beacon_blocks_sent.inc() beacon_blocks_sent_delay.observe(delay.toFloatSeconds()) notice "Blinded block published", delay = delay @@ -241,7 +240,7 @@ proc publishBlockV3( signature = try: let res = await validator.getBlockSignature( - fork, genesisRoot, slot, blockRoot, maybeBlock) + fork, genesisRoot, blockRoot, maybeBlock) if res.isErr(): warn "Unable to sign block proposal using remote signer", reason = res.error() @@ -258,13 +257,9 @@ proc publishBlockV3( res = try: debug "Sending block" - if vc.isPastElectraFork(slot.epoch()): - await vc.publishBlockV2( - signedBlockContents, BroadcastValidationType.Gossip, - ApiStrategyKind.First) - else: - await vc.publishBlock( - signedBlockContents, ApiStrategyKind.First) + await vc.publishBlockV2( + signedBlockContents, BroadcastValidationType.Gossip, + ApiStrategyKind.First) except ValidatorApiError as exc: warn "Unable to publish block", reason = exc.getFailureReason() return @@ -273,7 +268,7 @@ proc publishBlockV3( raise exc if res: - let delay = vc.getDelay(slot.block_deadline()) + let delay = vc.getDelay(slot.block_deadline(vc.timeConfig)) beacon_blocks_sent.inc() beacon_blocks_sent_delay.observe(delay.toFloatSeconds()) notice "Block published", delay = delay @@ -297,9 +292,10 @@ proc publishBlock( slot = slot wall_slot = currentSlot - debug "Publishing block", delay = vc.getDelay(slot.block_deadline()), - genesis_root = genesisRoot, - graffiti = graffiti, fork = fork + debug "Publishing block", + delay = vc.getDelay(slot.block_deadline(vc.timeConfig)), + genesis_root = 
genesisRoot, + graffiti = graffiti, fork = fork let randaoReveal = try: @@ -595,7 +591,7 @@ proc runBlockPollMonitor(service: BlockServiceRef, currentTime = vc.beaconClock.now() afterSlot = currentTime.slotOrZero() - if currentTime > afterSlot.attestation_deadline(): + if currentTime > afterSlot.attestation_deadline(vc.timeConfig): # Attestation time already, lets wait for next slot. continue diff --git a/beacon_chain/validator_client/common.nim b/beacon_chain/validator_client/common.nim index d3c5f17af7..bb5d532c3f 100644 --- a/beacon_chain/validator_client/common.nim +++ b/beacon_chain/validator_client/common.nim @@ -46,10 +46,10 @@ const ZeroTimeDiff* = TimeDiff(nanoseconds: 0'i64) -static: doAssert(high(ConsensusFork) == ConsensusFork.Fulu, +static: doAssert(high(ConsensusFork) == ConsensusFork.Gloas, "Update OptionalForks constant!") const - OptionalForks* = {ConsensusFork.Electra, ConsensusFork.Fulu} + OptionalForks* = {ConsensusFork.Fulu, ConsensusFork.Gloas} ## When a new ConsensusFork is added and before this fork is activated on ## `mainnet`, it should be part of `OptionalForks`. ## In this case, the client will ignore missing _VERSION @@ -233,6 +233,7 @@ type proposers*: ProposerMap syncCommitteeDuties*: SyncCommitteeDutiesMap syncCommitteeProofs*: SyncCommitteeProofsMap + timeConfig*: TimeConfig beaconGenesis*: RestGenesis proposerTasks*: Table[Slot, seq[ProposerTask]] dynamicFeeRecipientsStore*: ref DynamicFeeRecipientsStore @@ -267,8 +268,14 @@ type const DefaultDutyAndProof* = DutyAndProof(epoch: FAR_FUTURE_EPOCH) DefaultSyncCommitteeDuty* = SyncCommitteeDuty() - SlotDuration* = int64(SECONDS_PER_SLOT).seconds - OneThirdDuration* = int64(SECONDS_PER_SLOT).seconds div INTERVALS_PER_SLOT + SlotDuration* = + int64(SECONDS_PER_SLOT).seconds + SlotDurationSoft* = + (int64(SECONDS_PER_SLOT) div 2).seconds + OneThirdDuration* = + (int64(SECONDS_PER_SLOT) div int64(INTERVALS_PER_SLOT)).seconds + OneThirdDurationSoft* = + (int64(SECONDS_PER_SLOT) div int64(INTERVALS_PER_SLOT) div 2'i64).seconds AllBeaconNodeRoles* = { BeaconNodeRole.Duties, BeaconNodeRole.AttestationData, @@ -503,32 +510,32 @@ chronicles.expandIt(SyncCommitteeDuty): validator_index = it.validator_index validator_sync_committee_indices = it.validator_sync_committee_indices -proc equals*(info: VCRuntimeConfig, name: string, check: uint64): bool = - let numstr = info.getOrDefault(name, "missing") - if numstr == "missing": return false - let value = Base10.decode(uint64, numstr).valueOr: - return false - value == check +func parseConfigValue[T: uint64](_: typedesc[T], str: string): Opt[T] = + let res = Base10.decode(uint64, str).valueOr: + return Opt.none T + Opt.some res -proc equals*(info: VCRuntimeConfig, name: string, check: DomainType): bool = - let domstr = info.getOrDefault(name, "missing") - if domstr == "missing": return false - let value = - try: - var dres: DomainType - hexToByteArray(domstr, distinctBase(dres)) - dres - except ValueError: - return false +func parseConfigValue[T: DomainType](_: typedesc[T], str: string): Opt[T] = + try: + var res: DomainType + hexToByteArray(str, distinctBase(res)) + Opt.some res + except ValueError: + Opt.none T + +func equals*[T](info: VCRuntimeConfig, name: string, check: T): bool = + let str = info.getOrDefault(name, "missing") + if str == "missing": return false + let value = T.parseConfigValue(str).valueOr: + return false value == check -proc equals*(info: VCRuntimeConfig, name: string, check: Epoch): bool = +func equals*(info: VCRuntimeConfig, name: string, check: 
Epoch): bool = info.equals(name, uint64(check)) -proc checkConfig*(c: VCRuntimeConfig): bool = +func checkConfig*(c: VCRuntimeConfig): bool = c.equals("MAX_VALIDATORS_PER_COMMITTEE", MAX_VALIDATORS_PER_COMMITTEE) and c.equals("SLOTS_PER_EPOCH", SLOTS_PER_EPOCH) and - c.equals("SECONDS_PER_SLOT", SECONDS_PER_SLOT) and c.equals("EPOCHS_PER_ETH1_VOTING_PERIOD", EPOCHS_PER_ETH1_VOTING_PERIOD) and c.equals("SLOTS_PER_HISTORICAL_ROOT", SLOTS_PER_HISTORICAL_ROOT) and c.equals("EPOCHS_PER_HISTORICAL_VECTOR", EPOCHS_PER_HISTORICAL_VECTOR) and @@ -550,6 +557,20 @@ proc checkConfig*(c: VCRuntimeConfig): bool = c.hasKey("ALTAIR_FORK_VERSION") and c.hasKey("ALTAIR_FORK_EPOCH") and not(c.equals("ALTAIR_FORK_EPOCH", FAR_FUTURE_EPOCH)) +func checkConfig*(c: VCRuntimeConfig, timeConfig: TimeConfig): bool = + c.checkConfig and c.equals("SECONDS_PER_SLOT", timeConfig.SECONDS_PER_SLOT) + +func getTimeConfig*(c: VCRuntimeConfig): Opt[TimeConfig] = + let SECONDS_PER_SLOT = block: + const defaultStr = Base10.toString( + defaultRuntimeConfig.time.SECONDS_PER_SLOT) + ? uint64.parseConfigValue c.getOrDefault("SECONDS_PER_SLOT", defaultStr) + if SECONDS_PER_SLOT notin MIN_SECONDS_PER_SLOT .. MAX_SECONDS_PER_SLOT: + return Opt.none TimeConfig + if SECONDS_PER_SLOT != presets.SECONDS_PER_SLOT: + return Opt.none TimeConfig # Temporary, until removed from presets + Opt.some TimeConfig(SECONDS_PER_SLOT: SECONDS_PER_SLOT) + proc updateStatus*(node: BeaconNodeServerRef, status: RestBeaconNodeStatus, failure: ApiNodeFailure) = @@ -835,7 +856,8 @@ proc getDurationToNextAttestation*(vc: ValidatorClientRef, if minSlot == FAR_FUTURE_SLOT: "" else: - $(minSlot.attestation_deadline() - slot.start_beacon_time()) + $(minSlot.attestation_deadline(vc.timeConfig) - + slot.start_beacon_time()) proc getDurationToNextBlock*(vc: ValidatorClientRef, slot: Slot): string = var minSlot = FAR_FUTURE_SLOT @@ -852,7 +874,8 @@ proc getDurationToNextBlock*(vc: ValidatorClientRef, slot: Slot): string = if minSlot == FAR_FUTURE_SLOT: "" else: - $(minSlot.block_deadline() - slot.start_beacon_time()) + $(minSlot.block_deadline(vc.timeConfig) - + slot.start_beacon_time()) iterator attesterDutiesForEpoch*(vc: ValidatorClientRef, epoch: Epoch): DutyAndProof = @@ -1061,7 +1084,7 @@ proc getValidatorRegistration( vc: ValidatorClientRef, validator: AttachedValidator, timestamp: Time, - fork: Fork + genesis_fork_version: Version, ): Result[PendingValidatorRegistration, RegistrationKind] = if validator.index.isNone(): debug "Validator registration missing validator index", @@ -1091,14 +1114,14 @@ proc getValidatorRegistration( var registration = SignedValidatorRegistrationV1( message: ValidatorRegistrationV1( - fee_recipient: ExecutionAddress(data: distinctBase(feeRecipient)), + fee_recipient: feeRecipient, gas_limit: gasLimit, timestamp: uint64(timestamp.toUnix()), pubkey: validator.pubkey ) ) - let sigfut = validator.getBuilderSignature(fork, registration.message) + let sigfut = validator.getBuilderSignature(genesis_fork_version, registration.message) if sigfut.finished(): # This is short-path if we able to create signature locally. if not(sigfut.completed()): @@ -1120,7 +1143,7 @@ proc getValidatorRegistration( proc prepareRegistrationList*( vc: ValidatorClientRef, timestamp: Time, - fork: Fork + genesis_fork_version: Version, ): Future[seq[SignedValidatorRegistrationV1]] {. 
async: (raises: [CancelledError]).} = @@ -1140,7 +1163,7 @@ proc prepareRegistrationList*( timed = 0 for validator in vc.attachedValidators[].items(): - let res = vc.getValidatorRegistration(validator, timestamp, fork) + let res = vc.getValidatorRegistration(validator, timestamp, genesis_fork_version) if res.isOk(): let preg = res.get() if preg.future.isNil(): @@ -1235,7 +1258,7 @@ proc checkedWaitForSlot*(vc: ValidatorClientRef, destinationSlot: Slot, time_to_slot = shortLog(timeToSlot) while true: - await sleepAsync2(timeToSlot) + await sleepAsync(timeToSlot) let wallTime = vc.beaconClock.now() @@ -1444,7 +1467,9 @@ proc waitForNextEpoch*(service: ClientServiceRef, async: (raises: [CancelledError], raw: true) .}= let vc = service.client - sleepTime = vc.beaconClock.durationToNextEpoch() + delay + currentSlot = vc.beaconClock.now().toSlot() + nextEpochTime = currentSlot.nextEpochStartTime(vc.timeConfig) + sleepTime = vc.beaconClock.fromNow(nextEpochTime).durationOrZero() + delay debug "Sleeping until next epoch", service = service.name, sleep_time = sleepTime, delay = delay sleepAsync(sleepTime) @@ -1453,13 +1478,19 @@ proc waitForNextEpoch*(service: ClientServiceRef): Future[void] {. async: (raises: [CancelledError], raw: true).}= waitForNextEpoch(service, ZeroDuration) -proc waitForNextSlot*(service: ClientServiceRef): Future[void] {. - async: (raises: [CancelledError], raw: true).} = +proc waitForNextSlot*( + vc: ValidatorClientRef, + currentSlot: tuple[afterGenesis: bool, slot: Slot] + ): Future[void] {.async: (raises: [CancelledError], raw: true).} = let - vc = service.client - sleepTime = vc.beaconClock.durationToNextSlot() + nextSlotTime = currentSlot.nextSlotStartTime(vc.timeConfig) + sleepTime = vc.beaconClock.fromNow(nextSlotTime).durationOrZero() sleepAsync(sleepTime) +proc waitForNextSlot*(service: ClientServiceRef): Future[void] {. + async: (raises: [CancelledError], raw: true).} = + service.client.waitForNextSlot(service.client.beaconClock.now().toSlot()) + func compareUnsorted*[T](a, b: openArray[T]): bool = if len(a) != len(b): return false diff --git a/beacon_chain/validator_client/duties_service.nim b/beacon_chain/validator_client/duties_service.nim index b0e39756d1..6702ac5fb2 100644 --- a/beacon_chain/validator_client/duties_service.nim +++ b/beacon_chain/validator_client/duties_service.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
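Editorial note: throughout this change, timing that used to come from the compile-time `SECONDS_PER_SLOT` constant (`durationToNextSlot()`, `OneThirdDuration`, the parameterless `*_deadline()` helpers) is instead derived from the runtime `vc.timeConfig` and clamped with `fromNow(...).durationOrZero()`, so a deadline already in the past never produces a negative duration. The snippet below is a minimal, self-contained sketch of that clamping pattern; `TimeConfigSketch`, `BeaconTimeSketch` and the helper names are illustrative stand-ins, not the actual beacon_clock API.

```nim
# Sketch only: simplified stand-ins for the runtime time configuration and
# beacon time, showing the "deadline from runtime config, clamped at zero"
# pattern applied by the refactor above.
type
  TimeConfigSketch = object
    secondsPerSlot: uint64            # runtime value instead of a const
  BeaconTimeSketch = object
    ns: int64                         # nanoseconds since genesis

func slotStart(cfg: TimeConfigSketch, slot: uint64): BeaconTimeSketch =
  BeaconTimeSketch(ns: int64(slot * cfg.secondsPerSlot) * 1_000_000_000'i64)

func aggregateDeadline(cfg: TimeConfigSketch, slot: uint64): BeaconTimeSketch =
  # two thirds into the slot, analogous to aggregate_deadline(vc.timeConfig)
  BeaconTimeSketch(ns: slotStart(cfg, slot).ns +
    int64(cfg.secondsPerSlot) * 2_000_000_000'i64 div 3)

func durationOrZeroNs(now, deadline: BeaconTimeSketch): int64 =
  # chronos durations cannot go negative, so clamp instead of underflowing
  max(0'i64, deadline.ns - now.ns)

when isMainModule:
  let
    cfg = TimeConfigSketch(secondsPerSlot: 12)
    now = BeaconTimeSketch(ns: 5'i64 * 12_000_000_000'i64)  # start of slot 5
  echo durationOrZeroNs(now, aggregateDeadline(cfg, 5))     # 8000000000
```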
@@ -561,13 +561,13 @@ proc registerValidators*( let vc = service.client let currentSlot = vc.getCurrentSlot().get(Slot(0)) - genesisFork = vc.forks[0] + genesis_fork_version = vc.forks[0].current_version registrations = try: - await vc.prepareRegistrationList(getTime(), genesisFork) + await vc.prepareRegistrationList(getTime(), genesis_fork_version) except CancelledError as exc: debug "Validator registration preparation was interrupted", - slot = currentSlot, fork = genesisFork + slot = currentSlot raise exc count = @@ -576,12 +576,12 @@ proc registerValidators*( await registerValidator(vc, registrations) except ValidatorApiError as exc: warn "Unable to register validators", slot = currentSlot, - fork = genesisFork, err_name = exc.name, + version = genesis_fork_version, err_name = exc.name, err_msg = exc.msg, reason = exc.getFailureReason() 0 except CancelledError as exc: debug "Validator registration was interrupted", slot = currentSlot, - fork = genesisFork + version = genesis_fork_version raise exc else: 0 diff --git a/beacon_chain/validator_client/fallback_service.nim b/beacon_chain/validator_client/fallback_service.nim index 02b9fbd978..2305571cf6 100644 --- a/beacon_chain/validator_client/fallback_service.nim +++ b/beacon_chain/validator_client/fallback_service.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -12,23 +12,6 @@ import ./common const ServiceName = "fallback_service" - FAIL_TIME_OFFSETS = [ - TimeOffset.init(-(MAXIMUM_GOSSIP_CLOCK_DISPARITY.nanoseconds)), - TimeOffset.init(MAXIMUM_GOSSIP_CLOCK_DISPARITY.nanoseconds * 4) - ] - WARN_TIME_OFFSETS = [ - TimeOffset.init(-(MAXIMUM_GOSSIP_CLOCK_DISPARITY.nanoseconds div 2)), - TimeOffset.init(MAXIMUM_GOSSIP_CLOCK_DISPARITY.nanoseconds * 2), - ] - NOTE_TIME_OFFSETS = [ - TimeOffset.init(-(MAXIMUM_GOSSIP_CLOCK_DISPARITY.nanoseconds div 4)), - TimeOffset.init(MAXIMUM_GOSSIP_CLOCK_DISPARITY.nanoseconds), - ] - -declareGauge validator_client_time_offset, - "Wall clock offset(s) between validator client and beacon node(s)", - labels = ["node"] - logScope: service = ServiceName proc nodesCount*(vc: ValidatorClientRef, @@ -155,7 +138,7 @@ proc checkCompatible( let genesisFlag = (genesis != vc.beaconGenesis) - configFlag = not(checkConfig(info)) + configFlag = not(checkConfig(info, vc.timeConfig)) node.config = info node.genesis = Opt.some(genesis) @@ -317,138 +300,15 @@ proc checkNodes*(service: FallbackServiceRef): Future[bool] {. raise exc res -proc checkOffsetStatus(node: BeaconNodeServerRef, offset: TimeOffset) = - logScope: - node = node - - node.timeOffset = Opt.some(offset) - validator_client_time_offset.set(float64(offset.milliseconds()), @[$node]) - - debug "Beacon node time offset", time_offset = offset - - let updateStatus = - if (offset <= WARN_TIME_OFFSETS[0]) or (offset >= WARN_TIME_OFFSETS[1]): - warn "Beacon node has significant time offset", - time_offset = offset - if (offset <= FAIL_TIME_OFFSETS[0]) or (offset >= FAIL_TIME_OFFSETS[1]): - # Beacon node's clock is out of acceptable offsets, we marking this - # beacon node and remote it from the list of working nodes. 
- warn "Beacon node has enormous time offset", - time_offset = offset - let failure = ApiNodeFailure.init(ApiFailure.NoError, - "checkTimeOffsetStatus()", node, 200, - "Beacon node has enormous time offset") - node.updateStatus(RestBeaconNodeStatus.BrokenClock, failure) - false - else: - true - elif (offset <= NOTE_TIME_OFFSETS[0]) or (offset >= NOTE_TIME_OFFSETS[1]): - info "Beacon node has notable time offset", - time_offset = offset - true - else: - true - - if updateStatus: - if node.status == RestBeaconNodeStatus.BrokenClock: - # Beacon node's clock has been recovered to some acceptable offset, so we - # could restore beacon node. - let failure = ApiNodeFailure.init(ApiFailure.NoError, - "checkTimeOffsetStatus()", node, 200, - "Beacon node has acceptable time offset") - node.updateStatus(RestBeaconNodeStatus.Offline, failure) - -proc disableNimbusExtensions(node: BeaconNodeServerRef) = - node.features.incl(RestBeaconNodeFeature.NoNimbusExtensions) - if node.status == RestBeaconNodeStatus.BrokenClock: - let failure = ApiNodeFailure.init(ApiFailure.NoError, - "disableNimbusExtensions()", node, 200, - "Nimbus extensions no longer available") - node.updateStatus(RestBeaconNodeStatus.Offline, failure) - -proc runTimeMonitor( - service: FallbackServiceRef, - node: BeaconNodeServerRef -) {.async: (raises: [CancelledError]).} = - const NimbusExtensionsLog = "Beacon node does not support Nimbus extensions" - let - vc = service.client - roles = AllBeaconNodeRoles - statuses = AllBeaconNodeStatuses - {RestBeaconNodeStatus.Offline} - - logScope: - node = node - - if BeaconNodeRole.NoTimeCheck in node.roles: - debug "Beacon node time offset checks disabled" - return - - while true: - while node.status notin statuses: - await vc.waitNodes(nil, statuses, roles, true) - - if RestBeaconNodeFeature.NoNimbusExtensions in node.features: - return - - let tres = - try: - let delay = vc.processingDelay.valueOr: ZeroDuration - await node.client.getTimeOffset(delay) - except RestResponseError as exc: - case exc.status - of 400: - debug "Beacon node returns invalid response", - status = $exc.status, reason = $exc.msg, - error_message = $exc.message - else: - notice NimbusExtensionsLog, status = $exc.status - # Exiting loop - node.disableNimbusExtensions() - return - except RestError as exc: - debug "Unable to obtain beacon node's time offset", reason = $exc.msg - notice NimbusExtensionsLog - node.disableNimbusExtensions() - return - except CancelledError as exc: - raise exc - - checkOffsetStatus(node, TimeOffset.init(tres)) - - await service.waitForNextSlot() - -proc processTimeMonitoring( - service: FallbackServiceRef -) {.async: (raises: [CancelledError]).} = - let - vc = service.client - blockNodes = vc.filterNodes( - ResolvedBeaconNodeStatuses, AllBeaconNodeRoles) - - var pendingChecks: seq[Future[void]] - - try: - for node in blockNodes: - pendingChecks.add(service.runTimeMonitor(node)) - await allFutures(pendingChecks) - except CancelledError as exc: - let pending = pendingChecks - .filterIt(not(it.finished())).mapIt(it.cancelAndWait()) - await noCancel allFutures(pending) - raise exc - proc mainLoop(service: FallbackServiceRef) {.async: (raises: []).} = let vc = service.client service.state = ServiceState.Running debug "Service started" - let timeMonitorFut = processTimeMonitoring(service) - try: await vc.preGenesisEvent.wait() except CancelledError: debug "Service interrupted" - if not(timeMonitorFut.finished()): await timeMonitorFut.cancelAndWait() return while true: @@ -462,7 +322,6 @@ proc 
mainLoop(service: FallbackServiceRef) {.async: (raises: []).} = false except CancelledError: debug "Service interrupted" - if not(timeMonitorFut.finished()): await timeMonitorFut.cancelAndWait() true if breakLoop: diff --git a/beacon_chain/validator_client/sync_committee_service.nim b/beacon_chain/validator_client/sync_committee_service.nim index 63f6a91411..d3e827722b 100644 --- a/beacon_chain/validator_client/sync_committee_service.nim +++ b/beacon_chain/validator_client/sync_committee_service.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -62,7 +62,8 @@ proc serveSyncCommitteeMessage*( message = shortLog(message) debug "Sending sync committee message", - delay = vc.getDelay(message.slot.sync_committee_message_deadline()) + delay = vc.getDelay( + message.slot.sync_committee_message_deadline(vc.timeConfig)) let res = try: @@ -76,7 +77,8 @@ proc serveSyncCommitteeMessage*( return false let - delay = vc.getDelay(message.slot.sync_committee_message_deadline()) + delay = vc.getDelay( + message.slot.sync_committee_message_deadline(vc.timeConfig)) dur = Moment.now() - startTime if res: @@ -131,7 +133,7 @@ proc produceAndPublishSyncCommitteeMessages( (succeed, errored, failed) let - delay = vc.getDelay(slot.attestation_deadline()) + delay = vc.getDelay(slot.attestation_deadline(vc.timeConfig)) dur = Moment.now() - startTime debug "Sync committee message statistics", @@ -174,7 +176,7 @@ proc serveContributionAndProof*( res.get() debug "Sending sync contribution", - delay = vc.getDelay(slot.sync_contribution_deadline()) + delay = vc.getDelay(slot.sync_contribution_deadline(vc.timeConfig)) let restSignedProof = RestSignedContributionAndProof.init( proof, signature) @@ -326,7 +328,7 @@ proc produceAndPublishContributions( (succeed, errored, failed) let - delay = vc.getDelay(slot.aggregate_deadline()) + delay = vc.getDelay(slot.aggregate_deadline(vc.timeConfig)) dur = Moment.now() - startTime debug "Sync message contribution statistics", @@ -353,7 +355,8 @@ proc publishSyncMessagesAndContributions( slot = slot block: - let delay = vc.getDelay(slot.sync_committee_message_deadline()) + let delay = vc.getDelay( + slot.sync_committee_message_deadline(vc.timeConfig)) debug "Producing sync committee messages", delay = delay, duties_count = len(duties) @@ -393,15 +396,15 @@ proc publishSyncMessagesAndContributions( return let currentTime = vc.beaconClock.now() - if slot.sync_contribution_deadline() > currentTime: - let waitDur = - nanoseconds((slot.sync_contribution_deadline() - currentTime).nanoseconds) + if slot.sync_contribution_deadline(vc.timeConfig) > currentTime: + let waitDur = nanoseconds(( + slot.sync_contribution_deadline(vc.timeConfig) - currentTime).nanoseconds) # Sleeping until `sync_contribution_deadline`. 
debug "Waiting for sync contribution deadline", wait_time = waitDur await sleepAsync(waitDur) block: - let delay = vc.getDelay(slot.sync_contribution_deadline()) + let delay = vc.getDelay(slot.sync_contribution_deadline(vc.timeConfig)) debug "Producing contribution and proofs", delay = delay try: @@ -417,7 +420,7 @@ proc processSyncCommitteeTasks( let vc = service.client duties = vc.getSyncCommitteeDutiesForSlot(slot + 1) - timeout = vc.beaconClock.durationToNextSlot() + timeout = vc.beaconClock.fromNow(slot + 1).durationOrZero() logScope: slot = slot diff --git a/beacon_chain/validators/action_tracker.nim b/beacon_chain/validators/action_tracker.nim index 55b80e2b33..28c59006c5 100644 --- a/beacon_chain/validators/action_tracker.nim +++ b/beacon_chain/validators/action_tracker.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,9 +8,10 @@ {.push raises: [].} import - stew/shims/[sets, hashes], chronicles, + stew/shims/hashes, chronicles, ../spec/forks +from stew/shims/sets import keepItIf from ../spec/validator import compute_subscribed_subnets from ../consensus_object_pools/block_pools_types import ShufflingRef from ../consensus_object_pools/spec_cache import @@ -61,9 +62,8 @@ type ## for internal validators knownValidators*: Table[ValidatorIndex, Slot] - ## Validators that we've recently seen - we'll subscribe to one stability - ## subnet for each such validator - the slot is used to expire validators - ## that no longer are posting duties + ## Validators that we've recently seen - the slot is used to expire + ## validators that no longer are posting duties duties: HashSet[AggregatorDuty] ## Known aggregation duties in the near future - before each such @@ -137,9 +137,8 @@ func stabilitySubnets*(tracker: ActionTracker, slot: Slot): AttnetBits = allSubnetBits else: var res: AttnetBits - if tracker.knownValidators.len > 0: - for subnetId in compute_subscribed_subnets(tracker.nodeId, slot.epoch): - res[subnetId.int] = true + for subnetId in compute_subscribed_subnets(tracker.nodeId, slot.epoch): + res[subnetId.int] = true res proc updateSlot*(tracker: var ActionTracker, wallSlot: Slot) = diff --git a/beacon_chain/validators/beacon_validators.nim b/beacon_chain/validators/beacon_validators.nim index 963bf75b4d..8bb65b3a8b 100644 --- a/beacon_chain/validators/beacon_validators.nim +++ b/beacon_chain/validators/beacon_validators.nim @@ -5,46 +5,47 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} -# This module is responsible for handling beacon node validators, ie those that -# that are running directly in the beacon node and not in a separate validator -# client process - we name these "beacon validators" while those running -# outside are "client validators". -# This module also contains implementation logic for the REST validator API. 
+## This module is responsible for handling beacon node validators, ie those that +## that are running directly in the beacon node and not in a separate validator +## client process - we name these "beacon validators" while those running +## outside are "client validators". import # Standard library std/[os, tables], # Nimble packages - stew/[assign2, byteutils], - chronos, metrics, - chronicles, chronicles/timings, - json_serialization/std/[options, sets, net], - eth/db/kvstore, - web3/primitives, - kzg4844, + stew/byteutils, + chronos, + metrics, + chronicles, + json_serialization/std/[sets, net], # Local modules ../spec/[ - eth2_merkleization, forks, helpers, network, signatures, state_transition, - validator], + eth2_merkleization, forks, helpers, network, + peerdas_helpers, signatures, state_transition, + state_transition_block, validator, + ], + ../spec/mev/rest_mev_calls, ../consensus_object_pools/[ - spec_cache, blockchain_dag, block_clearance, attestation_pool, - sync_committee_msg_pool, validator_change_pool, consensus_manager, - common_tools], + spec_cache, blockchain_dag, attestation_pool, sync_committee_msg_pool, + validator_change_pool, consensus_manager, common_tools, + ], ../el/el_manager, ../networking/eth2_network, - ../sszdump, ../sync/sync_manager, - ../gossip_processing/block_processor, - ".."/[conf, beacon_clock, beacon_node], - "."/[ - keystore_management, slashing_protection, validator_duties, validator_pool], - ".."/spec/mev/[rest_deneb_mev_calls, rest_electra_mev_calls, rest_fulu_mev_calls] + ../sszdump, + ../[conf, beacon_clock, beacon_node], + ./[ + block_payloads, keystore_management, slashing_protection, validator_duties, + validator_pool, + ] from std/sequtils import mapIt from eth/async_utils import awaitWithTimeout +from ./message_router_mev import unblindAndRouteBlockMEV # Metrics for tracking attestation and beacon block loss declareCounter beacon_light_client_finality_updates_sent, @@ -75,44 +76,6 @@ declarePublicGauge(attached_validator_balance_total, logScope: topics = "beacval" -type - EngineBid = object - blck*: ForkedBeaconBlock - executionPayloadValue*: Wei - consensusBlockValue*: UInt256 - blobsBundleOpt*: Opt[deneb.BlobsBundle] - - BuilderBid[SBBB] = object - blindedBlckPart*: SBBB - executionRequests*: ExecutionRequests - executionPayloadValue*: UInt256 - consensusBlockValue*: UInt256 - - ForkedBlockResult = - Result[EngineBid, string] - BlindedBlockResult[SBBB] = - Result[BuilderBid[SBBB], string] - - Bids[SBBB] = object - engineBid: Opt[EngineBid] - builderBid: Opt[BuilderBid[SBBB]] - - BoostFactorKind {.pure.} = enum - Local, Builder - - BoostFactor = object - case kind: BoostFactorKind - of BoostFactorKind.Local: - value8: uint8 - of BoostFactorKind.Builder: - value64: uint64 - -func init(t: typedesc[BoostFactor], value: uint8): BoostFactor = - BoostFactor(kind: BoostFactorKind.Local, value8: value) - -func init(t: typedesc[BoostFactor], value: uint64): BoostFactor = - BoostFactor(kind: BoostFactorKind.Builder, value64: value) - func getValidator*(validators: auto, pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] = let idx = validators.findIt(it.pubkey == pubkey) @@ -129,6 +92,16 @@ func blockConsensusValue(r: BlockRewards): UInt256 {.noinit.} = u256(r.attestations + r.sync_aggregate + r.proposer_slashings + r.attester_slashings) * u256(1000000000) +proc getFeeRecipient(node: BeaconNode, + pubkey: ValidatorPubKey, + validatorIdx: Opt[ValidatorIndex], + epoch: Epoch): Eth1Address = + node.consensusManager[].getFeeRecipient(pubkey, 
validatorIdx, epoch) + +proc getGasLimit(node: BeaconNode, + pubkey: ValidatorPubKey): uint64 = + node.consensusManager[].getGasLimit(pubkey) + proc addValidatorsFromWeb3Signer( node: BeaconNode, web3signerUrl: Web3SignerUrl, epoch: Epoch) {.async: (raises: [CancelledError]).} = @@ -147,9 +120,8 @@ proc addValidatorsFromWeb3Signer( Opt.some(data.get().index) else: Opt.none(ValidatorIndex) - feeRecipient = - node.consensusManager[].getFeeRecipient(keystore.pubkey, index, epoch) - gasLimit = node.consensusManager[].getGasLimit(keystore.pubkey) + feeRecipient = node.getFeeRecipient(keystore.pubkey, index, epoch) + gasLimit = node.getGasLimit(keystore.pubkey) v = node.attachedValidators[].addValidator(keystore, feeRecipient, gasLimit) node.attachedValidators[].updateValidator(v, data) @@ -168,9 +140,8 @@ proc addValidators*(node: BeaconNode) {.async: (raises: [CancelledError]).} = Opt.some(data.get().index) else: Opt.none(ValidatorIndex) - feeRecipient = node.consensusManager[].getFeeRecipient( - keystore.pubkey, index, epoch) - gasLimit = node.consensusManager[].getGasLimit(keystore.pubkey) + feeRecipient = node.getFeeRecipient(keystore.pubkey, index, epoch) + gasLimit = node.getGasLimit(keystore.pubkey) v = node.attachedValidators[].addValidator(keystore, feeRecipient, gasLimit) @@ -197,10 +168,8 @@ proc pollForDynamicValidators*(node: BeaconNode, let epoch = node.currentSlot().epoch index = Opt.none(ValidatorIndex) - feeRecipient = - node.consensusManager[].getFeeRecipient(keystore.pubkey, index, epoch) - gasLimit = - node.consensusManager[].getGasLimit(keystore.pubkey) + feeRecipient = node.getFeeRecipient(keystore.pubkey, index, epoch) + gasLimit = node.getGasLimit(keystore.pubkey) discard node.attachedValidators[].addValidator(keystore, feeRecipient, gasLimit) @@ -274,7 +243,7 @@ proc handleLightClientUpdates*(node: BeaconNode, slot: Slot) static: doAssert lightClientFinalityUpdateSlotOffset == lightClientOptimisticUpdateSlotOffset let sendTime = node.beaconClock.fromNow( - slot.light_client_finality_update_time()) + slot.light_client_finality_update_time(node.dag.cfg.time)) if sendTime.inFuture: debug "Waiting to send LC updates", slot, delay = shortLog(sendTime.offset) await sleepAsync(sendTime.offset) @@ -367,1040 +336,301 @@ proc createAndSendAttestation(node: BeaconNode, registered.toAttestation(signature), subnet_id, checkSignature = false, checkValidator = false) -proc getBlockProposalEth1Data*(node: BeaconNode, - state: ForkedHashedBeaconState): - BlockProposalEth1Data = - let finalizedEpochRef = node.dag.getFinalizedEpochRef() - result = node.elManager.getBlockProposalData( - state, finalizedEpochRef.eth1_data, - finalizedEpochRef.eth1_deposit_index) - -proc getFeeRecipient(node: BeaconNode, - pubkey: ValidatorPubKey, - validatorIdx: ValidatorIndex, - epoch: Epoch): Eth1Address = - node.consensusManager[].getFeeRecipient(pubkey, Opt.some(validatorIdx), epoch) - -proc getGasLimit(node: BeaconNode, - pubkey: ValidatorPubKey): uint64 = - node.consensusManager[].getGasLimit(pubkey) - -from web3/engine_api_types import PayloadExecutionStatus -from ../spec/beaconstate import get_expected_withdrawals - -proc getExecutionPayload( - PayloadType: type ForkyExecutionPayloadForSigning, - node: BeaconNode, head: BlockRef, proposalState: ref ForkedHashedBeaconState, - validator_index: ValidatorIndex): Future[Opt[PayloadType]] - {.async: (raises: [CancelledError], raw: true).} = - # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/validator.md#executionpayload - +proc 
registerBlock( + node: BeaconNode, + validator: AttachedValidator, + validator_index: ValidatorIndex, + blck: ForkyBeaconBlock | ForkyBlindedBeaconBlock, +): Result[Eth2Digest, string] = let - epoch = withState(proposalState[]): - forkyState.data.slot.epoch - feeRecipient = block: - let pubkey = node.dag.validatorKey(validator_index) - if pubkey.isNone(): - warn "Cannot get proposer pubkey, bug?", validator_index - default(Eth1Address) - else: - node.getFeeRecipient(pubkey.get().toPubKey(), validator_index, epoch) - - beaconHead = node.attestationPool[].getBeaconHead(head) - executionHead = withState(proposalState[]): - when consensusFork >= ConsensusFork.Bellatrix: - forkyState.data.latest_execution_payload_header.block_hash - else: - (static(default(Eth2Digest))) - latestSafe = beaconHead.safeExecutionBlockHash - latestFinalized = beaconHead.finalizedExecutionBlockHash - timestamp = withState(proposalState[]): - compute_timestamp_at_slot(forkyState.data, forkyState.data.slot) - random = withState(proposalState[]): - get_randao_mix(forkyState.data, get_current_epoch(forkyState.data)) - withdrawals = withState(proposalState[]): - when consensusFork >= ConsensusFork.Capella: - get_expected_withdrawals(forkyState.data) - else: - @[] + fork = node.dag.forkAtEpoch(blck.slot.epoch) + genesis_validators_root = node.dag.genesis_validators_root + blockRoot = hash_tree_root(blck) + signingRoot = + compute_block_signing_root(fork, genesis_validators_root, blck.slot, blockRoot) + + node.attachedValidators[].slashingProtection.registerBlock( + validator_index, validator.pubkey, blck.slot, signingRoot + ).isOkOr: + warn "Slashing protection activated for block proposal", + blockRoot = shortLog(blockRoot), + blck = shortLog(blck), + signingRoot = shortLog(signingRoot), + validator = validator.pubkey, + slot = blck.slot, + existingProposal = error + return err("Proposal would be slashable: " & $error) + ok blockRoot + +proc getBlockSignature( + node: BeaconNode, + validator: AttachedValidator, + blockRoot: Eth2Digest, + blck: ForkyBeaconBlock | ForkyBlindedBeaconBlock, +): Future[Result[ValidatorSig, string]] {.async: (raises: [CancelledError]).} = + # Check with slashing protection before submitBlindedBlock + let + fork = node.dag.forkAtEpoch(blck.slot.epoch) + genesis_validators_root = node.dag.genesis_validators_root - info "Requesting engine payload", - beaconHead = shortLog(beaconHead.blck), - executionHead = shortLog(executionHead), - validatorIndex = validator_index, - feeRecipient = $feeRecipient + res = + await validator.getBlockSignature(fork, genesis_validators_root, blockRoot, blck) - node.elManager.getPayload( - PayloadType, beaconHead.blck.bid.root, executionHead, latestSafe, - latestFinalized, timestamp, random, feeRecipient, withdrawals) + if res.isErr: + warn "Could not get block proposal signature", + validator = shortLog(validator), + blockRoot = shortLog(blockRoot), + blck = shortLog(blck), + err = res.error -# BlockRewards has issues resolving somehow otherwise -import ".."/spec/state_transition_block + res -proc makeBeaconBlockForHeadAndSlot*( - PayloadType: type ForkyExecutionPayloadForSigning, - node: BeaconNode, randao_reveal: ValidatorSig, - validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef, +proc proposeBlockAux( + node: BeaconNode, + consensusFork: static ConsensusFork, + validator: AttachedValidator, + head: BlockRef, slot: Slot, - - # These parameters are for the builder API - execution_payload: Opt[PayloadType], - transactions_root: Opt[Eth2Digest], - 
execution_payload_root: Opt[Eth2Digest], - withdrawals_root: Opt[Eth2Digest], - kzg_commitments: Opt[KzgCommitments], - execution_requests: ExecutionRequests): - Future[ForkedBlockResult] {.async: (raises: [CancelledError]).} = - # Advance state to the slot that we're proposing for - var cache = StateCache() + randao_reveal: ValidatorSig, +): Future[BlockRef] {.async: (raises: [CancelledError]).} = + var + cache = new StateCache + # TODO move the creation of this proposal state away from the hot path + state = node.dag.getProposalState(head, slot, cache[]).valueOr: + beacon_block_production_errors.inc() + return head let - # The clearance state already typically sits at the right slot per - # `advanceClearanceState` + graffiti = node.getGraffitiBytes(validator) + validator_index = validator.index.expect("index set for proposer") + + engineBid = + when consensusFork == ConsensusFork.Gloas: + debugGloasComment "when need to getExecutionPayload/getPayload" + default(Opt[EngineBid[gloas.ExecutionPayloadForSigning]]) + elif consensusFork >= ConsensusFork.Electra: + # Fetch both builder and engine payloads then use the better one to + # make a block + let + payloadBuilderClient = + node.getPayloadBuilderClient(validator_index.distinctBase).valueOr(nil) + + bids = await node.collectBids( + consensusFork, payloadBuilderClient, validator.pubkey, validator_index, + head, slot, state, + ) + + localBlockValueBoost = node.config.localBlockValueBoost + useBuilderPayload = + bids.useBuilderPayload(BoostFactor.init(localBlockValueBoost)) + + if payloadBuilderClient != nil: + # Log payload selection only if the user enabled builder support + info "Payload selected", + slot, + validator = shortLog(validator), + localBlockValueBoost, + useBuilderPayload, + hasBuilderPayload = bids.builderBid.isSome(), + hasEnginePayload = bids.engineBid.isSome() + + if useBuilderPayload: + doAssert bids.builderBid.isSome(), "Checked in useBuilderPayload" + let builderBlockRes = node.makeBuilderBlock( + consensusFork, + state[].forky(consensusFork), + cache[], + validator_index, + randao_reveal, + graffiti, + head, + slot, + bids.builderBid.value(), + ) + + if builderBlockRes.isOk: + # Slashing database serves as the cutoff point for falling back to + # engine blocks since failures from this point onwards should be + # independent of the (type of) payload. + template blck(): untyped = + builderBlockRes.get().blck + + let + blockRoot = node.registerBlock(validator, validator_index, blck).valueOr: + beacon_block_builder_missed_without_fallback.inc() + beacon_block_production_errors.inc() + return head + + signature = (await node.getBlockSignature(validator, blockRoot, blck)).valueOr: + beacon_block_builder_missed_without_fallback.inc() + beacon_block_production_errors.inc() + return head + + blindedBlock = consensusFork.SignedBlindedBeaconBlock( + message: blck, signature: signature + ) + + unblindedBlockRef = + await node.unblindAndRouteBlockMEV(payloadBuilderClient, blindedBlock) + + if unblindedBlockRef.isErr: + # unblindedBlockRef.isErr or unblindedBlockRef.get.isNone indicates that + # the block failed to validate or integrate into the DAG, which for the + # purpose of this return value, is equivalent. It's used to drive Beacon + # REST API output. 
+ # + # https://collective.flashbots.net/t/post-mortem-april-3rd-2023-mev-boost-relay-incident-and-related-timing-issue/1540 + # has caused false positives, because + # "A potential mitigation to this attack is to introduce a cutoff timing + # into the proposer's slot whereafter this time (e.g. 3 seconds) the relay + # will no longer return a block to the proposer. Relays began to roll out + # this mitigation in the evening of April 3rd UTC time with a 2 second + # cutoff, and notified other relays to do the same. After receiving + # credible reports of honest validators missing their slots the suggested + # timing cutoff was increased to 3 seconds." + let errMsg = + if unblindedBlockRef.isErr: + unblindedBlockRef.error + else: + "Unblinded block not returned to proposer" + + warn "Failed to unblind or route builder payload", + validator = shortLog(validator), + blck = shortLog(blindedBlock.message), + err = errMsg + + # TODO Just because the relay didn't answer doesn't mean it was missed? + beacon_block_builder_missed_without_fallback.inc() + + return head + + when consensusFork >= ConsensusFork.Fulu: + if unblindedBlockRef.get.isNone: + # This corresponds to 202 in Fulu MEV. + return head + else: + if unblindedBlockRef.get.isNone: + warn "Failed to unblind or route builder payload", + validator = shortLog(validator), + blck = shortLog(blindedBlock.message), + err = "Unblinded block not returned to proposer" + return head + + beacon_blocks_proposed.inc() + return unblindedBlockRef.get.get + + if bids.engineBid.isNone() and state[].is_merge_transition_complete(): + # Cannot fall back to engine without a payload, post merge + beacon_block_production_errors.inc() + return head - # TODO can use `valueOr:`/`return err($error)` if/when - # https://github.com/status-im/nim-stew/issues/161 is addressed - maybeState = node.dag.getProposalState(head, slot, cache) + beacon_block_builder_missed_with_fallback.inc() - if maybeState.isErr: - beacon_block_production_errors.inc() - return err($maybeState.error) + notice "Failed to create builder-based block, trying engine payload", + slot, error = builderBlockRes.error - let - state = maybeState.get - payloadFut = - if execution_payload.isSome: - # Builder API - - # In Capella, only get withdrawals root from relay. - # The execution payload will be small enough to be safe to copy because - # it won't have transactions (it's blinded) - var modified_execution_payload = execution_payload - withState(state[]): - when consensusFork >= ConsensusFork.Capella and - PayloadType.kind >= ConsensusFork.Capella: - let withdrawals = List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]( - get_expected_withdrawals(forkyState.data)) - if withdrawals_root.isNone or - hash_tree_root(withdrawals) != withdrawals_root.get: - # If engine API returned a block, will use that - return err("Builder relay provided incorrect withdrawals root") - # Otherwise, the state transition function notices that there are - # too few withdrawals. 
- assign(modified_execution_payload.get.executionPayload.withdrawals, - withdrawals) - - let fut = Future[Opt[PayloadType]].Raising([CancelledError]).init( - "given-payload") - fut.complete(modified_execution_payload) - fut - elif slot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH or - not state[].is_merge_transition_complete: - let fut = Future[Opt[PayloadType]].Raising([CancelledError]).init( - "empty-payload") - fut.complete(Opt.some(default(PayloadType))) - fut - else: - # Create execution payload while packing attestations - getExecutionPayload(PayloadType, node, head, state, validator_index) + # makeBuilderBlock will invalidate the state - get a new one + cache = new StateCache + state = node.dag.getProposalState(head, slot, cache[]).valueOr: + beacon_block_production_errors.inc() + return head - eth1Proposal = node.getBlockProposalEth1Data(state[]) + bids.engineBid + else: + await node.getExecutionPayload( + consensusFork, head, state, validator_index, validator.pubkey + ) - if eth1Proposal.hasMissingDeposits: + if engineBid.isNone(): beacon_block_production_errors.inc() - warn "Eth1 deposits not available. Skipping block proposal", slot - return err("Eth1 deposits not available") + return head let - attestations = - when PayloadType.kind >= ConsensusFork.Electra: - node.attestationPool[].getElectraAttestationsForBlock(state[], cache) - else: - node.attestationPool[].getAttestationsForBlock(state[], cache) - exits = withState(state[]): - node.validatorChangePool[].getBeaconBlockValidatorChanges( - node.dag.cfg, forkyState.data) - # TODO workaround for https://github.com/arnetheduck/nim-results/issues/34 - payloadRes = await payloadFut - payload = payloadRes.valueOr: - beacon_block_production_errors.inc() - warn "Unable to get execution payload. Skipping block proposal", - slot, validator_index - return err("Unable to get execution payload") - - # Don't use the requests passed in, TODO remove that - let execution_requests_actual = - when PayloadType.kind >= ConsensusFork.Electra: - # Don't want un-decoded SSZ going any further/deeper - var - execution_requests_buffer: ExecutionRequests - prev_type: Opt[byte] - try: - for request_type_and_payload in payload.executionRequests: - if request_type_and_payload.len < 2: - return err("Execution layer request too short") - - let request_type = request_type_and_payload[0] - if prev_type.isSome: - if request_type < prev_type.get: - return err("Execution layer request types not sorted") - if request_type == prev_type.get: - return err("Execution layer request types duplicated") - prev_type.ok request_type - - template request_payload: untyped = - request_type_and_payload.toOpenArray( - 1, request_type_and_payload.len - 1) - case request_type_and_payload[0] - of DEPOSIT_REQUEST_TYPE: - execution_requests_buffer.deposits = - SSZ.decode(request_payload, - List[DepositRequest, Limit MAX_DEPOSIT_REQUESTS_PER_PAYLOAD]) - of WITHDRAWAL_REQUEST_TYPE: - execution_requests_buffer.withdrawals = - SSZ.decode(request_payload, - List[WithdrawalRequest, Limit MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD]) - of CONSOLIDATION_REQUEST_TYPE: - execution_requests_buffer.consolidations = - SSZ.decode(request_payload, - List[ConsolidationRequest, Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD]) - else: - return err("Execution layer invalid request type") - except CatchableError: - return err("Unable to deserialize execution layer requests") - - execution_requests_buffer - else: - default(ExecutionRequests) # won't be used by block builder - - let res = makeBeaconBlockWithRewards( - 
node.dag.cfg, - state[], + engineBlock = node.makeEngineBlock( + consensusFork, + state[].forky(consensusFork), + cache[], validator_index, randao_reveal, - eth1Proposal.vote, graffiti, - attestations, - eth1Proposal.deposits, - exits, - node.syncCommitteeMsgPool[].produceSyncAggregate(head.bid, slot), - payload, - noRollback, # Temporary state - no need for rollback - cache, - verificationFlags = {}, - transactions_root = transactions_root, - execution_payload_root = execution_payload_root, - kzg_commitments = kzg_commitments, - execution_requests = execution_requests_actual).mapErr do (error: cstring) -> string: - # This is almost certainly a bug, but it's complex enough that there's a - # small risk it might happen even when most proposals succeed - thus we - # log instead of asserting - beacon_block_production_errors.inc() - warn "Cannot create block for proposal", - slot, head = shortLog(head), error - $error - - var blobsBundleOpt = Opt.none(deneb.BlobsBundle) - when typeof(payload).kind >= ConsensusFork.Deneb: - blobsBundleOpt = Opt.some(payload.blobsBundle) - - if res.isOk: - ok(EngineBid( - blck: res.get().blck, - executionPayloadValue: payload.blockValue, - consensusBlockValue: res.get().rewards.blockConsensusValue(), - blobsBundleOpt: blobsBundleOpt, - )) - else: - err(res.error) - -# TODO what is this for -proc makeBeaconBlockForHeadAndSlot*( - PayloadType: type ForkyExecutionPayloadForSigning, node: BeaconNode, randao_reveal: ValidatorSig, - validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef, - slot: Slot): - Future[ForkedBlockResult] = - return makeBeaconBlockForHeadAndSlot( - PayloadType, node, randao_reveal, validator_index, graffiti, head, slot, - execution_payload = Opt.none(PayloadType), - transactions_root = Opt.none(Eth2Digest), - execution_payload_root = Opt.none(Eth2Digest), - withdrawals_root = Opt.none(Eth2Digest), - kzg_commitments = Opt.none(KzgCommitments), - execution_requests = static(default(ExecutionRequests))) - -proc getBlindedExecutionPayload[ - EPH: deneb_mev.BlindedExecutionPayloadAndBlobsBundle | - electra_mev.BlindedExecutionPayloadAndBlobsBundle | - fulu_mev.BlindedExecutionPayloadAndBlobsBundle]( - node: BeaconNode, payloadBuilderClient: RestClientRef, slot: Slot, - executionBlockHash: Eth2Digest, pubkey: ValidatorPubKey): - Future[BlindedBlockResult[EPH]] {.async: (raises: [CancelledError, RestError]).} = - # Not ideal to use `when` where instead of splitting into separate functions, - # but Nim doesn't overload on generic EPH type parameter. 
- when EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle: - let - response = awaitWithTimeout( - payloadBuilderClient.getHeaderDeneb( - slot, executionBlockHash, pubkey), - BUILDER_PROPOSAL_DELAY_TOLERANCE): - return err "Timeout obtaining Deneb blinded header from builder" - - res = decodeBytesJsonOrSsz( - GetHeaderResponseDeneb, response.data, response.contentType, - response.headers.getString("eth-consensus-version")) - - blindedHeader = res.valueOr: - return err( - "Unable to decode Deneb blinded header: " & $res.error & - " with HTTP status " & $response.status & ", Content-Type " & - $response.contentType & " and content " & $response.data) - elif EPH is electra_mev.BlindedExecutionPayloadAndBlobsBundle: - let - response = awaitWithTimeout( - payloadBuilderClient.getHeaderElectra( - slot, executionBlockHash, pubkey), - BUILDER_PROPOSAL_DELAY_TOLERANCE): - return err "Timeout obtaining Electra blinded header from builder" - - res = decodeBytesJsonOrSsz( - GetHeaderResponseElectra, response.data, response.contentType, - response.headers.getString("eth-consensus-version")) - - blindedHeader = res.valueOr: - return err( - "Unable to decode Electra blinded header: " & $res.error & - " with HTTP status " & $response.status & ", Content-Type " & - $response.contentType & " and content " & $response.data) - elif EPH is fulu_mev.BlindedExecutionPayloadAndBlobsBundle: - - debugFuluComment "Because electra MEV isn't working yet, this is a placeholder copy" - let - response = awaitWithTimeout( - payloadBuilderClient.getHeaderFulu( - slot, executionBlockHash, pubkey), - BUILDER_PROPOSAL_DELAY_TOLERANCE): - return err "Timeout obtaining Fulu blinded header from builder" - - res = decodeBytesJsonOrSsz( - GetHeaderResponseFulu, response.data, response.contentType, - response.headers.getString("eth-consensus-version")) - - blindedHeader = res.valueOr: - return err( - "Unable to decode Fulu blinded header: " & $res.error & - " with HTTP status " & $response.status & ", Content-Type " & - $response.contentType & " and content " & $response.data) - else: - static: doAssert false - - const httpOk = 200 - if response.status != httpOk: - return err "getBlindedExecutionPayload: non-200 HTTP response" - else: - if not verify_builder_signature( - node.dag.cfg.genesisFork, blindedHeader.data.message, - blindedHeader.data.message.pubkey, blindedHeader.data.signature): - return err "getBlindedExecutionPayload: signature verification failed" - - template builderBid: untyped = blindedHeader.data.message - when EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle: - return ok(BuilderBid[EPH]( - blindedBlckPart: EPH( - execution_payload_header: builderBid.header, - blob_kzg_commitments: builderBid.blob_kzg_commitments), - executionRequests: default(ExecutionRequests), - executionPayloadValue: builderBid.value)) - elif EPH is electra_mev.BlindedExecutionPayloadAndBlobsBundle or - EPH is fulu_mev.BlindedExecutionPayloadAndBlobsBundle: - return ok(BuilderBid[EPH]( - blindedBlckPart: EPH( - execution_payload_header: builderBid.header, - blob_kzg_commitments: builderBid.blob_kzg_commitments), - executionRequests: builderBid.execution_requests, - executionPayloadValue: builderBid.value)) - else: - static: doAssert false - -from ./message_router_mev import - copyFields, getFieldNames, unblindAndRouteBlockMEV - -func constructSignableBlindedBlock[T: deneb_mev.SignedBlindedBeaconBlock]( - blck: deneb.BeaconBlock, - blindedBundle: deneb_mev.BlindedExecutionPayloadAndBlobsBundle): T = - # Leaves signature field default, 
to be filled in by caller - const - blckFields = getFieldNames(typeof(blck)) - blckBodyFields = getFieldNames(typeof(blck.body)) - - var blindedBlock: T - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#block-proposal - copyFields(blindedBlock.message, blck, blckFields) - copyFields(blindedBlock.message.body, blck.body, blckBodyFields) - assign( - blindedBlock.message.body.execution_payload_header, - blindedBundle.execution_payload_header) - assign( - blindedBlock.message.body.blob_kzg_commitments, - blindedBundle.blob_kzg_commitments) - - blindedBlock - -func constructSignableBlindedBlock[T: electra_mev.SignedBlindedBeaconBlock | - fulu_mev.SignedBlindedBeaconBlock]( - blck: electra.BeaconBlock | fulu.BeaconBlock, - blindedBundle: electra_mev.BlindedExecutionPayloadAndBlobsBundle | - fulu_mev.BlindedExecutionPayloadAndBlobsBundle): T = - # Leaves signature field default, to be filled in by caller - const - blckFields = getFieldNames(typeof(blck)) - blckBodyFields = getFieldNames(typeof(blck.body)) - - var blindedBlock: T - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#block-proposal - copyFields(blindedBlock.message, blck, blckFields) - copyFields(blindedBlock.message.body, blck.body, blckBodyFields) - assign( - blindedBlock.message.body.execution_payload_header, - blindedBundle.execution_payload_header) - assign( - blindedBlock.message.body.blob_kzg_commitments, - blindedBundle.blob_kzg_commitments) - - blindedBlock - -func constructSignableBlindedBlock[T: fulu_mev.SignedBlindedBeaconBlock]( - blck: fulu.BeaconBlock, - blindedBundle: fulu_mev.BlindedExecutionPayloadAndBlobsBundle): T = - # Leaves signature field default, to be filled in by caller - const - blckFields = getFieldNames(typeof(blck)) - blckBodyFields = getFieldNames(typeof(blck.body)) - - var blindedBlock: T - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#block-proposal - copyFields(blindedBlock.message, blck, blckFields) - copyFields(blindedBlock.message.body, blck.body, blckBodyFields) - assign( - blindedBlock.message.body.execution_payload_header, - blindedBundle.execution_payload_header) - assign( - blindedBlock.message.body.blob_kzg_commitments, - blindedBundle.blob_kzg_commitments) - - blindedBlock - -func constructPlainBlindedBlock[T: fulu_mev.BlindedBeaconBlock]( - blck: ForkyBeaconBlock, - blindedBundle: fulu_mev.BlindedExecutionPayloadAndBlobsBundle): T = - # https://github.com/nim-lang/Nim/issues/23020 workaround - static: doAssert T is fulu_mev.BlindedBeaconBlock + head, + slot, + engineBid[].eps, + engineBid[].execution_requests, + ).valueOr: + beacon_block_production_errors.inc() + return head - const - blckFields = getFieldNames(typeof(blck)) - blckBodyFields = getFieldNames(typeof(blck.body)) - - var blindedBlock: T - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#block-proposal - copyFields(blindedBlock, blck, blckFields) - copyFields(blindedBlock.body, blck.body, blckBodyFields) - assign( - blindedBlock.body.execution_payload_header, - blindedBundle.execution_payload_header) - assign( - blindedBlock.body.blob_kzg_commitments, - blindedBundle.blob_kzg_commitments) - - blindedBlock - -proc blindedBlockCheckSlashingAndSign[ - T: deneb_mev.SignedBlindedBeaconBlock | - electra_mev.SignedBlindedBeaconBlock | - fulu_mev.SignedBlindedBeaconBlock]( - node: BeaconNode, slot: Slot, validator: AttachedValidator, - validator_index: ValidatorIndex, 
nonsignedBlindedBlock: T): - Future[Result[T, string]] {.async: (raises: [CancelledError]).} = - # Check with slashing protection before submitBlindedBlock - let - fork = node.dag.forkAtEpoch(slot.epoch) - genesis_validators_root = node.dag.genesis_validators_root - blockRoot = hash_tree_root(nonsignedBlindedBlock.message) - signingRoot = compute_block_signing_root( - fork, genesis_validators_root, slot, blockRoot) - notSlashable = node.attachedValidators - .slashingProtection - .registerBlock(validator_index, validator.pubkey, slot, signingRoot) - - if notSlashable.isErr: - warn "Slashing protection activated for MEV block", - blockRoot = shortLog(blockRoot), blck = shortLog(nonsignedBlindedBlock), - signingRoot = shortLog(signingRoot), validator = validator.pubkey, - slot = slot, existingProposal = notSlashable.error - return err("MEV proposal would be slashable: " & $notSlashable.error) - - var blindedBlock = nonsignedBlindedBlock - blindedBlock.signature = block: - let res = await validator.getBlockSignature( - fork, genesis_validators_root, slot, blockRoot, blindedBlock.message) - if res.isErr(): - return err("Unable to sign block: " & res.error()) - res.get() + blockRoot = node.registerBlock(validator, validator_index, engineBlock.blck).valueOr: + beacon_block_production_errors.inc() + return head - return ok blindedBlock - -func getUnsignedBlindedBeaconBlock[ - T: deneb_mev.SignedBlindedBeaconBlock | - electra_mev.SignedBlindedBeaconBlock | - fulu_mev.SignedBlindedBeaconBlock]( - node: BeaconNode, slot: Slot, - validator_index: ValidatorIndex, forkedBlock: ForkedBeaconBlock, - executionPayloadHeader: deneb_mev.BlindedExecutionPayloadAndBlobsBundle | - electra_mev.BlindedExecutionPayloadAndBlobsBundle | - fulu_mev.BlindedExecutionPayloadAndBlobsBundle): - Result[T, string] = - withBlck(forkedBlock): - when consensusFork >= ConsensusFork.Deneb: - when not ( - (T is deneb_mev.SignedBlindedBeaconBlock and - consensusFork == ConsensusFork.Deneb) or - (T is electra_mev.SignedBlindedBeaconBlock and - consensusFork == ConsensusFork.Electra) or - (T is fulu_mev.SignedBlindedBeaconBlock and - consensusFork == ConsensusFork.Fulu)): - return err("getUnsignedBlindedBeaconBlock: mismatched block/payload types") - else: - return ok constructSignableBlindedBlock[T]( - forkyBlck, executionPayloadHeader) - else: - return err("getUnsignedBlindedBeaconBlock: attempt to construct pre-Deneb blinded block") - -proc getBlindedBlockParts[ - EPH: deneb_mev.BlindedExecutionPayloadAndBlobsBundle | - electra_mev.BlindedExecutionPayloadAndBlobsBundle | - fulu_mev.BlindedExecutionPayloadAndBlobsBundle]( - node: BeaconNode, payloadBuilderClient: RestClientRef, head: BlockRef, - pubkey: ValidatorPubKey, slot: Slot, randao: ValidatorSig, - validator_index: ValidatorIndex, graffiti: GraffitiBytes): - Future[Result[(EPH, UInt256, UInt256, ForkedBeaconBlock), string]] - {.async: (raises: [CancelledError]).} = - let - executionBlockHash = node.dag.loadExecutionBlockHash(head).valueOr: - # With checkpoint sync, the checkpoint block may be unavailable, - # and it could already be the parent of the new block before backfill. - # Fallback to EL, hopefully the block is available on the local path. 
- warn "Failed to load parent execution block hash, skipping block builder", - slot, validator_index, head = shortLog(head) - return err("loadExecutionBlockHash failed") - - blindedBlockRes = - try: - awaitWithTimeout( - getBlindedExecutionPayload[EPH]( - node, payloadBuilderClient, slot, executionBlockHash, pubkey), - BUILDER_PROPOSAL_DELAY_TOLERANCE): - BlindedBlockResult[EPH].err("getBlindedExecutionPayload timed out") - except RestDecodingError as exc: - BlindedBlockResult[EPH].err( - "getBlindedExecutionPayload REST decoding error: " & exc.msg) - except RestError as exc: - BlindedBlockResult[EPH].err( - "getBlindedExecutionPayload REST error: " & exc.msg) + signature = await(node.getBlockSignature(validator, blockRoot, engineBlock.blck)).valueOr: + beacon_block_production_errors.inc() + return head - if blindedBlockRes.isErr: - warn "Could not obtain blinded execution payload header", - error = blindedBlockRes.error, slot, validator_index, - head = shortLog(head) - # Haven't committed to the MEV block, so allow EL fallback. - return err(blindedBlockRes.error) - - # When creating this block, need to ensure it uses the MEV-provided execution - # payload, both to avoid repeated calls to network services and to ensure the - # consistency of this block (e.g., its state root being correct). Since block - # processing does not work directly using blinded blocks, fix up transactions - # root after running the state transition function on an otherwise equivalent - # non-blinded block without transactions. - # - # This doesn't have withdrawals, which each node has regardless of engine or - # builder API. makeBeaconBlockForHeadAndSlot fills it in later. - when EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle: - type PayloadType = deneb.ExecutionPayloadForSigning - template actualEPH: untyped = - blindedBlockRes.get.blindedBlckPart.execution_payload_header - let - withdrawals_root = Opt.some actualEPH.withdrawals_root - kzg_commitments = Opt.some( - blindedBlockRes.get.blindedBlckPart.blob_kzg_commitments) - execution_requests = default(ExecutionRequests) - - var shimExecutionPayload: PayloadType - type DenebEPH = - deneb_mev.BlindedExecutionPayloadAndBlobsBundle.execution_payload_header - copyFields( - shimExecutionPayload.executionPayload, actualEPH, getFieldNames(DenebEPH)) - elif EPH is electra_mev.BlindedExecutionPayloadAndBlobsBundle: - type PayloadType = electra.ExecutionPayloadForSigning - template actualEPH: untyped = - blindedBlockRes.get.blindedBlckPart.execution_payload_header - let - withdrawals_root = Opt.some actualEPH.withdrawals_root - kzg_commitments = Opt.some( - blindedBlockRes.get.blindedBlckPart.blob_kzg_commitments) - execution_requests = blindedBlockRes.get.executionRequests - - var shimExecutionPayload: PayloadType - type ElectraEPH = - electra_mev.BlindedExecutionPayloadAndBlobsBundle.execution_payload_header - copyFields( - shimExecutionPayload.executionPayload, actualEPH, getFieldNames(ElectraEPH)) - elif EPH is fulu_mev.BlindedExecutionPayloadAndBlobsBundle: - debugFuluComment "verify (again, after change) this is what builder API needs" - type PayloadType = fulu.ExecutionPayloadForSigning - template actualEPH: untyped = - blindedBlockRes.get.blindedBlckPart.execution_payload_header - let - withdrawals_root = Opt.some actualEPH.withdrawals_root - kzg_commitments = Opt.some( - blindedBlockRes.get.blindedBlckPart.blob_kzg_commitments) - execution_requests = blindedBlockRes.get.executionRequests - - var shimExecutionPayload: PayloadType - type FuluEPH = - 
fulu_mev.BlindedExecutionPayloadAndBlobsBundle.execution_payload_header - copyFields( - shimExecutionPayload.executionPayload, actualEPH, getFieldNames(FuluEPH)) - else: - static: doAssert false - - let newBlock = await makeBeaconBlockForHeadAndSlot( - PayloadType, node, randao, validator_index, graffiti, head, slot, - execution_payload = Opt.some shimExecutionPayload, - transactions_root = Opt.some actualEPH.transactions_root, - execution_payload_root = Opt.some hash_tree_root(actualEPH), - withdrawals_root = withdrawals_root, - kzg_commitments = kzg_commitments, - execution_requests = execution_requests) - - if newBlock.isErr(): - # Haven't committed to the MEV block, so allow EL fallback. - return err(newBlock.error) # already logged elsewhere! - - let forkedBlck = newBlock.get() - - return ok( - (blindedBlockRes.get.blindedBlckPart, - blindedBlockRes.get.executionPayloadValue, - forkedBlck.consensusBlockValue, - forkedBlck.blck)) - -proc getBuilderBid[ - SBBB: deneb_mev.SignedBlindedBeaconBlock | - electra_mev.SignedBlindedBeaconBlock | - fulu_mev.SignedBlindedBeaconBlock]( - node: BeaconNode, payloadBuilderClient: RestClientRef, head: BlockRef, - validator_pubkey: ValidatorPubKey, slot: Slot, randao: ValidatorSig, - graffitiBytes: GraffitiBytes, validator_index: ValidatorIndex): - Future[BlindedBlockResult[SBBB]] {.async: (raises: [CancelledError]).} = - ## Returns the unsigned blinded block obtained from the Builder API. - ## Used by the BN's own validators, but not the REST server - when SBBB is deneb_mev.SignedBlindedBeaconBlock: - type EPH = deneb_mev.BlindedExecutionPayloadAndBlobsBundle - elif SBBB is electra_mev.SignedBlindedBeaconBlock: - type EPH = electra_mev.BlindedExecutionPayloadAndBlobsBundle - elif SBBB is fulu_mev.SignedBlindedBeaconBlock: - type EPH = fulu_mev.BlindedExecutionPayloadAndBlobsBundle - else: - static: doAssert false - - let blindedBlockParts = await getBlindedBlockParts[EPH]( - node, payloadBuilderClient, head, validator_pubkey, slot, randao, - validator_index, graffitiBytes) - if blindedBlockParts.isErr: - # Not signed yet, fine to try to fall back on EL - beacon_block_builder_missed_with_fallback.inc() - return err blindedBlockParts.error() - - # These, together, get combined into the blinded block for signing and - # proposal through the relay network. 
- let (executionPayloadHeader, bidValue, consensusValue, forkedBlck) = - blindedBlockParts.get - - let unsignedBlindedBlock = getUnsignedBlindedBeaconBlock[SBBB]( - node, slot, validator_index, forkedBlck, executionPayloadHeader) - - if unsignedBlindedBlock.isErr: - return err unsignedBlindedBlock.error() - - template execution_requests: untyped = - unsignedBlindedBlock.get.message.body.execution_requests - when SBBB is deneb_mev.SignedBlindedBeaconBlock: - return ok(BuilderBid[SBBB]( - blindedBlckPart: unsignedBlindedBlock.get, - executionRequests: default(ExecutionRequests), - executionPayloadValue: bidValue, - consensusBlockValue: consensusValue)) - elif SBBB is electra_mev.SignedBlindedBeaconBlock or - SBBB is fulu_mev.SignedBlindedBeaconBlock: - return ok(BuilderBid[SBBB]( - blindedBlckPart: unsignedBlindedBlock.get, - executionRequests: execution_requests, - executionPayloadValue: bidValue, - consensusBlockValue: consensusValue)) - else: - static: doAssert false - -proc proposeBlockMEV( - node: BeaconNode, payloadBuilderClient: RestClientRef, - blindedBlock: - deneb_mev.SignedBlindedBeaconBlock | - electra_mev.SignedBlindedBeaconBlock | - fulu_mev.SignedBlindedBeaconBlock): - Future[Result[BlockRef, string]] {.async: (raises: [CancelledError]).} = - let unblindedBlockRef = await node.unblindAndRouteBlockMEV( - payloadBuilderClient, blindedBlock) - return if unblindedBlockRef.isOk and unblindedBlockRef.get.isSome: - beacon_blocks_proposed.inc() - ok(unblindedBlockRef.get.get) - else: - # unblindedBlockRef.isOk and unblindedBlockRef.get.isNone indicates that - # the block failed to validate and integrate into the DAG, which for the - # purpose of this return value, is equivalent. It's used to drive Beacon - # REST API output. - # - # https://collective.flashbots.net/t/post-mortem-april-3rd-2023-mev-boost-relay-incident-and-related-timing-issue/1540 - # has caused false positives, because - # "A potential mitigation to this attack is to introduce a cutoff timing - # into the proposer's slot whereafter this time (e.g. 3 seconds) the relay - # will no longer return a block to the proposer. Relays began to roll out - # this mitigation in the evening of April 3rd UTC time with a 2 second - # cutoff, and notified other relays to do the same. After receiving - # credible reports of honest validators missing their slots the suggested - # timing cutoff was increased to 3 seconds." - let errMsg = - if unblindedBlockRef.isErr: - unblindedBlockRef.error - else: - "Unblinded block not returned to proposer" - err errMsg - -func isEFMainnet(cfg: RuntimeConfig): bool = - cfg.DEPOSIT_CHAIN_ID == 1 and cfg.DEPOSIT_NETWORK_ID == 1 - -proc collectBids( - SBBB: typedesc, EPS: typedesc, node: BeaconNode, - payloadBuilderClient: RestClientRef, validator_pubkey: ValidatorPubKey, - validator_index: ValidatorIndex, graffitiBytes: GraffitiBytes, - head: BlockRef, slot: Slot, - randao: ValidatorSig): Future[Bids[SBBB]] {.async: (raises: [CancelledError]).} = - let usePayloadBuilder = - if not payloadBuilderClient.isNil: - withState(node.dag.headState): - # Head slot, not proposal slot, matters here - # TODO it might make some sense to allow use of builder API if local - # EL fails -- i.e. it would change priorities, so any block from the - # execution layer client would override builder API. But it seems an - # odd requirement to produce no block at all in those conditions. 
- (not node.dag.cfg.isEFMainnet) or (not livenessFailsafeInEffect( - forkyState.data.block_roots.data, forkyState.data.slot)) - else: - false + signedBlock = consensusFork.SignedBeaconBlock( + message: engineBlock.blck, signature: signature, root: blockRoot + ) - let - payloadBuilderBidFut = - if usePayloadBuilder: - # TODO apparently some capella support still here? - when not (EPS is bellatrix.ExecutionPayloadForSigning): - getBuilderBid[SBBB](node, payloadBuilderClient, head, - validator_pubkey, slot, randao, graffitiBytes, - validator_index) - else: - let fut = newFuture[BlindedBlockResult[SBBB]]("builder-bid") - fut.complete(BlindedBlockResult[SBBB].err( - "Bellatrix Builder API unsupported")) - fut - else: - let fut = newFuture[BlindedBlockResult[SBBB]]("builder-bid") - fut.complete(BlindedBlockResult[SBBB].err( - "either payload builder disabled or liveness failsafe active")) - fut - engineBlockFut = makeBeaconBlockForHeadAndSlot( - EPS, node, randao, validator_index, graffitiBytes, head, slot) - - # getBuilderBid times out after BUILDER_PROPOSAL_DELAY_TOLERANCE, with 1 more - # second for remote validators. makeBeaconBlockForHeadAndSlot times out after - # 1 second. - await allFutures(payloadBuilderBidFut, engineBlockFut) - doAssert payloadBuilderBidFut.finished and engineBlockFut.finished - - let builderBid = - if payloadBuilderBidFut.completed: - if payloadBuilderBidFut.value().isOk: - Opt.some(payloadBuilderBidFut.value().value()) - elif usePayloadBuilder: - notice "Payload builder error", - slot, head = shortLog(head), validator = shortLog(validator_pubkey), - err = payloadBuilderBidFut.value().error() - Opt.none(BuilderBid[SBBB]) - else: - # Effectively the same case, but without the log message - Opt.none(BuilderBid[SBBB]) - else: - notice "Payload builder bid request failed", - slot, head = shortLog(head), validator = shortLog(validator_pubkey), - err = payloadBuilderBidFut.error.msg - Opt.none(BuilderBid[SBBB]) - - let engineBid = - if engineBlockFut.completed: - if engineBlockFut.value.isOk: - Opt.some(engineBlockFut.value().value()) + blobsOpt = + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + Opt.some( + signedBlock.create_blob_sidecars( + deneb.KzgProofs(engineBlock.blobsBundle.proofs), + engineBlock.blobsBundle.blobs)) else: - notice "Engine block building error", - slot, head = shortLog(head), validator = shortLog(validator_pubkey), - err = engineBlockFut.value.error() - Opt.none(EngineBid) - else: - notice "Engine block building failed", - slot, head = shortLog(head), validator = shortLog(validator_pubkey), - err = engineBlockFut.error.msg - Opt.none(EngineBid) - - Bids[SBBB]( - engineBid: engineBid, - builderBid: builderBid) - -func builderBetterBid(localBlockValueBoost: uint8, - builderValue: UInt256, engineValue: Wei): bool = - # Scale down to ensure no overflows; if lower few bits would have been - # otherwise decisive, was close enough not to matter. Calibrate to let - # uint8-range percentages avoid overflowing. 
- const scalingBits = 10 - static: doAssert 1 shl scalingBits > - high(typeof(localBlockValueBoost)).uint16 + 100 - let - scaledBuilderValue = (builderValue shr scalingBits) * 100 - scaledEngineValue = engineValue shr scalingBits - scaledBuilderValue > - scaledEngineValue * (localBlockValueBoost.uint16 + 100).u256 - -func builderBetterBid*(builderBoostFactor: uint64, - builderValue: UInt256, engineValue: Wei): bool = - if builderBoostFactor == 0'u64: - false - elif builderBoostFactor == 100'u64: - builderValue >= engineValue - elif builderBoostFactor == high(uint64): - true - else: - let - multiplier = builderBoostFactor.u256 - multipledBuilderValue = builderValue * multiplier - overflow = - if builderValue == UInt256.zero: - false - else: - builderValue != multipledBuilderValue div multiplier - - if overflow: - # In case of overflow we will use `builderValue`. - true - else: - (multipledBuilderValue div 100) >= engineValue - -func builderBetterBid(boostFactor: BoostFactor, builderValue: UInt256, - engineValue: Wei): bool = - case boostFactor.kind - of BoostFactorKind.Local: - builderBetterBid(boostFactor.value8, builderValue, engineValue) - of BoostFactorKind.Builder: - builderBetterBid(boostFactor.value64, builderValue, engineValue) + Opt.none(seq[BlobSidecar]) -proc proposeBlockAux( - SBBB: typedesc, EPS: typedesc, node: BeaconNode, - validator: AttachedValidator, validator_index: ValidatorIndex, - head: BlockRef, slot: Slot, randao: ValidatorSig, fork: Fork, - genesis_validators_root: Eth2Digest, - localBlockValueBoost: uint8 -): Future[BlockRef] {.async: (raises: [CancelledError]).} = - let - boostFactor = BoostFactor.init(localBlockValueBoost) - graffitiBytes = node.getGraffitiBytes(validator) - payloadBuilderClient = - node.getPayloadBuilderClient(validator_index.distinctBase).valueOr(nil) - - collectedBids = await collectBids( - SBBB, EPS, node, payloadBuilderClient, validator.pubkey, validator_index, - graffitiBytes, head, slot, randao) - - useBuilderBlock = - if collectedBids.builderBid.isSome(): - collectedBids.engineBid.isNone() or builderBetterBid( - boostFactor, - collectedBids.builderBid.value().executionPayloadValue, - collectedBids.engineBid.value().executionPayloadValue) + columnsOpt = + when consensusFork >= ConsensusFork.Fulu: + Opt.some(signedBlock.assemble_data_column_sidecars( + engineBlock.blobsBundle.blobs.mapIt(kzg.KzgBlob(bytes: it)), + @(engineBlock.blobsBundle.proofs.mapIt(kzg.KzgProof(it))))) else: - if not collectedBids.engineBid.isSome(): - return head # errors logged in router - false - - # There should always be an engine bid, and if payloadBuilderClient exists, - # not getting a builder bid is also an error. Do not report lack of builder - # when that's intentional. Replicate some of the nested if statements here, - # because that avoids entangling logging with other functionality. The logs - # here are inteded to clarify that, for example, when the builder API relay - # URL is provided for this validator, it's reasonable for Nimbus not to use - # it for every block. - if collectedBids.engineBid.isSome(): - # Three cases: builder bid expected and absent, builder bid expected and - # present, and builder bid not expected. 
- if collectedBids.builderBid.isSome(): - info "Compared engine and builder block bids", - localBlockValueBoost, - useBuilderBlock, - builderBlockValue = - toString(collectedBids.builderBid.value().executionPayloadValue, 10), - engineBlockValue = - toString(collectedBids.engineBid.value().executionPayloadValue, 10) - elif payloadBuilderClient.isNil: - discard # builder API not configured for this block - else: - info "Did not receive expected builder bid; using engine block", - engineBlockValue = collectedBids.engineBid.value().executionPayloadValue - else: - # Similar three cases: builder bid expected and absent, builder bid - # expected and present, and builder bid not expected. However, only - # the second is worth logging, because the other two result in this - # block being missed altogether, and with details logged elsewhere. - if collectedBids.builderBid.isSome: - info "Did not receive expected engine bid; using builder block", - builderBlockValue = - collectedBids.builderBid.value().executionPayloadValue - - if useBuilderBlock: - let - blindedBlock = (await blindedBlockCheckSlashingAndSign( - node, slot, validator, validator_index, - collectedBids.builderBid.value().blindedBlckPart)).valueOr: - return head - # Before proposeBlockMEV, can fall back to EL; after, cannot without - # risking slashing. - maybeUnblindedBlock = await proposeBlockMEV( - node, payloadBuilderClient, blindedBlock) - - return maybeUnblindedBlock.valueOr: - warn "Blinded block proposal incomplete", - head = shortLog(head), slot, validator_index, - validator = shortLog(validator), - err = maybeUnblindedBlock.error, - blindedBlck = shortLog(blindedBlock) - beacon_block_builder_missed_without_fallback.inc() - return head - - let engineBid = collectedBids.engineBid.value() - - withBlck(engineBid.blck): - let - blockRoot = hash_tree_root(forkyBlck) - signingRoot = compute_block_signing_root( - fork, genesis_validators_root, slot, blockRoot) - - notSlashable = node.attachedValidators - .slashingProtection - .registerBlock(validator_index, validator.pubkey, slot, signingRoot) - - if notSlashable.isErr: - warn "Slashing protection activated for block proposal", - blockRoot = shortLog(blockRoot), blck = shortLog(forkyBlck), - signingRoot = shortLog(signingRoot), - validator = validator.pubkey, - slot = slot, - existingProposal = notSlashable.error - return head - - let - signature = - block: - let res = await validator.getBlockSignature( - fork, genesis_validators_root, slot, blockRoot, engineBid.blck) - if res.isErr(): - warn "Unable to sign block", - validator = shortLog(validator), error_msg = res.error() - return head - res.get() - signedBlock = consensusFork.SignedBeaconBlock( - message: forkyBlck, signature: signature, root: blockRoot) - blobsOpt = - when consensusFork >= ConsensusFork.Deneb: - template blobsBundle: untyped = - engineBid.blobsBundleOpt.get - Opt.some(signedBlock.create_blob_sidecars( - blobsBundle.proofs, blobsBundle.blobs)) - else: - Opt.none(seq[BlobSidecar]) - newBlockRef = ( - await node.router.routeSignedBeaconBlock(signedBlock, blobsOpt, - checkValidator = false) - ).valueOr: - return head # Errors logged in router + Opt.none(seq[fulu.DataColumnSidecar]) + newBlockRef = await( + node.router.routeSignedBeaconBlock(signedBlock, blobsOpt, + columnsOpt, checkValidator = false) + ).valueOr: + # TODO Is this an error? 
+ beacon_block_production_errors.inc() + return head # Errors logged in router - if newBlockRef.isNone(): - return head # Validation errors logged in router + if newBlockRef.isNone(): + # TODO is this an error? + beacon_block_production_errors.inc() + return head # Validation errors logged in router - notice "Block proposed", - blockRoot = shortLog(blockRoot), blck = shortLog(forkyBlck), - signature = shortLog(signature), validator = shortLog(validator) + notice "Block proposed", + blockRoot = shortLog(blockRoot), + blck = shortLog(signedBlock.message), + signature = shortLog(signature), + validator = shortLog(validator) - beacon_blocks_proposed.inc() + beacon_blocks_proposed.inc() - return newBlockRef.get() + newBlockRef.get() proc proposeBlock( - node: BeaconNode, - validator: AttachedValidator, - validator_index: ValidatorIndex, - head: BlockRef, - slot: Slot + node: BeaconNode, validator: AttachedValidator, head: BlockRef, slot: Slot ): Future[BlockRef] {.async: (raises: [CancelledError]).} = - if head.slot >= slot: - # We should normally not have a head newer than the slot we're proposing for - # but this can happen if block proposal is delayed - warn "Skipping proposal, have newer head already", - headSlot = shortLog(head.slot), - headBlockRoot = shortLog(head.root), - slot = shortLog(slot) - return head - let fork = node.dag.forkAtEpoch(slot.epoch) genesis_validators_root = node.dag.genesis_validators_root - randao = block: - let res = await validator.getEpochSignature( - fork, genesis_validators_root, slot.epoch) - if res.isErr(): - warn "Unable to generate randao reveal", - validator = shortLog(validator), error_msg = res.error() - return head - res.get() - - template proposeBlockContinuation(type1, type2: untyped): auto = - await proposeBlockAux( - type1, type2, node, validator, validator_index, head, slot, randao, fork, - genesis_validators_root, node.config.localBlockValueBoost) + randao_reveal = ( + await validator.getEpochSignature(fork, genesis_validators_root, slot.epoch) + ).valueOr: + warn "Unable to generate randao reveal", + validator = shortLog(validator), error_msg = error + return head - return withConsensusFork(node.dag.cfg.consensusForkAtEpoch(slot.epoch)): - when consensusFork >= ConsensusFork.Deneb: - proposeBlockContinuation( - consensusFork.SignedBlindedBeaconBlock, - consensusFork.ExecutionPayloadForSigning) + withConsensusFork(node.dag.cfg.consensusForkAtEpoch(slot.epoch)): + when consensusFork >= ConsensusFork.Bellatrix: + await node.proposeBlockAux(consensusFork, validator, head, slot, randao_reveal) else: - # Pre-Deneb MEV is not supported; this signals that, because it triggers - # intentional SignedBlindedBeaconBlock/ExecutionPayload mismatches. 
- proposeBlockContinuation( - deneb_mev.SignedBlindedBeaconBlock, - max(ConsensusFork.Bellatrix, consensusFork).ExecutionPayloadForSigning) + warn "Block proposals for fork no longer supported", consensusFork + head proc sendAttestations(node: BeaconNode, head: BlockRef, slot: Slot) = ## Perform all attestations that the validators attached to this node should @@ -1613,6 +843,15 @@ proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot): ## that is supposed to do so, given the shuffling at that slot for the given ## head - to compute the proposer, we need to advance a state to the given ## slot + if head.slot >= slot: + # We should normally not have a head newer than the slot we're proposing for + # but this can happen if block proposal is delayed + warn "Skipping proposal, have newer head already", + headSlot = shortLog(head.slot), + headBlockRoot = shortLog(head.root), + slot = shortLog(slot) + return head + let proposer = node.dag.getProposer(head, slot).valueOr: return head @@ -1624,7 +863,7 @@ proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot): proposer = shortLog(proposerKey) return head - return await proposeBlock(node, validator, proposer, head, slot) + await proposeBlock(node, validator, head, slot) proc signAndSendAggregate( node: BeaconNode, validator: AttachedValidator, shufflingRef: ShufflingRef, @@ -1677,19 +916,6 @@ proc signAndSendAggregate( return signAndSendAggregatedAttestations() - else: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#construct-aggregate - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#aggregateandproof - var msg = phase0.SignedAggregateAndProof( - message: phase0.AggregateAndProof( - aggregator_index: distinctBase validator_index, - selection_proof: selectionProof)) - - msg.message.aggregate = node.attestationPool[].getPhase0AggregatedAttestation( - slot, committee_index).valueOr: - return - - signAndSendAggregatedAttestations() proc sendAggregatedAttestations( node: BeaconNode, head: BlockRef, slot: Slot) = @@ -1750,32 +976,32 @@ from std/times import epochTime proc getValidatorRegistration( node: BeaconNode, validator: AttachedValidator, epoch: Epoch): Future[Result[SignedValidatorRegistrationV1, string]] {.async: (raises: [CancelledError]).} = - let validatorIdx = validator.index.valueOr: + if validator.index.isNone: # The validator index will be missing when the validator was not # activated for duties yet. We can safely skip the registration then. 
return - let feeRecipient = node.getFeeRecipient(validator.pubkey, validatorIdx, epoch) - let gasLimit = node.getGasLimit(validator.pubkey) + let + feeRecipient = node.getFeeRecipient(validator.pubkey, validator.index, epoch) + gasLimit = node.getGasLimit(validator.pubkey) + var validatorRegistration = SignedValidatorRegistrationV1( message: ValidatorRegistrationV1( - fee_recipient: ExecutionAddress(data: distinctBase(feeRecipient)), + fee_recipient: feeRecipient, gas_limit: gasLimit, timestamp: epochTime().uint64, - pubkey: validator.pubkey)) - - let signature = await validator.getBuilderSignature( - node.dag.cfg.genesisFork, validatorRegistration.message) - - debug "getValidatorRegistration: registering", - validatorRegistration + pubkey: validator.pubkey, + ) + ) - if signature.isErr: - return err signature.error + debug "getValidatorRegistration: registering", validatorRegistration - validatorRegistration.signature = signature.get + validatorRegistration.signature = + ?await validator.getBuilderSignature( + node.dag.cfg.GENESIS_FORK_VERSION, validatorRegistration.message + ) - return ok validatorRegistration + ok validatorRegistration proc registerValidatorsPerBuilder( node: BeaconNode, payloadBuilderAddress: string, epoch: Epoch, @@ -2006,13 +1232,14 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async: (ra withState(node.dag.headState): node.updateValidators(forkyState.data.validators.asSeq()) - let newHead = await handleProposal(node, head, slot) - head = newHead - let - # The latest point in time when we'll be sending out attestations - attestationCutoff = node.beaconClock.fromNow(slot.attestation_deadline()) + timeConfig = node.dag.cfg.time + newHead = await handleProposal(node, head, slot) + head = newHead + # The latest point in time when we'll be sending out attestations + let attestationCutoff = node.beaconClock.fromNow( + slot.attestation_deadline(timeConfig)) if attestationCutoff.inFuture: debug "Waiting to send attestations", head = shortLog(head), @@ -2040,8 +1267,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async: (ra # the result in aggregates static: doAssert aggregateSlotOffset == syncContributionSlotOffset, "Timing change?" 
- let - aggregateCutoff = node.beaconClock.fromNow(slot.aggregate_deadline()) + let aggregateCutoff = node.beaconClock.fromNow( + slot.aggregate_deadline(timeConfig)) if aggregateCutoff.inFuture: debug "Waiting to send aggregate attestations", aggregateCutoff = shortLog(aggregateCutoff.offset) @@ -2095,83 +1322,3 @@ proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async: (raises: [Cancel node.consensusManager[].actionTracker.registerDuty( slot, subnet_id, validator_index, isAggregator) - -proc makeMaybeBlindedBeaconBlockForHeadAndSlotImpl[ResultType]( - node: BeaconNode, consensusFork: static ConsensusFork, - randao_reveal: ValidatorSig, graffiti: GraffitiBytes, - head: BlockRef, slot: Slot, - builderBoostFactor: uint64): Future[ResultType] {.async: (raises: [CancelledError]).} = - let - proposer = node.dag.getProposer(head, slot).valueOr: - return ResultType.err( - "Unable to get proposer for specific head and slot") - proposerKey = node.dag.validatorKey(proposer).get().toPubKey() - - payloadBuilderClient = - node.getPayloadBuilderClient(proposer.distinctBase).valueOr(nil) - - collectedBids = - await collectBids(consensusFork.SignedBlindedBeaconBlock, - consensusFork.ExecutionPayloadForSigning, - node, - payloadBuilderClient, proposerKey, - proposer, graffiti, head, slot, - randao_reveal) - useBuilderBlock = - if collectedBids.builderBid.isSome(): - collectedBids.engineBid.isNone() or builderBetterBid( - BoostFactor.init(builderBoostFactor), - collectedBids.builderBid.value().executionPayloadValue, - collectedBids.engineBid.value().executionPayloadValue) - else: - if not(collectedBids.engineBid.isSome): - return ResultType.err("Engine bid is not available") - false - - engineBid = block: - if useBuilderBlock: - let blindedBid = collectedBids.builderBid.value() - return ResultType.ok(( - blck: - consensusFork.MaybeBlindedBeaconBlock( - isBlinded: true, - blindedData: blindedBid.blindedBlckPart.message), - executionValue: Opt.some(blindedBid.executionPayloadValue), - consensusValue: Opt.some(blindedBid.consensusBlockValue))) - - collectedBids.engineBid.value() - - doAssert engineBid.blck.kind == consensusFork - template forkyBlck: untyped = engineBid.blck.forky(consensusFork) - when consensusFork >= ConsensusFork.Deneb: - let blobsBundle = engineBid.blobsBundleOpt.get() - doAssert blobsBundle.commitments == forkyBlck.body.blob_kzg_commitments - ResultType.ok(( - blck: consensusFork.MaybeBlindedBeaconBlock( - isBlinded: false, - data: consensusFork.BlockContents( - `block`: forkyBlck, - kzg_proofs: blobsBundle.proofs, - blobs: blobsBundle.blobs)), - executionValue: Opt.some(engineBid.executionPayloadValue), - consensusValue: Opt.some(engineBid.consensusBlockValue))) - else: - ResultType.ok(( - blck: consensusFork.MaybeBlindedBeaconBlock( - isBlinded: false, - data: forkyBlck), - executionValue: Opt.some(engineBid.executionPayloadValue), - consensusValue: Opt.some(engineBid.consensusBlockValue))) - -proc makeMaybeBlindedBeaconBlockForHeadAndSlot*( - node: BeaconNode, consensusFork: static ConsensusFork, - randao_reveal: ValidatorSig, graffiti: GraffitiBytes, - head: BlockRef, slot: Slot, builderBoostFactor: uint64): auto = - type ResultType = Result[tuple[ - blck: consensusFork.MaybeBlindedBeaconBlock, - executionValue: Opt[UInt256], - consensusValue: Opt[UInt256]], string] - - makeMaybeBlindedBeaconBlockForHeadAndSlotImpl[ResultType]( - node, consensusFork, randao_reveal, graffiti, head, slot, - builderBoostFactor) \ No newline at end of file diff --git 
a/beacon_chain/validators/block_payloads.nim b/beacon_chain/validators/block_payloads.nim new file mode 100644 index 0000000000..6c94556ad1 --- /dev/null +++ b/beacon_chain/validators/block_payloads.nim @@ -0,0 +1,729 @@ +# beacon_chain +# Copyright (c) 2018-2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +## Utilities and logic for getting an execution payload from either engine or +## builder and then creating a block from the best one. +## +## In general, we ask both engine and builder for a payload then compare how +## much each would pay, selecting the most profitable one to proceed with. +## +## Once we have a payload, a consensus block is constructed that gets applied +## to a state to check that it's correct and to compute the post-state-root +## which is part of the block. +## +## With the state root in hand, we can go on to either sign the block when +## running a beacon validator or pass it to the validator client that will sign +## and then pass it back. +## +## Either way, signing is out of scope for this module. +## + +# Implementation notes +# +# * Even though they are in theory redundant, we sometimes pass both +# `consensusFork` and fork-specific `Forky*` types - this makes spelling the +# return type slightly easier + +{.push raises: [], gcsafe.} + +import + chronicles, + results, + ../consensus_object_pools/[attestation_pool, consensus_manager], + ../spec/[forks, state_transition], + ../spec/mev/rest_mev_calls, + ../beacon_node + +from eth/async_utils import awaitWithTimeout +from ../spec/beaconstate import get_expected_withdrawals + +export results + +type + BuilderBidResult[BB: ForkyBuilderBid] = Result[BB, string] + + EngineBlock[BB: ForkyBeaconBlock] = object + blck*: BB + executionValue*: Wei + consensusValue*: UInt256 + blobsBundle*: fulu.BlobsBundle + + BuilderBlock[BBB: ForkyBlindedBeaconBlock] = object + blck*: BBB + executionValue*: Wei + consensusValue*: UInt256 + + EngineBlockResult[BB: ForkyBeaconBlock] = Result[EngineBlock[BB], string] + BuilderBlockResult[BBB: ForkyBlindedBeaconBlock] = Result[BuilderBlock[BBB], string] + + EngineBid*[EPS: ForkyExecutionPayloadForSigning] = object + eps*: EPS + execution_requests*: ExecutionRequests + + Bids[consensusFork: static ConsensusFork] = object + engineBid*: Opt[EngineBid[consensusFork.ExecutionPayloadForSigning]] + builderBid*: Opt[consensusFork.BuilderBid] + + BoostFactorKind {.pure.} = enum + Local + Builder + + BoostFactor* = object + case kind: BoostFactorKind + of BoostFactorKind.Local: + value8: uint8 + of BoostFactorKind.Builder: + value64: uint64 + +template toBlockContents( + engineBlock: EngineBlock, consensusFork: static ConsensusFork +): untyped = + when consensusFork >= ConsensusFork.Fulu: + consensusFork.BlockContents( + `block`: engineBlock.blck, + kzg_proofs: engineBlock.blobsBundle.proofs, + blobs: engineBlock.blobsBundle.blobs, + ) + elif consensusFork >= ConsensusFork.Deneb: + consensusFork.BlockContents( + `block`: engineBlock.blck, + kzg_proofs: deneb.KzgProofs(engineBlock.blobsBundle.proofs), + blobs: engineBlock.blobsBundle.blobs, + ) + else: + engineBlock.blck + +func init*(t: typedesc[BoostFactor], value: uint8): BoostFactor = + 
BoostFactor(kind: BoostFactorKind.Local, value8: value) + +func init*(t: typedesc[BoostFactor], value: uint64): BoostFactor = + BoostFactor(kind: BoostFactorKind.Builder, value64: value) + +func builderBetterBid( + localBlockValueBoost: uint8, builderValue: UInt256, engineValue: Wei +): bool = + # Scale down to ensure no overflows; if lower few bits would have been + # otherwise decisive, was close enough not to matter. Calibrate to let + # uint8-range percentages avoid overflowing. + const scalingBits = 10 + static: + doAssert 1 shl scalingBits > high(typeof(localBlockValueBoost)).uint16 + 100 + let + scaledBuilderValue = (builderValue shr scalingBits) * 100 + scaledEngineValue = engineValue shr scalingBits + scaledBuilderValue > scaledEngineValue * (localBlockValueBoost.uint16 + 100).u256 + +func builderBetterBid*( + builderBoostFactor: uint64, builderValue: UInt256, engineValue: Wei +): bool = + if builderBoostFactor == 0'u64: + false + elif builderBoostFactor == 100'u64: + builderValue >= engineValue + elif builderBoostFactor == high(uint64): + true + else: + let + multiplier = builderBoostFactor.u256 + multipledBuilderValue = builderValue * multiplier + overflow = + if builderValue == UInt256.zero: + false + else: + builderValue != multipledBuilderValue div multiplier + + if overflow: + # In case of overflow we will use `builderValue`. + true + else: + (multipledBuilderValue div 100) >= engineValue + +func builderBetterBid( + boostFactor: BoostFactor, builderValue: UInt256, engineValue: Wei +): bool = + case boostFactor.kind + of BoostFactorKind.Local: + builderBetterBid(boostFactor.value8, builderValue, engineValue) + of BoostFactorKind.Builder: + builderBetterBid(boostFactor.value64, builderValue, engineValue) + +func decodePayloadRequests( + _: + bellatrix.ExecutionPayloadForSigning | capella.ExecutionPayloadForSigning | + deneb.ExecutionPayloadForSigning +): Result[ExecutionRequests, string] = + ok default(ExecutionRequests) + +func decodePayloadRequests( + eps: electra.ExecutionPayloadForSigning | fulu.ExecutionPayloadForSigning +): Result[ExecutionRequests, string] = + try: + var + execution_requests_buffer: ExecutionRequests + prev_type: Opt[byte] + + # TODO why aren't these decoded already? 
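+      # Per EIP-7685, each element of `executionRequests` is expected to be a
+      # single request type byte followed by an SSZ-encoded list of requests
+      # of that type, with the type bytes strictly ascending and unique, e.g.
+      #   0x00 ++ SSZ(List[DepositRequest, ...])
+      #   0x01 ++ SSZ(List[WithdrawalRequest, ...])
+      #   0x02 ++ SSZ(List[ConsolidationRequest, ...])
+      # The loop below checks ordering and uniqueness before decoding each
+      # payload into the corresponding `ExecutionRequests` field.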
+ for request_type_and_payload in eps.executionRequests: + if request_type_and_payload.len < 2: + return err("Execution layer request too short") + + let request_type = request_type_and_payload[0] + if prev_type.isSome: + if request_type < prev_type.get: + return err("Execution layer request types not sorted") + if request_type == prev_type.get: + return err("Execution layer request types duplicated") + prev_type.ok request_type + + template request_payload(): untyped = + request_type_and_payload.toOpenArray(1, request_type_and_payload.len - 1) + + case request_type_and_payload[0] + of DEPOSIT_REQUEST_TYPE: + execution_requests_buffer.deposits = SSZ.decode( + request_payload, List[DepositRequest, Limit MAX_DEPOSIT_REQUESTS_PER_PAYLOAD] + ) + of WITHDRAWAL_REQUEST_TYPE: + execution_requests_buffer.withdrawals = SSZ.decode( + request_payload, + List[WithdrawalRequest, Limit MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD], + ) + of CONSOLIDATION_REQUEST_TYPE: + execution_requests_buffer.consolidations = SSZ.decode( + request_payload, + List[ConsolidationRequest, Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD], + ) + else: + return err("Execution layer invalid request type") + + ok execution_requests_buffer + except SerializationError: + err("Failed to deserialize execution requests") + +proc makeEngineBlock*( + node: BeaconNode, + consensusFork: static ConsensusFork, + state: var ForkyHashedBeaconState, + cache: var StateCache, + validator_index: ValidatorIndex, + randao_reveal: ValidatorSig, + graffiti: GraffitiBytes, + head: BlockRef, + slot: Slot, + eps: ForkyExecutionPayloadForSigning, + execution_requests: ExecutionRequests, +): EngineBlockResult[consensusFork.BeaconBlock] = + let + attestations = node.attestationPool[].getAttestationsForBlock(state, cache) + exits = node.validatorChangePool[].getBeaconBlockValidatorChanges( + node.dag.cfg, state.data + ) + sync_aggregate = node.syncCommitteeMsgPool[].produceSyncAggregate(head.bid, slot) + + blockAndRewards = makeBeaconBlockWithRewards( + node.dag.cfg, + consensusFork, + state, + cache, + validator_index, + randao_reveal, + Eth1Data(), + graffiti, + attestations, + @[], + exits, + sync_aggregate, + eps.executionPayload, + verificationFlags = {}, + eps.kzg_commitments, + execution_requests, + ).valueOr: + # This is almost certainly a bug, but it's complex enough that there's a + # small risk it might happen even when most proposals succeed - thus we + # log instead of asserting + warn "Cannot create block for proposal", + slot, head = shortLog(head), error = error + return err($error) + + template getFuluBlobsBundle(bb: fulu.BlobsBundle): + fulu.BlobsBundle {.used.} = + bb + template getFuluBlobsBundle(bb: deneb.BlobsBundle): + fulu.BlobsBundle {.used.} = + fulu.BlobsBundle( + commitments: bb.commitments, + proofs: fulu.KzgProofs(bb.proofs), + blobs: bb.blobs) + + ok EngineBlock[consensusFork.BeaconBlock]( + blck: blockAndRewards.blck, + executionValue: eps.blockValue, + consensusValue: blockAndRewards.rewards.blockConsensusValue(), + blobsBundle: + when consensusFork >= ConsensusFork.Deneb: + getFuluBlobsBundle(eps.blobsBundle) + else: + default(fulu.BlobsBundle), + ) + +proc getExecutionPayload*( + node: BeaconNode, + consensusFork: static ConsensusFork, + head: BlockRef, + proposalState: ref ForkedHashedBeaconState, + validator_index: ValidatorIndex, + validator_pubkey: ValidatorPubKey, +): Future[Opt[EngineBid[consensusFork.ExecutionPayloadForSigning]]] {. 
+ async: (raises: [CancelledError]) +.} = + # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/validator.md#executionpayload + + let + slot = withState(proposalState[]): + forkyState.data.slot + feeRecipient = node.consensusManager[].getFeeRecipient( + validator_pubkey, Opt.some(validator_index), slot.epoch + ) + beaconHead = node.attestationPool[].getBeaconHead(head) + executionHead = withState(proposalState[]): + when consensusFork >= ConsensusFork.Bellatrix and + consensusFork < ConsensusFork.Gloas: + forkyState.data.latest_execution_payload_header.block_hash + elif consensusFork >= ConsensusFork.Gloas: + forkyState.data.latest_execution_payload_bid.block_hash + else: + (static(default(Eth2Digest))) + latestSafe = beaconHead.safeExecutionBlockHash + latestFinalized = beaconHead.finalizedExecutionBlockHash + timestamp = withState(proposalState[]): + compute_timestamp_at_slot(forkyState.data, forkyState.data.slot) + prevRandao = withState(proposalState[]): + get_randao_mix(forkyState.data, get_current_epoch(forkyState.data)) + withdrawals = withState(proposalState[]): + when consensusFork >= ConsensusFork.Capella: + when consensusFork >= ConsensusFork.Gloas: + debugGloasComment "Extracting just the withdrawals from tuple" + get_expected_withdrawals(forkyState.data)[0] + else: + get_expected_withdrawals(forkyState.data) + else: + @[] + + # Block production happens rarely enough that we want to log request/response + # as they become ready + info "Requesting engine payload", + slot, + beaconHead = shortLog(beaconHead.blck), + executionHead = shortLog(executionHead), + latestSafe = shortLog(latestSafe), + latestFinalized = shortLog(latestFinalized), + feeRecipient = $feeRecipient + + type PayloadType = consensusFork.ExecutionPayloadForSigning + let + eps = ( + await node.elManager.getPayload( + PayloadType, beaconHead.blck.bid.root, executionHead, latestSafe, + latestFinalized, timestamp, prevRandao, feeRecipient, withdrawals, + ) + ).valueOr: + if not proposalState[].is_merge_transition_complete(): + # Pre-merge, an all-zeroes execution payload is used and there are no + # requests, so default is fine here + return Opt.some(static(default(EngineBid[PayloadType]))) + return Opt.none(EngineBid[PayloadType]) + + requests = decodePayloadRequests(eps).valueOr: + warn "Cannot decode payload requests from engine", slot, err = error + return Opt.none(EngineBid[PayloadType]) + + # TODO errors are logged in elmanager but unlike most other things, we want + # success log here for getting the payload since they are so rare - it + # would be nice to have a more structured approach to the logging here + info "Received engine payload", + slot, value = shortLog(eps.blockValue), payload = shortLog(eps.executionPayload) + + ok EngineBid[PayloadType](eps: eps, execution_requests: requests) + +proc getSignedBuilderBid( + payloadBuilderClient: RestClientRef, + SBB: type ForkySignedBuilderBid, + slot: Slot, + executionBlockHash: Eth2Digest, + pubkey: ValidatorPubKey, +): Future[Result[SBB, string]] {.async: (raises: [CancelledError]).} = + let response = + try: + await payloadBuilderClient.getHeader(slot, executionBlockHash, pubkey) + except RestDecodingError as exc: + return err("getSignedBuilderBid REST decoding error: " & exc.msg) + except RestError as exc: + return err("getSignedBuilderBid REST error: " & exc.msg) + + const httpOk = 200 + if response.status != httpOk: + return err "getSignedBuilderBid: HTTP error " & $response.status + + let res = decodeBytesJsonOrSsz( + 
DataVersionEnclosedObject[SBB], + response.data, + response.contentType, + response.headers.getString("eth-consensus-version"), + ).valueOr: + return err( + "Unable to decode blinded header: " & $error & " with HTTP status " & + $response.status & ", Content-Type " & $response.contentType & " and content " & + $response.data + ) + ok res.data + +proc getBuilderBid( + node: BeaconNode, + consensusFork: static ConsensusFork, + payloadBuilderClient: RestClientRef, + slot: Slot, + executionBlockHash: Eth2Digest, + pubkey: ValidatorPubKey, + expected_withdrawals_root: Eth2Digest, +): Future[BuilderBidResult[consensusFork.BuilderBid]] {. + async: (raises: [CancelledError]) +.} = + # Block production happens rarely enough that we want to log the request + info "Requesting builder bid", + slot, executionHead = shortLog(executionBlockHash), pubkey = shortLog(pubkey) + + let + sbbRes = awaitWithTimeout( + payloadBuilderClient.getSignedBuilderBid( + consensusFork.SignedBuilderBid, slot, executionBlockHash, pubkey + ), + BUILDER_PROPOSAL_DELAY_TOLERANCE, + ): + return err "Timeout obtaining blinded header from builder" + sbb = ?sbbRes + + if not verify_builder_signature( + node.dag.cfg.GENESIS_FORK_VERSION, sbb.message, sbb.message.pubkey, sbb.signature + ): + return err "Builder signature verification failed" + + info "Received builder bid", + slot, value = sbb.message.value, payload = shortLog(sbb.message.header) + + ok sbb.message + +proc getBuilderBid( + node: BeaconNode, + consensusFork: static ConsensusFork, + payloadBuilderClient: RestClientRef, + head: BlockRef, + slot: Slot, + pubkey: ValidatorPubKey, + expected_withdrawals_root: Eth2Digest, +): Future[BuilderBidResult[consensusFork.BuilderBid]] {. + async: (raises: [CancelledError]) +.} = + let executionBlockHash = node.dag.loadExecutionBlockHash(head).valueOr: + # With checkpoint sync, the checkpoint block may be unavailable, + # and it could already be the parent of the new block before backfill. + # Fallback to EL, hopefully the block is available on the local path. 
+ warn "Failed to load parent execution block hash, skipping block builder", + slot, head = shortLog(head) + return err("loadExecutionBlockHash failed") + + await node.getBuilderBid( + consensusFork, payloadBuilderClient, slot, executionBlockHash, pubkey, + expected_withdrawals_root, + ) + +proc makeBuilderBlock*( + node: BeaconNode, + consensusFork: static ConsensusFork, + state: var ForkyHashedBeaconState, + cache: var StateCache, + validator_index: ValidatorIndex, + randao_reveal: ValidatorSig, + graffiti: GraffitiBytes, + head: BlockRef, + slot: Slot, + builderBid: ForkyBuilderBid, +): BuilderBlockResult[consensusFork.BlindedBeaconBlock] = + let + attestations = node.attestationPool[].getAttestationsForBlock(state, cache) + exits = node.validatorChangePool[].getBeaconBlockValidatorChanges( + node.dag.cfg, state.data + ) + sync_aggregate = node.syncCommitteeMsgPool[].produceSyncAggregate(head.bid, slot) + + blockAndRewards = makeBeaconBlockWithRewards( + node.dag.cfg, + consensusFork, + state, + cache, + validator_index, + randao_reveal, + Eth1Data(), + graffiti, + attestations, + @[], + exits, + sync_aggregate, + builderBid.header, + verificationFlags = {}, + builderBid.blob_kzg_commitments, + builderBid.execution_requests, + ).valueOr: + # This is almost certainly a bug, but it's complex enough that there's a + # small risk it might happen even when most proposals succeed - thus we + # log instead of asserting + warn "Cannot create block for proposal", + slot, head = shortLog(head), error = error + return err($error) + + ok BuilderBlock[consensusFork.BlindedBeaconBlock]( + blck: blockAndRewards.blck, + executionValue: builderBid.value, + consensusValue: blockAndRewards.rewards.blockConsensusValue(), + ) + +func isExcludedTestnet(cfg: RuntimeConfig): bool = + ## Ensure that builder API testing can still occur in certain circumstances. + cfg.DEPOSIT_CHAIN_ID == cfg.DEPOSIT_NETWORK_ID and + cfg.DEPOSIT_CHAIN_ID in [17000'u64, 560048] # Holesky and Hoodi, respectively + +proc collectBids*( + node: BeaconNode, + consensusFork: static ConsensusFork, + payloadBuilderClient: RestClientRef, + validator_pubkey: ValidatorPubKey, + validator_index: ValidatorIndex, + head: BlockRef, + slot: Slot, + proposalState: ref ForkedHashedBeaconState, +): Future[Bids[consensusFork]] {.async: (raises: [CancelledError]).} = + type BB = consensusFork.BuilderBid + + let + usePayloadBuilder = + if not payloadBuilderClient.isNil: + withState(node.dag.headState): + # Head slot, not proposal slot, matters here + # TODO it might make some sense to allow use of builder API if local + # EL fails -- i.e. it would change priorities, so any block from the + # execution layer client would override builder API. But it seems an + # odd requirement to produce no block at all in those conditions. 
+ (node.dag.cfg.isExcludedTestnet) or ( + not livenessFailsafeInEffect( + forkyState.data.block_roots.data, forkyState.data.slot + ) + ) + else: + false + + builderBidFut = + if usePayloadBuilder: + debugGloasComment "handle different get_expected_withdrawals types" + let + withdrawals = List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]( + when consensusFork == ConsensusFork.Gloas: + get_expected_withdrawals( + proposalState[].forky(consensusFork).data)[0] + else: + get_expected_withdrawals( + proposalState[].forky(consensusFork).data) + ) + expected_withdrawals_root = hash_tree_root(withdrawals) + node.getBuilderBid( + consensusFork, payloadBuilderClient, head, slot, validator_pubkey, + expected_withdrawals_root, + ) + else: + nil + + enginePayloadFut = node.getExecutionPayload( + consensusFork, head, proposalState, validator_index, validator_pubkey + ) + + # getBuilderBid times out after BUILDER_PROPOSAL_DELAY_TOLERANCE, with 1 more + # second for remote validators. getExecutionPayload times out after + # 1 second. + let + builderBid = + if builderBidFut.isNil: + if not payloadBuilderClient.isNil: + notice "Liveness failsafe in effect, ignoring builder" + Opt.none(BB) + else: + let res = await builderBidFut + if res.isErr: + notice "Payload builder error", + slot, + head = shortLog(head), + validator = shortLog(validator_pubkey), + err = res.error + Opt.none(BB) + else: + Opt.some res[] + + enginePayload = await enginePayloadFut + + Bids[consensusFork](engineBid: enginePayload, builderBid: builderBid) + +proc useBuilderPayload*(bids: Bids, boostFactor: BoostFactor): bool = + bids.builderBid.isSome() and ( + bids.engineBid.isNone() or + builderBetterBid( + boostFactor, bids.builderBid.value().value, bids.engineBid.value().eps.blockValue + ) + ) + +proc makeMaybeBlindedBeaconBlockForHeadAndSlot*( + node: BeaconNode, + consensusFork: static ConsensusFork, + validator_index: ValidatorIndex, + randao_reveal: ValidatorSig, + graffiti: GraffitiBytes, + head: BlockRef, + slot: Slot, + builderBoostFactor: uint64, +): Future[ + Result[ + tuple[ + blck: consensusFork.MaybeBlindedBeaconBlock, + executionValue: UInt256, + consensusValue: UInt256, + ], + string, + ] +] {.async: (raises: [CancelledError]).} = + let + proposerKey = node.dag.validatorKey(validator_index).get().toPubKey() + + payloadBuilderClient = + node.getPayloadBuilderClient(validator_index.distinctBase).valueOr(nil) + + cache = new StateCache + state = node.dag.getProposalState(head, slot, cache[]).valueOr: + return err("Proposal state is not available") + + bids = await node.collectBids( + consensusFork, payloadBuilderClient, proposerKey, validator_index, head, slot, + state, + ) + + useBuilderPayload = bids.useBuilderPayload(BoostFactor.init(builderBoostFactor)) + + if payloadBuilderClient != nil: + info "Payload selected", + builderBoostFactor, + useBuilderPayload, + hasBuilderPayload = bids.builderBid.isSome(), + hasEnginePayload = bids.engineBid.isSome() + + if useBuilderPayload: + let builderBlock = node.makeBuilderBlock( + consensusFork, + state[].forky(consensusFork), + cache[], + validator_index, + randao_reveal, + graffiti, + head, + slot, + bids.builderBid.value(), + ).valueOr: + return err("Failed to create builder block") + + return ok( + ( + blck: consensusFork.MaybeBlindedBeaconBlock( + isBlinded: true, blindedData: builderBlock.blck + ), + executionValue: builderBlock.executionValue, + consensusValue: builderBlock.consensusValue, + ) + ) + + if bids.engineBid.isNone: + return err("Engine payload is not available") 
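+
+  # The builder bid was either unavailable or did not beat the engine payload,
+  # so assemble a full, non-blinded block from the locally produced engine
+  # payload.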
+ + let engineBlock = + ?node.makeEngineBlock( + consensusFork, + state[].forky(consensusFork), + cache[], + validator_index, + randao_reveal, + graffiti, + head, + slot, + bids.engineBid[].eps, + bids.engineBid[].execution_requests, + ) + + ok( + ( + blck: consensusFork.MaybeBlindedBeaconBlock( + isBlinded: false, data: engineBlock.toBlockContents(consensusFork) + ), + executionValue: engineBlock.executionValue, + consensusValue: engineBlock.consensusValue, + ) + ) + +proc makeBeaconBlockForHeadAndSlot*( + node: BeaconNode, + consensusFork: static ConsensusFork, + validator_index: ValidatorIndex, + randao_reveal: ValidatorSig, + graffiti: GraffitiBytes, + head: BlockRef, + slot: Slot, +): Future[ + Result[ + tuple[ + blck: consensusFork.BlockContents, + executionValue: UInt256, + consensusValue: UInt256, + ], + string, + ] +] {.async: (raises: [CancelledError]).} = + let + proposerKey = node.dag.validatorKey(validator_index).get().toPubKey() + cache = new StateCache + # TODO move the creation of this proposal state away from the hot path + state = node.dag.getProposalState(head, slot, cache[]).valueOr: + return err("Proposal state is not available") + enginePayload = ( + await node.getExecutionPayload( + consensusFork, head, state, validator_index, proposerKey + ) + ).valueOr: + return err("Engine payload is not available") + + let engineBlock = + ?node.makeEngineBlock( + consensusFork, + state[].forky(consensusFork), + cache[], + validator_index, + randao_reveal, + graffiti, + head, + slot, + enginePayload.eps, + enginePayload.execution_requests, + ) + + ok( + ( + blck: engineBlock.toBlockContents(consensusFork), + executionValue: engineBlock.executionValue, + consensusValue: engineBlock.consensusValue, + ) + ) diff --git a/beacon_chain/validators/keystore_management.nim b/beacon_chain/validators/keystore_management.nim index 22f3000f1b..bbe0b1134e 100644 --- a/beacon_chain/validators/keystore_management.nim +++ b/beacon_chain/validators/keystore_management.nim @@ -11,7 +11,7 @@ import std/[os, unicode, sequtils], chronicles, chronos, json_serialization, bearssl/rand, - serialization, blscurve, eth/common/eth_types, confutils, + serialization, blscurve, confutils, nimbus_security_resources, ".."/spec/[eth2_merkleization, keystore, crypto], ".."/spec/datatypes/base, @@ -1672,7 +1672,7 @@ proc generateDeposits*(cfg: RuntimeConfig, var derivedKey = baseKey defer: burnMem(derivedKey) derivedKey = deriveChildKey(derivedKey, validatorIdx) - derivedKey = deriveChildKey(derivedKey, 0) # This is witdrawal key + derivedKey = deriveChildKey(derivedKey, 0) # This is withdrawal key let withdrawalPubKey = derivedKey.toPubKey derivedKey = deriveChildKey(derivedKey, 0) # This is the signing key let signingPubKey = derivedKey.toPubKey @@ -1693,7 +1693,7 @@ proc generateDeposits*(cfg: RuntimeConfig, var derivedKey = baseKey defer: burnMem(derivedKey) derivedKey = deriveChildKey(derivedKey, validatorIdx) - derivedKey = deriveChildKey(derivedKey, 0) # This is witdrawal key + derivedKey = deriveChildKey(derivedKey, 0) # This is withdrawal key let withdrawalPubKey = derivedKey.toPubKey derivedKey = deriveChildKey(derivedKey, 0) # This is the signing key let signingPubKey = derivedKey.toPubKey diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 31c9fca380..e0c170274a 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at 
https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} import std/sequtils, @@ -84,8 +84,8 @@ template getCurrentBeaconTime(router: MessageRouter): BeaconTime = type RouteBlockResult = Result[Opt[BlockRef], string] proc routeSignedBeaconBlock*( router: ref MessageRouter, blck: ForkySignedBeaconBlock, - blobsOpt: Opt[seq[BlobSidecar]], checkValidator: bool): - Future[RouteBlockResult] {.async: (raises: [CancelledError]).} = + blobsOpt: Opt[seq[BlobSidecar]], dataColumnsOpt: Opt[seq[fulu.DataColumnSidecar]], + checkValidator: bool): Future[RouteBlockResult] {.async: (raises: [CancelledError]).} = ## Validate and broadcast beacon block, then add it to the block database ## Returns the new Head when block is added successfully to dag, none when ## block passes validation but is not added, and error otherwise @@ -112,7 +112,7 @@ proc routeSignedBeaconBlock*( signature = shortLog(blck.signature), error = res.error() return err($(res.error()[1])) - when typeof(blck).kind >= ConsensusFork.Deneb: + when typeof(blck).kind in [ConsensusFork.Deneb, ConsensusFork.Electra]: if blobsOpt.isSome: let blobs = blobsOpt.get() let kzgCommits = blck.message.body.blob_kzg_commitments.asSeq @@ -131,8 +131,9 @@ proc routeSignedBeaconBlock*( return err(res.error()) let + timeConfig = router.processor.dag.cfg.time sendTime = router[].getCurrentBeaconTime() - delay = sendTime - blck.message.slot.block_deadline() + delay = sendTime - blck.message.slot.block_deadline(timeConfig) # The block (and blobs, if present) passed basic gossip validation # - we can "safely" broadcast it now. In fact, per the spec, we # should broadcast it even if it later fails to apply to our @@ -152,28 +153,67 @@ proc routeSignedBeaconBlock*( blockRoot = shortLog(blck.root), blck = shortLog(blck.message), signature = shortLog(blck.signature), error = res.error() - var blobRefs = Opt.none(BlobSidecars) - if blobsOpt.isSome(): - let blobs = blobsOpt.get() - var workers = newSeq[Future[SendResult]](blobs.len) - for i in 0..= ConsensusFork.Fulu: + var sidecarOpt = Opt.none(fulu.DataColumnSidecars) + let dataColumns = dataColumnsOpt.get() + if dataColumnsOpt.isSome(): + var das_workers = + newSeq[Future[SendResult]](len(dataColumns)) + for i in 0..= ConsensusFork.Deneb: + when blindedBlock is electra_mev.SignedBlindedBeaconBlock: + if response.status != 200: + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/validator.md#proposer-slashing + # This means if a validator publishes a signature for a + # `BlindedBeaconBlock` (via a dissemination of a + # `SignedBlindedBeaconBlock`) then the validator **MUST** not use the + # local build process as a fallback, even in the event of some failure + # with the external builder network. 
+ return err("submitBlindedBlock failed with HTTP error code " & + $response.status & ": " & $shortLog(blindedBlock)) + + let + res = decodeBytesJsonOrSsz( + SubmitBlindedBlockResponseElectra, response.data, response.contentType, + response.headers.getString("eth-consensus-version")) + bundle = res.valueOr: + return err("Could not decode Electra blinded block: " & $res.error & + " with HTTP status " & $response.status & ", Content-Type " & + $response.contentType & " and content " & $response.data) + + template execution_payload: untyped = bundle.data.execution_payload + + if hash_tree_root(blindedBlock.message.body.execution_payload_header) != + hash_tree_root(execution_payload): + return err("unblinded payload doesn't match blinded payload header: " & + $blindedBlock.message.body.execution_payload_header) + + # Signature provided is consistent with unblinded execution payload, + # so construct full beacon block + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/validator.md#block-proposal + var signedBlock = electra.SignedBeaconBlock( + signature: blindedBlock.signature) + copyFields( + signedBlock.message, blindedBlock.message, + getFieldNames(typeof(signedBlock.message))) + copyFields( + signedBlock.message.body, blindedBlock.message.body, + getFieldNames(typeof(signedBlock.message.body))) + assign(signedBlock.message.body.execution_payload, execution_payload) + signedBlock.root = hash_tree_root(signedBlock.message) + doAssert signedBlock.root == hash_tree_root(blindedBlock.message) + + let blobsOpt = block: template blobs_bundle: untyped = bundle.data.blobs_bundle if blindedBlock.message.body.blob_kzg_commitments != bundle.data.blobs_bundle.commitments: @@ -143,22 +125,34 @@ proc unblindAndRouteBlockMEV*( return err("unblinded blobs bundle is invalid") Opt.some(signedBlock.create_blob_sidecars( blobs_bundle.proofs, blobs_bundle.blobs)) - else: - Opt.none(seq[BlobSidecar]) - debug "unblindAndRouteBlockMEV: proposing unblinded block", - blck = shortLog(signedBlock) + debug "unblindAndRouteBlockMEV: proposing unblinded block", + blck = shortLog(signedBlock) - let newBlockRef = - (await node.router.routeSignedBeaconBlock( - signedBlock, blobsOpt, checkValidator = false)).valueOr: - # submitBlindedBlock has run, so don't allow fallback to run - return err("routeSignedBeaconBlock error") # Errors logged in router + let newBlockRef = + (await node.router.routeSignedBeaconBlock( + signedBlock, blobsOpt, Opt.none(seq[fulu.DataColumnSidecar]), checkValidator = false)).valueOr: + # submitBlindedBlock has run, so don't allow fallback to run + return err("routeSignedBeaconBlock error") # Errors logged in router - if newBlockRef.isSome: - beacon_block_builder_proposed.inc() - notice "Block proposed (MEV)", - blockRoot = shortLog(signedBlock.root), blck = shortLog(signedBlock), - signature = shortLog(signedBlock.signature) + if newBlockRef.isSome: + beacon_block_builder_proposed.inc() + notice "Block proposed (MEV)", + blockRoot = shortLog(signedBlock.root), blck = shortLog(signedBlock), + signature = shortLog(signedBlock.signature) - ok newBlockRef \ No newline at end of file + ok newBlockRef + elif blindedBlock is fulu_mev.SignedBlindedBeaconBlock: + if response.status == 202: + ok(Opt.none(BlockRef)) + else: + # https://github.com/ethereum/builder-specs/blob/v0.5.0/specs/bellatrix/validator.md#proposer-slashing + # This means if a validator publishes a signature for a + # `BlindedBeaconBlock` (via a dissemination of a + # `SignedBlindedBeaconBlock`) then the validator 
**MUST** not use the + # local build process as a fallback, even in the event of some failure + # with the external builder network. + err("submitBlindedBlock failed with HTTP error code " & + $response.status & ": " & $shortLog(blindedBlock)) + else: + static: doAssert false diff --git a/beacon_chain/validators/slashing_protection.nim b/beacon_chain/validators/slashing_protection.nim index 8cc6a0a7ff..c902ecf7ce 100644 --- a/beacon_chain/validators/slashing_protection.nim +++ b/beacon_chain/validators/slashing_protection.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -115,8 +115,7 @@ proc init*( proc loadUnchecked*( T: type SlashingProtectionDB, - basePath, dbname: string, readOnly: bool - ): SlashingProtectionDB {.raises:[IOError].}= + basePath, dbname: string, readOnly: bool): SlashingProtectionDB = ## Load a slashing protection DB ## Note: This is for CLI tool usage ## this doesn't check the genesis validator root @@ -282,12 +281,10 @@ proc registerSyntheticAttestation*(db: SlashingProtectionDB, source, target: Epoch) = db.db_v2.registerSyntheticAttestation(validator, source, target) -proc inclSPDIR*(db: SlashingProtectionDB, spdir: SPDIR): SlashingImportStatus - {.raises: [SerializationError, IOError].} = +proc inclSPDIR*(db: SlashingProtectionDB, spdir: SPDIR): SlashingImportStatus = db.db_v2.inclSPDIR(spdir) -proc toSPDIR*(db: SlashingProtectionDB): SPDIR - {.raises: [IOError].} = +proc toSPDIR*(db: SlashingProtectionDB): SPDIR = db.db_v2.toSPDIR() proc exportSlashingInterchange*( diff --git a/beacon_chain/validators/slashing_protection_common.nim b/beacon_chain/validators/slashing_protection_common.nim index 729d4c426e..65ba68f8ee 100644 --- a/beacon_chain/validators/slashing_protection_common.nim +++ b/beacon_chain/validators/slashing_protection_common.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
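The `loadUnchecked`, `toSPDIR` and `inclSPDIR` signatures above drop their explicit `raises` annotations, which simplifies CLI-style tooling built on top of them. A minimal round-trip sketch under those signatures; the module path, database directory and database name used here are hypothetical, not taken from the diff:

```nim
import ./beacon_chain/validators/slashing_protection

let
  # loadUnchecked is meant for CLI tools: per its doc comment above it does
  # not check the genesis validators root.
  db = SlashingProtectionDB.loadUnchecked(
    "/var/lib/nimbus", "slashing_protection", readOnly = false)
  spdir = db.toSPDIR()           # export the full DB as interchange (SPDIR)
  status = db.inclSPDIR(spdir)   # re-import; yields a SlashingImportStatus

echo "import status: ", status
```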
@@ -203,7 +203,7 @@ func `==`*(a, b: BadProposal): bool = proc writeValue*( writer: var JsonWriter, value: PubKey0x) {.inline, raises: [IOError].} = - writer.writeValue("0x" & value.PubKeyBytes.toHex()) + writer.writeValue(value.PubKeyBytes.to0xHex()) proc readValue*(reader: var JsonReader, value: var PubKey0x) {.raises: [SerializationError, IOError].} = @@ -214,7 +214,7 @@ proc readValue*(reader: var JsonReader, value: var PubKey0x) proc writeValue*( w: var JsonWriter, a: Eth2Digest0x) {.inline, raises: [IOError].} = - w.writeValue "0x" & a.Eth2Digest.data.toHex() + w.writeValue a.Eth2Digest.data.to0xHex() proc readValue*(r: var JsonReader, a: var Eth2Digest0x) {.raises: [SerializationError, IOError].} = @@ -272,15 +272,13 @@ chronicles.formatIt EpochString: it.Slot.shortLog chronicles.formatIt Eth2Digest0x: it.Eth2Digest.shortLog chronicles.formatIt SPDIR_SignedBlock: it.shortLog chronicles.formatIt SPDIR_SignedAttestation: it.shortLog +chronicles.formatIt PubKey0x: it.PubKeyBytes.to0xHex # Interchange import # -------------------------------------------- proc importInterchangeV5Impl*( - db: auto, - spdir: var SPDIR - ): SlashingImportStatus - {.raises: [SerializationError, IOError].} = + db: auto, spdir: var SPDIR): SlashingImportStatus = ## Common implementation of interchange import ## according to https://eips.ethereum.org/EIPS/eip-3076 ## spdir needs to be `var` as it will be sorted in-place @@ -292,8 +290,7 @@ proc importInterchangeV5Impl*( let key = ValidatorPubKey.fromRaw(spdir.data[v].pubkey.PubKeyBytes) if key.isErr: # The bytes does not describe a valid encoding (length error) - error "Invalid public key.", - pubkey = "0x" & spdir.data[v].pubkey.PubKeyBytes.toHex() + error "Invalid public key.", pubkey = spdir.data[v].pubkey result = siPartial continue @@ -301,8 +298,7 @@ proc importInterchangeV5Impl*( # The bytes don't deserialize to a valid BLS G1 elliptic curve point. # Deserialization is costly but done only once per validator. # and SlashingDB import is a very rare event. 
- error "Invalid public key.", - pubkey = "0x" & spdir.data[v].pubkey.PubKeyBytes.toHex() + error "Invalid public key.", pubkey = spdir.data[v].pubkey result = siPartial continue diff --git a/beacon_chain/validators/slashing_protection_v2.nim b/beacon_chain/validators/slashing_protection_v2.nim index 6549ce7026..c33fea1b45 100644 --- a/beacon_chain/validators/slashing_protection_v2.nim +++ b/beacon_chain/validators/slashing_protection_v2.nim @@ -1381,8 +1381,7 @@ proc registerSyntheticAttestation*( let status = db.sqlCommitTransaction.exec() checkStatus() -proc toSPDIR*(db: SlashingProtectionDB_v2): SPDIR - {.raises: [IOError].} = +proc toSPDIR*(db: SlashingProtectionDB_v2): SPDIR = ## Export the full slashing protection database ## to a json the Slashing Protection Database Interchange (Complete) Format result.metadata.interchange_format_version = "5" @@ -1477,8 +1476,8 @@ proc toSPDIR*(db: SlashingProtectionDB_v2): SPDIR ) doAssert status.isOk() -proc inclSPDIR*(db: SlashingProtectionDB_v2, spdir: SPDIR): SlashingImportStatus - {.raises: [SerializationError, IOError].} = +proc inclSPDIR*(db: SlashingProtectionDB_v2, spdir: SPDIR): + SlashingImportStatus = ## Import a Slashing Protection Database Intermediate Representation ## file into the specified slashing protection DB ## diff --git a/beacon_chain/validators/validator_duties.nim b/beacon_chain/validators/validator_duties.nim index f7e821d354..1be1222ed5 100644 --- a/beacon_chain/validators/validator_duties.nim +++ b/beacon_chain/validators/validator_duties.nim @@ -72,11 +72,9 @@ proc waitAfterBlockCutoff*(clock: BeaconClock, slot: Slot, # delay. # Take into consideration chains with a different slot time - const afterBlockDelay = nanos(attestationSlotOffset.nanoseconds div 2) - let - afterBlockTime = clock.now() + afterBlockDelay - afterBlockCutoff = clock.fromNow( - min(afterBlockTime, slot.attestation_deadline() + afterBlockDelay)) + const extraDelay = nanos(attestationSlotOffset.nanoseconds div 2) + let afterBlockCutoff = clock.fromNow( + min(clock.now(), slot.attestation_deadline(clock.timeConfig)) + extraDelay) if afterBlockCutoff.inFuture: if head.isSome(): diff --git a/beacon_chain/validators/validator_monitor.nim b/beacon_chain/validators/validator_monitor.nim index 8488bab65a..dc18ebea78 100644 --- a/beacon_chain/validators/validator_monitor.nim +++ b/beacon_chain/validators/validator_monitor.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} import std/tables, @@ -194,6 +194,8 @@ type summaries: array[2, EpochSummary] # We monitor the current and previous epochs ValidatorMonitor* = object + timeConfig: TimeConfig + epoch: Epoch # The most recent epoch seen in monitoring monitors: Table[ValidatorPubKey, ref MonitoredValidator] @@ -257,8 +259,12 @@ proc addAutoMonitor*( info "Started monitoring validator", validator = shortLog(pubkey), pubkey, index -func init*(T: type ValidatorMonitor, autoRegister = false, totals = false): T = - T(autoRegister: autoRegister, totals: totals) +func init*( + T: type ValidatorMonitor, + timeConfig: TimeConfig, + autoRegister = false, + totals = false): T = + T(timeConfig: timeConfig, autoRegister: autoRegister, totals: totals) template summaryIdx(epoch: Epoch): int = (epoch.uint64 mod 2).int @@ -659,7 +665,7 @@ proc registerAttestation*( attestation: phase0.Attestation | SingleAttestation, idx: ValidatorIndex) = let slot = attestation.data.slot - delay = seen_timestamp - slot.attestation_deadline() + delay = seen_timestamp - slot.attestation_deadline(self.timeConfig) self.withMonitor(idx): let id = monitor.id @@ -684,7 +690,7 @@ proc registerAggregate*( attesting_indices: openArray[ValidatorIndex]) = let slot = aggregate_and_proof.aggregate.data.slot - delay = seen_timestamp - slot.aggregate_deadline() + delay = seen_timestamp - slot.aggregate_deadline(self.timeConfig) aggregator_index = aggregate_and_proof.aggregator_index self.withMonitor(aggregator_index): @@ -755,7 +761,7 @@ proc registerBeaconBlock*( let id = monitor.id slot = blck.slot - delay = seen_timestamp - slot.block_deadline() + delay = seen_timestamp - slot.block_deadline(self.timeConfig) validator_monitor_beacon_block.inc(1, [$src, metricId]) validator_monitor_beacon_block_delay_seconds.observe( @@ -774,7 +780,8 @@ proc registerSyncCommitteeMessage*( let id = monitor.id slot = sync_committee_message.slot - delay = seen_timestamp - slot.sync_committee_message_deadline() + delay = seen_timestamp - + slot.sync_committee_message_deadline(self.timeConfig) validator_monitor_sync_committee_messages.inc(1, [$src, metricId]) validator_monitor_sync_committee_messages_delay_seconds.observe( @@ -797,7 +804,7 @@ proc registerSyncContribution*( participants: openArray[ValidatorIndex]) = let slot = contribution_and_proof.contribution.slot - delay = seen_timestamp - slot.sync_contribution_deadline() + delay = seen_timestamp - slot.sync_contribution_deadline(self.timeConfig) let aggregator_index = contribution_and_proof.aggregator_index self.withMonitor(aggregator_index): diff --git a/beacon_chain/validators/validator_pool.nim b/beacon_chain/validators/validator_pool.nim index 246e2819bb..761b16b281 100644 --- a/beacon_chain/validators/validator_pool.nim +++ b/beacon_chain/validators/validator_pool.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
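`ValidatorMonitor` now carries the chain's `TimeConfig` so that the per-slot deadlines used below (`attestation_deadline`, `aggregate_deadline`, `block_deadline`, ...) can honour a non-default slot duration. A hedged construction sketch, assuming the config is reachable as `dag.cfg.time` as in the message-router change earlier in this diff (`dag` stands in for an initialised `ChainDAGRef`):

```nim
# Hypothetical wiring of the new mandatory timeConfig parameter.
let monitor = ValidatorMonitor.init(
  dag.cfg.time,        # new: the chain's TimeConfig
  autoRegister = true, # unchanged optional flags
  totals = false)
```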
-{.push raises: [].} +{.push raises: [], gcsafe.} import std/[tables, json, streams, sequtils, uri, sets], @@ -21,8 +21,7 @@ import ./slashing_protection export - streams, keystore, phase0, altair, tables, uri, crypto, - signatures.voluntary_exit_signature_fork, + streams, keystore, phase0, altair, tables, uri, crypto, signatures, rest_types, eth2_rest_serialization, rest_remote_signer_calls, slashing_protection @@ -535,294 +534,92 @@ proc signData(v: AttachedValidator, else: v.signWithDistributedKey(request) + +proc init(T: type Web3SignerForkedBeaconBlock, blck: ForkyBeaconBlock | ForkyBlindedBeaconBlock): Web3SignerForkedBeaconBlock = + Web3SignerForkedBeaconBlock(kind: typeof(blck).kind, data: blck.toBeaconBlockHeader()) + +proc forkIndex(prop: ProvenProperty, fork: static ConsensusFork): GeneralizedIndex = + when fork < ConsensusFork.Electra: + static: raiseAssert "Unsupported fork " & $fork + elif fork == ConsensusFork.Electra: + prop.electraIndex + elif fork == ConsensusFork.Fulu: + prop.fuluIndex + elif fork == ConsensusFork.Gloas: + prop.gloasIndex + else: + static: raiseAssert "Unknown fork " & $fork + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#signature proc getBlockSignature*(v: AttachedValidator, fork: Fork, - genesis_validators_root: Eth2Digest, slot: Slot, + genesis_validators_root: Eth2Digest, block_root: Eth2Digest, - blck: ForkedBeaconBlock | ForkedBlindedBeaconBlock | - ForkedMaybeBlindedBeaconBlock | - deneb_mev.BlindedBeaconBlock | - electra_mev.BlindedBeaconBlock | - fulu_mev.BlindedBeaconBlock + blck: ForkyBeaconBlock | ForkyBlindedBeaconBlock ): Future[SignatureResult] {.async: (raises: [CancelledError]).} = - type SomeBlockBody = - capella.BeaconBlockBody | - deneb.BeaconBlockBody | - deneb_mev.BlindedBeaconBlockBody | - electra.BeaconBlockBody | - electra_mev.BlindedBeaconBlockBody | - fulu.BeaconBlockBody | - fulu_mev.BlindedBeaconBlockBody - - template blockPropertiesProofs(blockBody: SomeBlockBody, - forkIndexField: untyped): seq[Web3SignerMerkleProof] = - var proofs: seq[Web3SignerMerkleProof] - for prop in v.data.provenBlockProperties: - if prop.forkIndexField.isSome: - let - idx = prop.forkIndexField.get - proofRes = build_proof(blockBody, idx) - if proofRes.isErr: - return err proofRes.error - proofs.add Web3SignerMerkleProof( - index: idx, - proof: proofRes.get) - proofs - case v.kind of ValidatorKind.Local: SignatureResult.ok( get_block_signature( - fork, genesis_validators_root, slot, block_root, + fork, genesis_validators_root, blck.slot, block_root, v.data.privateKey).toValidatorSig()) of ValidatorKind.Remote: - let web3signerRequest = - when blck is ForkedBlindedBeaconBlock: - case blck.kind - of ConsensusFork.Phase0 .. 
ConsensusFork.Capella: - return SignatureResult.err("Invalid blinded beacon block fork") - of ConsensusFork.Deneb: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Deneb, - data: blck.denebData.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = blockPropertiesProofs( - blck.denebData.body, denebIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Deneb, - data: blck.denebData.toBeaconBlockHeader), - proofs) - of ConsensusFork.Electra: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Electra, - data: blck.electraData.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = blockPropertiesProofs( - blck.electraData.body, electraIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Electra, - data: blck.electraData.toBeaconBlockHeader), - proofs) - of ConsensusFork.Fulu: + const consensusFork = typeof(blck).kind + when consensusFork >= ConsensusFork.Bellatrix: + let + fbb = Web3SignerForkedBeaconBlock.init(blck) + web3signerRequest = case v.data.remoteType of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Fulu, - data: blck.fuluData.toBeaconBlockHeader)) + Web3SignerRequest.init(fork, genesis_validators_root, fbb) of RemoteSignerType.VerifyingWeb3Signer: - let proofs = blockPropertiesProofs( - blck.fuluData.body, fuluIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Fulu, - data: blck.fuluData.toBeaconBlockHeader), - proofs) - elif blck is deneb_mev.BlindedBeaconBlock: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Deneb, - data: blck.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = blockPropertiesProofs( - blck.body, denebIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Deneb, - data: blck.toBeaconBlockHeader), - proofs) - elif blck is electra_mev.BlindedBeaconBlock: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Electra, - data: blck.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = blockPropertiesProofs( - blck.body, electraIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Electra, - data: blck.toBeaconBlockHeader), - proofs) - elif blck is fulu_mev.BlindedBeaconBlock: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Fulu, - data: blck.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = blockPropertiesProofs( - blck.body, fuluIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Fulu, - data: blck.toBeaconBlockHeader), - proofs) - elif blck is ForkedMaybeBlindedBeaconBlock: - withForkyMaybeBlindedBlck(blck): - # TODO why isn't this 
a case statement - when consensusFork < ConsensusFork.Capella: - return SignatureResult.err("Invalid beacon block fork") - elif consensusFork == ConsensusFork.Capella: - when isBlinded: - return SignatureResult.err("Invalid blinded beacon block fork") - else: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Capella, - data: forkyMaybeBlindedBlck.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = - blockPropertiesProofs(forkyMaybeBlindedBlck.body, - capellaIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Capella, - data: forkyMaybeBlindedBlck.toBeaconBlockHeader), - proofs) - elif consensusFork == ConsensusFork.Deneb: - when isBlinded: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Deneb, - data: forkyMaybeBlindedBlck.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = - blockPropertiesProofs(forkyMaybeBlindedBlck.body, - denebIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Deneb, - data: forkyMaybeBlindedBlck.toBeaconBlockHeader), proofs) - else: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Deneb, - data: forkyMaybeBlindedBlck.`block`.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = - blockPropertiesProofs(forkyMaybeBlindedBlck.`block`.body, - denebIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Deneb, - data: forkyMaybeBlindedBlck.`block`.toBeaconBlockHeader), - proofs) - elif consensusFork == ConsensusFork.Electra: - when isBlinded: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Electra, - data: forkyMaybeBlindedBlck.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = - blockPropertiesProofs(forkyMaybeBlindedBlck.body, - electraIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Electra, - data: forkyMaybeBlindedBlck.toBeaconBlockHeader), proofs) - else: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Electra, - data: forkyMaybeBlindedBlck.`block`.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = - blockPropertiesProofs(forkyMaybeBlindedBlck.`block`.body, - electraIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Electra, - data: forkyMaybeBlindedBlck.`block`.toBeaconBlockHeader), - proofs) - elif consensusFork == ConsensusFork.Fulu: - when isBlinded: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Fulu, - data: forkyMaybeBlindedBlck.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = - blockPropertiesProofs(forkyMaybeBlindedBlck.body, - fuluIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - 
Web3SignerForkedBeaconBlock(kind: ConsensusFork.Fulu, - data: forkyMaybeBlindedBlck.toBeaconBlockHeader), proofs) + when typeof(blck).kind >= ConsensusFork.Electra: + template blockPropertiesProofs(): seq[Web3SignerMerkleProof] = + var proofs: seq[Web3SignerMerkleProof] + + for prop in v.data.provenBlockProperties: + let idx = prop.forkIndex(typeof(blck).kind) + proofs.add Web3SignerMerkleProof( + index: idx, + proof: ?build_proof(blck.body, idx) + ) + + proofs + + Web3SignerRequest.init( + fork, genesis_validators_root, fbb, blockPropertiesProofs()) else: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Fulu, - data: forkyMaybeBlindedBlck.`block`.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = - blockPropertiesProofs(forkyMaybeBlindedBlck.`block`.body, - fuluIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Fulu, - data: forkyMaybeBlindedBlck.`block`.toBeaconBlockHeader), - proofs) - else: - case blck.kind - of ConsensusFork.Phase0 .. ConsensusFork.Bellatrix: - return SignatureResult.err("Invalid beacon block fork") - of ConsensusFork.Capella: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Capella, - data: blck.capellaData.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = blockPropertiesProofs( - blck.capellaData.body, capellaIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Capella, - data: blck.capellaData.toBeaconBlockHeader), - proofs) - of ConsensusFork.Deneb: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Deneb, - data: blck.denebData.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = blockPropertiesProofs( - blck.denebData.body, denebIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Deneb, - data: blck.denebData.toBeaconBlockHeader), - proofs) - of ConsensusFork.Electra: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Electra, - data: blck.electraData.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = blockPropertiesProofs( - blck.electraData.body, electraIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Electra, - data: blck.electraData.toBeaconBlockHeader), - proofs) - of ConsensusFork.Fulu: - case v.data.remoteType - of RemoteSignerType.Web3Signer: - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Fulu, - data: blck.fuluData.toBeaconBlockHeader)) - of RemoteSignerType.VerifyingWeb3Signer: - let proofs = blockPropertiesProofs( - blck.fuluData.body, fuluIndex) - Web3SignerRequest.init(fork, genesis_validators_root, - Web3SignerForkedBeaconBlock(kind: ConsensusFork.Fulu, - data: blck.fuluData.toBeaconBlockHeader), - proofs) - await v.signData(web3signerRequest) + return err("Unsupported fork for verifying Web3Signer: " & $typeof(blck).kind) + + await v.signData(web3signerRequest) + else: + return 
err("Unsupported fork for Web3Signer: " & $consensusFork) + +proc getBlockSignature*(v: AttachedValidator, fork: Fork, + genesis_validators_root: Eth2Digest, + block_root: Eth2Digest, + blck: ForkedBeaconBlock + ): Future[SignatureResult] + {.async: (raises: [CancelledError], raw: true).} = + withBlck(blck): + getBlockSignature(v, fork, genesis_validators_root, block_root, forkyBlck) + +proc getBlockSignature*(v: AttachedValidator, fork: Fork, + genesis_validators_root: Eth2Digest, + block_root: Eth2Digest, + blck: ForkyBlockContents + ): Future[SignatureResult] + {.async: (raises: [CancelledError], raw: true).} = + v.getBlockSignature(fork, genesis_validators_root, block_root, blck.`block`) + +proc getBlockSignature*(v: AttachedValidator, fork: Fork, + genesis_validators_root: Eth2Digest, + block_root: Eth2Digest, + blck: ForkedMaybeBlindedBeaconBlock + ): Future[SignatureResult] + {.async: (raises: [CancelledError], raw: true).} = + withForkyMaybeBlindedBlck(blck): + v.getBlockSignature(fork, genesis_validators_root, block_root, forkyMaybeBlindedBlck) # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#aggregate-signature proc getAttestationSignature*(v: AttachedValidator, fork: Fork, @@ -1005,14 +802,13 @@ proc getDepositMessageSignature*(v: AttachedValidator, version: Version, await v.signData(request) # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#signing -proc getBuilderSignature*(v: AttachedValidator, fork: Fork, +proc getBuilderSignature*(v: AttachedValidator, genesis_fork_version: Version, validatorRegistration: ValidatorRegistrationV1): Future[SignatureResult] {.async: (raises: [CancelledError]).} = case v.kind of ValidatorKind.Local: SignatureResult.ok(get_builder_signature( - fork, validatorRegistration, v.data.privateKey).toValidatorSig()) + genesis_fork_version, validatorRegistration, v.data.privateKey).toValidatorSig()) of ValidatorKind.Remote: - let request = Web3SignerRequest.init( - fork, ZERO_HASH, validatorRegistration) + let request = Web3SignerRequest.init(ZERO_HASH, validatorRegistration) await v.signData(request) diff --git a/beacon_chain/version.nim b/beacon_chain/version.nim index 54d97287bc..4e74342085 100644 --- a/beacon_chain/version.nim +++ b/beacon_chain/version.nim @@ -5,46 +5,22 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} ## This module implements the version tagging details of all binaries included ## in the Nimbus release process (i.e. beacon_node, validator_client, etc) -import std/[strutils, compilesettings] +import std/[os, strutils], ./buildinfo const - compileYear = CompileDate[0 ..< 4] # YYYY-MM-DD (UTC) - copyrights* = - "Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH" - versionMajor* = 25 - versionMinor* = 3 - versionBuild* = 1 + versionMinor* = 9 + versionBuild* = 2 versionBlob* = "stateofus" # Single word - ends up in the default graffiti - ## You can override this if you are building the - ## sources outside the git tree of Nimbus: - git_revision_override* {.strdefine.} = - when querySetting(SingleValueSetting.command) == "check": - # The staticExec call below returns an empty string - # when `nim check` is used and this leads to a faux - # compile-time error. 
- # We work-around the problem with this override and - # save some time in executing the external command. - "123456" - else: - "" - - gitRevisionLong* = when git_revision_override.len == 0: - staticExec "git rev-parse --short HEAD" - else: - git_revision_override - - gitRevision* = strip(gitRevisionLong)[0..5] - - nimFullBanner* = staticExec("nim --version") - nimBanner* = staticExec("nim --version | grep Version") + sourcePath = currentSourcePath.rsplit({DirSep, AltSep}, 1)[0] + gitRevision* = strip(generateGitRevision(sourcePath))[0..5] versionAsStr* = $versionMajor & "." & $versionMinor & "." & $versionBuild @@ -53,19 +29,7 @@ const nimbusAgentStr* = "Nimbus/" & fullVersionStr -func getNimGitHash*(): string = - const gitPrefix = "git hash: " - let tmp = splitLines(nimFullBanner) - if tmp.len == 0: - return - for line in tmp: - if line.startsWith(gitPrefix) and line.len > 8 + gitPrefix.len: - result = line[gitPrefix.len.. 0: - tmp[0] & " (" & gitHash & ")" - else: - tmp[0] +when not defined(nimscript): + import metrics + declareGauge versionGauge, "Nimbus version info (as metric labels)", ["version", "commit"], name = "version" + versionGauge.set(1, labelValues=[fullVersionStr, gitRevision]) diff --git a/beacon_chain/winservice.nim b/beacon_chain/winservice.nim index e5a131f9ee..3208ea1ccf 100644 --- a/beacon_chain/winservice.nim +++ b/beacon_chain/winservice.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
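Relatedly, `getBuilderSignature` in validator_pool.nim (earlier in this diff) now takes the genesis fork version rather than a `Fork` object, matching the builder-specs signing domain; the remote-signer request correspondingly uses `ZERO_HASH` as genesis validators root. A hedged call-site sketch, assuming a runtime config is available as `cfg` and a `validatorRegistration` has already been built:

```nim
# Builder registrations are signed over GENESIS_FORK_VERSION, not the
# current fork, so the caller passes that version directly.
let res = await validator.getBuilderSignature(
  cfg.GENESIS_FORK_VERSION, validatorRegistration)
```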
@@ -12,7 +12,7 @@ when defined(windows): import results, chronicles import chronos/[osdefs, osutils, oserrno] - import ./conf_common + import ./nimbus_binary_common type SERVICE_STATUS* {.final, pure.} = object @@ -106,10 +106,8 @@ when defined(windows): proc reportServiceStatusSuccess*() = reportServiceStatus(SERVICE_RUNNING, NO_ERROR, 0) - template establishWindowsService*(argClientId, - argCopyrights, - argNimBanner, - argSpecVersion, + template establishWindowsService*(argHelpBanner, argCopyright: string, + argVersions: openArray[string], argServiceName: string, argConfigType: untyped, argEntryPoint: untyped, @@ -150,12 +148,13 @@ when defined(windows): reportServiceStatus(SERVICE_STOPPED, ERROR_INVALID_PARAMETER, 0) quit QuitFailure - var config = makeBannerAndConfig(argClientId, argCopyrights, - argNimBanner, argSpecVersion, - environment, argConfigType).valueOr: + var config = loadWithBanners(argConfigType, argHelpBanner, argCopyright, + argVersions, false, environment).valueOr: reportServiceStatus(SERVICE_STOPPED, ERROR_BAD_CONFIGURATION, 0) quit QuitFailure + setupLogging(config.logLevel, config.logStdout, config.logFile) + try: argEntryPoint(config) info "Service thread stopped" diff --git a/benchmarks/rest_api_benchmark.nim.cfg b/benchmarks/rest_api_benchmark.nim.cfg index 9fd135f695..565f5c1619 100644 --- a/benchmarks/rest_api_benchmark.nim.cfg +++ b/benchmarks/rest_api_benchmark.nim.cfg @@ -1,2 +1,2 @@ -d:"chronicles_runtime_filtering=on" --d:"chronicles_disable_thread_id" +-d:"chronicles_thread_ids=no" diff --git a/ci/Jenkinsfile b/ci/Jenkinsfile index 31dc700925..b5ab2b5ecf 100644 --- a/ci/Jenkinsfile +++ b/ci/Jenkinsfile @@ -31,6 +31,7 @@ pipeline { } options { + disableRestartFromStage() timestamps() ansiColor('xterm') /* This also includes wait time in the queue. */ @@ -58,6 +59,7 @@ pipeline { environment { NPROC = Runtime.getRuntime().availableProcessors() MAKEFLAGS = "V=${params.VERBOSITY} NIM_COMMIT=${params.NIM_COMMIT} -j${env.NPROC}" + XDG_CACHE_HOME = "${env.WORKSPACE_TMP}/.cache" } stages { diff --git a/ci/Jenkinsfile.benchmarks b/ci/Jenkinsfile.benchmarks deleted file mode 100644 index a2649bdc95..0000000000 --- a/ci/Jenkinsfile.benchmarks +++ /dev/null @@ -1,52 +0,0 @@ -/* beacon_chain - * Copyright (c) 2022-2025 Status Research & Development GmbH - * Licensed and distributed under either of - * * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). - * * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). - * at your option. This file may not be copied, modified, or distributed except according to those terms. - */ - -// https://stackoverflow.com/questions/40760716/jenkins-abort-running-build-if-new-one-is-started -// We should only abort older jobs in PR branches, so we have a nice CI history in "master" and "devel". 
-if (env.BRANCH_NAME != "master" && env.BRANCH_NAME != "devel") { - def buildNumber = env.BUILD_NUMBER as int - if (buildNumber > 1) { - milestone(buildNumber - 1) - } - milestone(buildNumber) -} - -node("metal") { - withEnv(["NPROC=${sh(returnStdout: true, script: 'nproc').trim()}"]) { - try { - stage("Clone") { - /* source code checkout */ - checkout scm - /* we need to update the submodules before caching kicks in */ - sh "git -c lfs.fetchexclude=/public-keys/all.txt,/metadata/genesis.ssz,/parsed/parsedConsensusGenesis.json submodule update --init --recursive" - } - - stage("Build") { - sh """#!/bin/bash - set -e - make -j${env.NPROC} update # to allow a newer Nim version to be detected - """ - } - - stage("Benchmark") { - sh """#!/bin/bash - set -e - git clone https://github.com/status-im/nimbus-benchmarking.git - ./nimbus-benchmarking/run_nbc_benchmarks.sh - """ - benchmark(altInputSchema: "", altInputSchemaLocation: "", inputLocation: "results/*/result.json", schemaSelection: "defaultSchema", truncateStrings: true) - } - } catch(e) { - // we need to rethrow the exception here - throw e - } finally { - // clean the workspace - cleanWs(disableDeferredWipeout: true, deleteDirs: true) - } - } -} diff --git a/ci/Jenkinsfile.linux b/ci/Jenkinsfile.linux new file mode 100644 index 0000000000..b07131e2be --- /dev/null +++ b/ci/Jenkinsfile.linux @@ -0,0 +1,189 @@ +#!/usr/bin/env groovy +/* beacon_chain + * Copyright (c) 2025 Status Research & Development GmbH + * Licensed and distributed under either of + * * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). + * * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). + * at your option. This file may not be copied, modified, or distributed except according to those terms. + */ +library 'status-jenkins-lib@v1.9.2' + +pipeline { + agent { + dockerfile { + label 'linuxcontainer' + filename 'linux.Dockerfile' + dir 'ci' + } + } + + parameters { + choice( + name: 'VERBOSITY', + description: 'Value for the V make flag to increase log verbosity', + choices: [0, 1, 2] + ) + string( + name: 'NIM_COMMIT', + description: 'Value for the NIM_COMMIT make flag to choose Nim commit', + defaultValue: nimCommitForJob(), + ) + } + + options { + disableRestartFromStage() + timestamps() + ansiColor('xterm') + /* This also includes wait time in the queue. */ + timeout(time: 24, unit: 'HOURS') + /* Limit builds retained. */ + buildDiscarder(logRotator( + numToKeepStr: '5', + daysToKeepStr: '30', + artifactNumToKeepStr: '3', + )) + /* Throttle number of concurrent builds. */ + throttleJobProperty( + throttleEnabled: true, + throttleOption: 'category', + categories: ['nimbus-eth2'], + maxConcurrentPerNode: 1, + maxConcurrentTotal: 9 + ) + /* Abort old builds for non-main branches. */ + disableConcurrentBuilds( + abortPrevious: !isMainBranch() + ) + } + + environment { + NPROC = Runtime.getRuntime().availableProcessors() + MAKEFLAGS = "V=${params.VERBOSITY} NIM_COMMIT=${params.NIM_COMMIT} -j${env.NPROC}" + XDG_CACHE_HOME = "${env.WORKSPACE_TMP}/.cache" + } + + stages { + stage('Deps') { + steps { + timeout(20) { + script { + /* To allow the following parallel stages. */ + sh 'make QUICK_AND_DIRTY_COMPILER=1 update' + /* Allow the following parallel stages. */ + sh 'make deps' + /* Download test vectors. 
*/ + sh './scripts/setup_scenarios.sh' + } + } + } + } + + stage('Build') { + steps { + timeout(50) { + script { + sh 'make LOG_LEVEL=TRACE' + } + } + } + } + + stage('Check Docs') { + steps { + script { + sh './scripts/check_docs_help_msg.sh' + } + } + } + + stage('Tests') { + parallel { + stage('General') { + steps { + timeout(60) { + script { + sh 'make DISABLE_TEST_FIXTURES_SCRIPT=1 NIMFLAGS="--passC:\"-fno-lto\" --passL:\"-fno-lto\"" test' + sh 'git diff --exit-code --ignore-submodules=all' + } + } + } + } + + stage('REST') { + steps { + timeout(5) { + script { + sh 'make restapi-test' + } + } + } + post { always { + sh 'tar cjf restapi-test.tar.gz resttest0_data/*.txt' + } } + } + } + post { always { timeout(5) { + archiveArtifacts(artifacts: '*.tar.gz', allowEmptyArchive: true) + } } } + } + + stage('Finalizations') { + stages { /* parallel builds of minimal / mainnet not yet supported */ + stage('minimal') { + steps { + timeout(26) { + script { + sh 'make local-testnet-minimal' + } + } + } + post { always { + sh 'tar cjf local-testnet-minimal.tar.gz local-testnet-minimal/logs/*' + } } + } + + stage('mainnet') { + steps { + timeout(62) { + script { + sh 'make local-testnet-mainnet' + } + } + } + post { always { + sh 'tar cjf local-testnet-mainnet.tar.gz local-testnet-mainnet/logs/*' + } } + } + } + post { + always { timeout(10) { + /* DEBUG: Show file sizes to catch too big ones. */ + sh 'ls -hl *.tar.gz' + archiveArtifacts( + artifacts: '*.tar.gz', + excludes: '**/geth-*.tar.gz', /* `scripts/geth_binaries.sh` */ + allowEmptyArchive: true + ) + } } + } + } + } + + post { + always { + cleanWs( + disableDeferredWipeout: true, + deleteDirs: true + ) + dir("${env.WORKSPACE}@tmp") { deleteDir() } + } + } +} + +def isMainBranch() { + return ['stable', 'testing', 'unstable'].contains(env.BRANCH_NAME) +} + +def nimCommitForJob() { + return JOB_NAME.contains('nimv2_2') ? 'upstream/version-2-2' : '' +} diff --git a/ci/Jenkinsfile.macos b/ci/Jenkinsfile.macos new file mode 100644 index 0000000000..8fbd885e3e --- /dev/null +++ b/ci/Jenkinsfile.macos @@ -0,0 +1,173 @@ +#!/usr/bin/env groovy +/* beacon_chain + * Copyright (c) 2019-2025 Status Research & Development GmbH + * Licensed and distributed under either of + * * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). + * * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). + * at your option. This file may not be copied, modified, or distributed except according to those terms. + */ +library 'status-jenkins-lib@v1.9.2' + +pipeline { + /* This way we run the same Jenkinsfile on different platforms. */ + agent { label 'macos && aarch64' } + + parameters { + choice( + name: 'VERBOSITY', + description: 'Value for the V make flag to increase log verbosity', + choices: [0, 1, 2] + ) + string( + name: 'NIM_COMMIT', + description: 'Value for the NIM_COMMIT make flag to choose Nim commit', + defaultValue: nimCommitForJob(), + ) + } + + options { + disableRestartFromStage() + timestamps() + ansiColor('xterm') + /* This also includes wait time in the queue. */ + timeout(time: 24, unit: 'HOURS') + /* Limit builds retained. */ + buildDiscarder(logRotator( + numToKeepStr: '5', + daysToKeepStr: '30', + artifactNumToKeepStr: '3', + )) + /* Throttle number of concurrent builds. 
*/ + throttleJobProperty( + throttleEnabled: true, + throttleOption: 'category', + categories: ['nimbus-eth2'], + maxConcurrentPerNode: 1, + maxConcurrentTotal: 9 + ) + /* Abort old builds for non-main branches. */ + disableConcurrentBuilds( + abortPrevious: !isMainBranch() + ) + } + + environment { + NPROC = Runtime.getRuntime().availableProcessors() + MAKEFLAGS = "V=${params.VERBOSITY} NIM_COMMIT=${params.NIM_COMMIT} -j${env.NPROC}" + XDG_CACHE_HOME = "${env.WORKSPACE_TMP}/.cache" + } + + stages { + stage('Setup') { + steps { script { + def brew_prefix = brew.prefix() + /* Explicit PATH to avoid using HomeBrew LLVM. */ + env.PATH = "/usr/local/bin:/usr/sbin:/usr/bin:/bin:${brew_prefix}/bin" + /* Newer Clang 18.0 from Homebrew on macOS, XCode provides 15.0. + * Temp fix for BLST issue: https://github.com/supranational/blst/issues/209 */ + if (utils.arch() == 'arm64') { + env.PATH = "${brew_prefix}/opt/llvm/bin:$PATH" + env.LDFLAGS = "-L${brew_prefix}/opt/llvm/lib" + env.CPPFLAGS = "-I${brew_prefix}/opt/llvm/include" + } + } } + } + + stage('Deps') { + steps { timeout(20) { + /* To allow the following parallel stages. */ + sh 'make QUICK_AND_DIRTY_COMPILER=1 update' + /* Allow the following parallel stages. */ + sh 'make deps' + /* Download test vectors. */ + sh './scripts/setup_scenarios.sh' + } } + } + + stage('Build') { + steps { timeout(50) { + sh 'make LOG_LEVEL=TRACE' + } } + } + + stage('Check Docs') { + steps { + sh './scripts/check_docs_help_msg.sh' + } + } + + stage('Tests') { + parallel { + stage('General') { + steps { timeout(60) { + sh 'make DISABLE_TEST_FIXTURES_SCRIPT=1 test' + sh 'git diff --exit-code --ignore-submodules=all' /* Check no uncommitted changes. */ + } } + } + + stage('REST') { + steps { timeout(5) { + sh 'make restapi-test' + } } + post { always { + sh 'tar cjf restapi-test.tar.gz resttest0_data/*.txt' + } } + } + } + post { always { timeout(5) { + archiveArtifacts(artifacts: '*.tar.gz', allowEmptyArchive: true) + } } } + } + + stage('Finalizations') { + stages { /* parallel builds of minimal / mainnet not yet supported */ + stage('minimal') { + steps { timeout(26) { + sh 'make local-testnet-minimal' + } } + post { always { + sh 'tar cjf local-testnet-minimal.tar.gz local-testnet-minimal/logs/*' + } } + } + + stage('mainnet') { + steps { timeout(62) { + sh 'make local-testnet-mainnet' + } } + post { always { + sh 'tar cjf local-testnet-mainnet.tar.gz local-testnet-mainnet/logs/*' + } } + } + } + post { + always { timeout(10) { + /* DEBUG: Show file sizes to catch too big ones. */ + sh 'ls -hl *.tar.gz' + archiveArtifacts( + artifacts: '*.tar.gz', + excludes: '**/geth-*.tar.gz', /* `scripts/geth_binaries.sh` */ + allowEmptyArchive: true + ) + } } + } + } + } + + post { + always { + cleanWs( + disableDeferredWipeout: true, + deleteDirs: true + ) + dir("${env.WORKSPACE}@tmp") { deleteDir() } + } + } +} + +def isMainBranch() { + return ['stable', 'testing', 'unstable'].contains(env.BRANCH_NAME) +} + +def nimCommitForJob() { + return JOB_NAME.contains('nimv2_2') ? 'upstream/version-2-2' : '' +} diff --git a/ci/linux.Dockerfile b/ci/linux.Dockerfile new file mode 100644 index 0000000000..4a5984613c --- /dev/null +++ b/ci/linux.Dockerfile @@ -0,0 +1,46 @@ +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. 
This file may not be copied, modified, or distributed except +# according to those terms. + +FROM harbor.status.im/infra/ci-build-containers:linux-base-1.0.0 +USER root + +RUN apt-get update && apt-get install -yq --no-install-recommends \ + figlet \ + git \ + git-lfs \ + make \ + openssl \ + lsof \ + psmisc \ + procps \ + curl \ + jq \ + openjdk-17-jre-headless \ + python3 \ + python3-pip \ + python3-venv \ + gcc-11 \ + g++-11 \ + lsb-release \ + && rm -rf /var/lib/apt/lists/* + +RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-11 100 \ + && update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-11 100 + +RUN ln -sf /usr/bin/gcc /usr/bin/cc \ + && ln -sf /usr/bin/g++ /usr/bin/c++ + +RUN pip3 install --no-cache-dir --break-system-packages \ + mkdocs \ + mkdocs-material \ + mkdocs-material-extensions \ + pymdown-extensions + +USER jenkins +ENTRYPOINT [""] \ No newline at end of file diff --git a/ci/nix.Jenkinsfile b/ci/nix.Jenkinsfile index 1c8d904cc5..b7e28913dd 100644 --- a/ci/nix.Jenkinsfile +++ b/ci/nix.Jenkinsfile @@ -1,6 +1,6 @@ #!/usr/bin/env groovy /* beacon_chain - * Copyright (c) 2019-2024 Status Research & Development GmbH + * Copyright (c) 2019-2025 Status Research & Development GmbH * Licensed and distributed under either of * * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). * * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -26,6 +26,7 @@ pipeline { } options { + disableRestartFromStage() timestamps() ansiColor('xterm') /* This also includes wait time in the queue. */ diff --git a/config.nims b/config.nims index 0e6af5334b..cef701a038 100644 --- a/config.nims +++ b/config.nims @@ -96,8 +96,8 @@ if defined(windows): if defined(disableMarchNative): if defined(i386) or defined(amd64): if defined(macosx): - # https://support.apple.com/en-us/102861 - # "macOS Ventura is compatible with these computers" lists current oldest + # https://support.apple.com/en-us/105113 + # "macOS Sonoma is compatible with these computers" lists current oldest # supported x86 models, all of which have Kaby Lake or newer CPUs. switch("passC", "-march=skylake -mtune=generic") switch("passL", "-march=skylake -mtune=generic") @@ -121,9 +121,11 @@ elif defined(riscv64): else: switch("passC", "-march=native") switch("passL", "-march=native") - if defined(windows): + if defined(i386) or defined(amd64): # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65782 # ("-fno-asynchronous-unwind-tables" breaks Nim's exception raising, sometimes) + # For non-Windows targets, https://github.com/bitcoin-core/secp256k1/issues/1623 + # also suggests disabling the same flag to address Ubuntu 22.04/recent AMD CPUs. 
switch("passC", "-mno-avx512f") switch("passL", "-mno-avx512f") @@ -145,8 +147,6 @@ switch("passL", "-fno-omit-frame-pointer") switch("define", "nim_compiler_path=" & currentDir & "env.sh nim") switch("define", "withoutPCRE") -switch("import", "testutils/moduletests") - when not defined(disable_libbacktrace): --define:nimStackTraceOverride switch("import", "libbacktrace") @@ -178,14 +178,25 @@ if canEnableDebuggingSymbols: --define:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9 +switch("warningAsError", "BareExcept:on") +switch("warningAsError", "CStringConv:on") +switch("warningAsError", "UnusedImport:on") +switch("hintAsError", "ConvFromXtoItselfNotNeeded:on") +switch("hintAsError", "DuplicateModuleImport:on") + # `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'" switch("warning", "CaseTransition:off") -# Too many right now to read compiler output. Warnings are legitimate, but -# should be fixed out-of-band of `unstable` branch. -switch("warning", "BareExcept:off") - -# Too many of these because of Defect compat in 1.2 +# 1 nimbus-eth2/tests/consensus_spec/test_fixture_ssz_generic_types.nim(238, 28) Hint: 'sszCheck' cannot raise 'YamlConstructionError' [XCannotRaiseY] +# 1 nimbus-eth2/tests/consensus_spec/test_fixture_ssz_generic_types.nim(238, 51) Hint: 'sszCheck' cannot raise 'YamlParserError' [XCannotRaiseY] +# 1 nimbus-eth2/vendor/nim-testutils/testutils/moduletests.nim(17, 24) Hint: 'main' cannot raise 'CatchableError' [XCannotRaiseY] +# 2 nimbus-eth2/tests/consensus_spec/test_fixture_light_client_sync.nim(135, 20) Hint: 'loadTestMeta' cannot raise 'YamlConstructionError' [XCannotRaiseY] +# 2 nimbus-eth2/tests/consensus_spec/test_fixture_light_client_sync.nim(135, 43) Hint: 'loadTestMeta' cannot raise 'YamlParserError' [XCannotRaiseY] +# 2 nimbus-eth2/vendor/nim-toml-serialization/toml_serialization/reader.nim(213, 58) Hint: 'readValue' cannot raise 'IOError' [XCannotRaiseY] +# 3 nimbus-eth2/vendor/nim-toml-serialization/toml_serialization/reader.nim(369, 38) Hint: 'readValue' cannot raise 'SerializationError' [XCannotRaiseY] +# 3 nimbus-eth2/vendor/nim-toml-serialization/toml_serialization/reader.nim(369, 58) Hint: 'readValue' cannot raise 'IOError' [XCannotRaiseY] +# 4 nimbus-eth2/vendor/nim-serialization/serialization.nim(27, 86) Hint: 'readValue' cannot raise 'IOError' [XCannotRaiseY] +# 116 nimbus-eth2/vendor/nim-ssz-serialization/ssz_serialization.nim(51, 77) Hint: 'writeFixedSized' cannot raise 'IOError' [XCannotRaiseY] switch("hint", "XCannotRaiseY:off") # Useful for Chronos metrics. diff --git a/docker/dist/base_image/Dockerfile.amd64 b/docker/dist/base_image/Dockerfile.amd64 index 2696142e44..c5a952e6ba 100644 --- a/docker/dist/base_image/Dockerfile.amd64 +++ b/docker/dist/base_image/Dockerfile.amd64 @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2023 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -13,7 +13,7 @@ # it once, upload it to Docker Hub and make sure it's being pulled regularly so # it's not deleted after 6 months of inactivity. 
-FROM ubuntu:20.04 +FROM ubuntu:22.04 SHELL ["/bin/bash", "-c"] diff --git a/docker/dist/base_image/Dockerfile.arm b/docker/dist/base_image/Dockerfile.arm index e060e77f12..46a83690ec 100644 --- a/docker/dist/base_image/Dockerfile.arm +++ b/docker/dist/base_image/Dockerfile.arm @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2023 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -13,7 +13,7 @@ # it once, upload it to Docker Hub and make sure it's being pulled regularly so # it's not deleted after 6 months of inactivity. -FROM ubuntu:20.04 +FROM ubuntu:22.04 SHELL ["/bin/bash", "-c"] diff --git a/docker/dist/base_image/Dockerfile.macos b/docker/dist/base_image/Dockerfile.macos index 1aae9b49e1..8a1098521a 100644 --- a/docker/dist/base_image/Dockerfile.macos +++ b/docker/dist/base_image/Dockerfile.macos @@ -15,7 +15,7 @@ # # Mar 2025: CMake is no longer needed, removable when base image is updated -FROM ubuntu:20.04 +FROM ubuntu:22.04 SHELL ["/bin/bash", "-c"] diff --git a/docker/dist/base_image/Dockerfile.win64 b/docker/dist/base_image/Dockerfile.win64 index 7e52ef5221..513e030bb6 100644 --- a/docker/dist/base_image/Dockerfile.win64 +++ b/docker/dist/base_image/Dockerfile.win64 @@ -15,7 +15,7 @@ # # Mar 2025: CMake is no longer needed, removable when base image is updated -FROM ubuntu:20.04 +FROM ubuntu:22.04 SHELL ["/bin/bash", "-c"] diff --git a/docker/dist/binaries/Dockerfile.bn.amd64 b/docker/dist/binaries/Dockerfile.bn.amd64 index 089bd456cd..4f6885ede9 100644 --- a/docker/dist/binaries/Dockerfile.bn.amd64 +++ b/docker/dist/binaries/Dockerfile.bn.amd64 @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2025 Status Research & Development GmbH. Licensed under +# Copyright (c) 2022-2024 Status Research & Development GmbH. Licensed under # either of: # - Apache License, version 2.0 # - MIT license @@ -25,4 +25,4 @@ RUN mkdir -p /home/user/nimbus-eth2/build && \ chown -R user:user /home/user/nimbus-eth2/build WORKDIR "/home/user/" -ENTRYPOINT ["/home/user/nimbus_beacon_node"] \ No newline at end of file +ENTRYPOINT ["/home/user/nimbus_beacon_node"] diff --git a/docker/dist/binaries/Dockerfile.bn.arm b/docker/dist/binaries/Dockerfile.bn.arm index b7d63892f1..89147e7c2f 100644 --- a/docker/dist/binaries/Dockerfile.bn.arm +++ b/docker/dist/binaries/Dockerfile.bn.arm @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2025 Status Research & Development GmbH. Licensed under +# Copyright (c) 2022-2024 Status Research & Development GmbH. Licensed under # either of: # - Apache License, version 2.0 # - MIT license @@ -29,4 +29,4 @@ RUN mkdir -p /home/user/nimbus-eth2/build && \ chown -R user:user /home/user/nimbus-eth2/build WORKDIR "/home/user/" -ENTRYPOINT ["/home/user/nimbus_beacon_node"] \ No newline at end of file +ENTRYPOINT ["/home/user/nimbus_beacon_node"] diff --git a/docker/dist/binaries/Dockerfile.bn.arm64 b/docker/dist/binaries/Dockerfile.bn.arm64 index 330c747ec3..713bb3df76 100644 --- a/docker/dist/binaries/Dockerfile.bn.arm64 +++ b/docker/dist/binaries/Dockerfile.bn.arm64 @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2025 Status Research & Development GmbH. Licensed under +# Copyright (c) 2022-2024 Status Research & Development GmbH. 
Licensed under # either of: # - Apache License, version 2.0 # - MIT license @@ -29,4 +29,4 @@ RUN mkdir -p /home/user/nimbus-eth2/build && \ chown -R user:user /home/user/nimbus-eth2/build WORKDIR "/home/user/" -ENTRYPOINT ["/home/user/nimbus_beacon_node"] \ No newline at end of file +ENTRYPOINT ["/home/user/nimbus_beacon_node"] diff --git a/docker/dist/binaries/Dockerfile.vc.amd64 b/docker/dist/binaries/Dockerfile.vc.amd64 index edf8e195c2..f6c239b170 100644 --- a/docker/dist/binaries/Dockerfile.vc.amd64 +++ b/docker/dist/binaries/Dockerfile.vc.amd64 @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2025 Status Research & Development GmbH. Licensed under +# Copyright (c) 2022-2024 Status Research & Development GmbH. Licensed under # either of: # - Apache License, version 2.0 # - MIT license @@ -20,4 +20,4 @@ STOPSIGNAL SIGINT COPY "nimbus-eth2/build/nimbus_validator_client" "/home/user/nimbus_validator_client" WORKDIR "/home/user/" -ENTRYPOINT ["/home/user/nimbus_validator_client"] \ No newline at end of file +ENTRYPOINT ["/home/user/nimbus_validator_client"] diff --git a/docker/dist/binaries/Dockerfile.vc.arm b/docker/dist/binaries/Dockerfile.vc.arm index f4365cb658..cd7c98df08 100644 --- a/docker/dist/binaries/Dockerfile.vc.arm +++ b/docker/dist/binaries/Dockerfile.vc.arm @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2025 Status Research & Development GmbH. Licensed under +# Copyright (c) 2022-2024 Status Research & Development GmbH. Licensed under # either of: # - Apache License, version 2.0 # - MIT license @@ -24,4 +24,4 @@ STOPSIGNAL SIGINT COPY "nimbus-eth2/build/nimbus_validator_client" "/home/user/nimbus_validator_client" WORKDIR "/home/user/" -ENTRYPOINT ["/home/user/nimbus_validator_client"] \ No newline at end of file +ENTRYPOINT ["/home/user/nimbus_validator_client"] diff --git a/docker/dist/binaries/Dockerfile.vc.arm64 b/docker/dist/binaries/Dockerfile.vc.arm64 index 06d068153f..ae66e47e99 100644 --- a/docker/dist/binaries/Dockerfile.vc.arm64 +++ b/docker/dist/binaries/Dockerfile.vc.arm64 @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2025 Status Research & Development GmbH. Licensed under +# Copyright (c) 2022-2024 Status Research & Development GmbH. Licensed under # either of: # - Apache License, version 2.0 # - MIT license @@ -24,4 +24,5 @@ STOPSIGNAL SIGINT COPY "nimbus-eth2/build/nimbus_validator_client" "/home/user/nimbus_validator_client" WORKDIR "/home/user/" -ENTRYPOINT ["/home/user/nimbus_validator_client"] \ No newline at end of file +ENTRYPOINT ["/home/user/nimbus_validator_client"] + diff --git a/docker/dist/entry_point.sh b/docker/dist/entry_point.sh index 966503f761..9db43b7972 100755 --- a/docker/dist/entry_point.sh +++ b/docker/dist/entry_point.sh @@ -55,32 +55,20 @@ if [[ "${PLATFORM}" == "Windows_amd64" ]]; then -C vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc \ -f Makefile.mingw \ CC="${CC}" \ + CFLAGS="-Os -fPIC" \ libminiupnpc.a &>/dev/null make \ -j$(nproc) \ -C vendor/nim-nat-traversal/vendor/libnatpmp-upstream \ CC="${CC}" \ - CFLAGS="-Wall -Os -DWIN32 -DNATPMP_STATICLIB -DENABLE_STRNATPMPERR -DNATPMP_MAX_RETRIES=4 ${CFLAGS}" \ + CFLAGS="-Wall -Os -fPIC -DWIN32 -DNATPMP_STATICLIB -DENABLE_STRNATPMPERR -DNATPMP_MAX_RETRIES=4 ${CFLAGS}" \ libnatpmp.a &>/dev/null - # We set CXX and add CXXFLAGS for libunwind's C++ code, even though we don't - # use those C++ objects. I don't see an easy way of disabling the C++ parts in - # libunwind itself. - # - # "libunwind.a" combines objects produced from C and C++ code. 
Even though we - # don't link any C++-generated objects, the linker still checks them for - # undefined symbols, so we're forced to use g++ as a linker wrapper. - # For some reason, macOS's Clang doesn't need this trick, nor do native (and - # newer) Mingw-w64 toolchains on Windows. - # # nim-blscurve's Windows SSSE3 detection doesn't work when cross-compiling, # so we enable it here. make \ - CC="${CC}" \ - CXX="${CXX}" \ - CXXFLAGS="${CXXFLAGS} -D__STDC_FORMAT_MACROS -D_WIN32_WINNT=0x0600" \ - USE_VENDORED_LIBUNWIND=1 \ LOG_LEVEL="TRACE" \ - NIMFLAGS="${NIMFLAGS_COMMON} --os:windows --gcc.exe=${CC} --gcc.linkerexe=${CXX} --passL:-static -d:BLSTuseSSSE3=1" \ + CC="${CC}" \ + NIMFLAGS="${NIMFLAGS_COMMON} --os:windows --gcc.exe=${CC} --gcc.linkerexe=${CC} --passL:-static -d:BLSTuseSSSE3=1" \ ${BINARIES} elif [[ "${PLATFORM}" == "Linux_arm32v7" ]]; then CC="arm-linux-gnueabihf-gcc" @@ -142,7 +130,6 @@ elif [[ "${PLATFORM}" == "macOS_amd64" ]]; then RANLIB="x86_64-apple-darwin${DARWIN_VER}-ranlib" \ DSYMUTIL="x86_64-apple-darwin${DARWIN_VER}-dsymutil" \ FORCE_DSYMUTIL=1 \ - USE_VENDORED_LIBUNWIND=1 \ NIMFLAGS="${NIMFLAGS_COMMON} --os:macosx --clang.exe=${CC} --clang.linkerexe=${CC}" \ ${BINARIES} elif [[ "${PLATFORM}" == "macOS_arm64" ]]; then @@ -173,7 +160,6 @@ elif [[ "${PLATFORM}" == "macOS_arm64" ]]; then RANLIB="arm64-apple-darwin${DARWIN_VER}-ranlib" \ DSYMUTIL="arm64-apple-darwin${DARWIN_VER}-dsymutil" \ FORCE_DSYMUTIL=1 \ - USE_VENDORED_LIBUNWIND=1 \ NIMFLAGS="${NIMFLAGS_COMMON} --os:macosx --cpu:arm64 --passC:'-mcpu=apple-a13' --passL:'-mcpu=apple-a13' --clang.exe=${CC} --clang.linkerexe=${CC}" \ ${BINARIES} elif [[ "${PLATFORM}" == "Linux_amd64_opt" ]]; then diff --git a/docs/e2store.md b/docs/e2store.md index 36bf2ca384..1c2e2c5657 100644 --- a/docs/e2store.md +++ b/docs/e2store.md @@ -181,7 +181,7 @@ Each era is identified by when it ends. 
Thus, the genesis era is era `0`, follow ## File name -`.era` file names follow a simple convention: `---.era`: +`.era` file names follow a simple convention: `--.era`: * `config-name` is the `CONFIG_NAME` field of the runtime configuration (`mainnet`, `sepolia`, `holesky`, `hoodi`, etc) * `era-number` is the number of the _first_ era stored in the file - for example, the genesis era file has number 0 - as a 5-digit 0-filled decimal integer diff --git a/docs/requirements.in b/docs/requirements.in index c1260f4085..1aecdee4f8 100644 --- a/docs/requirements.in +++ b/docs/requirements.in @@ -1,3 +1,4 @@ pip-tools pyyaml >= 6.0.1 mkdocs-material +mkdocs-redirects diff --git a/docs/requirements.txt b/docs/requirements.txt index 67f0fabc3f..4c45dca4af 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.12 +# This file is autogenerated by pip-compile with Python 3.13 # by the following command: # # pip-compile requirements.in @@ -40,13 +40,17 @@ mergedeep==1.3.4 # mkdocs # mkdocs-get-deps mkdocs==1.6.1 - # via mkdocs-material + # via + # mkdocs-material + # mkdocs-redirects mkdocs-get-deps==0.2.0 # via mkdocs mkdocs-material==9.5.42 # via -r requirements.in mkdocs-material-extensions==1.3.1 # via mkdocs-material +mkdocs-redirects==1.2.2 + # via -r requirements.in packaging==24.1 # via # build @@ -80,11 +84,11 @@ pyyaml-env-tag==0.1 # via mkdocs regex==2024.9.11 # via mkdocs-material -requests==2.32.3 +requests==2.32.4 # via mkdocs-material six==1.16.0 # via python-dateutil -urllib3==2.2.3 +urllib3==2.5.0 # via requests watchdog==5.0.3 # via mkdocs diff --git a/docs/the_nimbus_book/mkdocs.yml b/docs/the_nimbus_book/mkdocs.yml index db0dc5be19..2b9b75953b 100644 --- a/docs/the_nimbus_book/mkdocs.yml +++ b/docs/the_nimbus_book/mkdocs.yml @@ -39,6 +39,11 @@ edit_uri: edit/unstable/docs/the_nimbus_book/src site_url: https://nimbus.guide docs_dir: src +plugins: + - redirects: + redirect_maps: + 'el-light-client.md': 'consensus-light-client.md' + markdown_extensions: - admonition - pymdownx.details @@ -60,7 +65,7 @@ nav: - Getting started: - 'quick-start.md' - 'run-a-validator.md' - - 'el-light-client.md' + - 'consensus-light-client.md' - 'execution-client.md' - 'pi-guide.md' diff --git a/docs/the_nimbus_book/src/el-light-client.md b/docs/the_nimbus_book/src/consensus-light-client.md similarity index 83% rename from docs/the_nimbus_book/src/el-light-client.md rename to docs/the_nimbus_book/src/consensus-light-client.md index 09b84c466c..a112d33c35 100644 --- a/docs/the_nimbus_book/src/el-light-client.md +++ b/docs/the_nimbus_book/src/consensus-light-client.md @@ -1,18 +1,18 @@ -# Light client +# Consensus light client !!! warning - The light client is currently in BETA and details around running it may change. + The consensus light client is currently in BETA and details around running it may change. -The Nimbus Light Client is a light-weight alternative to running a full beacon node, when you're not planning on becoming a validator but still want to run an Ethereum execution layer client. +The Nimbus Consensus Light Client is a light-weight alternative to running a full beacon node, when you're not planning on becoming a validator but still want to run an Ethereum execution layer client. Execution layer (EL) clients provide the [Web3 API](https://ethereum.github.io/execution-apis/api-documentation/) to expose information stored on the Ethereum blockchain. 
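Once the EL client is being driven by the consensus light client, that Web3 API is reachable in the usual way. As a minimal sketch (assuming the execution client's HTTP JSON-RPC endpoint is enabled on the conventional port 8545 — adjust to your own configuration), a standard `eth_blockNumber` call confirms the client is serving chain data:

```sh
# Hypothetical check: query the standard Web3 JSON-RPC API of the local EL client.
# The URL/port are assumptions; match them to your execution client's JSON-RPC settings.
curl -s -X POST http://127.0.0.1:8545 \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
```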
Since the merge 🐼, execution clients can no longer run standalone. ## Comparison -Compared to a full beacon node, a light client has several advantages and disadvantages. +Compared to a full beacon node, a consensus light client has several advantages and disadvantages. -| Feature | Beacon Node | Light Client | +| Feature | Beacon Node | Consensus Light Client | | -- | -- | -- | | Disk usage | ~200GB | **<1MB** | | Bandwidth | *TBD* | **TBD (low)** | @@ -20,8 +20,8 @@ Compared to a full beacon node, a light client has several advantages and disadv | Head delay | **None** | 4/3 slot (15 s) | | Security | **Full** | Light | -Light clients delegate full validation to other network participants and operate under a honest supermajority (> 2/3) assumption among elected participants. -Due to this delegation, light clients are typically behind by ~4/3 slots (~15 seconds on Ethereum mainnet). +Consensus light clients delegate full validation to other network participants and operate under a honest supermajority (> 2/3) assumption among elected participants. +Due to this delegation, consensus light clients are typically behind by ~4/3 slots (~15 seconds on Ethereum mainnet). !!! note If you are validating, you must run a full beacon node. @@ -29,7 +29,7 @@ Due to this delegation, light clients are typically behind by ~4/3 slots (~15 se ## Building from source -The Nimbus light client is currently not bundled as part of the Docker images and needs to be built from source. +The Nimbus consensus light client is currently not bundled as part of the Docker images and needs to be built from source. ### 1. Clone the `nimbus-eth2` repository @@ -40,7 +40,7 @@ cd nimbus-eth2 ### 2. Run the build process -To build the Nimbus light client and its dependencies, make sure you have [all prerequisites](./install.md) and then run: +To build the Nimbus consensus light client and its dependencies, make sure you have [all prerequisites](./install.md) and then run: ```sh make -j4 nimbus_light_client @@ -56,10 +56,10 @@ When the process finishes, the `nimbus_light_client` executable can be found in Follow the [regular instructions](./eth1.md) for running the execution client, taking note of its JWT secret configuration that you will need in the next step. -## Running the light client +## Running the consensus light client -The light client starts syncing from a trusted block. -This trusted block should be somewhat recent ([~1-2 weeks](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/phase0/weak-subjectivity.md)) and needs to be configured each time when starting the light client. +The consensus light client starts syncing from a trusted block. +This trusted block should be somewhat recent ([~1-2 weeks](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/phase0/weak-subjectivity.md)) and needs to be configured each time when starting the consensus light client. ### 1. Obtaining a trusted block root @@ -80,13 +80,13 @@ A block root may be obtained from another trusted beacon node, or from a trusted Otherwise, for example if the bottom-most slot was `Missed`, go back and pick a different epoch. !!! warning - Selecting a block root from an untrusted source or using an outdated block root may lead to the light client syncing to an unexpected state. - If that happens, stop the light client and restart it with a new trusted block root. + Selecting a block root from an untrusted source or using an outdated block root may lead to the consensus light client syncing to an unexpected state. 
+ If that happens, stop the consensus light client and restart it with a new trusted block root. Depending on the EL client, its database must be deleted and sync restarted from scratch. -### 2. Starting the light client +### 2. Starting the consensus light client -To start the light client, run the following commands (inserting your own trusted block root): +To start the consensus light client, run the following commands (inserting your own trusted block root): === "Mainnet" ```sh @@ -107,12 +107,12 @@ To start the light client, run the following commands (inserting your own truste ``` !!! tip - The light client can be left running in the background. + The consensus light client can be left running in the background. Note that a new trusted block root is required when restarting. ## Observing the sync process -After a while, the light client will pick up beacon block headers from the Ethereum network and start informing the EL client about the latest data. +After a while, the consensus light client will pick up beacon block headers from the Ethereum network and start informing the EL client about the latest data. You should see logs similar to the following: ### Nimbus diff --git a/docs/the_nimbus_book/src/execution-client.md b/docs/the_nimbus_book/src/execution-client.md index 1f9c5ba41e..99480788bf 100644 --- a/docs/the_nimbus_book/src/execution-client.md +++ b/docs/the_nimbus_book/src/execution-client.md @@ -7,7 +7,7 @@ If you're looking for information about setting up an execution client for validator duties or any other production usage, see the [execution clients guide](./eth1.md). -The Nimbus execution client is a light-weight implementation of the Ethereum execution protocol. Paired with a [beacon node](./quick-start.md) or [light client](./el-light-client.md), it provides access to Ethereum blockchain for dapps and users alike via the standard [Web3 API](https://ethereum.github.io/execution-apis/api-documentation/). +The Nimbus execution client is a light-weight implementation of the Ethereum execution protocol. Paired with a [beacon node](./quick-start.md) or [consensus light client](./consensus-light-client.md), it provides access to Ethereum blockchain for dapps and users alike via the standard [Web3 API](https://ethereum.github.io/execution-apis/api-documentation/). ## Building from source @@ -25,20 +25,20 @@ cd nimbus-eth1 To build the Nimbus execution client and its dependencies, make sure you have [all prerequisites](./install.md) and then run: ```sh -make -j4 nimbus_execution_client nrpc +make -j4 nimbus_execution_client ``` This may take a few minutes. -When the process finishes, the `nimbus_execution_client` and `nrpc` executables can be found in the `build` subdirectory. +When the process finishes, the `nimbus_execution_client` executables can be found in the `build` subdirectory. -## Import era files +## Syncing using era files Syncing Nimbus requires a set of `era1` and `era` files. These can be generated from a `geth` and `nimbus` consensus client respectively or downloaded from a third-party repository. In addition to the era files themselves, you will need at least 200GB of free space on a fast SSD in your data directory, as set by the `--data-dir` command line option. -!!! info "`era` file download locations" +!!! 
info "`era` file downloading" `era` and `era1` files for testing purposes could at the time of writing be found here - these sources may or may not be available: === "Mainnet" @@ -59,12 +59,52 @@ In addition to the era files themselves, you will need at least 200GB of free sp * https://sepolia.era.nimbus.team/ * https://sepolia.era1.nimbus.team/ + A wider community maintained list of `era` and `era1` files can be found eth-clients github [history-endpoints](https://eth-clients.github.io/history-endpoints/) + + Downloading these files can take a long time, specially if you are downloading sequentially. + For easier and fast download, please use the `era_downloader.sh` script provided in the `nimbus-eth1` repository. + #### You'll need: + - [`aria2`](https://aria2.github.io/) installed: + - **macOS**: `brew install aria2` + - **Ubuntu/Debian**: `sudo apt install aria2` + - Standard Unix tools: `bash`, `awk`, `find`, `grep`, `curl` + + === "Mainnet" + ```sh + cd nimbus-eth1 + chmod +x scripts/era_downloader.sh + ./scripts/era_downloader.sh https://mainnet.era1.nimbus.team/ ../build/era1 + ./scripts/era_downloader.sh https://mainnet.era.nimbus.team/ ../build/era + ``` + + === "Hoodi" + ```sh + cd nimbus-eth1 + chmod +x scripts/era_downloader.sh + ./scripts/era_downloader.sh https://hoodi.era.nimbus.team/ ../build/era + ``` + + === "Holesky" + ```sh + cd nimbus-eth1 + chmod +x scripts/era_downloader.sh + ./scripts/era_downloader.sh https://holesky.era.nimbus.team/ ../build/era + ``` + + === "Sepolia" + ```sh + cd nimbus-eth1 + chmod +x scripts/era_downloader.sh + ./scripts/era_downloader.sh https://sepolia.era1.nimbus.team/ ../build/era1 + ./scripts/era_downloader.sh https://sepolia.era.nimbus.team/ ../build/era + ``` + It is recommended that you place the era files in the data directory under `era1` and `era` respectively. Era files can be shared between multiple nodes and can reside on a slow drive - use the `--era1-dir` and `--era-dir` options if they are located outside of the data directory. See the [era file guide](./era-store.md) for more information. !!! tip "" - Future versions of Nimbus will support other methods of syncing + Future versions of Nimbus will support other methods of syncing, such as snap sync. === "Mainnet" !!! note "" @@ -92,7 +132,7 @@ See the [era file guide](./era-store.md) for more information. ## Launch the client -In order for the execution client to operate, you need to connect a consensus node. This can be the [Nimbus beacon node](./quick-start.md), a [supported consensus client](https://ethereum.org/en/developers/docs/nodes-and-clients/#consensus-clients) or a [light client](./el-light-client.md). +In order for the execution client to operate, you need to connect a consensus node. This can be the [Nimbus beacon node](./quick-start.md), a [supported consensus client](https://ethereum.org/en/developers/docs/nodes-and-clients/#consensus-clients) or a [consensus light client](./consensus-light-client.md). The consensus node connects to the execution client via the Engine API which is enabled using `--engine-api` and by default runs on port `8551`. 
@@ -118,16 +158,39 @@ During startup, a `jwt.hex` file will be placed in the data directory containing build/nimbus_execution_client --network=sepolia --data-dir=build/sepolia --engine-api ``` -## Top up blocks from the consensus node +## Optionally quickstart with a pre-synced database -While era files cover the majority of chain history, Nimbus currenty relies on the consensus node to sync the most recent blocks using the `nrpc` helper. +!!! warning "Unverified pre-synced database" + The pre-synced database is provided by the Nimbus team which contained the state, but using this database is trusting the team to have provided a valid database. This gives you a headstart on syncing, but if you don't trust the provider, you should do a full sync instead, either from era files or from the p2p network. + The pre-synced database is not available for all networks, and is only available for mainnet + +If you want to skip the era file import and start with a pre-synced database, you can download a pre-synced database from the Nimbus team. This database is for now only available for the mainnet. + +```sh +# Download the pre-synced database +wget https://eth1-db.nimbus.team/mainnet-static-vid-keyed.tar.gz + +# Extract the database into the data directory +tar -xzf mainnet-static-vid-keyed.tar.gz +``` + +This will extract the pre-synced database into the current directory, which you can then use as your data directory. + +## Using the consensus node to sync + +While era files cover the majority of chain history. In most cases, Nimbus will automatically sync recent blocks via peer-to-peer networking. +However, if your node is stuck, has no peers, or you're on a weak network connection, you can optionally use nrpc to sync recent blocks directly from a connected consensus node using the Engine API. This method of syncing loads blocks from the consensus node and passes them to the execution client via the Engine API. === "Mainnet" ```sh - # Start `nrpc` every 2 seconds in case there is a fork or the execution client goes out of sync - while true; do build/nrpc sync --beacon-api=http://localhost:5052 --el-engine-api=http://localhost:8550 --jwt-secret=build/mainnet/jwt.hex; sleep 2; done + ./build/nrpc sync --beacon-api=http://localhost:5052 --el-engine-api=http://localhost:8550 --jwt-secret=build/mainnet/jwt.hex + ``` + +=== "Hoodi" + ```sh + ./build/nrpc sync --network=hoodi --beacon-api=http://localhost:5052 --el-engine-api=http://localhost:8550 --jwt-secret=build/hoodi/jwt.hex ``` === "Hoodi" @@ -138,15 +201,13 @@ This method of syncing loads blocks from the consensus node and passes them to t === "Holesky" ```sh - # Start `nrpc` every 2 seconds in case there is a fork or the execution client goes out of sync - while true; do build/nrpc sync --network=holesky --beacon-api=http://localhost:5052 --el-engine-api=http://localhost:8550 --jwt-secret=build/holesky/jwt.hex; sleep 2; done + ./build/nrpc sync --network=holesky --beacon-api=http://localhost:5052 --el-engine-api=http://localhost:8550 --jwt-secret=build/holesky/jwt.hex ``` === "Sepolia" ```sh - # Start `nrpc` every 2 seconds in case there is a fork or the execution client goes out of sync - while true; do build/nrpc sync --network=sepolia --beacon-api=http://localhost:5052 --el-engine-api=http://localhost:8550 --jwt-secret=build/sepolia/jwt.hex; sleep 2; done + ./build/nrpc sync --network=sepolia --beacon-api=http://localhost:5052 --el-engine-api=http://localhost:8550 --jwt-secret=build/sepolia/jwt.hex ``` !!! 
tip "" - Future versions of Nimbus will support other methods of syncing + Future versions of Nimbus will support snap sync. diff --git a/docs/the_nimbus_book/src/external-block-builder.md b/docs/the_nimbus_book/src/external-block-builder.md index f8accaf4d1..a48ef4695f 100644 --- a/docs/the_nimbus_book/src/external-block-builder.md +++ b/docs/the_nimbus_book/src/external-block-builder.md @@ -56,6 +56,8 @@ Additionally, the URL of the service exposing the [builder API](https://ethereum - [EthStaker MEV relay list](https://ethstaker.cc/mev-relay-list/) +- [MEV Relay List](https://www.coincashew.com/coins/overview-eth/mev-boost/mev-relay-list) + - [Mainnet Relay Overview](https://beaconcha.in/relays) - [Hoodi Relay Overview](https://hoodi.beaconcha.in/relays) diff --git a/docs/the_nimbus_book/src/index.md b/docs/the_nimbus_book/src/index.md index 25676e4bc8..23224b039e 100644 --- a/docs/the_nimbus_book/src/index.md +++ b/docs/the_nimbus_book/src/index.md @@ -4,7 +4,7 @@ Nimbus is a client for the Ethereum network that is [lightweight](https://our.st Its efficiency and low resource consumption allows it to perform well on all kinds of systems: ranging from Raspberry Pi and mobile devices — where it contributes to low power consumption and security — to powerful servers where it leaves resources free to perform other tasks. -This book describes the consensus protocol implementation which includes a [beacon node](./quick-start.md), [validator client](./validator-client.md) and [light client](./el-light-client.md). +This book describes the consensus protocol implementation which includes a [beacon node](./quick-start.md), [validator client](./validator-client.md) and [consensus light client](./consensus-light-client.md). An [execution client](https://github.com/status-im/nimbus-eth1) is also under development - see its [quickstart guide](./execution-client.md). @@ -18,7 +18,7 @@ Our companion project [fluffy](https://github.com/status-im/nimbus-eth1/tree/mas * [Web3Signer](https://docs.web3signer.consensys.net/en/latest/) remote signing * [Validator monitoring](./validator-monitor.md) and [performance analysis](./attestation-performance.md) tooling * [External block builder](./external-block-builder.md) (PBS / mev-boost) support with execution client fallback -* [Light consensus client](./el-light-client.md) for running an execution client without a full beacon node +* [Consensus light client](./consensus-light-client.md) for running an execution client without a full beacon node ## Design goals @@ -45,7 +45,7 @@ You can read this book from start to finish, or you might want to read just spec * Coming from a different client? Check out the [migration guide](./migration.md). * Visualize the important metrics with [Grafana and Prometheus](./metrics-pretty-pictures.md). * Interested in becoming a validator? Follow the [validator guide](./run-a-validator.md). -* If you're not planning on becoming a validator, you can run the [light client](./el-light-client.md). +* If you're not planning on becoming a validator, you can run the [consensus light client](./consensus-light-client.md). ## Get in touch diff --git a/docs/the_nimbus_book/src/light-client-data.md b/docs/the_nimbus_book/src/light-client-data.md index 36a7bb6a0a..0288a88003 100644 --- a/docs/the_nimbus_book/src/light-client-data.md +++ b/docs/the_nimbus_book/src/light-client-data.md @@ -1,11 +1,11 @@ # Light client data -Nimbus is configured by default to serve data that allows light clients to stay in sync with the Ethereum network. 
+Nimbus beacon node is configured by default to serve data that allows consensus light clients to stay in sync with the Ethereum network. Light client data is imported incrementally and does not affect validator performance. Information about the light client sync protocol can be found in the [Ethereum consensus specs](https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md). !!! note - Nimbus also implements a [standalone light client](./el-light-client.md) that may be used to sync an execution layer (EL) client. + Nimbus also implements a [standalone consensus light client](./consensus-light-client.md) that may be used to sync an execution layer (EL) client. ## Configuration diff --git a/docs/the_nimbus_book/src/migration.md b/docs/the_nimbus_book/src/migration.md index 1f13b0cd1d..50a232030a 100644 --- a/docs/the_nimbus_book/src/migration.md +++ b/docs/the_nimbus_book/src/migration.md @@ -188,6 +188,6 @@ For a quick guide on how to set up a systemd service, see [our systemd guide](./ ## Final thoughts -If you are unsure of the safety of a step, please get in touch with us directly on [discord](https://discord.gg/nnNEBvHu3m). +If you are unsure of the safety of a step, please get in touch with us directly on [Discord](https://discord.gg/XRxWahP). Additionally, we recommend testing the migration works correctly on a testnet before going ahead on mainnet. diff --git a/docs/the_nimbus_book/src/options.md b/docs/the_nimbus_book/src/options.md index e2e0711edc..15c8a5a6e9 100644 --- a/docs/the_nimbus_book/src/options.md +++ b/docs/the_nimbus_book/src/options.md @@ -78,7 +78,6 @@ The following options are available: built-in genesis state). --genesis-state-url URL for obtaining the genesis state of the network (for networks without a built-in genesis state). - --finalized-deposit-tree-snapshot SSZ file specifying a recent finalized EIP-4881 deposit tree snapshot. --node-name A name for this node that will appear in the logs. If you set this to 'auto', a persistent automatically generated ID will be selected for each --data-dir folder. @@ -183,3 +182,4 @@ Here is an example config file illustrating all of the above: | 0 | Successful exit | | 1 | Generic failure or unspecified error | | 129 | Doppelganger detection; one might prefer not to restart automatically | +| 198 | Slashing detection; one might prefer not to restart automatically | diff --git a/docs/the_nimbus_book/src/start-syncing.md b/docs/the_nimbus_book/src/start-syncing.md index 70d261685d..c411263669 100644 --- a/docs/the_nimbus_book/src/start-syncing.md +++ b/docs/the_nimbus_book/src/start-syncing.md @@ -131,7 +131,7 @@ The following [configuration options](./options.md) control checkpoint sync beha | Option | Description | |------------------------------------------|-------------| | `--external-beacon-api-url` |
  • External beacon API to use for checkpoint sync
| -| `--trusted-block-root` |
  • Recent trusted finalized block root to sync from external beacon API
  • Uses the [light client sync protocol](https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md) to obtain the latest finalized checkpoint
| +| `--trusted-block-root` |
  • Recent trusted finalized block root to sync from external beacon API
  • Uses the [light client sync protocol](https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/altair/light-client/sync-protocol.md) to obtain the latest finalized checkpoint
| | `--trusted-state-root` |
  • Recent trusted finalized state root to sync from external beacon API
  • Takes precedence over `--trusted-block-root` if both are specified
| !!! info diff --git a/docs/the_nimbus_book/src/trusted-node-sync.md b/docs/the_nimbus_book/src/trusted-node-sync.md index ae7dc75e52..dcc0c9113f 100644 --- a/docs/the_nimbus_book/src/trusted-node-sync.md +++ b/docs/the_nimbus_book/src/trusted-node-sync.md @@ -82,12 +82,12 @@ The `head` root is also printed in the log output at regular intervals. ## Advanced -### Verify the downloaded state through the Nimbus light client +### Verify the downloaded state through the Nimbus consensus light client !!! note "" This feature is available from `v23.4.0` onwards. -The `--trusted-block-root` option enables you to leverage the Nimbus light client in order to minimize the required trust in the specified Beacon API endpoint. After downloading a state snapshot, the light client will verify that it conforms to the established consensus on the network. Note that the provided `--trusted-block-root` should be somewhat recent, and that additional security precautions such as comparing the state root against block explorers is still recommended. +The `--trusted-block-root` option enables you to leverage the Nimbus consensus light client in order to minimize the required trust in the specified Beacon API endpoint. After downloading a state snapshot, the consensus light client will verify that it conforms to the established consensus on the network. Note that the provided `--trusted-block-root` should be somewhat recent, and that additional security precautions such as comparing the state root against block explorers is still recommended. ### Sync deposit history diff --git a/grafana/beacon_nodes_Grafana_dashboard.json b/grafana/beacon_nodes_Grafana_dashboard.json index 16463ddbc2..a296a2e21d 100644 --- a/grafana/beacon_nodes_Grafana_dashboard.json +++ b/grafana/beacon_nodes_Grafana_dashboard.json @@ -5667,7 +5667,7 @@ "value": null }, { - "color": "red", + "color": "green", "value": 1 } ] @@ -5705,7 +5705,7 @@ "uid": "${DS_PROMETHEUS-PROXY}" }, "exemplar": true, - "expr": "sum(validator_monitor_slashed{instance=\"${instance}\",container=\"${container}\"})", + "expr": "sum(validator_monitor_withdrawable{instance=\"${instance}\",container=\"${container}\"})", "interval": "", "legendFormat": "", "refId": "A" diff --git a/ncli/deposit_downloader.nim b/ncli/deposit_downloader.nim deleted file mode 100644 index edd970e83f..0000000000 --- a/ncli/deposit_downloader.nim +++ /dev/null @@ -1,89 +0,0 @@ -# beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. - -import - std/[json, strutils, times, sequtils], - chronos, confutils, chronicles, - web3, web3/primitives, - eth/async_utils, - ../beacon_chain/beacon_chain_db, - ../beacon_chain/networking/network_metadata, - ../beacon_chain/el/el_manager, - ../beacon_chain/spec/[presets, helpers] - -type - CliFlags = object - network* {. - defaultValue: "mainnet" - name: "network".}: string - elUrls* {. - name: "el".}: seq[EngineApiUrlConfigValue] - jwtSecret* {. - name: "jwt-secret".}: Option[InputFile] - outDepositsFile* {. - name: "out-deposits-file".}: Option[OutFile] - configFile* {. 
- desc: "Loads the configuration from a TOML file" - name: "config-file" .}: Option[InputFile] - -proc main(flags: CliFlags) {.async.} = - let - db = BeaconChainDB.new("", inMemory = true) - metadata = getMetadataForNetwork(flags.network) - beaconTimeFn = proc(): BeaconTime = - # BEWARE of this hack - # The EL manager consults the current time in order to determine when the - # transition configuration exchange should start. We assume Bellatrix has - # just arrived which should trigger the configuration exchange and allow - # the downloader to connect to ELs serving the Engine API. - start_beacon_time(Slot(metadata.cfg.BELLATRIX_FORK_EPOCH * SLOTS_PER_EPOCH)) - - let - elManager = ELManager.new( - metadata.cfg, - metadata.depositContractBlock, - metadata.depositContractBlockHash, - db, - toFinalEngineApiUrls(flags.elUrls, flags.jwtSecret), - eth1Network = metadata.eth1Network) - - elManager.start() - - var depositsFile: File - if flags.outDepositsFile.isSome: - depositsFile = open(string flags.outDepositsFile.get, fmWrite) - depositsFile.write( - "block", ",", - "validatorKey", ",", - "withdrawalCredentials", "\n") - depositsFile.flushFile() - - var blockIdx = 0 - while not elManager.isSynced(): - await sleepAsync chronos.seconds(1) - - if flags.outDepositsFile.isSome and - elManager.eth1ChainBlocks.len > blockIdx: - for i in blockIdx ..< elManager.eth1ChainBlocks.len: - for deposit in elManager.eth1ChainBlocks[i].deposits: - depositsFile.write( - $elManager.eth1ChainBlocks[i].number, ",", - $deposit.pubkey, ",", - $deposit.withdrawal_credentials, "\n") - depositsFile.flushFile() - - blockIdx = elManager.eth1ChainBlocks.len - - info "All deposits downloaded" - -waitFor main( - load(CliFlags, - secondarySources = proc ( - config: CliFlags, sources: ref SecondarySources - ) {.raises: [ConfigurationError].} = - if config.configFile.isSome: - sources.addConfigFile(Toml, config.configFile.get))) diff --git a/ncli/download_mainnet_deposits.sh b/ncli/download_mainnet_deposits.sh deleted file mode 100755 index c658645b6f..0000000000 --- a/ncli/download_mainnet_deposits.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -set -eu - -cd "$(dirname "$0")" - -WEB3_URL=wss://mainnet.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a - -../env.sh nim c -r deposit_downloader.nim \ - --web3-url="$WEB3_URL" \ - --deposit-contract=0x00000000219ab540356cBB839Cbe05303d7705Fa \ - --start-block=11052984 - diff --git a/ncli/e2store.nim b/ncli/e2store.nim index b1e8f3df33..d2390e47b6 100644 --- a/ncli/e2store.nim +++ b/ncli/e2store.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -95,7 +95,7 @@ proc readRecord*(f: IoHandle, data: var seq[byte]): Result[Header, string] = ? f.checkBytesLeft(header.len) if data.len != header.len: - data = newSeqUninitialized[byte](header.len) + data = newSeqUninit[byte](header.len) ? 
readFileExact(f, data) @@ -123,4 +123,3 @@ proc findIndexStartOffset*(f: IoHandle): Result[int64, string] = bytes = count.int64 * 8 + 24 ok(-bytes) - diff --git a/ncli/era.nim b/ncli/era.nim index 6f27134b29..e532bf93b7 100644 --- a/ncli/era.nim +++ b/ncli/era.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -113,7 +113,7 @@ proc readIndex*(f: IoHandle): Result[Index, string] = # technically not an error, but we'll throw this sanity check in here.. if slot > int32.high().uint64: return err("fishy slot") - var offsets = newSeqUninitialized[int64](count) + var offsets = newSeqUninit[int64](count) for i in 0.. 0: @@ -509,21 +442,6 @@ proc doCreateTestnet*(config: CliConfig, writeFile(bootstrapFile, enr.toURI) echo "Wrote ", bootstrapFile -type - DelayGenerator = proc(): chronos.Duration {.gcsafe, raises: [].} - -func ethToWei(eth: UInt256): UInt256 = - eth * 1000000000000000000.u256 - -proc initWeb3(web3Url, privateKey: string): Future[Web3] {.async.} = - result = await newWeb3(web3Url) - if privateKey.len != 0: - result.privateKey = Opt.some(keys.PrivateKey.fromHex(privateKey)[]) - else: - let accounts = await result.provider.eth_accounts() - doAssert(accounts.len > 0) - result.defaultAccount = accounts[0] - {.pop.} # TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError when isMainModule: @@ -534,6 +452,21 @@ when isMainModule: from std/sequtils import mapIt, toSeq from std/terminal import readPasswordFromStdin + type + DelayGenerator = proc(): chronos.Duration {.gcsafe, raises: [].} + + func ethToWei(eth: UInt256): UInt256 = + eth * 1000000000000000000.u256 + + proc initWeb3(web3Url, privateKey: string): Future[Web3] {.async.} = + result = await newWeb3(web3Url) + if privateKey.len != 0: + result.privateKey = Opt.some(keys.PrivateKey.fromHex(privateKey)[]) + else: + let accounts = await result.provider.eth_accounts() + doAssert(accounts.len > 0) + result.defaultAccount = accounts[0] + # Compiled version of /scripts/depositContract.v.py in this repo # The contract was compiled in Remix (https://remix.ethereum.org/) with vyper (remote) compiler. 
const depositContractCode = @@ -727,4 +660,4 @@ when isMainModule: # This is handled above before the case statement discard - waitFor main() \ No newline at end of file + waitFor main() diff --git a/ncli/requirements.txt b/ncli/requirements.txt index 63a0aedc6c..e8105032d3 100644 --- a/ncli/requirements.txt +++ b/ncli/requirements.txt @@ -79,7 +79,7 @@ stack-data==0.1.4 terminado==0.12.1 testpath==0.5.0 tomli==1.2.3 -tornado==6.4.2 +tornado==6.5.1 traitlets==5.1.1 typing_extensions==4.0.1 wcwidth==0.2.5 diff --git a/ncli/resttest-rules.json b/ncli/resttest-rules.json index 3f26bc8be4..f3849babbb 100644 --- a/ncli/resttest-rules.json +++ b/ncli/resttest-rules.json @@ -2431,6 +2431,59 @@ "body": [{"operator": "jstructcmps", "start": ["data"], "value": [{"index": "", "balance": ""}]}] } }, + { + "topics": ["beacon", "states_validator_identities", "slow", "post"], + "request": { + "method": "POST", + "body": { + "content-type": "application/json", + "data": "[]" + }, + "url": "/eth/v1/beacon/states/head/validator_identities", + "headers": {"Accept": "application/json"} + }, + "response": { + "status": {"operator": "equals", "value": "200"}, + "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}], + "body": [{"operator": "jstructcmps", "start": ["data"], "value": [{"index": "", "pubkey": ""}]}] + } + }, + { + "topics": ["beacon", "states_validator_identities", "post"], + "comment": "Correct hexadecimal values #1", + "request": { + "method": "POST", + "body": { + "content-type": "application/json", + "data": "[\"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"]" + }, + "url": "/eth/v1/beacon/states/head/validator_identities", + "headers": {"Accept": "application/json"} + }, + "response": { + "status": {"operator": "equals", "value": "200"}, + "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}], + "body": [{"operator": "jstructcmps", "start": ["data"], "value": [{"index": "", "pubkey": ""}]}] + } + }, + { + "topics": ["beacon", "states_validator_identities", "post"], + "comment": "Incorrect hexadecimal values #1", + "request": { + "method": "POST", + "body": { + "content-type": "application/json", + "data": "[\"0x\"]" + }, + "url": "/eth/v1/beacon/states/head/validator_identities", + "headers": {"Accept": "application/json"} + }, + "response": { + "status": {"operator": "equals", "value": "400"}, + "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}], + "body": [{"operator": "jstructcmpns", "value": {"code": 400, "message": ""}}] + } + }, { "topics": ["beacon", "states_committees"], "request": { @@ -2623,6 +2676,18 @@ "body": [{"operator": "jstructcmpns", "value": {"code": 400, "message": ""}}] } }, + { + "topics": ["beacon", "states_pending_consolidations"], + "request": { + "url": "/eth/v1/beacon/states/head/pending_consolidations", + "headers": {"Accept": "application/json"} + }, + "response": { + "status": {"operator": "equals", "value": "400"}, + "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}], + "body": [{"operator": "jstructcmpns", "value": {"code": 400, "message": ""}}] + } + }, { "topics": ["beacon", "states_pending_deposits"], "request": { @@ -4204,7 +4269,7 @@ "response": { "status": {"operator": "equals", "value": "200"}, "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}], - "body": [{"operator": "jstructcmps", "start": ["data"], "value": 
{"MAX_COMMITTEES_PER_SLOT":"","TARGET_COMMITTEE_SIZE":"","MAX_VALIDATORS_PER_COMMITTEE":"","SHUFFLE_ROUND_COUNT":"","HYSTERESIS_QUOTIENT":"","HYSTERESIS_DOWNWARD_MULTIPLIER":"","HYSTERESIS_UPWARD_MULTIPLIER":"","MIN_DEPOSIT_AMOUNT":"","MAX_EFFECTIVE_BALANCE":"","EFFECTIVE_BALANCE_INCREMENT":"","MIN_ATTESTATION_INCLUSION_DELAY":"","SLOTS_PER_EPOCH":"","MIN_SEED_LOOKAHEAD":"","MAX_SEED_LOOKAHEAD":"","EPOCHS_PER_ETH1_VOTING_PERIOD":"","SLOTS_PER_HISTORICAL_ROOT":"","MIN_EPOCHS_TO_INACTIVITY_PENALTY":"","EPOCHS_PER_HISTORICAL_VECTOR":"","EPOCHS_PER_SLASHINGS_VECTOR":"","HISTORICAL_ROOTS_LIMIT":"","VALIDATOR_REGISTRY_LIMIT":"","BASE_REWARD_FACTOR":"","WHISTLEBLOWER_REWARD_QUOTIENT":"","PROPOSER_REWARD_QUOTIENT":"","INACTIVITY_PENALTY_QUOTIENT":"","MIN_SLASHING_PENALTY_QUOTIENT":"","PROPORTIONAL_SLASHING_MULTIPLIER":"","MAX_PROPOSER_SLASHINGS":"","MAX_ATTESTER_SLASHINGS":"","MAX_ATTESTATIONS":"","MAX_DEPOSITS":"","MAX_VOLUNTARY_EXITS":"","INACTIVITY_PENALTY_QUOTIENT_ALTAIR":"","MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":"","PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":"","SYNC_COMMITTEE_SIZE":"","EPOCHS_PER_SYNC_COMMITTEE_PERIOD":"","MIN_SYNC_COMMITTEE_PARTICIPANTS":"","UPDATE_TIMEOUT":"","INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":"","MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":"","PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":"","MAX_BYTES_PER_TRANSACTION":"","MAX_TRANSACTIONS_PER_PAYLOAD":"","BYTES_PER_LOGS_BLOOM":"","MAX_EXTRA_DATA_BYTES":"","MAX_BLS_TO_EXECUTION_CHANGES":"","MAX_WITHDRAWALS_PER_PAYLOAD":"","MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":"","FIELD_ELEMENTS_PER_BLOB":"","MAX_BLOB_COMMITMENTS_PER_BLOCK":"","KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":"","PRESET_BASE":"","CONFIG_NAME":"","TERMINAL_TOTAL_DIFFICULTY":"","TERMINAL_BLOCK_HASH":"","TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":"","MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":"","MIN_GENESIS_TIME":"","GENESIS_FORK_VERSION":"","GENESIS_DELAY":"","ALTAIR_FORK_VERSION":"","ALTAIR_FORK_EPOCH":"","BELLATRIX_FORK_VERSION":"","BELLATRIX_FORK_EPOCH":"","CAPELLA_FORK_VERSION":"","CAPELLA_FORK_EPOCH":"","DENEB_FORK_VERSION":"","DENEB_FORK_EPOCH":"","ELECTRA_FORK_VERSION":"","ELECTRA_FORK_EPOCH":"","FULU_FORK_VERSION":"","FULU_FORK_EPOCH":"","SECONDS_PER_SLOT":"","SECONDS_PER_ETH1_BLOCK":"","MIN_VALIDATOR_WITHDRAWABILITY_DELAY":"","SHARD_COMMITTEE_PERIOD":"","ETH1_FOLLOW_DISTANCE":"","INACTIVITY_SCORE_BIAS":"","INACTIVITY_SCORE_RECOVERY_RATE":"","EJECTION_BALANCE":"","MIN_PER_EPOCH_CHURN_LIMIT":"","CHURN_LIMIT_QUOTIENT":"","MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":"","PROPOSER_SCORE_BOOST":"","REORG_HEAD_WEIGHT_THRESHOLD":"","REORG_PARENT_WEIGHT_THRESHOLD":"","REORG_MAX_EPOCHS_SINCE_FINALIZATION":"","DEPOSIT_CHAIN_ID":"","DEPOSIT_NETWORK_ID":"","DEPOSIT_CONTRACT_ADDRESS":"","MAX_PAYLOAD_SIZE":"","MAX_REQUEST_BLOCKS":"","EPOCHS_PER_SUBNET_SUBSCRIPTION":"","MIN_EPOCHS_FOR_BLOCK_REQUESTS":"","TTFB_TIMEOUT":"","RESP_TIMEOUT":"","ATTESTATION_PROPAGATION_SLOT_RANGE":"","MAXIMUM_GOSSIP_CLOCK_DISPARITY":"","MESSAGE_DOMAIN_INVALID_SNAPPY":"","MESSAGE_DOMAIN_VALID_SNAPPY":"","SUBNETS_PER_NODE":"","ATTESTATION_SUBNET_COUNT":"","ATTESTATION_SUBNET_EXTRA_BITS":"","ATTESTATION_SUBNET_PREFIX_BITS":"","MAX_REQUEST_BLOCKS_DENEB":"","MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":"","BLOB_SIDECAR_SUBNET_COUNT":"","MAX_BLOBS_PER_BLOCK":"","MAX_REQUEST_BLOB_SIDECARS":"","MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA":"","MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT":"","BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":"","MAX_BLOBS_PER_BLOCK_ELECTRA":"","MAX_REQUEST_BLOB_SIDECARS_ELECTRA":"","NUMBER_OF_COLUMNS":"","NUMBER_OF_CUSTODY_GRO
UPS":"","DATA_COLUMN_SIDECAR_SUBNET_COUNT":"","MAX_REQUEST_DATA_COLUMN_SIDECARS":"","SAMPLES_PER_SLOT":"","CUSTODY_REQUIREMENT":"","VALIDATOR_CUSTODY_REQUIREMENT":"","BALANCE_PER_ADDITIONAL_CUSTODY_GROUP":"","BLS_WITHDRAWAL_PREFIX":"","ETH1_ADDRESS_WITHDRAWAL_PREFIX":"","DOMAIN_BEACON_PROPOSER":"","DOMAIN_BEACON_ATTESTER":"","DOMAIN_RANDAO":"","DOMAIN_DEPOSIT":"","DOMAIN_VOLUNTARY_EXIT":"","DOMAIN_SELECTION_PROOF":"","DOMAIN_AGGREGATE_AND_PROOF":"","TIMELY_SOURCE_FLAG_INDEX":"","TIMELY_TARGET_FLAG_INDEX":"","TIMELY_HEAD_FLAG_INDEX":"","TIMELY_SOURCE_WEIGHT":"","TIMELY_TARGET_WEIGHT":"","TIMELY_HEAD_WEIGHT":"","SYNC_REWARD_WEIGHT":"","PROPOSER_WEIGHT":"","WEIGHT_DENOMINATOR":"","DOMAIN_SYNC_COMMITTEE":"","DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":"","DOMAIN_CONTRIBUTION_AND_PROOF":"","DOMAIN_BLS_TO_EXECUTION_CHANGE":"","TARGET_AGGREGATORS_PER_COMMITTEE":"","TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":"","SYNC_COMMITTEE_SUBNET_COUNT":"","UNSET_DEPOSIT_REQUESTS_START_INDEX":"","FULL_EXIT_REQUEST_AMOUNT":"","COMPOUNDING_WITHDRAWAL_PREFIX":"","DEPOSIT_REQUEST_TYPE":"","WITHDRAWAL_REQUEST_TYPE":"","CONSOLIDATION_REQUEST_TYPE":"","MIN_ACTIVATION_BALANCE":"","MAX_EFFECTIVE_BALANCE_ELECTRA":"","MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA":"","WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA":"","PENDING_DEPOSITS_LIMIT":"","PENDING_PARTIAL_WITHDRAWALS_LIMIT":"","PENDING_CONSOLIDATIONS_LIMIT":"","MAX_ATTESTER_SLASHINGS_ELECTRA":"","MAX_ATTESTATIONS_ELECTRA":"","MAX_DEPOSIT_REQUESTS_PER_PAYLOAD":"","MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD":"","MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD":"","MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP":"","MAX_PENDING_DEPOSITS_PER_EPOCH":""}}] + "body": [{"operator": "jstructcmps", "start": ["data"], "value": {"MAX_COMMITTEES_PER_SLOT":"","TARGET_COMMITTEE_SIZE":"","MAX_VALIDATORS_PER_COMMITTEE":"","SHUFFLE_ROUND_COUNT":"","HYSTERESIS_QUOTIENT":"","HYSTERESIS_DOWNWARD_MULTIPLIER":"","HYSTERESIS_UPWARD_MULTIPLIER":"","MIN_DEPOSIT_AMOUNT":"","MAX_EFFECTIVE_BALANCE":"","EFFECTIVE_BALANCE_INCREMENT":"","MIN_ATTESTATION_INCLUSION_DELAY":"","SLOTS_PER_EPOCH":"","MIN_SEED_LOOKAHEAD":"","MAX_SEED_LOOKAHEAD":"","EPOCHS_PER_ETH1_VOTING_PERIOD":"","SLOTS_PER_HISTORICAL_ROOT":"","MIN_EPOCHS_TO_INACTIVITY_PENALTY":"","EPOCHS_PER_HISTORICAL_VECTOR":"","EPOCHS_PER_SLASHINGS_VECTOR":"","HISTORICAL_ROOTS_LIMIT":"","VALIDATOR_REGISTRY_LIMIT":"","BASE_REWARD_FACTOR":"","WHISTLEBLOWER_REWARD_QUOTIENT":"","PROPOSER_REWARD_QUOTIENT":"","INACTIVITY_PENALTY_QUOTIENT":"","MIN_SLASHING_PENALTY_QUOTIENT":"","PROPORTIONAL_SLASHING_MULTIPLIER":"","MAX_PROPOSER_SLASHINGS":"","MAX_ATTESTER_SLASHINGS":"","MAX_ATTESTATIONS":"","MAX_DEPOSITS":"","MAX_VOLUNTARY_EXITS":"","INACTIVITY_PENALTY_QUOTIENT_ALTAIR":"","MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":"","PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":"","SYNC_COMMITTEE_SIZE":"","EPOCHS_PER_SYNC_COMMITTEE_PERIOD":"","MIN_SYNC_COMMITTEE_PARTICIPANTS":"","UPDATE_TIMEOUT":"","INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":"","MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":"","PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":"","MAX_BYTES_PER_TRANSACTION":"","MAX_TRANSACTIONS_PER_PAYLOAD":"","BYTES_PER_LOGS_BLOOM":"","MAX_EXTRA_DATA_BYTES":"","MAX_BLS_TO_EXECUTION_CHANGES":"","MAX_WITHDRAWALS_PER_PAYLOAD":"","MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":"","FIELD_ELEMENTS_PER_BLOB":"","MAX_BLOB_COMMITMENTS_PER_BLOCK":"","KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":"","PRESET_BASE":"","CONFIG_NAME":"","TERMINAL_TOTAL_DIFFICULTY":"","TERMINAL_BLOCK_HASH":"","TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":"","MIN_GENESIS_ACTIVE_VALIDATOR_CO
UNT":"","MIN_GENESIS_TIME":"","GENESIS_FORK_VERSION":"","GENESIS_DELAY":"","ALTAIR_FORK_VERSION":"","ALTAIR_FORK_EPOCH":"","BELLATRIX_FORK_VERSION":"","BELLATRIX_FORK_EPOCH":"","CAPELLA_FORK_VERSION":"","CAPELLA_FORK_EPOCH":"","DENEB_FORK_VERSION":"","DENEB_FORK_EPOCH":"","ELECTRA_FORK_VERSION":"","ELECTRA_FORK_EPOCH":"","FULU_FORK_VERSION":"","FULU_FORK_EPOCH":"","SECONDS_PER_SLOT":"","SECONDS_PER_ETH1_BLOCK":"","MIN_VALIDATOR_WITHDRAWABILITY_DELAY":"","SHARD_COMMITTEE_PERIOD":"","ETH1_FOLLOW_DISTANCE":"","INACTIVITY_SCORE_BIAS":"","INACTIVITY_SCORE_RECOVERY_RATE":"","EJECTION_BALANCE":"","MIN_PER_EPOCH_CHURN_LIMIT":"","CHURN_LIMIT_QUOTIENT":"","MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":"","PROPOSER_SCORE_BOOST":"","REORG_HEAD_WEIGHT_THRESHOLD":"","REORG_PARENT_WEIGHT_THRESHOLD":"","REORG_MAX_EPOCHS_SINCE_FINALIZATION":"","DEPOSIT_CHAIN_ID":"","DEPOSIT_NETWORK_ID":"","DEPOSIT_CONTRACT_ADDRESS":"","MAX_PAYLOAD_SIZE":"","MAX_REQUEST_BLOCKS":"","EPOCHS_PER_SUBNET_SUBSCRIPTION":"","MIN_EPOCHS_FOR_BLOCK_REQUESTS":"","ATTESTATION_PROPAGATION_SLOT_RANGE":"","MAXIMUM_GOSSIP_CLOCK_DISPARITY":"","MESSAGE_DOMAIN_INVALID_SNAPPY":"","MESSAGE_DOMAIN_VALID_SNAPPY":"","SUBNETS_PER_NODE":"","ATTESTATION_SUBNET_COUNT":"","ATTESTATION_SUBNET_EXTRA_BITS":"","ATTESTATION_SUBNET_PREFIX_BITS":"","MAX_REQUEST_BLOCKS_DENEB":"","MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":"","BLOB_SIDECAR_SUBNET_COUNT":"","MAX_BLOBS_PER_BLOCK":"","MAX_REQUEST_BLOB_SIDECARS":"","MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA":"","MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT":"","BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":"","MAX_BLOBS_PER_BLOCK_ELECTRA":"","MAX_REQUEST_BLOB_SIDECARS_ELECTRA":"","NUMBER_OF_COLUMNS":"","NUMBER_OF_CUSTODY_GROUPS":"","DATA_COLUMN_SIDECAR_SUBNET_COUNT":"","MAX_REQUEST_DATA_COLUMN_SIDECARS":"","SAMPLES_PER_SLOT":"","CUSTODY_REQUIREMENT":"","VALIDATOR_CUSTODY_REQUIREMENT":"","BALANCE_PER_ADDITIONAL_CUSTODY_GROUP":"","BLOB_SCHEDULE": [{"EPOCH": "*", "MAX_BLOBS_PER_BLOCK": "*"}],"MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS":"","BLS_WITHDRAWAL_PREFIX":"","ETH1_ADDRESS_WITHDRAWAL_PREFIX":"","DOMAIN_BEACON_PROPOSER":"","DOMAIN_BEACON_ATTESTER":"","DOMAIN_RANDAO":"","DOMAIN_DEPOSIT":"","DOMAIN_VOLUNTARY_EXIT":"","DOMAIN_SELECTION_PROOF":"","DOMAIN_AGGREGATE_AND_PROOF":"","TIMELY_SOURCE_FLAG_INDEX":"","TIMELY_TARGET_FLAG_INDEX":"","TIMELY_HEAD_FLAG_INDEX":"","TIMELY_SOURCE_WEIGHT":"","TIMELY_TARGET_WEIGHT":"","TIMELY_HEAD_WEIGHT":"","SYNC_REWARD_WEIGHT":"","PROPOSER_WEIGHT":"","WEIGHT_DENOMINATOR":"","DOMAIN_SYNC_COMMITTEE":"","DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":"","DOMAIN_CONTRIBUTION_AND_PROOF":"","DOMAIN_BLS_TO_EXECUTION_CHANGE":"","TARGET_AGGREGATORS_PER_COMMITTEE":"","TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":"","SYNC_COMMITTEE_SUBNET_COUNT":"","UNSET_DEPOSIT_REQUESTS_START_INDEX":"","FULL_EXIT_REQUEST_AMOUNT":"","COMPOUNDING_WITHDRAWAL_PREFIX":"","DEPOSIT_REQUEST_TYPE":"","WITHDRAWAL_REQUEST_TYPE":"","CONSOLIDATION_REQUEST_TYPE":"","MIN_ACTIVATION_BALANCE":"","MAX_EFFECTIVE_BALANCE_ELECTRA":"","MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA":"","WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA":"","PENDING_DEPOSITS_LIMIT":"","PENDING_PARTIAL_WITHDRAWALS_LIMIT":"","PENDING_CONSOLIDATIONS_LIMIT":"","MAX_ATTESTER_SLASHINGS_ELECTRA":"","MAX_ATTESTATIONS_ELECTRA":"","MAX_DEPOSIT_REQUESTS_PER_PAYLOAD":"","MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD":"","MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD":"","MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP":"","MAX_PENDING_DEPOSITS_PER_EPOCH":""}}] } }, { @@ -4229,6 +4294,30 @@ "status": {"operator": "equals", "value": "410"} } }, + { 
+ "topics": ["debug", "beacon_data_column_sidecars_blockid"], + "request": { + "url": "/eth/v1/debug/beacon/data_column_sidecars/head", + "headers": {"Accept": "application/json"} + }, + "response": {"status": {"operator": "equals", "value": "200"}} + }, + { + "topics": ["debug", "beacon_data_column_sidecars_blockid"], + "request": { + "url": "/eth/v1/debug/beacon/data_column_sidecars/finalized", + "headers": {"Accept": "application/json"} + }, + "response": {"status": {"operator": "equals", "value": "200"}} + }, + { + "topics": ["debug", "beacon_data_column_sidecars_blockid"], + "request": { + "url": "/eth/v1/debug/beacon/data_column_sidecars/0x0000000000000000000000000000000000000000000000000000000000000000", + "headers": {"Accept": "application/json"} + }, + "response": {"status": {"operator": "equals", "value": "404"}} + }, { "topics": ["debug", "beacon_states_head_slow", "slow"], "request": { diff --git a/nix/checksums.nix b/nix/checksums.nix index d79345d240..c9c9f3d452 100644 --- a/nix/checksums.nix +++ b/nix/checksums.nix @@ -6,7 +6,7 @@ let in pkgs.fetchFromGitHub { owner = "nim-lang"; repo = "checksums"; - rev = tools.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\"$" sourceFile; + rev = tools.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\".*$" sourceFile; # WARNING: Requires manual updates when Nim compiler version changes. - hash = "sha256-Bm5iJoT2kAvcTexiLMFBa9oU5gf7d4rWjo3OiN7obWQ="; + hash = "sha256-JZhWqn4SrAgNw/HLzBK0rrj3WzvJ3Tv1nuDMn83KoYY="; } diff --git a/nix/nimble.nix b/nix/nimble.nix index 39c5e0fff7..1eabe11dde 100644 --- a/nix/nimble.nix +++ b/nix/nimble.nix @@ -7,7 +7,7 @@ in pkgs.fetchFromGitHub { owner = "nim-lang"; repo = "nimble"; fetchSubmodules = true; - rev = tools.findKeyValue "^ +NimbleStableCommit = \"([a-f0-9]+)\".+" sourceFile; + rev = tools.findKeyValue "^ +NimbleStableCommit = \"([a-f0-9]+)\".*$" sourceFile; # WARNING: Requires manual updates when Nim compiler version changes. - hash = "sha256-Rz48sGUKZEAp+UySla+MlsOfsERekuGKw69Tm11fDz8="; + hash = "sha256-wgzFhModFkwB8st8F5vSkua7dITGGC2cjoDvgkRVZMs="; } diff --git a/nix/sat.nix b/nix/sat.nix index ca6403f68f..dc3d5df740 100644 --- a/nix/sat.nix +++ b/nix/sat.nix @@ -6,7 +6,7 @@ let in pkgs.fetchFromGitHub { owner = "nim-lang"; repo = "sat"; - rev = tools.findKeyValue "^ +SatStableCommit = \"([a-f0-9]+)\"$" sourceFile; + rev = tools.findKeyValue "^ +SatStableCommit = \"([a-f0-9]+)\".*$" sourceFile; # WARNING: Requires manual updates when Nim compiler version changes. hash = "sha256-JFrrSV+mehG0gP7NiQ8hYthL0cjh44HNbXfuxQNhq7c="; } diff --git a/nix/shell.nix b/nix/shell.nix index 8b5fe3c511..6c36f0f0bb 100644 --- a/nix/shell.nix +++ b/nix/shell.nix @@ -21,6 +21,7 @@ in pkgs.mkShell { openssl # for generating the JWT file lsof # for killing processes by port killall # for killing processes manually + procps # for killing processes with pkill curl # for working with the node APIs jq # for parsing beacon API for LC start openjdk # for running web3signer diff --git a/research/block_sim.nim b/research/block_sim.nim index 0f5af2e552..0a0cc6c6dc 100644 --- a/research/block_sim.nim +++ b/research/block_sim.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} # `block_sim` is a block, attestation, and sync committee simulator, whose task # is to run the beacon chain without considering the network or the wall clock. @@ -15,33 +15,32 @@ # nodes, just like a set of `beacon_node` instances would. import - confutils, chronicles, eth/db/kvstore_sqlite3, - chronos, chronos/timer, taskpools, - ../tests/testblockutil, + confutils, + chronicles, + eth/db/kvstore_sqlite3, + chronos, + chronos/timer, + taskpools, ../beacon_chain/spec/[forks, state_transition], ../beacon_chain/beacon_chain_db, ../beacon_chain/gossip_processing/[batch_validation, gossip_validation], ../beacon_chain/consensus_object_pools/[blockchain_dag, block_clearance], ./simutils -from std/random import Rand, gauss, initRand, rand +from std/random import initRand, rand from std/stats import RunningStat from ../beacon_chain/consensus_object_pools/attestation_pool import - AttestationPool, addAttestation, addForkChoice, - getElectraAttestationsForBlock, init, prune -from ../beacon_chain/consensus_object_pools/block_quarantine import - Quarantine, init + AttestationPool, addAttestation, addForkChoice, getAttestationsForBlock, init, prune +from ../beacon_chain/consensus_object_pools/block_quarantine import Quarantine, init from ../beacon_chain/consensus_object_pools/sync_committee_msg_pool import SyncCommitteeMsgPool, addContribution, addSyncCommitteeMessage, init, produceContribution, produceSyncAggregate, pruneData -from ../beacon_chain/el/eth1_chain import - Eth1Block, Eth1BlockNumber, Eth1BlockTimestamp, Eth1Chain, addBlock, - getBlockProposalData, init from ../beacon_chain/spec/beaconstate import - get_beacon_committee, get_beacon_proposer_index, - get_committee_count_per_slot, get_committee_indices + get_beacon_committee, get_beacon_proposer_index, get_committee_count_per_slot, + get_committee_indices from ../beacon_chain/spec/state_transition_block import process_block from ../tests/testbcutil import addHeadBlock +from ../tests/testblockutil import makeAttestationData, MockPrivKeys, `[]` type Timers = enum tBlock = "Process non-epoch slot with block" @@ -52,149 +51,57 @@ type Timers = enum tSyncCommittees = "Produce sync committee actions" tReplay = "Replay all produced blocks" -proc makeSimulationBlock( - cfg: RuntimeConfig, - state: var electra.HashedBeaconState, - proposer_index: ValidatorIndex, - randao_reveal: ValidatorSig, - eth1_data: Eth1Data, - graffiti: GraffitiBytes, - attestations: seq[electra.Attestation], - deposits: seq[Deposit], - exits: BeaconBlockValidatorChanges, - sync_aggregate: SyncAggregate, - execution_payload: electra.ExecutionPayloadForSigning, - bls_to_execution_changes: SignedBLSToExecutionChangeList, - rollback: RollbackHashedProc[electra.HashedBeaconState], - cache: var StateCache, - # TODO: - # `verificationFlags` is needed only in tests and can be - # removed if we don't use invalid signatures there - verificationFlags: UpdateFlags = {}): Result[electra.BeaconBlock, cstring] = - ## Create a block for the given state. The latest block applied to it will - ## be used for the parent_root value, and the slot will be take from - ## state.slot meaning process_slots must be called up to the slot for which - ## the block is to be created. - - # To create a block, we'll first apply a partial block to the state, skipping - # some validations. 
- - var blck = partialBeaconBlock( - cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, - attestations, deposits, exits, sync_aggregate, execution_payload, - default(ExecutionRequests)) - - let res = process_block( - cfg, state.data, blck.asSigVerified(), verificationFlags, cache) - - if res.isErr: - rollback(state) - return err(res.error()) - - state.root = hash_tree_root(state.data) - blck.state_root = state.root - - ok(blck) - -proc makeSimulationBlock( - cfg: RuntimeConfig, - state: var fulu.HashedBeaconState, - proposer_index: ValidatorIndex, - randao_reveal: ValidatorSig, - eth1_data: Eth1Data, - graffiti: GraffitiBytes, - attestations: seq[electra.Attestation], - deposits: seq[Deposit], - exits: BeaconBlockValidatorChanges, - sync_aggregate: SyncAggregate, - execution_payload: fulu.ExecutionPayloadForSigning, - bls_to_execution_changes: SignedBLSToExecutionChangeList, - rollback: RollbackHashedProc[fulu.HashedBeaconState], - cache: var StateCache, - # TODO: - # `verificationFlags` is needed only in tests and can be - # removed if we don't use invalid signatures there - verificationFlags: UpdateFlags = {}): Result[fulu.BeaconBlock, cstring] = - ## Create a block for the given state. The latest block applied to it will - ## be used for the parent_root value, and the slot will be take from - ## state.slot meaning process_slots must be called up to the slot for which - ## the block is to be created. - - # To create a block, we'll first apply a partial block to the state, skipping - # some validations. - - var blck = partialBeaconBlock( - cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, - attestations, deposits, exits, sync_aggregate, execution_payload, - default(ExecutionRequests)) - - let res = process_block( - cfg, state.data, blck.asSigVerified(), verificationFlags, cache) - - if res.isErr: - rollback(state) - return err(res.error()) - - state.root = hash_tree_root(state.data) - blck.state_root = state.root - - ok(blck) - # TODO confutils is an impenetrable black box. how can a help text be added here? -cli do(slots = SLOTS_PER_EPOCH * 7, - validators = SLOTS_PER_EPOCH * 500, - attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.82, - syncCommitteeRatio {.desc: "ratio of validators that perform sync committee actions in each round"} = 0.82, - blockRatio {.desc: "ratio of slots with blocks"} = 1.0, - replay = true): - let - (genesisState, depositTreeSnapshot) = loadGenesis(validators, false) - genesisTime = float getStateField(genesisState[], genesis_time) +cli do( + slots = SLOTS_PER_EPOCH * 7, + validators = SLOTS_PER_EPOCH * 500, + attesterRatio {.desc: "ratio of validators that attest in each round".} = 0.82, + syncCommitteeRatio {. + desc: "ratio of validators that perform sync committee actions in each round" + .} = 0.82, + blockRatio {.desc: "ratio of slots with blocks".} = 1.0, + replay = true +): + let genesisState = loadGenesis(validators, false) const cfg = getSimulationConfig() echo "Starting simulation..." 
- let db = BeaconChainDB.new("block_sim_db") - defer: db.close() + let db = BeaconChainDB.new("block_sim_db", cfg) + defer: + db.close() - ChainDAGRef.preInit(db, genesisState[]) - db.putDepositContractSnapshot(depositTreeSnapshot) + proc eager(): bool = + true + ChainDAGRef.preInit(db, genesisState[]) let rng = HmacDrbgContext.new() var - validatorMonitor = newClone(ValidatorMonitor.init()) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) dag = ChainDAGRef.init(cfg, db, validatorMonitor, {}) - eth1Chain = Eth1Chain.init(cfg, db, 0, default Eth2Digest) - merkleizer = DepositsMerkleizer.init(depositTreeSnapshot.depositContractState) taskpool = try: Taskpool.new() except Exception as exc: raiseAssert "Failed to initialize Taskpool: " & exc.msg verifier = BatchVerifier.init(rng, taskpool) - quarantine = newClone(Quarantine.init()) + quarantine = newClone(Quarantine.init(cfg)) attPool = AttestationPool.init(dag, quarantine) - batchCrypto = BatchCrypto.new( - rng, eager = func(): bool = true, - genesis_validators_root = dag.genesis_validators_root, - taskpool).expect("working batcher") + batchCrypto = BatchCrypto + .new(rng, eager, genesis_validators_root = dag.genesis_validators_root, taskpool) + .expect("working batcher") syncCommitteePool = newClone SyncCommitteeMsgPool.init(rng, cfg) timers: array[Timers, RunningStat] attesters: RunningStat r = initRand(1) tmpState = assignClone(dag.headState) - eth1Chain.addBlock Eth1Block( - number: Eth1BlockNumber 1, - timestamp: Eth1BlockTimestamp genesisTime) - let replayState = assignClone(dag.headState) proc handleAttestations(slot: Slot) = - let - attestationHead = dag.head.atSlot(slot) + let attestationHead = dag.head.atSlot(slot) - dag.withUpdatedState(tmpState[], attestationHead.toBlockSlotId.expect("not nil")) do: + dag.withUpdatedState(tmpState[], attestationHead.toBlockSlotId.expect("not nil")): let fork = getStateField(updatedState, fork) genesis_validators_root = getStateField(updatedState, genesis_validators_root) @@ -202,58 +109,72 @@ cli do(slots = SLOTS_PER_EPOCH * 7, get_committee_count_per_slot(updatedState, slot.epoch, cache) for committee_index in get_committee_indices(committees_per_slot): - let committee = get_beacon_committee( - updatedState, slot, committee_index, cache) + let committee = get_beacon_committee(updatedState, slot, committee_index, cache) for index_in_committee, validator_index in committee: if rand(r, 1.0) <= attesterRatio: if tmpState.kind < ConsensusFork.Electra: let - data = makeAttestationData( - updatedState, slot, committee_index, bid.root) - sig = - get_attestation_signature( - fork, genesis_validators_root, data, - MockPrivKeys[validator_index]) - attestation = phase0.Attestation.init( - [uint64 index_in_committee], committee.len, data, - sig.toValidatorSig()).expect("valid data") + data = + makeAttestationData(updatedState, slot, committee_index, bid.root) + sig = get_attestation_signature( + fork, genesis_validators_root, data, MockPrivKeys[validator_index] + ) + attestation = phase0.Attestation + .init( + [uint64 index_in_committee], + committee.len, + data, + sig.toValidatorSig(), + ) + .expect("valid data") attPool.addAttestation( - attestation, [validator_index], attestation.aggregation_bits.len, - -1, sig, data.slot.start_beacon_time) + attestation, + [validator_index], + attestation.aggregation_bits.len, + -1, + sig, + data.slot.start_beacon_time, + ) else: - var data = makeAttestationData( - updatedState, slot, committee_index, bid.root) - data.index = 0 # fix in makeAttestationData 
for Electra + var data = + makeAttestationData(updatedState, slot, committee_index, bid.root) + data.index = 0 # fix in makeAttestationData for Electra let sig = get_attestation_signature( - fork, genesis_validators_root, data, - MockPrivKeys[validator_index]) + fork, genesis_validators_root, data, MockPrivKeys[validator_index] + ) attestation = SingleAttestation( committee_index: committee_index.distinctBase, - attester_index: validator_index.uint64, data: data, - signature: sig.toValidatorSig()) + attester_index: validator_index.uint64, + data: data, + signature: sig.toValidatorSig(), + ) attPool.addAttestation( - attestation, [validator_index], committee.len, - index_in_committee, sig, data.slot.start_beacon_time) + attestation, + [validator_index], + committee.len, + index_in_committee, + sig, + data.slot.start_beacon_time, + ) do: raiseAssert "withUpdatedState failed" proc handleSyncCommitteeActions(slot: Slot) = - type - Aggregator = object - subcommitteeIdx: SyncSubcommitteeIndex - validatorIdx: ValidatorIndex - selectionProof: ValidatorSig + type Aggregator = object + subcommitteeIdx: SyncSubcommitteeIndex + validatorIdx: ValidatorIndex + selectionProof: ValidatorSig let syncCommittee = @(dag.syncCommitteeParticipants(slot + 1)) genesis_validators_root = dag.genesis_validators_root fork = dag.forkAtEpoch(slot.epoch) - messagesTime = slot.attestation_deadline() - contributionsTime = slot.sync_contribution_deadline() + messagesTime = slot.attestation_deadline(dag.cfg.time) + contributionsTime = slot.sync_contribution_deadline(dag.cfg.time) var aggregators: seq[Aggregator] @@ -265,126 +186,110 @@ cli do(slots = SLOTS_PER_EPOCH * 7, let validatorPrivKey = MockPrivKeys[validatorIdx] signature = get_sync_committee_message_signature( - fork, genesis_validators_root, - slot, dag.head.root, validatorPrivKey) + fork, genesis_validators_root, slot, dag.head.root, validatorPrivKey + ) msg = SyncCommitteeMessage( slot: slot, beacon_block_root: dag.head.root, validator_index: uint64 validatorIdx, - signature: signature.toValidatorSig) + signature: signature.toValidatorSig, + ) let res = waitFor noCancel dag.validateSyncCommitteeMessage( - quarantine, - batchCrypto, - syncCommitteePool, - msg, - subcommitteeIdx, - messagesTime, - false) + quarantine, batchCrypto, syncCommitteePool, msg, subcommitteeIdx, + messagesTime, false, + ) doAssert res.isOk let (bid, cookedSig, positions) = res.get() syncCommitteePool[].addSyncCommitteeMessage( - msg.slot, - bid, - msg.validator_index, - cookedSig, - subcommitteeIdx, - positions) + msg.slot, bid, msg.validator_index, cookedSig, subcommitteeIdx, positions + ) - let - selectionProofSig = get_sync_committee_selection_proof( - fork, genesis_validators_root, slot, subcommitteeIdx, - validatorPrivKey).toValidatorSig + let selectionProofSig = get_sync_committee_selection_proof( + fork, genesis_validators_root, slot, subcommitteeIdx, validatorPrivKey + ).toValidatorSig if is_sync_committee_aggregator(selectionProofSig): aggregators.add Aggregator( subcommitteeIdx: subcommitteeIdx, validatorIdx: validatorIdx, - selectionProof: selectionProofSig) + selectionProof: selectionProofSig, + ) for aggregator in aggregators: var contribution: SyncCommitteeContribution let contributionWasProduced = syncCommitteePool[].produceContribution( - slot, dag.head.bid, aggregator.subcommitteeIdx, contribution) + slot, dag.head.bid, aggregator.subcommitteeIdx, contribution + ) if contributionWasProduced: let contributionAndProof = ContributionAndProof( aggregator_index: uint64 
aggregator.validatorIdx, contribution: contribution, - selection_proof: aggregator.selectionProof) + selection_proof: aggregator.selectionProof, + ) validatorPrivKey = MockPrivKeys[aggregator.validatorIdx] signedContributionAndProof = SignedContributionAndProof( message: contributionAndProof, signature: get_contribution_and_proof_signature( - fork, genesis_validators_root, contributionAndProof, - validatorPrivKey).toValidatorSig) + fork, genesis_validators_root, contributionAndProof, validatorPrivKey + ).toValidatorSig, + ) res = waitFor noCancel dag.validateContribution( - quarantine, - batchCrypto, - syncCommitteePool, - signedContributionAndProof, - contributionsTime, - false) + quarantine, batchCrypto, syncCommitteePool, signedContributionAndProof, + contributionsTime, false, + ) if res.isOk(): let (bid, sig, _) = res.get - syncCommitteePool[].addContribution( - signedContributionAndProof, bid, sig) + syncCommitteePool[].addContribution(signedContributionAndProof, bid, sig) else: # We ignore duplicates / already-covered contributions doAssert res.error()[0] == ValidationResult.Ignore - proc getNewBlock[T]( - state: var ForkedHashedBeaconState, slot: Slot, cache: var StateCache): T = + let blockRatio = blockRatio # can't find in proposeBlock otherwise (?) + proc proposeBlock( + consensusFork: static ConsensusFork, + state: var ForkyHashedBeaconState, + cache: var StateCache, + ) = + if rand(r, 1.0) > blockRatio: + return + let - finalizedEpochRef = dag.getFinalizedEpochRef() - proposerIdx = get_beacon_proposer_index( - state, cache, getStateField(state, slot)).get() + slot = state.data.slot + proposerIdx = get_beacon_proposer_index(state.data, cache, slot).get() privKey = MockPrivKeys[proposerIdx] - eth1ProposalData = eth1Chain.getBlockProposalData( - state, - finalizedEpochRef.eth1_data, - finalizedEpochRef.eth1_deposit_index) - sync_aggregate = - syncCommitteePool[].produceSyncAggregate(dag.head.bid, slot) - hashedState = - when T is electra.SignedBeaconBlock: - addr state.electraData - elif T is fulu.SignedBeaconBlock: - addr state.fuluData - else: - static: doAssert false - message = makeSimulationBlock( - cfg, - hashedState[], - proposerIdx, - get_epoch_signature( - getStateField(state, fork), - getStateField(state, genesis_validators_root), - slot.epoch, privKey).toValidatorSig(), - eth1ProposalData.vote, - default(GraffitiBytes), - attPool.getElectraAttestationsForBlock(state, cache), - eth1ProposalData.deposits, - BeaconBlockValidatorChanges(), - sync_aggregate, - (when T is electra.SignedBeaconBlock: - default(electra.ExecutionPayloadForSigning) - elif T is fulu.SignedBeaconBlock: - default(fulu.ExecutionPayloadForSigning) - else: - static: doAssert false), - static(default(SignedBLSToExecutionChangeList)), - noRollback, - cache) - - var newBlock = T(message: message.get()) + randao_reveal = get_epoch_signature( + state.data.fork, state.data.genesis_validators_root, slot.epoch, privKey + ) + sync_aggregate = syncCommitteePool[].produceSyncAggregate(dag.head.bid, slot) + + message = makeBeaconBlock( + cfg, + consensusFork, + state, + cache, + proposerIdx, + randao_reveal.toValidatorSig(), + default(Eth1Data), + default(GraffitiBytes), + attPool.getAttestationsForBlock(state, cache), + default(seq[Deposit]), + default(BeaconBlockValidatorChanges), + sync_aggregate, + default(consensusFork.ExecutionPayloadForSigning), + {}, + ) + .expect("block") + + var newBlock = consensusFork.SignedBeaconBlock(message: message) let blockRoot = withTimerRet(timers[tHashBlock]): 
hash_tree_root(newBlock.message) @@ -392,105 +297,48 @@ cli do(slots = SLOTS_PER_EPOCH * 7, # Careful, state no longer valid after here because of the await.. newBlock.signature = withTimerRet(timers[tSignBlock]): get_block_signature( - getStateField(state, fork), - getStateField(state, genesis_validators_root), - newBlock.message.slot, - blockRoot, privKey).toValidatorSig() - - newBlock - - # TODO when withUpdatedState's state template doesn't conflict with chronos's - # HTTP server's state function, combine all proposeForkBlock functions into a - # single generic function. Until https://github.com/nim-lang/Nim/issues/20811 - # is fixed, that generic function must take `blockRatio` as a parameter. - proc proposeElectraBlock(slot: Slot) = - if rand(r, 1.0) > blockRatio: - return - - dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do: - let - newBlock = getNewBlock[electra.SignedBeaconBlock](updatedState, slot, cache) - added = dag.addHeadBlock(verifier, newBlock) do ( - blckRef: BlockRef, signedBlock: electra.TrustedSignedBeaconBlock, - epochRef: EpochRef, unrealized: FinalityCheckpoints): - # Callback add to fork choice if valid - attPool.addForkChoice( - epochRef, blckRef, unrealized, signedBlock.message, - blckRef.slot.start_beacon_time) - - dag.updateHead(added[], quarantine[], []) - if dag.needStateCachesAndForkChoicePruning(): - dag.pruneStateCachesDAG() - attPool.prune() - do: - raiseAssert "withUpdatedState failed" - - proc proposeFuluBlock(slot: Slot) = - if rand(r, 1.0) > blockRatio: - return - - dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do: - let - newBlock = getNewBlock[fulu.SignedBeaconBlock](updatedState, slot, cache) - added = dag.addHeadBlock(verifier, newBlock) do ( - blckRef: BlockRef, signedBlock: fulu.TrustedSignedBeaconBlock, - epochRef: EpochRef, unrealized: FinalityCheckpoints): - # Callback add to fork choice if valid - attPool.addForkChoice( - epochRef, blckRef, unrealized, signedBlock.message, - blckRef.slot.start_beacon_time) - - dag.updateHead(added[], quarantine[], []) - if dag.needStateCachesAndForkChoicePruning(): - dag.pruneStateCachesDAG() - attPool.prune() - do: - raiseAssert "withUpdatedState failed" - - var - lastEth1BlockAt = genesisTime - eth1BlockNum = 1000 - - for i in 0.. now: - break - - inc eth1BlockNum - var eth1Block = Eth1Block( - hash: makeFakeHash(eth1BlockNum), - number: Eth1BlockNumber eth1BlockNum, - timestamp: Eth1BlockTimestamp nextBlockTime) - - let newDeposits = int clamp(gauss(r, 5.0, 8.0), 0.0, 1000.0) - for i in 0 ..< newDeposits: - let validatorIdx = merkleizer.getChunkCount.int - let d = makeDeposit(validatorIdx, {skipBlsValidation}) - eth1Block.deposits.add d - merkleizer.addChunk hash_tree_root(d).data - - eth1Block.depositRoot = merkleizer.getDepositsRoot - eth1Block.depositCount = merkleizer.getChunkCount - - eth1Chain.addBlock eth1Block - lastEth1BlockAt = nextBlockTime + t = if slot.is_epoch: tEpoch else: tBlock if blockRatio > 0.0: withTimer(timers[t]): - case dag.cfg.consensusForkAtEpoch(slot.epoch) - of ConsensusFork.Fulu: proposeFuluBlock(slot) - of ConsensusFork.Electra: proposeElectraBlock(slot) - of ConsensusFork.Phase0 .. 
ConsensusFork.Deneb: - doAssert false + let bsi = dag.getBlockIdAtSlot(slot).expect("block") + var cache = StateCache() + doAssert dag.updateState(tmpState[], bsi, false, cache, dag.updateFlags) + withState(tmpState[]): + when consensusFork >= ConsensusFork.Bellatrix: + proposeBlock(consensusFork, forkyState, cache) + else: + raiseAssert "Unsupported fork " & $consensusFork + if attesterRatio > 0.0: withTimer(timers[tAttest]): handleAttestations(slot) @@ -517,9 +365,13 @@ cli do(slots = SLOTS_PER_EPOCH * 7, withTimer(timers[tReplay]): var cache = StateCache() doAssert dag.updateState( - replayState[], dag.getBlockIdAtSlot(Slot(slots)).expect("block"), - false, cache, dag.updateFlags) + replayState[], + dag.getBlockIdAtSlot(Slot(slots)).expect("block"), + false, + cache, + dag.updateFlags, + ) echo "Done!" - printTimers(dag.headState, attesters, true, timers) \ No newline at end of file + printTimers(dag.headState, attesters, true, timers) diff --git a/research/fakeee.nim b/research/fakeee.nim index d6280a07ec..b4d2509277 100644 --- a/research/fakeee.nim +++ b/research/fakeee.nim @@ -36,7 +36,7 @@ proc setupEngineAPI*(server: RpcServer) = ) # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/paris.md#engine_getpayloadv1 - server.rpc("engine_getPayloadV1") do(payloadId: PayloadID) -> ExecutionPayloadV1: + server.rpc("engine_getPayloadV1") do(payloadId: Bytes8) -> ExecutionPayloadV1: info "engine_getPayloadV1", id = payloadId.toHex diff --git a/research/mev_mock.nim b/research/mev_mock.nim index be9840babc..5dfd4de406 100644 --- a/research/mev_mock.nim +++ b/research/mev_mock.nim @@ -1,11 +1,11 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} import confutils, presto, @@ -50,7 +50,8 @@ proc getParentBlock(restClient: RestClientRef): return Opt.none ParentHeaderInfo withBlck(resp): - when consensusFork >= ConsensusFork.Capella: + when consensusFork >= ConsensusFork.Capella and + consensusFork < ConsensusFork.Gloas: return Opt.some ParentHeaderInfo( block_number: forkyBlck.message.body.execution_payload.block_number, timestamp: forkyBlck.message.body.execution_payload.timestamp) diff --git a/research/simutils.nim b/research/simutils.nim index fd85ae6a32..bbc451522e 100644 --- a/research/simutils.nim +++ b/research/simutils.nim @@ -16,7 +16,6 @@ from std/stats import RunningStat, mean, push, standardDeviationS from std/strformat import `&` from std/times import cpuTime from ../beacon_chain/filepath import secureCreatePath -from ../beacon_chain/spec/deposit_snapshots import DepositContractSnapshot template withTimer*(stats: var RunningStat, body: untyped) = # TODO unify timing somehow @@ -66,8 +65,7 @@ func getSimulationConfig*(): RuntimeConfig {.compileTime.} = cfg proc loadGenesis*( - validators: Natural, - validate: bool): (ref ForkedHashedBeaconState, DepositContractSnapshot) = + validators: Natural, validate: bool): ref ForkedHashedBeaconState = const genesisDir = "test_sim" if (let res = secureCreatePath(genesisDir); res.isErr): fatal "Could not create directory", @@ -110,19 +108,7 @@ proc loadGenesis*( info "Loaded genesis file", fileName = genesisFn # TODO check that the private keys are EF test keys - - let contractSnapshot = - try: - SSZ.loadFile(contractSnapshotFn, DepositContractSnapshot) - except IOError as exc: - fatal "Deposit contract snapshot failed to load", - fileName = contractSnapshotFn, exc = exc.msg - quit 1 - except SerializationError as exc: - fatal "Deposit contract snapshot malformed", - fileName = contractSnapshotFn, exc = exc.msg - quit 1 - (res, contractSnapshot) + res else: warn "Genesis file not found, making one up", hint = "use nimbus_beacon_node createTestnet to make one" @@ -133,18 +119,12 @@ proc loadGenesis*( deposits = makeInitialDeposits(validators.uint64, flags) info "Generating Genesis..." 
- var merkleizer = init DepositsMerkleizer - for d in deposits: - merkleizer.addChunk hash_tree_root(d).data - let contractSnapshot = DepositContractSnapshot( - depositContractState: merkleizer.toDepositContractState) - let res = (ref ForkedHashedBeaconState)( kind: ConsensusFork.Electra, electraData: electra.HashedBeaconState( data: initialize_beacon_state_from_eth1( - cfg, ZERO_HASH, 0, deposits, - default(electra.ExecutionPayloadHeader), {skipBlsValidation}))) + cfg, ConsensusFork.Electra, ZERO_HASH, 0, deposits, + default(deneb.ExecutionPayloadHeader), {skipBlsValidation}))) info "Saving genesis file", fileName = genesisFn try: @@ -153,15 +133,8 @@ proc loadGenesis*( fatal "Genesis file failed to save", fileName = genesisFn, exc = exc.msg quit 1 - info "Saving deposit contract snapshot", fileName = contractSnapshotFn - try: - SSZ.saveFile(contractSnapshotFn, contractSnapshot) - except IOError as exc: - fatal "Deposit contract snapshot failed to save", - fileName = contractSnapshotFn, exc = exc.msg - quit 1 - (res, contractSnapshot) + res proc printTimers*[Timers: enum]( validate: bool, diff --git a/research/wss_sim.nim b/research/wss_sim.nim index 72fddb6b75..459e1d408b 100644 --- a/research/wss_sim.nim +++ b/research/wss_sim.nim @@ -1,18 +1,18 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} # `wss_sim` loads a state and a set of validator keys, then simulates a # beacon chain running with the given validators producing blocks # and attesting when they're supposed to. 
import - std/[strformat, sequtils, tables], + std/[strformat, tables], chronicles, confutils, stew/io2, @@ -27,6 +27,7 @@ import signatures, state_transition], ../beacon_chain/validators/[keystore_management, validator_pool] +from std/sequtils import filterIt, toSeq from ../beacon_chain/gossip_processing/block_processor import newExecutionPayload @@ -94,13 +95,7 @@ cli do(validatorsDir: string, secretsDir: string, fatal "failed to read EL URL", err = finalUrl.error quit QuitFailure finalUrl.get - elManager = ELManager.new( - cfg, - metadata.depositContractBlock, - metadata.depositContractBlockHash, - db = nil, - @[engineApiUrl], - metadata.eth1Network) + elManager = ELManager.new(@[engineApiUrl], metadata.eth1Network) feeRecipient = try: Address.fromHex(suggestedFeeRecipient) @@ -119,7 +114,8 @@ cli do(validatorsDir: string, secretsDir: string, # The EL may otherwise refuse to produce new heads elManager.start(syncChain = false) withBlck(blck[]): - when consensusFork >= ConsensusFork.Bellatrix: + debugGloasComment "" + when consensusFork >= ConsensusFork.Bellatrix and consensusFork != ConsensusFork.Gloas: if forkyBlck.message.is_execution_block: template payload(): auto = forkyBlck.message.body.execution_payload if not payload.block_hash.isZero: @@ -142,8 +138,10 @@ cli do(validatorsDir: string, secretsDir: string, break var - clock = BeaconClock.init(getStateField(state[], genesis_time)).valueOr: - error "Invalid genesis time in state" + genesisTime = getStateField(state[], genesis_time) + beaconClock = BeaconClock.init(cfg.time, genesisTime).valueOr: + error "Invalid genesis time in state", + genesis_time = genesisTime, seconds_per_slot = cfg.time.SECONDS_PER_SLOT quit 1 validators: Table[ValidatorIndex, ValidatorPrivKey] validatorKeys: Table[ValidatorPubKey, ValidatorPrivKey] @@ -186,7 +184,7 @@ cli do(validatorsDir: string, secretsDir: string, slot = getStateField(state[], slot) + 1 process_slots(cfg, state[], slot, cache, info, {}).expect("works") - if start_beacon_time(slot) > clock.now(): + if start_beacon_time(slot) > beaconClock.now(): notice "Ran out of time", epoch = slot.epoch break @@ -246,91 +244,94 @@ cli do(validatorsDir: string, secretsDir: string, validators.getOrDefault( proposer, default(ValidatorPrivKey))).toValidatorSig() withState(state[]): - let - payload = - when consensusFork >= ConsensusFork.Bellatrix: - let - executionHead = - forkyState.data.latest_execution_payload_header.block_hash - withdrawals = - when consensusFork >= ConsensusFork.Capella: - get_expected_withdrawals(forkyState.data) - else: - newSeq[capella.Withdrawal]() - - var pl: consensusFork.ExecutionPayloadForSigning - while true: - pl = (waitFor noCancel elManager.getPayload( - consensusFork.ExecutionPayloadForSigning, - consensusHead = forkyState.latest_block_root, - headBlock = executionHead, - safeBlock = executionHead, - finalizedBlock = ZERO_HASH, - timestamp = compute_timestamp_at_slot( - forkyState.data, forkyState.data.slot), - randomData = get_randao_mix( - forkyState.data, get_current_epoch(forkyState.data)), - suggestedFeeRecipient = feeRecipient, - withdrawals = withdrawals)).valueOr: - waitFor noCancel sleepAsync(chronos.seconds(2)) - continue - break - pl - else: - default(bellatrix.ExecutionPayloadForSigning) - message = makeBeaconBlock( - cfg, - state[], - proposer, - randao_reveal, - forkyState.data.eth1_data, - graffitiValue, - when typeof(payload).kind >= ConsensusFork.Electra: - default(seq[electra.Attestation]) - else: - blockAggregates, - @[], - BeaconBlockValidatorChanges(), 
- syncAggregate, - payload, - noRollback, - cache).get() - - blockRoot = message.forky(consensusFork).hash_tree_root() - let - proposerPrivkey = - try: - validators[proposer] - except KeyError as exc: - raiseAssert "Proposer key not available: " & exc.msg - signedBlock = consensusFork.SignedBeaconBlock( - message: message.forky(consensusFork), - root: blockRoot, - signature: get_block_signature( - fork, genesis_validators_root, slot, blockRoot, - proposerPrivkey).toValidatorSig()) - - dump(".", signedBlock) - when consensusFork >= ConsensusFork.Deneb: - let blobs = signedBlock.create_blob_sidecars( - payload.blobsBundle.proofs, payload.blobsBundle.blobs) - for blob in blobs: - dump(".", blob) - - notice "Block proposed", message, blockRoot - - when consensusFork >= ConsensusFork.Bellatrix: - while true: - let status = waitFor noCancel elManager - .newExecutionPayload(signedBlock.message) - if status.isNone: - waitFor noCancel sleepAsync(chronos.seconds(2)) - continue - doAssert status.get in [ - PayloadExecutionStatus.valid, - PayloadExecutionStatus.accepted, - PayloadExecutionStatus.syncing] - break + debugGloasComment "" + when consensusFork != ConsensusFork.Gloas: + let + payload = + when consensusFork >= ConsensusFork.Bellatrix: + let + executionHead = + forkyState.data.latest_execution_payload_header.block_hash + withdrawals = + when consensusFork >= ConsensusFork.Capella: + get_expected_withdrawals(forkyState.data) + else: + newSeq[capella.Withdrawal]() + + var pl: consensusFork.ExecutionPayloadForSigning + while true: + pl = (waitFor noCancel elManager.getPayload( + consensusFork.ExecutionPayloadForSigning, + consensusHead = forkyState.latest_block_root, + headBlock = executionHead, + safeBlock = executionHead, + finalizedBlock = ZERO_HASH, + timestamp = compute_timestamp_at_slot( + forkyState.data, forkyState.data.slot), + prevRandao = get_randao_mix( + forkyState.data, get_current_epoch(forkyState.data)), + suggestedFeeRecipient = feeRecipient, + withdrawals = withdrawals)).valueOr: + waitFor noCancel sleepAsync(chronos.seconds(2)) + continue + break + pl + else: + default(bellatrix.ExecutionPayloadForSigning) + message = makeBeaconBlock( + cfg, + consensusFork, + forkyState, + cache, + proposer, + randao_reveal, + forkyState.data.eth1_data, + graffitiValue, + when consensusFork >= ConsensusFork.Electra: + default(seq[electra.Attestation]) + else: + blockAggregates, + @[], + BeaconBlockValidatorChanges(), + syncAggregate, + payload, + {}).expect("block") + + blockRoot = message.hash_tree_root() + let + proposerPrivkey = + try: + validators[proposer] + except KeyError as exc: + raiseAssert "Proposer key not available: " & exc.msg + signedBlock = consensusFork.SignedBeaconBlock( + message: message, + root: blockRoot, + signature: get_block_signature( + fork, genesis_validators_root, slot, blockRoot, + proposerPrivkey).toValidatorSig()) + + dump(".", signedBlock) + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + let blobs = signedBlock.create_blob_sidecars( + payload.blobsBundle.proofs, payload.blobsBundle.blobs) + for blob in blobs: + dump(".", blob) + + notice "Block proposed", message, blockRoot + + when consensusFork >= ConsensusFork.Bellatrix: + while true: + let status = waitFor noCancel elManager + .newExecutionPayload(signedBlock.message) + if status.isNone: + waitFor noCancel sleepAsync(chronos.seconds(2)) + continue + doAssert status.get in [ + PayloadExecutionStatus.valid, + PayloadExecutionStatus.accepted, + PayloadExecutionStatus.syncing] + break 
aggregates.setLen(0) @@ -398,4 +399,4 @@ cli do(validatorsDir: string, secretsDir: string, syncAggregate.sync_committee_bits.setBit(i) if inited: - syncAggregate.sync_committee_signature = finish(agg).toValidatorSig() \ No newline at end of file + syncAggregate.sync_committee_signature = finish(agg).toValidatorSig() diff --git a/scripts/execution_genesis.json.template b/scripts/execution_genesis.json.template index bda9859168..2d6f8b9805 100644 --- a/scripts/execution_genesis.json.template +++ b/scripts/execution_genesis.json.template @@ -32,6 +32,7 @@ } }, "pragueTime":PRAGUE_FORK_TIME, + "osakaTime":OSAKA_FORK_TIME, "mergeForkBlock":0, "mergeNetsplitBlock":0, "terminalTotalDifficulty":0, diff --git a/scripts/launch_local_testnet.sh b/scripts/launch_local_testnet.sh index c423eba69b..e69030d92f 100755 --- a/scripts/launch_local_testnet.sh +++ b/scripts/launch_local_testnet.sh @@ -49,7 +49,7 @@ CURL_BINARY="$(command -v curl)" || { echo "Curl not installed. Aborting."; exit JQ_BINARY="$(command -v jq)" || { echo "jq not installed. Aborting."; exit 1; } OPTS="ht:n:d:g" -LONGOPTS="help,preset:,nodes:,data-dir:,remote-validators-count:,threshold:,signer-nodes:,signer-type:,with-ganache,stop-at-epoch:,disable-htop,use-vc:,disable-vc,enable-payload-builder,log-level:,base-port:,base-rest-port:,base-metrics-port:,base-vc-metrics-port:,base-vc-keymanager-port:,base-remote-signer-port:,base-remote-signer-metrics-port:,base-el-net-port:,base-el-rpc-port:,base-el-ws-port:,base-el-auth-rpc-port:,el-port-offset:,reuse-existing-data-dir,reuse-binaries,timeout:,kill-old-processes,eth2-docker-image:,lighthouse-vc-nodes:,run-geth,dl-geth,dl-nimbus-eth1,dl-nimbus-eth2,light-clients:,run-nimbus-eth1,verbose,deneb-fork-epoch:,electra-fork-epoch:" +LONGOPTS="help,preset:,nodes:,data-dir:,remote-validators-count:,threshold:,signer-nodes:,signer-type:,with-ganache,stop-at-epoch:,disable-htop,use-vc:,disable-vc,enable-payload-builder,log-level:,base-port:,base-rest-port:,base-metrics-port:,base-vc-metrics-port:,base-vc-keymanager-port:,base-remote-signer-port:,base-remote-signer-metrics-port:,base-el-net-port:,base-el-rpc-port:,base-el-ws-port:,base-el-auth-rpc-port:,el-port-offset:,reuse-existing-data-dir,reuse-binaries,timeout:,kill-old-processes,eth2-docker-image:,lighthouse-vc-nodes:,run-geth,dl-geth,dl-nimbus-eth1,dl-nimbus-eth2,light-clients:,run-nimbus-eth1,verbose,electra-fork-epoch:,fulu-fork-epoch:" # default values BINARIES="" @@ -99,8 +99,8 @@ DL_GETH="0" : ${NIMBUS_ETH2_REVISION:=6c0d756d} : ${BEACON_NODE_COMMAND:="./build/nimbus_beacon_node$EXE_EXTENSION"} -: ${DENEB_FORK_EPOCH:=0} -: ${ELECTRA_FORK_EPOCH:=500} +: ${ELECTRA_FORK_EPOCH:=0} +: ${FULU_FORK_EPOCH:=100000} #NIMBUS EL VARS RUN_NIMBUS_ETH1="0" @@ -207,14 +207,14 @@ while true; do CONST_PRESET="$2" shift 2 ;; - --deneb-fork-epoch) - DENEB_FORK_EPOCH="$2" - shift 2 - ;; --electra-fork-epoch) ELECTRA_FORK_EPOCH="$2" shift 2 ;; + --fulu-fork-epoch) + FULU_FORK_EPOCH="$2" + shift 2 + ;; --stop-at-epoch) STOP_AT_EPOCH=$2 STOP_AT_EPOCH_FLAG="--debug-stop-at-epoch=$2" @@ -812,9 +812,8 @@ fi GENESIS_OFFSET=60 # See `Scheduling first slot action` > `startTime` NOW_UNIX_TIMESTAMP=$(date +%s) GENESIS_TIME=$((NOW_UNIX_TIMESTAMP + GENESIS_OFFSET)) -SHANGHAI_FORK_TIME=${GENESIS_TIME} -CANCUN_FORK_TIME=$((GENESIS_TIME + SECONDS_PER_SLOT * SLOTS_PER_EPOCH * DENEB_FORK_EPOCH)) PRAGUE_FORK_TIME=$((GENESIS_TIME + SECONDS_PER_SLOT * SLOTS_PER_EPOCH * ELECTRA_FORK_EPOCH)) +OSAKA_FORK_TIME=$((GENESIS_TIME + SECONDS_PER_SLOT * SLOTS_PER_EPOCH * FULU_FORK_EPOCH)) 
EXECUTION_GENESIS_JSON="${DATA_DIR}/execution_genesis.json" EXECUTION_GENESIS_BLOCK_JSON="${DATA_DIR}/execution_genesis_block.json" @@ -823,7 +822,7 @@ EXECUTION_GENESIS_BLOCK_JSON="${DATA_DIR}/execution_genesis_block.json" # currently hard-codes some merkle branches that won't match the random deposits generated # by this simulation. This doesn't happen to produce problems only by accident. If we enable # the `deposit_root` safety-checks in the deposit downloader, it will detect the discrepancy. -sed "s/SHANGHAI_FORK_TIME/${SHANGHAI_FORK_TIME}/g; s/CANCUN_FORK_TIME/${CANCUN_FORK_TIME}/g; s/PRAGUE_FORK_TIME/${PRAGUE_FORK_TIME}/g" \ +sed "s/SHANGHAI_FORK_TIME/${GENESIS_TIME}/g; s/CANCUN_FORK_TIME/${GENESIS_TIME}/g; s/PRAGUE_FORK_TIME/${PRAGUE_FORK_TIME}/g; s/OSAKA_FORK_TIME/${OSAKA_FORK_TIME}/g" \ "${SCRIPTS_DIR}/execution_genesis.json.template" > "$EXECUTION_GENESIS_JSON" DEPOSIT_CONTRACT_ADDRESS="0x4242424242424242424242424242424242424242" @@ -885,14 +884,13 @@ done --total-validators=$TOTAL_VALIDATORS \ --output-genesis="$CONTAINER_DATA_DIR/genesis.ssz" \ --output-bootstrap-file="$CONTAINER_DATA_DIR/bootstrap_nodes.txt" \ - --output-deposit-tree-snapshot="$CONTAINER_DEPOSIT_TREE_SNAPSHOT_FILE" \ --bootstrap-address=127.0.0.1 \ --bootstrap-port=$(( BASE_PORT + BOOTSTRAP_NODE - 1 )) \ --netkey-file=$CONTAINER_BOOTSTRAP_NETWORK_KEYFILE \ --insecure-netkey-password=true \ --genesis-time=$GENESIS_TIME \ --capella-fork-epoch=0 \ - --deneb-fork-epoch=$DENEB_FORK_EPOCH \ + --deneb-fork-epoch=0 \ --electra-fork-epoch="${ELECTRA_FORK_EPOCH}" \ --execution-genesis-block="$EXECUTION_GENESIS_BLOCK_JSON" @@ -925,8 +923,9 @@ ETH1_FOLLOW_DISTANCE: 1 ALTAIR_FORK_EPOCH: 0 BELLATRIX_FORK_EPOCH: 0 CAPELLA_FORK_EPOCH: 0 -DENEB_FORK_EPOCH: ${DENEB_FORK_EPOCH} +DENEB_FORK_EPOCH: 0 ELECTRA_FORK_EPOCH: ${ELECTRA_FORK_EPOCH} +FULU_FORK_EPOCH: ${FULU_FORK_EPOCH} TERMINAL_TOTAL_DIFFICULTY: 0 EOF @@ -1061,7 +1060,7 @@ if ((SIGNER_NODES > 0)); then for NUM_REMOTE in $(seq 0 $LAST_SIGNER_NODE_IDX); do # TODO find some way for this and other background-launched processes to # still participate in set -e, ideally - source "${SCRIPTS_DIR}/signers/${SIGNER_TYPE}.sh" $NUM_REMOTE + source "${SCRIPTS_DIR}/signers/${SIGNER_TYPE}.sh" $NUM_REMOTE $CONST_PRESET done fi @@ -1130,7 +1129,6 @@ for NUM_NODE in $(seq 1 "${NUM_NODES}"); do ${STOP_AT_EPOCH_FLAG} \ ${KEYMANAGER_FLAG} \ --keymanager-token-file="${DATA_DIR}/keymanager-token" \ - --finalized-deposit-tree-snapshot="$CONTAINER_DEPOSIT_TREE_SNAPSHOT_FILE" \ --rest-port="$(( BASE_REST_PORT + NUM_NODE - 1 ))" \ --metrics-port="$(( BASE_METRICS_PORT + NUM_NODE - 1 ))" \ --doppelganger-detection=off \ diff --git a/scripts/mainnet-non-overriden-config.yaml b/scripts/mainnet-non-overriden-config.yaml index b6ccbfeed8..3bbf19a202 100644 --- a/scripts/mainnet-non-overriden-config.yaml +++ b/scripts/mainnet-non-overriden-config.yaml @@ -123,10 +123,6 @@ MAX_REQUEST_BLOCKS: 1024 EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# 5s -TTFB_TIMEOUT: 5 -# 10s -RESP_TIMEOUT: 10 ATTESTATION_PROPAGATION_SLOT_RANGE: 32 # 500ms MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 @@ -173,5 +169,4 @@ SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 VALIDATOR_CUSTODY_REQUIREMENT: 8 BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 -MAX_BLOBS_PER_BLOCK_FULU: 12 MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 diff --git a/scripts/minimal-non-overriden-config.yaml b/scripts/minimal-non-overriden-config.yaml 
index b264220897..da6bde4d2a 100644 --- a/scripts/minimal-non-overriden-config.yaml +++ b/scripts/minimal-non-overriden-config.yaml @@ -124,10 +124,6 @@ MAX_REQUEST_BLOCKS: 1024 EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # [customized] `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 272) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 272 -# 5s -TTFB_TIMEOUT: 5 -# 10s -RESP_TIMEOUT: 10 ATTESTATION_PROPAGATION_SLOT_RANGE: 32 # 500ms MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 @@ -174,5 +170,4 @@ SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 VALIDATOR_CUSTODY_REQUIREMENT: 8 BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 -MAX_BLOBS_PER_BLOCK_FULU: 12 MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 diff --git a/scripts/package_src/nimbus_beacon_node/image/lib/systemd/system/nimbus_beacon_node.service b/scripts/package_src/nimbus_beacon_node/image/lib/systemd/system/nimbus_beacon_node.service index 452a34c20b..78583f55e9 100644 --- a/scripts/package_src/nimbus_beacon_node/image/lib/systemd/system/nimbus_beacon_node.service +++ b/scripts/package_src/nimbus_beacon_node/image/lib/systemd/system/nimbus_beacon_node.service @@ -57,8 +57,8 @@ WorkingDirectory=/var/lib/nimbus TimeoutSec=1200 Restart=always -# Don't restart when Doppelganger detection has been activated -RestartPreventExitStatus=129 +# Don't restart when Doppelganger or slashing detection has been activated +RestartPreventExitStatus=129 198 ExecStart=/usr/bin/nimbus_beacon_node \ --network=${NETWORK} \ diff --git a/scripts/signers/nimbus.sh b/scripts/signers/nimbus.sh index a3e25a0dca..99e6338041 100755 --- a/scripts/signers/nimbus.sh +++ b/scripts/signers/nimbus.sh @@ -11,6 +11,7 @@ SIGNING_NODE_IDX=$1 ./build/nimbus_signing_node \ --log-level=DEBUG \ + --network=$2 \ --validators-dir="${DATA_DIR}/validators_shares/$(( SIGNING_NODE_IDX + 1 ))" \ --secrets-dir="${DATA_DIR}/secrets_shares/$(( SIGNING_NODE_IDX + 1 ))" \ --bind-port=$(( BASE_REMOTE_SIGNER_PORT + SIGNING_NODE_IDX )) &> "${DATA_DIR}/logs/nimbus_signing_node.${SIGNING_NODE_IDX}.jsonl" & diff --git a/scripts/signers/web3signer.sh b/scripts/signers/web3signer.sh index 5d8723f2ae..d492c098b9 100755 --- a/scripts/signers/web3signer.sh +++ b/scripts/signers/web3signer.sh @@ -1,6 +1,6 @@ #!/usr/bin/bash -# Copyright (c) 2023-2025 Status Research & Development GmbH. +# Copyright (c) 2023-2024 Status Research & Development GmbH. # Licensed under either of: # - Apache License, version 2.0 # - MIT license @@ -44,4 +44,4 @@ done --keystores-path="${KEYSTORES_DIR}" \ --network="${RUNTIME_CONFIG_FILE}" &> "${DATA_DIR}/logs/web3signer.${WEB3SIGNER_NODE_IDX}.log" & -echo $! > "${DATA_DIR}/pids/web3signer.${WEB3SIGNER_NODE_IDX}" \ No newline at end of file +echo $! 
> "${DATA_DIR}/pids/web3signer.${WEB3SIGNER_NODE_IDX}" diff --git a/tests/all_tests.nim b/tests/all_tests.nim index b3f86ada9b..d73ac785ad 100644 --- a/tests/all_tests.nim +++ b/tests/all_tests.nim @@ -23,12 +23,12 @@ import # Unit test ./test_block_quarantine, ./test_conf, ./test_datatypes, - ./test_deposit_snapshots, ./test_discovery, ./test_engine_api_conversions, ./test_engine_authentication, ./test_el_manager, ./test_el_conf, + ./test_eth2_rest_serialization, ./test_eth2_ssz_serialization, ./test_forks, ./test_gossip_transition, @@ -45,8 +45,6 @@ import # Unit test ./test_peer_pool, ./test_peerdas_helpers, ./test_remote_keystore, - ./test_rest_json_serialization, - ./test_serialization, ./test_spec, ./test_statediff, ./test_sync_committee_pool, @@ -61,9 +59,11 @@ import # Unit test ./slashing_protection/test_fixtures, ./slashing_protection/test_slashing_protection_db, ./test_validator_client, - ./test_beacon_validators, + ./test_block_payloads, ./test_beacon_chain_file, ./test_mev_calls, + ./test_column_map, + ./test_quarantine, ./test_keymanager_api # currently has to run after test_remote_keystore summarizeLongTests("AllTests") diff --git a/tests/consensus_spec/all_tests.nim b/tests/consensus_spec/all_tests.nim index d17bf5f540..5ece555cce 100644 --- a/tests/consensus_spec/all_tests.nim +++ b/tests/consensus_spec/all_tests.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -14,6 +14,7 @@ # Tests that do not depend on `mainnet` vs `minimal` compile-time configuration import + ./test_fixture_fork_digest, ./test_fixture_kzg, ./test_fixture_networking, ./test_fixture_ssz_generic_types diff --git a/tests/consensus_spec/altair/test_fixture_light_client_sync_protocol.nim b/tests/consensus_spec/altair/test_fixture_light_client_sync_protocol.nim index a37968c1dc..4f05531dec 100644 --- a/tests/consensus_spec/altair/test_fixture_light_client_sync_protocol.nim +++ b/tests/consensus_spec/altair/test_fixture_light_client_sync_protocol.nim @@ -203,7 +203,7 @@ proc runTest(storeDataFork: static LightClientDataFork) = store.optimistic_header == update.attested_header store.current_max_active_participants > 0 - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.0/tests/core/pyspec/eth2spec/test/altair/unittests/light_client/test_sync_protocol.py#L64-L96 + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/tests/core/pyspec/eth2spec/test/altair/unittests/light_client/test_sync_protocol.py#L64-L96 test "test_process_light_client_update_at_period_boundary": var forked = assignClone(genesisState[]) template state(): auto = forked[].altairData.data diff --git a/tests/consensus_spec/altair/test_fixture_ssz_consensus_objects.nim b/tests/consensus_spec/altair/test_fixture_ssz_consensus_objects.nim index 86af9c1aa5..6513b7ea61 100644 --- a/tests/consensus_spec/altair/test_fixture_ssz_consensus_objects.nim +++ b/tests/consensus_spec/altair/test_fixture_ssz_consensus_objects.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or 
at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -80,10 +80,8 @@ proc checkSSZ( # TODO check the value (requires YAML loader) -proc loadExpectedHashTreeRoot( - dir: string -): SSZHashTreeRoot {.raises: [ - Exception, IOError, OSError, YamlConstructionError, YamlParserError].} = +proc loadExpectedHashTreeRoot(dir: string): SSZHashTreeRoot + {.raises: [IOError, OSError, YamlConstructionError, YamlParserError].} = let s = openFileStream(dir/"roots.yaml") yaml.load(s, result) s.close() diff --git a/tests/consensus_spec/bellatrix/test_fixture_operations.nim b/tests/consensus_spec/bellatrix/test_fixture_operations.nim index b2cfb33ff6..b007bb0306 100644 --- a/tests/consensus_spec/bellatrix/test_fixture_operations.nim +++ b/tests/consensus_spec/bellatrix/test_fixture_operations.nim @@ -143,7 +143,7 @@ suite baseDescription & "Execution Payload " & preset(): proc makeApplyExecutionPayloadCb(path: string): auto = return proc( preState: var bellatrix.BeaconState, body: bellatrix.BeaconBlockBody): - Result[void, cstring] {.raises: [IOError].} = + Result[void, cstring] = let payloadValid = os_ops.readFile( OpExecutionPayloadDir/"pyspec_tests"/path/"execution.yaml" ).contains("execution_valid: true") diff --git a/tests/consensus_spec/bellatrix/test_fixture_ssz_consensus_objects.nim b/tests/consensus_spec/bellatrix/test_fixture_ssz_consensus_objects.nim index 27337b5fec..3b34356441 100644 --- a/tests/consensus_spec/bellatrix/test_fixture_ssz_consensus_objects.nim +++ b/tests/consensus_spec/bellatrix/test_fixture_ssz_consensus_objects.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -80,10 +80,8 @@ proc checkSSZ( # TODO check the value (requires YAML loader) -proc loadExpectedHashTreeRoot( - dir: string -): SSZHashTreeRoot {.raises: [ - Exception, IOError, OSError, YamlConstructionError, YamlParserError].} = +proc loadExpectedHashTreeRoot(dir: string): SSZHashTreeRoot + {.raises: [IOError, OSError, YamlConstructionError, YamlParserError].} = let s = openFileStream(dir/"roots.yaml") yaml.load(s, result) s.close() diff --git a/tests/consensus_spec/capella/test_fixture_operations.nim b/tests/consensus_spec/capella/test_fixture_operations.nim index c5a0ec17ed..8d13f6bd3e 100644 --- a/tests/consensus_spec/capella/test_fixture_operations.nim +++ b/tests/consensus_spec/capella/test_fixture_operations.nim @@ -160,7 +160,7 @@ suite baseDescription & "Execution Payload " & preset(): func makeApplyExecutionPayloadCb(path: string): auto = return proc( preState: var capella.BeaconState, body: capella.BeaconBlockBody): - Result[void, cstring] {.raises: [IOError].} = + Result[void, cstring] = let payloadValid = os_ops.readFile( OpExecutionPayloadDir/"pyspec_tests"/path/"execution.yaml" ).contains("execution_valid: true") diff --git a/tests/consensus_spec/capella/test_fixture_ssz_consensus_objects.nim b/tests/consensus_spec/capella/test_fixture_ssz_consensus_objects.nim index b10951b594..2bb9552fec 100644 --- a/tests/consensus_spec/capella/test_fixture_ssz_consensus_objects.nim +++ b/tests/consensus_spec/capella/test_fixture_ssz_consensus_objects.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -82,10 +82,8 @@ proc checkSSZ( # TODO check the value (requires YAML loader) -proc loadExpectedHashTreeRoot( - dir: string -): SSZHashTreeRoot {.raises: [ - Exception, IOError, OSError, YamlConstructionError, YamlParserError].} = +proc loadExpectedHashTreeRoot(dir: string): SSZHashTreeRoot + {.raises: [IOError, OSError, YamlConstructionError, YamlParserError].} = let s = openFileStream(dir/"roots.yaml") yaml.load(s, result) s.close() diff --git a/tests/consensus_spec/consensus_spec_tests_preset.nim b/tests/consensus_spec/consensus_spec_tests_preset.nim index a71079906a..7cbefbfb72 100644 --- a/tests/consensus_spec/consensus_spec_tests_preset.nim +++ b/tests/consensus_spec/consensus_spec_tests_preset.nim @@ -19,6 +19,7 @@ import ./deneb/all_deneb_fixtures, ./electra/all_electra_fixtures, ./fulu/all_fulu_fixtures, + ./gloas/all_gloas_fixtures, ./test_fixture_fork, ./test_fixture_fork_choice, ./test_fixture_light_client_data_collection, @@ -30,4 +31,4 @@ import ./test_fixture_sanity_slots, ./test_fixture_transition -summarizeLongTests("ConsensusSpecPreset") \ No newline at end of file +summarizeLongTests("ConsensusSpecPreset") diff --git a/tests/consensus_spec/deneb/test_fixture_operations.nim b/tests/consensus_spec/deneb/test_fixture_operations.nim index f98c6e68ad..6155d6ba22 100644 --- a/tests/consensus_spec/deneb/test_fixture_operations.nim +++ b/tests/consensus_spec/deneb/test_fixture_operations.nim @@ -163,7 +163,7 @@ suite baseDescription & "Execution Payload " & preset(): func makeApplyExecutionPayloadCb(path: string): auto = return proc( preState: var deneb.BeaconState, body: deneb.BeaconBlockBody): - Result[void, cstring] {.raises: [IOError].} = + Result[void, cstring] = let payloadValid = os_ops.readFile( OpExecutionPayloadDir/"pyspec_tests"/path/"execution.yaml" ).contains("execution_valid: true") diff --git a/tests/consensus_spec/deneb/test_fixture_ssz_consensus_objects.nim b/tests/consensus_spec/deneb/test_fixture_ssz_consensus_objects.nim index f4637ba170..2555487626 100644 --- a/tests/consensus_spec/deneb/test_fixture_ssz_consensus_objects.nim +++ b/tests/consensus_spec/deneb/test_fixture_ssz_consensus_objects.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -85,10 +85,8 @@ proc checkSSZ( # TODO check the value (requires YAML loader) -proc loadExpectedHashTreeRoot( - dir: string -): SSZHashTreeRoot {.raises: [ - Exception, IOError, OSError, YamlConstructionError, YamlParserError].} = +proc loadExpectedHashTreeRoot(dir: string): SSZHashTreeRoot + {.raises: [IOError, OSError, YamlConstructionError, YamlParserError].} = let s = openFileStream(dir/"roots.yaml") yaml.load(s, result) s.close() diff --git a/tests/consensus_spec/deneb/test_fixture_state_transition_epoch.nim b/tests/consensus_spec/deneb/test_fixture_state_transition_epoch.nim index 74fafafb04..24b404d881 100644 --- a/tests/consensus_spec/deneb/test_fixture_state_transition_epoch.nim +++ b/tests/consensus_spec/deneb/test_fixture_state_transition_epoch.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -23,7 +23,6 @@ import from std/sequtils import mapIt, toSeq from std/strutils import rsplit from ../../../beacon_chain/spec/datatypes/deneb import BeaconState -from ../../teststateutil import checkPerValidatorBalanceCalc const RootDir = SszTestsDir/const_preset/"deneb"/"epoch_processing" @@ -75,7 +74,6 @@ template runSuite( # --------------------------------------------------------------- runSuite(JustificationFinalizationDir, "Justification & Finalization"): let info = altair.EpochInfo.init(state) - check checkPerValidatorBalanceCalc(state) process_justification_and_finalization(state, info.balances) Result[void, cstring].ok() @@ -83,7 +81,6 @@ runSuite(JustificationFinalizationDir, "Justification & Finalization"): # --------------------------------------------------------------- runSuite(InactivityDir, "Inactivity"): let info = altair.EpochInfo.init(state) - check checkPerValidatorBalanceCalc(state) process_inactivity_updates(cfg, state, info) Result[void, cstring].ok() diff --git a/tests/consensus_spec/electra/test_fixture_operations.nim b/tests/consensus_spec/electra/test_fixture_operations.nim index 109dbf9394..9156f14e18 100644 --- a/tests/consensus_spec/electra/test_fixture_operations.nim +++ b/tests/consensus_spec/electra/test_fixture_operations.nim @@ -198,7 +198,7 @@ suite baseDescription & "Execution Payload " & preset(): func makeApplyExecutionPayloadCb(path: string): auto = return proc( preState: var electra.BeaconState, body: electra.BeaconBlockBody): - Result[void, cstring] {.raises: [IOError].} = + Result[void, cstring] = let payloadValid = os_ops.readFile( OpExecutionPayloadDir/"pyspec_tests"/path/"execution.yaml" ).contains("execution_valid: true") @@ -209,7 +209,7 @@ suite baseDescription & "Execution Payload " & preset(): body.compute_execution_block_hash( preState.latest_block_root( assignClone(preState)[].hash_tree_root()))) - func executePayload(_: electra.ExecutionPayload): bool = payloadValid + func executePayload(_: deneb.ExecutionPayload): bool = payloadValid process_execution_payload( defaultRuntimeConfig, preState, body, executePayload) @@ -285,10 +285,10 @@ suite baseDescription & "Voluntary Exit " & preset(): suite baseDescription & "Withdrawals " & preset(): func applyWithdrawals( preState: var electra.BeaconState, - executionPayload: electra.ExecutionPayload): Result[void, cstring] = + 
executionPayload: deneb.ExecutionPayload): Result[void, cstring] = process_withdrawals(preState, executionPayload) for path in walkTests(OpWithdrawalsDir): - runTest[electra.ExecutionPayload, typeof applyWithdrawals]( + runTest[deneb.ExecutionPayload, typeof applyWithdrawals]( OpWithdrawalsDir, suiteName, "Withdrawals", "execution_payload", applyWithdrawals, path) diff --git a/tests/consensus_spec/electra/test_fixture_ssz_consensus_objects.nim b/tests/consensus_spec/electra/test_fixture_ssz_consensus_objects.nim index 9a3b92d83c..a5b4dbf6f9 100644 --- a/tests/consensus_spec/electra/test_fixture_ssz_consensus_objects.nim +++ b/tests/consensus_spec/electra/test_fixture_ssz_consensus_objects.nim @@ -88,10 +88,8 @@ proc checkSSZ( # TODO check the value (requires YAML loader) -proc loadExpectedHashTreeRoot( - dir: string -): SSZHashTreeRoot {.raises: [ - Exception, IOError, OSError, YamlConstructionError, YamlParserError].} = +proc loadExpectedHashTreeRoot(dir: string): SSZHashTreeRoot + {.raises: [IOError, OSError, YamlConstructionError, YamlParserError].} = let s = openFileStream(dir/"roots.yaml") yaml.load(s, result) s.close() @@ -137,9 +135,9 @@ suite "EF - Electra - SSZ consensus objects " & preset(): of "Eth1Block": checkSSZ(Eth1Block, path, hash) of "Eth1Data": checkSSZ(Eth1Data, path, hash) of "ExecutionPayload": - checkSSZ(electra.ExecutionPayload, path, hash) + checkSSZ(deneb.ExecutionPayload, path, hash) of "ExecutionPayloadHeader": - checkSSZ(electra.ExecutionPayloadHeader, path, hash) + checkSSZ(deneb.ExecutionPayloadHeader, path, hash) of "ExecutionRequests": checkSSZ(ExecutionRequests, path, hash) of "Fork": checkSSZ(Fork, path, hash) of "ForkData": checkSSZ(ForkData, path, hash) diff --git a/tests/consensus_spec/fixtures_utils.nim b/tests/consensus_spec/fixtures_utils.nim index b8a4602710..e6ec7a091d 100644 --- a/tests/consensus_spec/fixtures_utils.nim +++ b/tests/consensus_spec/fixtures_utils.nim @@ -47,6 +47,14 @@ func readValue*(r: var JsonReader, a: var seq[byte]) = func genesisTestRuntimeConfig*(consensusFork: ConsensusFork): RuntimeConfig = var res = defaultRuntimeConfig case consensusFork + of ConsensusFork.Gloas: + res.GLOAS_FORK_EPOCH = GENESIS_EPOCH + res.FULU_FORK_EPOCH = GENESIS_EPOCH + res.ELECTRA_FORK_EPOCH = GENESIS_EPOCH + res.DENEB_FORK_EPOCH = GENESIS_EPOCH + res.CAPELLA_FORK_EPOCH = GENESIS_EPOCH + res.BELLATRIX_FORK_EPOCH = GENESIS_EPOCH + res.ALTAIR_FORK_EPOCH = GENESIS_EPOCH of ConsensusFork.Fulu: res.FULU_FORK_EPOCH = GENESIS_EPOCH res.ELECTRA_FORK_EPOCH = GENESIS_EPOCH @@ -90,7 +98,7 @@ type rewards*: List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT] penalties*: List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#eth1block + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/validator.md#eth1block Eth1Block* = object timestamp*: uint64 deposit_root*: Eth2Digest @@ -181,7 +189,8 @@ proc loadBlock*( validateBlockHash = true): auto = var blck = parseTest(path, SSZ, consensusFork.SignedBeaconBlock) blck.root = hash_tree_root(blck.message) - when consensusFork >= ConsensusFork.Bellatrix: + debugGloasComment "" + when consensusFork >= ConsensusFork.Bellatrix and consensusFork != ConsensusFork.Gloas: if blck.message.is_execution_block and not blck.message.body.execution_payload.transactions.anyIt(it.len == 0): if blck.message.body.execution_payload.block_hash != diff --git a/tests/consensus_spec/fulu/test_fixture_operations.nim 
b/tests/consensus_spec/fulu/test_fixture_operations.nim index 5620db0c20..e9043db8cf 100644 --- a/tests/consensus_spec/fulu/test_fixture_operations.nim +++ b/tests/consensus_spec/fulu/test_fixture_operations.nim @@ -198,7 +198,7 @@ suite baseDescription & "Execution Payload " & preset(): func makeApplyExecutionPayloadCb(path: string): auto = return proc( preState: var fulu.BeaconState, body: fulu.BeaconBlockBody): - Result[void, cstring] {.raises: [IOError].} = + Result[void, cstring] = let payloadValid = os_ops.readFile( OpExecutionPayloadDir/"pyspec_tests"/path/"execution.yaml" ).contains("execution_valid: true") @@ -209,7 +209,7 @@ suite baseDescription & "Execution Payload " & preset(): body.compute_execution_block_hash( preState.latest_block_root( assignClone(preState)[].hash_tree_root()))) - func executePayload(_: fulu.ExecutionPayload): bool = payloadValid + func executePayload(_: deneb.ExecutionPayload): bool = payloadValid process_execution_payload( defaultRuntimeConfig, preState, body, executePayload) @@ -285,10 +285,10 @@ suite baseDescription & "Voluntary Exit " & preset(): suite baseDescription & "Withdrawals " & preset(): func applyWithdrawals( preState: var fulu.BeaconState, - executionPayload: fulu.ExecutionPayload): Result[void, cstring] = + executionPayload: deneb.ExecutionPayload): Result[void, cstring] = process_withdrawals(preState, executionPayload) for path in walkTests(OpWithdrawalsDir): - runTest[fulu.ExecutionPayload, typeof applyWithdrawals]( + runTest[deneb.ExecutionPayload, typeof applyWithdrawals]( OpWithdrawalsDir, suiteName, "Withdrawals", "execution_payload", applyWithdrawals, path) \ No newline at end of file diff --git a/tests/consensus_spec/fulu/test_fixture_ssz_consensus_objects.nim b/tests/consensus_spec/fulu/test_fixture_ssz_consensus_objects.nim index 5d235760c3..c9132bd083 100644 --- a/tests/consensus_spec/fulu/test_fixture_ssz_consensus_objects.nim +++ b/tests/consensus_spec/fulu/test_fixture_ssz_consensus_objects.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -90,10 +90,8 @@ proc checkSSZ( # TODO check the value (requires YAML loader) -proc loadExpectedHashTreeRoot( - dir: string -): SSZHashTreeRoot {.raises: [ - Exception, IOError, OSError, YamlConstructionError, YamlParserError].} = +proc loadExpectedHashTreeRoot(dir: string): SSZHashTreeRoot + {.raises: [IOError, OSError, YamlConstructionError, YamlParserError].} = let s = openFileStream(dir/"roots.yaml") yaml.load(s, result) s.close() @@ -125,15 +123,16 @@ suite "EF - Fulu - SSZ consensus objects " & preset(): of "BeaconBlock": checkSSZ(electra.BeaconBlock, path, hash) of "BeaconBlockBody": checkSSZ(electra.BeaconBlockBody, path, hash) of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash) - of "BeaconState": checkSSZ(electra.BeaconState, path, hash) + of "BeaconState": checkSSZ(fulu.BeaconState, path, hash) of "BlobIdentifier": checkSSZ(BlobIdentifier, path, hash) of "BlobSidecar": checkSSZ(BlobSidecar, path, hash) of "BLSToExecutionChange": checkSSZ(BLSToExecutionChange, path, hash) of "Checkpoint": checkSSZ(Checkpoint, path, hash) of "ConsolidationRequest": checkSSZ(ConsolidationRequest, path, hash) of "ContributionAndProof": checkSSZ(ContributionAndProof, path, hash) - of "DataColumnSidecar": checkSSZ(DataColumnSidecar, path, hash) - of "DataColumnIdentifier": checkSSZ(DataColumnIdentifier, path, hash) + of "DataColumnSidecar": checkSSZ(fulu.DataColumnSidecar, path, hash) + of "DataColumnsByRootIdentifier": + checkSSZ(DataColumnsByRootIdentifier, path, hash) of "Deposit": checkSSZ(Deposit, path, hash) of "DepositData": checkSSZ(DepositData, path, hash) of "DepositMessage": checkSSZ(DepositMessage, path, hash) @@ -141,9 +140,9 @@ suite "EF - Fulu - SSZ consensus objects " & preset(): of "Eth1Block": checkSSZ(Eth1Block, path, hash) of "Eth1Data": checkSSZ(Eth1Data, path, hash) of "ExecutionPayload": - checkSSZ(electra.ExecutionPayload, path, hash) + checkSSZ(deneb.ExecutionPayload, path, hash) of "ExecutionPayloadHeader": - checkSSZ(electra.ExecutionPayloadHeader, path, hash) + checkSSZ(deneb.ExecutionPayloadHeader, path, hash) of "ExecutionRequests": checkSSZ(electra.ExecutionRequests, path, hash) of "Fork": checkSSZ(Fork, path, hash) @@ -196,4 +195,4 @@ suite "EF - Fulu - SSZ consensus objects " & preset(): of "Validator": checkSSZ(Validator, path, hash) of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash) else: - raise newException(ValueError, "Unsupported test: " & sszType) \ No newline at end of file + raise newException(ValueError, "Unsupported test: " & sszType) diff --git a/tests/consensus_spec/fulu/test_fixture_state_transition_epoch.nim b/tests/consensus_spec/fulu/test_fixture_state_transition_epoch.nim index 465cbe9982..b27161ef79 100644 --- a/tests/consensus_spec/fulu/test_fixture_state_transition_epoch.nim +++ b/tests/consensus_spec/fulu/test_fixture_state_transition_epoch.nim @@ -41,6 +41,8 @@ const HistoricalSummariesUpdateDir = RootDir/"historical_summaries_update" PendingConsolidationsDir = RootDir/"pending_consolidations" PendingDepositsDir = RootDir/"pending_deposits" + ProposerLookaheadDir = RootDir/"proposer_lookahead" + doAssert (toHashSet(mapIt(toSeq(walkDir(RootDir, relative = false)), it.path)) - toHashSet([SyncCommitteeDir])) == @@ -49,7 +51,7 @@ doAssert (toHashSet(mapIt(toSeq(walkDir(RootDir, relative = false)), it.path)) - SlashingsDir, Eth1DataResetDir, EffectiveBalanceUpdatesDir, SlashingsResetDir, RandaoMixesResetDir, ParticipationFlagDir, RewardsAndPenaltiesDir, HistoricalSummariesUpdateDir, - PendingDepositsDir, PendingConsolidationsDir]) + 
PendingDepositsDir, PendingConsolidationsDir, ProposerLookaheadDir]) template runSuite( suiteDir, testName: string, transitionProc: untyped): untyped = @@ -153,6 +155,11 @@ runSuite(PendingDepositsDir, "Pending deposits"): runSuite(PendingConsolidationsDir, "Pending consolidations"): process_pending_consolidations(cfg, state) +# Proposer lookahead +# --------------------------------------------------------------- +runSuite(ProposerLookaheadDir, "Proposer lookahead"): + process_proposer_lookahead(state, cache) + # Sync committee updates # --------------------------------------------------------------- diff --git a/tests/consensus_spec/gloas/all_gloas_fixtures.nim b/tests/consensus_spec/gloas/all_gloas_fixtures.nim new file mode 100644 index 0000000000..c76f38ebea --- /dev/null +++ b/tests/consensus_spec/gloas/all_gloas_fixtures.nim @@ -0,0 +1,14 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [], gcsafe.} +{.used.} + +import + ./test_fixture_operations, + ./test_fixture_state_transition_epoch, + ./test_fixture_ssz_consensus_objects diff --git a/tests/consensus_spec/gloas/test_fixture_operations.nim b/tests/consensus_spec/gloas/test_fixture_operations.nim new file mode 100644 index 0000000000..9c0e67fbb1 --- /dev/null +++ b/tests/consensus_spec/gloas/test_fixture_operations.nim @@ -0,0 +1,334 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms.
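Before the suite definitions, note the convention the runTest driver below relies on: every operation case directory ships a mandatory pre-state and, only for valid vectors, a post-state, and that presence decides both the "[Valid]"/"[Invalid]" test-name prefix and whether the handler is expected to succeed. A hedged sketch of that layout, with testDir standing for <operation dir>/pyspec_tests/<case> and the input file name varying per suite:

# Hedged sketch of the per-case layout assumed by the runTest driver below;
# testDir and the file names describe the vector convention, not new code.
let
  pre = testDir/"pre.ssz_snappy"        # mandatory pre-state
  post = testDir/"post.ssz_snappy"      # present only for valid vectors
doAssert fileExists(pre)
let expectValid = fileExists(post)      # selects the "[Valid]"/"[Invalid]" prefix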
+ +{.push raises: [], gcsafe.} +{.used.} + +import + # Utilities + chronicles, + unittest2, + # Beacon chain internals + ../../../beacon_chain/spec/state_transition_block, + ../../../beacon_chain/spec/datatypes/gloas, + # Test utilities + ../../testutil, + ../fixtures_utils, ../os_ops, + ../../helpers/debug_state + +from std/sequtils import anyIt, mapIt, toSeq +from std/strutils import contains +from ../../../beacon_chain/spec/beaconstate import + get_base_reward_per_increment, get_state_exit_queue_info, + get_total_active_balance, latest_block_root, process_attestation + +const + OpDir = SszTestsDir/const_preset/"gloas"/"operations" + OpAttestationsDir = OpDir/"attestation" + OpAttSlashingDir = OpDir/"attester_slashing" + OpBlockHeaderDir = OpDir/"block_header" + OpBlsToExecutionChangeDir = OpDir/"bls_to_execution_change" + OpConsolidationRequestDir = OpDir/"consolidation_request" + OpDepositRequestDir = OpDir/"deposit_request" + OpDepositsDir = OpDir/"deposit" + OpWithdrawalRequestDir = OpDir/"withdrawal_request" + OpExecutionPayloadDir = OpDir/"execution_payload" + OpExecutionPayloadBidDir = OpDir/"execution_payload_bid" + OpPayloadAttestationDir = OpDir/"payload_attestation" + OpProposerSlashingDir = OpDir/"proposer_slashing" + OpSyncAggregateDir = OpDir/"sync_aggregate" + OpVoluntaryExitDir = OpDir/"voluntary_exit" + OpWithdrawalsDir = OpDir/"withdrawals" + + baseDescription = "EF - Gloas - Operations - " + +const testDirs = toHashSet([ + OpAttestationsDir, OpAttSlashingDir, OpBlockHeaderDir, + OpBlsToExecutionChangeDir, OpConsolidationRequestDir, OpDepositRequestDir, + OpDepositsDir, OpWithdrawalRequestDir, OpExecutionPayloadDir, + OpExecutionPayloadBidDir, OpPayloadAttestationDir, OpProposerSlashingDir, + OpSyncAggregateDir, OpVoluntaryExitDir, OpWithdrawalsDir +]) + +doAssert toHashSet( + mapIt(toSeq(walkDir(OpDir, relative = false)), it.path)) == testDirs + +proc runTest[T, U]( + testSuiteDir, suiteName, opName, applyFile: string, + applyProc: U, identifier: string) = + let testDir = testSuiteDir / "pyspec_tests" / identifier + + let prefix = + if fileExists(testDir/"post.ssz_snappy"): + "[Valid] " + else: + "[Invalid] " + + test prefix & baseDescription & opName & " - " & identifier: + let preState = newClone( + parseTest(testDir/"pre.ssz_snappy", SSZ, gloas.BeaconState)) + let done = applyProc( + preState[], parseTest(testDir/(applyFile & ".ssz_snappy"), SSZ, T)) + + if fileExists(testDir/"post.ssz_snappy"): + let + postState = newClone(parseTest( + testDir/"post.ssz_snappy", SSZ, gloas.BeaconState)) + pass = preState[].hash_tree_root() == postState[].hash_tree_root() + + # TODO reportDiff doesn't understand at least one of HashArray or + # HashList merkle tree caching, so only check if htr's mismatch. + if not pass: + reportDiff(preState, postState) + check: + done.isOk() + pass + else: + check: done.isErr() # No post state = processing should fail + +suite baseDescription & "Attestation " & preset(): + proc applyAttestation( + preState: var gloas.BeaconState, attestation: electra.Attestation): + Result[void, cstring] = + var cache: StateCache + let + total_active_balance = get_total_active_balance(preState, cache) + base_reward_per_increment = + get_base_reward_per_increment(total_active_balance) + + # This returns the proposer reward for including the attestation, which + # isn't tested here. + discard ? 
process_attestation( + preState, attestation, {strictVerification}, base_reward_per_increment, cache) + ok() + + for path in walkTests(OpAttestationsDir): + runTest[electra.Attestation, typeof applyAttestation]( + OpAttestationsDir, suiteName, "Attestation", "attestation", + applyAttestation, path) + +suite baseDescription & "Attester Slashing " & preset(): + proc applyAttesterSlashing( + preState: var gloas.BeaconState, + attesterSlashing: electra.AttesterSlashing): Result[void, cstring] = + var cache: StateCache + doAssert (? process_attester_slashing( + defaultRuntimeConfig, preState, attesterSlashing, {}, + get_state_exit_queue_info(preState), cache))[0] > 0.Gwei + ok() + + for path in walkTests(OpAttSlashingDir): + runTest[electra.AttesterSlashing, typeof applyAttesterSlashing]( + OpAttSlashingDir, suiteName, "Attester Slashing", "attester_slashing", + applyAttesterSlashing, path) + +suite baseDescription & "Block Header " & preset(): + proc applyBlockHeader( + preState: var gloas.BeaconState, blck: gloas.BeaconBlock): + Result[void, cstring] = + var cache: StateCache + process_block_header(preState, blck, {}, cache) + + for path in walkTests(OpBlockHeaderDir): + runTest[gloas.BeaconBlock, typeof applyBlockHeader]( + OpBlockHeaderDir, suiteName, "Block Header", "block", + applyBlockHeader, path) + +from ../../../beacon_chain/spec/datatypes/capella import + SignedBLSToExecutionChange + +suite baseDescription & "BLS to execution change " & preset(): + proc applyBlsToExecutionChange( + preState: var gloas.BeaconState, + signed_address_change: SignedBLSToExecutionChange): + Result[void, cstring] = + process_bls_to_execution_change( + defaultRuntimeConfig, preState, signed_address_change) + + for path in walkTests(OpBlsToExecutionChangeDir): + runTest[SignedBLSToExecutionChange, typeof applyBlsToExecutionChange]( + OpBlsToExecutionChangeDir, suiteName, "BLS to execution change", "address_change", + applyBlsToExecutionChange, path) + +from ".."/".."/".."/beacon_chain/validator_bucket_sort import + sortValidatorBuckets + +suite baseDescription & "Consolidation Request " & preset(): + proc applyConsolidationRequest( + preState: var gloas.BeaconState, + consolidation_request: ConsolidationRequest): Result[void, cstring] = + var cache: StateCache + process_consolidation_request( + defaultRuntimeConfig, preState, + sortValidatorBuckets(preState.validators.asSeq)[], + consolidation_request, cache) + ok() + + for path in walkTests(OpConsolidationRequestDir): + runTest[ConsolidationRequest, typeof applyConsolidationRequest]( + OpConsolidationRequestDir, suiteName, "Consolidation Request", + "consolidation_request", applyConsolidationRequest, path) + +suite baseDescription & "Deposit " & preset(): + func applyDeposit( + preState: var gloas.BeaconState, deposit: Deposit): + Result[void, cstring] = + process_deposit( + defaultRuntimeConfig, preState, + sortValidatorBuckets(preState.validators.asSeq)[], deposit, {}) + + for path in walkTests(OpDepositsDir): + runTest[Deposit, typeof applyDeposit]( + OpDepositsDir, suiteName, "Deposit", "deposit", applyDeposit, path) + +suite baseDescription & "Deposit Request " & preset(): + func applyDepositRequest( + preState: var gloas.BeaconState, depositRequest: DepositRequest): + Result[void, cstring] = + process_deposit_request( + defaultRuntimeConfig, preState, depositRequest, {}) + + for path in walkTests(OpDepositRequestDir): + runTest[DepositRequest, typeof applyDepositRequest]( + OpDepositRequestDir, suiteName, "Deposit Request", "deposit_request", + 
applyDepositRequest, path) + +suite baseDescription & "Execution Payload " & preset(): + proc makeApplyExecutionPayloadCb(path: string): auto = + return proc( + preState: var gloas.BeaconState, + signed_envelope: SignedExecutionPayloadEnvelope): + Result[void, cstring] = + let payloadValid = os_ops.readFile( + OpExecutionPayloadDir/"pyspec_tests"/path/"execution.yaml" + ).contains("execution_valid: true") + var + cache: StateCache + let hashedState = (ref gloas.HashedBeaconState)( + data: preState, root: hash_tree_root(preState)) + + func executePayload(_: deneb.ExecutionPayload): bool = payloadValid + let res = process_execution_payload( + defaultRuntimeConfig, hashedState[], + signed_envelope, executePayload, cache) + preState = hashedState.data + res + + for path in walkTests(OpExecutionPayloadDir): + let + testDir = OpExecutionPayloadDir / "pyspec_tests" / path + inputFile = + if fileExists(testDir/"signed_envelope.ssz_snappy"): + "signed_envelope" + # Skip test vectors with missing signed envelope files + # will be fixed in next consensus-spec-tests release + # https://github.com/ethereum/consensus-specs/issues/4545 + else: + continue + + let applyExecutionPayload = makeApplyExecutionPayloadCb(path) + runTest[SignedExecutionPayloadEnvelope, typeof applyExecutionPayload]( + OpExecutionPayloadDir, suiteName, "Execution Payload", inputFile, + applyExecutionPayload, path) + +suite baseDescription & "Execution Payload Bid " & preset(): + proc applyExecutionPayloadBid( + preState: var gloas.BeaconState, + blck: gloas.BeaconBlock): Result[void, cstring] = + process_execution_payload_bid( + defaultRuntimeConfig, preState, blck) + + for path in walkTests(OpExecutionPayloadBidDir): + runTest[gloas.BeaconBlock, typeof applyExecutionPayloadBid]( + OpExecutionPayloadBidDir, suiteName, "Execution Payload Bid", + "block", applyExecutionPayloadBid, path) + +suite baseDescription & "Payload Attestation " & preset(): + proc applyPayloadAttestation( + preState: var gloas.BeaconState, + payloadAttestation: PayloadAttestation): Result[void, cstring] = + var cache: StateCache + process_payload_attestation(preState, payloadAttestation, cache) + + for path in walkTests(OpPayloadAttestationDir): + runTest[PayloadAttestation, typeof applyPayloadAttestation]( + OpPayloadAttestationDir, suiteName, "Payload Attestation", + "payload_attestation", applyPayloadAttestation, path) + +suite baseDescription & "Withdrawal Request " & preset(): + func applyWithdrawalRequest( + preState: var gloas.BeaconState, withdrawalRequest: WithdrawalRequest): + Result[void, cstring] = + var cache: StateCache + process_withdrawal_request( + defaultRuntimeConfig, preState, + sortValidatorBuckets(preState.validators.asSeq)[], withdrawalRequest, + cache) + ok() + + for path in walkTests(OpWithdrawalRequestDir): + runTest[WithdrawalRequest, typeof applyWithdrawalRequest]( + OpWithdrawalRequestDir, suiteName, "Withdrawal Request", + "withdrawal_request", applyWithdrawalRequest, path) + +suite baseDescription & "Proposer Slashing " & preset(): + proc applyProposerSlashing( + preState: var gloas.BeaconState, proposerSlashing: ProposerSlashing): + Result[void, cstring] = + var cache: StateCache + doAssert (? 
process_proposer_slashing( + defaultRuntimeConfig, preState, proposerSlashing, {}, + get_state_exit_queue_info(preState), cache))[0] > 0.Gwei + ok() + + for path in walkTests(OpProposerSlashingDir): + runTest[ProposerSlashing, typeof applyProposerSlashing]( + OpProposerSlashingDir, suiteName, "Proposer Slashing", "proposer_slashing", + applyProposerSlashing, path) + +suite baseDescription & "Sync Aggregate " & preset(): + proc applySyncAggregate( + preState: var gloas.BeaconState, syncAggregate: SyncAggregate): + Result[void, cstring] = + var cache: StateCache + discard ? process_sync_aggregate( + preState, syncAggregate, get_total_active_balance(preState, cache), + {}, cache) + ok() + + for path in walkTests(OpSyncAggregateDir): + runTest[SyncAggregate, typeof applySyncAggregate]( + OpSyncAggregateDir, suiteName, "Sync Aggregate", "sync_aggregate", + applySyncAggregate, path) + +suite baseDescription & "Voluntary Exit " & preset(): + proc applyVoluntaryExit( + preState: var gloas.BeaconState, voluntaryExit: SignedVoluntaryExit): + Result[void, cstring] = + var cache: StateCache + if process_voluntary_exit( + defaultRuntimeConfig, preState, voluntaryExit, {}, + get_state_exit_queue_info(preState), cache).isOk: + ok() + else: + err("") + + for path in walkTests(OpVoluntaryExitDir): + runTest[SignedVoluntaryExit, typeof applyVoluntaryExit]( + OpVoluntaryExitDir, suiteName, "Voluntary Exit", "voluntary_exit", + applyVoluntaryExit, path) + +suite baseDescription & "Withdrawals " & preset(): + func applyWithdrawals( + preState: var gloas.BeaconState, + executionPayload: deneb.ExecutionPayload): Result[void, cstring] = + process_withdrawals(preState) + + for path in walkTests(OpWithdrawalsDir): + runTest[deneb.ExecutionPayload, typeof applyWithdrawals]( + OpWithdrawalsDir, suiteName, "Withdrawals", "execution_payload", + applyWithdrawals, path) diff --git a/tests/consensus_spec/gloas/test_fixture_rewards.nim b/tests/consensus_spec/gloas/test_fixture_rewards.nim new file mode 100644 index 0000000000..dab9aacb8f --- /dev/null +++ b/tests/consensus_spec/gloas/test_fixture_rewards.nim @@ -0,0 +1,88 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
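All of the Gloas operation suites above funnel into that same runTest driver, so each handler only has to adapt a spec-level process_* call to Result[void, cstring]: ok() when the operation applies cleanly, an error otherwise, which runTest then matches against the presence of post.ssz_snappy. A minimal sketch of such an adapter, where SomeOperation and process_some_operation are placeholders rather than real symbols:

# Hedged, illustrative adapter; SomeOperation and process_some_operation are
# placeholders for a real operation type and spec function.
proc applySomeOperation(
    preState: var gloas.BeaconState,
    op: SomeOperation): Result[void, cstring] =
  var cache: StateCache
  if process_some_operation(defaultRuntimeConfig, preState, op, cache).isOk:
    ok()
  else:
    err("operation rejected")  # invalid vectors ship no post state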
+ +{.push raises: [].} +{.used.} + +import + # Beacon chain internals + ../../../beacon_chain/spec/[beaconstate, validator, helpers, state_transition_epoch], + ../../../beacon_chain/spec/datatypes/[altair, gloas], + # Test utilities + ../../testutil, + ../fixtures_utils, ../os_ops + +const + RewardsDirBase = SszTestsDir/const_preset/"gloas"/"rewards" + RewardsDirBasic = RewardsDirBase/"basic"/"pyspec_tests" + RewardsDirLeak = RewardsDirBase/"leak"/"pyspec_tests" + RewardsDirRandom = RewardsDirBase/"random"/"pyspec_tests" + +func init(T: type Deltas, len: int): T = + if not result.rewards.setLen(len): + raiseAssert "setLen" + if not result.penalties.setLen(len): + raiseAssert "setLen" + +proc runTest(rewardsDir, identifier: string) = + let testDir = rewardsDir / identifier + + var info: altair.EpochInfo + + let + state = newClone( + parseTest(testDir/"pre.ssz_snappy", SSZ, gloas.BeaconState)) + flagDeltas = [ + parseTest(testDir/"source_deltas.ssz_snappy", SSZ, Deltas), + parseTest(testDir/"target_deltas.ssz_snappy", SSZ, Deltas), + parseTest(testDir/"head_deltas.ssz_snappy", SSZ, Deltas)] + inactivityPenaltyDeltas = + parseTest(testDir/"inactivity_penalty_deltas.ssz_snappy", SSZ, Deltas) + + info.init(state[]) + let + total_balance = info.balances.current_epoch + base_reward_per_increment = get_base_reward_per_increment(total_balance) + + var + flagDeltas2: array[TimelyFlag, Deltas] = [ + Deltas.init(state[].validators.len), + Deltas.init(state[].validators.len), + Deltas.init(state[].validators.len)] + inactivityPenaltyDeltas2 = Deltas.init(state[].validators.len) + + let finality_delay = get_finality_delay(state[]) + + for validator_index, reward0, reward1, reward2, penalty0, penalty1, penalty2 + in get_flag_and_inactivity_deltas( + defaultRuntimeConfig, state[], base_reward_per_increment, info, + finality_delay): + if not is_eligible_validator(info.validators[validator_index]): + continue + flagDeltas2[TimelyFlag.TIMELY_SOURCE_FLAG_INDEX].rewards[validator_index] = + reward0 + flagDeltas2[TimelyFlag.TIMELY_TARGET_FLAG_INDEX].rewards[validator_index] = + reward1 + flagDeltas2[TimelyFlag.TIMELY_HEAD_FLAG_INDEX].rewards[validator_index] = + reward2 + flagDeltas2[TimelyFlag.TIMELY_SOURCE_FLAG_INDEX].penalties[validator_index] = + penalty0 + flagDeltas2[TimelyFlag.TIMELY_TARGET_FLAG_INDEX].penalties[validator_index] = + penalty1 + flagDeltas2[TimelyFlag.TIMELY_HEAD_FLAG_INDEX].penalties[validator_index] = + 0.Gwei + inactivityPenaltyDeltas2.penalties[validator_index] = penalty2 + + check: + flagDeltas == flagDeltas2 + inactivityPenaltyDeltas == inactivityPenaltyDeltas2 + +suite "EF - Gloas - Rewards " & preset(): + for rewardsDir in [RewardsDirBasic, RewardsDirLeak, RewardsDirRandom]: + for kind, path in walkDir(rewardsDir, relative = true, checkDir = true): + test "EF - Gloas - Rewards - " & path & preset(): + runTest(rewardsDir, path) diff --git a/tests/consensus_spec/gloas/test_fixture_ssz_consensus_objects.nim b/tests/consensus_spec/gloas/test_fixture_ssz_consensus_objects.nim new file mode 100644 index 0000000000..e50b60d55b --- /dev/null +++ b/tests/consensus_spec/gloas/test_fixture_ssz_consensus_objects.nim @@ -0,0 +1,220 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. 
This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} +{.used.} + +import + # Standard library + std/[ + strutils, streams, strformat, + macros, sets], + # Third-party + yaml, + # Beacon chain internals + ../../../beacon_chain/spec/datatypes/[ + altair, + electra, + fulu, + gloas], + # Status libraries + snappy, + # Test utilities + ../../testutil, ../fixtures_utils, ../os_ops + +from ../../../beacon_chain/spec/datatypes/bellatrix import PowBlock +from ../../../beacon_chain/spec/datatypes/capella import + BLSToExecutionChange, SignedBLSToExecutionChange, HistoricalSummary, + Withdrawal +from ../../../beacon_chain/spec/datatypes/deneb import + BlobIdentifier, BlobSidecar + +# SSZ tests of consensus objects (minimal/mainnet preset specific) + +# Parsing definitions +# ---------------------------------------------------------------- + +const + SSZDir = SszTestsDir/const_preset/"gloas"/"ssz_static" + +type + SSZHashTreeRoot = object + # The test files have the values at the "root" + # so we **must** use "root" as a field name + root: string + # Some have a signing_root field + signing_root {.defaultVal: "".}: string + +# Note this only tracks HashTreeRoot +# Checking the values against the yaml file is TODO (require more flexible Yaml parser) + +proc checkSSZ( + T: type gloas.SignedBeaconBlock, + dir: string, + expectedHash: SSZHashTreeRoot +) {.raises: [IOError, SerializationError, UnconsumedInput].} = + # Deserialize into a ref object to not fill Nim stack + let encoded = snappy.decode( + readFileBytes(dir/"serialized.ssz_snappy"), MaxObjectSize) + let deserialized = newClone(sszDecodeEntireInput(encoded, T)) + + # SignedBeaconBlocks usually not hashed because they're identified by + # htr(BeaconBlock), so do it manually + check: expectedHash.root == "0x" & toLowerAscii($hash_tree_root( + [hash_tree_root(deserialized.message), + hash_tree_root(deserialized.signature)])) + + check deserialized.root == hash_tree_root(deserialized.message) + check SSZ.encode(deserialized[]) == encoded + check sszSize(deserialized[]) == encoded.len + + # TODO check the value (requires YAML loader) + +proc checkSSZ( + T: type, + dir: string, + expectedHash: SSZHashTreeRoot +) {.raises: [IOError, SerializationError, UnconsumedInput].} = + # Deserialize into a ref object to not fill Nim stack + let encoded = snappy.decode( + readFileBytes(dir/"serialized.ssz_snappy"), MaxObjectSize) + let deserialized = newClone(sszDecodeEntireInput(encoded, T)) + + check: expectedHash.root == "0x" & toLowerAscii($hash_tree_root(deserialized[])) + + check SSZ.encode(deserialized[]) == encoded + check sszSize(deserialized[]) == encoded.len + + # TODO check the value (requires YAML loader) + +proc loadExpectedHashTreeRoot(dir: string): SSZHashTreeRoot + {.raises: [IOError, OSError, YamlConstructionError, YamlParserError].} = + let s = openFileStream(dir/"roots.yaml") + yaml.load(s, result) + s.close() + +# Test runner +# ---------------------------------------------------------------- + +suite "EF - Gloas - SSZ consensus objects " & preset(): + doAssert dirExists(SSZDir), "You need to run the \"download_test_vectors.sh\" script to retrieve the consensus spec test vectors." 
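The nested walkDir loops below mirror the on-disk layout of the ssz_static vectors: one directory per container type, one per test kind, one per case, each case holding a snappy-framed serialization next to its expected roots. A hedged sketch of reading a single case, with the directory names purely illustrative:

# Hedged sketch of reading one ssz_static case; the "Attestation",
# "ssz_random" and "case_0" names are illustrative only.
let
  casePath = SSZDir/"Attestation"/"ssz_random"/"case_0"
  encoded = snappy.decode(
    readFileBytes(casePath/"serialized.ssz_snappy"), MaxObjectSize)
  expected = loadExpectedHashTreeRoot(casePath)  # parses roots.yaml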
+ for pathKind, sszType in walkDir(SSZDir, relative = true, checkDir = true): + doAssert pathKind == pcDir + + test &" Testing {sszType}": + let path = SSZDir/sszType + for pathKind, sszTestKind in walkDir( + path, relative = true, checkDir = true): + doAssert pathKind == pcDir + let path = SSZDir/sszType/sszTestKind + for pathKind, sszTestCase in walkDir( + path, relative = true, checkDir = true): + let path = SSZDir/sszType/sszTestKind/sszTestCase + let hash = loadExpectedHashTreeRoot(path) + + case sszType: + of "AggregateAndProof": checkSSZ(electra.AggregateAndProof, path, hash) + of "Attestation": checkSSZ(electra.Attestation, path, hash) + of "AttestationData": checkSSZ(AttestationData, path, hash) + of "AttesterSlashing": checkSSZ(electra.AttesterSlashing, path, hash) + of "BeaconBlock": checkSSZ(gloas.BeaconBlock, path, hash) + of "BeaconBlockBody": checkSSZ(gloas.BeaconBlockBody, path, hash) + of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash) + of "BeaconState": checkSSZ(gloas.BeaconState, path, hash) + of "BlobIdentifier": checkSSZ(BlobIdentifier, path, hash) + of "BlobSidecar": checkSSZ(BlobSidecar, path, hash) + of "BLSToExecutionChange": checkSSZ(BLSToExecutionChange, path, hash) + of "BuilderPendingPayment": checkSSZ(BuilderPendingPayment, path, hash) + of "BuilderPendingWithdrawal": + checkSSZ(BuilderPendingWithdrawal, path, hash) + of "Checkpoint": checkSSZ(Checkpoint, path, hash) + of "ConsolidationRequest": checkSSZ(ConsolidationRequest, path, hash) + of "ContributionAndProof": checkSSZ(ContributionAndProof, path, hash) + of "DataColumnSidecar": checkSSZ(gloas.DataColumnSidecar, path, hash) + of "DataColumnsByRootIdentifier": + checkSSZ(DataColumnsByRootIdentifier, path, hash) + of "Deposit": checkSSZ(Deposit, path, hash) + of "DepositData": checkSSZ(DepositData, path, hash) + of "DepositMessage": checkSSZ(DepositMessage, path, hash) + of "DepositRequest": checkSSZ(DepositRequest, path, hash) + of "Eth1Block": checkSSZ(Eth1Block, path, hash) + of "Eth1Data": checkSSZ(Eth1Data, path, hash) + of "ExecutionPayload": + checkSSZ(deneb.ExecutionPayload, path, hash) + of "ExecutionPayloadHeader": + checkSSZ(deneb.ExecutionPayloadHeader, path, hash) + of "ExecutionPayloadEnvelope": + checkSSZ(ExecutionPayloadEnvelope, path, hash) + of "ExecutionPayloadBid": + checkSSZ(gloas.ExecutionPayloadBid, path, hash) + of "ExecutionRequests": + checkSSZ(electra.ExecutionRequests, path, hash) + of "Fork": checkSSZ(Fork, path, hash) + of "ForkChoiceNode": + debugGloasComment "skipping ForkChoiceNode test for now" + of "ForkData": checkSSZ(ForkData, path, hash) + of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash) + of "HistoricalSummary": checkSSZ(HistoricalSummary, path, hash) + of "IndexedAttestation": + checkSSZ(electra.IndexedAttestation, path, hash) + of "IndexedPayloadAttestation": + checkSSZ(IndexedPayloadAttestation, path, hash) + of "LightClientBootstrap": + checkSSZ(gloas.LightClientBootstrap, path, hash) + of "LightClientHeader": + checkSSZ(gloas.LightClientHeader, path, hash) + of "LightClientUpdate": + checkSSZ(gloas.LightClientUpdate, path, hash) + of "LightClientFinalityUpdate": + checkSSZ(gloas.LightClientFinalityUpdate, path, hash) + of "LightClientOptimisticUpdate": + checkSSZ(gloas.LightClientOptimisticUpdate, path, hash) + of "MatrixEntry": + checkSSZ(MatrixEntry, path, hash) + of "PayloadAttestation": + checkSSZ(PayloadAttestation, path, hash) + of "PayloadAttestationData": + checkSSZ(PayloadAttestationData, path, hash) + of 
"PayloadAttestationMessage": + checkSSZ(PayloadAttestationMessage, path, hash) + of "PendingAttestation": checkSSZ(PendingAttestation, path, hash) + of "PendingConsolidation": checkSSZ(PendingConsolidation, path, hash) + of "PendingDeposit": checkSSZ(PendingDeposit, path, hash) + of "PendingPartialWithdrawal": + checkSSZ(PendingPartialWithdrawal, path, hash) + of "PowBlock": checkSSZ(PowBlock, path, hash) + of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash) + of "SignedAggregateAndProof": + checkSSZ(electra.SignedAggregateAndProof, path, hash) + of "SignedBeaconBlock": + checkSSZ(gloas.SignedBeaconBlock, path, hash) + of "SignedBeaconBlockHeader": + checkSSZ(SignedBeaconBlockHeader, path, hash) + of "SignedBLSToExecutionChange": + checkSSZ(SignedBLSToExecutionChange, path, hash) + of "SignedContributionAndProof": + checkSSZ(SignedContributionAndProof, path, hash) + of "SignedExecutionPayloadEnvelope": + checkSSZ(SignedExecutionPayloadEnvelope, path, hash) + of "SignedExecutionPayloadBid": + checkSSZ(SignedExecutionPayloadBid, path, hash) + of "SignedVoluntaryExit": checkSSZ(SignedVoluntaryExit, path, hash) + of "SigningData": checkSSZ(SigningData, path, hash) + of "SingleAttestation": checkSSZ(SingleAttestation, path, hash) + of "SyncAggregate": checkSSZ(SyncAggregate, path, hash) + of "SyncAggregatorSelectionData": + checkSSZ(SyncAggregatorSelectionData, path, hash) + of "SyncCommittee": checkSSZ(SyncCommittee, path, hash) + of "SyncCommitteeContribution": + checkSSZ(SyncCommitteeContribution, path, hash) + of "SyncCommitteeMessage": checkSSZ(SyncCommitteeMessage, path, hash) + of "Withdrawal": checkSSZ(Withdrawal, path, hash) + of "WithdrawalRequest": checkSSZ(WithdrawalRequest, path, hash) + of "Validator": checkSSZ(Validator, path, hash) + of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash) + else: + raise newException(ValueError, "Unsupported test: " & sszType) diff --git a/tests/consensus_spec/gloas/test_fixture_state_transition_epoch.nim b/tests/consensus_spec/gloas/test_fixture_state_transition_epoch.nim new file mode 100644 index 0000000000..6b247afd2f --- /dev/null +++ b/tests/consensus_spec/gloas/test_fixture_state_transition_epoch.nim @@ -0,0 +1,178 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
+ +{.push raises: [], gcsafe.} +{.used.} + +import + # Status internals + chronicles, + # Beacon chain internals + ../../../beacon_chain/spec/[presets, state_transition_epoch], + ../../../beacon_chain/spec/datatypes/altair, + # Test utilities + ../../testutil, + ../fixtures_utils, ../os_ops, + ./test_fixture_rewards, + ../../helpers/debug_state + +from std/sequtils import mapIt, toSeq +from std/strutils import rsplit +from ../../../beacon_chain/spec/datatypes/gloas import BeaconState + +const + RootDir = SszTestsDir/const_preset/"gloas"/"epoch_processing" + + JustificationFinalizationDir = RootDir/"justification_and_finalization" + InactivityDir = RootDir/"inactivity_updates" + RegistryUpdatesDir = RootDir/"registry_updates" + SlashingsDir = RootDir/"slashings" + Eth1DataResetDir = RootDir/"eth1_data_reset" + EffectiveBalanceUpdatesDir = RootDir/"effective_balance_updates" + SlashingsResetDir = RootDir/"slashings_reset" + RandaoMixesResetDir = RootDir/"randao_mixes_reset" + ParticipationFlagDir = RootDir/"participation_flag_updates" + SyncCommitteeDir = RootDir/"sync_committee_updates" + RewardsAndPenaltiesDir = RootDir/"rewards_and_penalties" + HistoricalSummariesUpdateDir = RootDir/"historical_summaries_update" + PendingConsolidationsDir = RootDir/"pending_consolidations" + PendingDepositsDir = RootDir/"pending_deposits" + ProposerLookaheadDir = RootDir/"proposer_lookahead" + BuilderPendingPaymentsDir = RootDir/"builder_pending_payments" + +doAssert (toHashSet(mapIt(toSeq(walkDir(RootDir, relative = false)), it.path)) - + toHashSet([SyncCommitteeDir])) == + toHashSet([ + JustificationFinalizationDir, InactivityDir, RegistryUpdatesDir, + SlashingsDir, Eth1DataResetDir, EffectiveBalanceUpdatesDir, + SlashingsResetDir, RandaoMixesResetDir, ParticipationFlagDir, + RewardsAndPenaltiesDir, HistoricalSummariesUpdateDir, + PendingDepositsDir, PendingConsolidationsDir, ProposerLookaheadDir, + BuilderPendingPaymentsDir]) + +template runSuite( + suiteDir, testName: string, transitionProc: untyped): untyped = + suite "EF - Gloas - Epoch Processing - " & testName & preset(): + for testDir in walkDirRec( + suiteDir / "pyspec_tests", yieldFilter = {pcDir}, checkDir = true): + let unitTestName = testDir.rsplit(DirSep, 1)[1] + test testName & " - " & unitTestName & preset(): + # BeaconState objects are stored on the heap to avoid stack overflow + type T = gloas.BeaconState + let preState {.inject.} = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T)) + var cache {.inject, used.} = StateCache() + template state: untyped {.inject, used.} = preState[] + template cfg: untyped {.inject, used.} = defaultRuntimeConfig + + if transitionProc.isOk: + let postState = + newClone(parseTest(testDir/"post.ssz_snappy", SSZ, T)) + check: hash_tree_root(preState[]) == hash_tree_root(postState[]) + reportDiff(preState, postState) + else: + check: not fileExists(testDir/"post.ssz_snappy") + +# Justification & Finalization +# --------------------------------------------------------------- +runSuite(JustificationFinalizationDir, "Justification & Finalization"): + let info = altair.EpochInfo.init(state) + process_justification_and_finalization(state, info.balances) + Result[void, cstring].ok() + +# Inactivity updates +# --------------------------------------------------------------- +runSuite(InactivityDir, "Inactivity"): + let info = altair.EpochInfo.init(state) + process_inactivity_updates(cfg, state, info) + Result[void, cstring].ok() + +# Rewards & Penalties +# 
--------------------------------------------------------------- +runSuite(RewardsAndPenaltiesDir, "Rewards and penalties"): + var info = altair.EpochInfo.init(state) + process_rewards_and_penalties(cfg, state, info) + Result[void, cstring].ok() + +# rest in test_fixture_rewards + +# Registry updates +# --------------------------------------------------------------- +runSuite(RegistryUpdatesDir, "Registry updates"): + process_registry_updates(cfg, state, cache) + +# Slashings +# --------------------------------------------------------------- +runSuite(SlashingsDir, "Slashings"): + let info = altair.EpochInfo.init(state) + process_slashings(state, info.balances.current_epoch) + Result[void, cstring].ok() + +# Eth1 data reset +# --------------------------------------------------------------- +runSuite(Eth1DataResetDir, "Eth1 data reset"): + process_eth1_data_reset(state) + Result[void, cstring].ok() + +# Effective balance updates +# --------------------------------------------------------------- +runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates"): + process_effective_balance_updates(state) + Result[void, cstring].ok() + +# Slashings reset +# --------------------------------------------------------------- +runSuite(SlashingsResetDir, "Slashings reset"): + process_slashings_reset(state) + Result[void, cstring].ok() + +# RANDAO mixes reset +# --------------------------------------------------------------- +runSuite(RandaoMixesResetDir, "RANDAO mixes reset"): + process_randao_mixes_reset(state) + Result[void, cstring].ok() + +# Historical roots update +# --------------------------------------------------------------- +runSuite(HistoricalSummariesUpdateDir, "Historical summaries update"): + process_historical_summaries_update(state) + +# Participation flag updates +# --------------------------------------------------------------- +runSuite(ParticipationFlagDir, "Participation flag updates"): + process_participation_flag_updates(state) + Result[void, cstring].ok() + +# Pending deposits +# --------------------------------------------------------------- +runSuite(PendingDepositsDir, "Pending deposits"): + process_pending_deposits(cfg, state, cache) + +# Pending consolidations +# --------------------------------------------------------------- +runSuite(PendingConsolidationsDir, "Pending consolidations"): + process_pending_consolidations(cfg, state) + +# Proposer lookahead +# --------------------------------------------------------------- +runSuite(ProposerLookaheadDir, "Proposer lookahead"): + process_proposer_lookahead(state, cache) + +# Builder pending payments +# --------------------------------------------------------------- +runSuite(BuilderPendingPaymentsDir, "Builder pending payments"): + process_builder_pending_payments(cfg, state, cache) + +# Sync committee updates +# --------------------------------------------------------------- + +# These are only for minimal, not mainnet +when const_preset == "minimal": + runSuite(SyncCommitteeDir, "Sync committee updates"): + process_sync_committee_updates(state) + Result[void, cstring].ok() +else: + doAssert not dirExists(SyncCommitteeDir) diff --git a/tests/consensus_spec/phase0/test_fixture_ssz_consensus_objects.nim b/tests/consensus_spec/phase0/test_fixture_ssz_consensus_objects.nim index 8d395fac1e..7772ab81b8 100644 --- a/tests/consensus_spec/phase0/test_fixture_ssz_consensus_objects.nim +++ b/tests/consensus_spec/phase0/test_fixture_ssz_consensus_objects.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & 
Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -80,10 +80,8 @@ proc checkSSZ( # TODO check the value (requires YAML loader) -proc loadExpectedHashTreeRoot( - dir: string -): SSZHashTreeRoot {.raises: [ - Exception, IOError, OSError, YamlConstructionError, YamlParserError].} = +proc loadExpectedHashTreeRoot(dir: string): SSZHashTreeRoot + {.raises: [IOError, OSError, YamlConstructionError, YamlParserError].} = let s = openFileStream(dir/"roots.yaml") yaml.load(s, result) s.close() diff --git a/tests/consensus_spec/test_fixture_fork.nim b/tests/consensus_spec/test_fixture_fork.nim index 6db9a733e7..c89512e5c8 100644 --- a/tests/consensus_spec/test_fixture_fork.nim +++ b/tests/consensus_spec/test_fixture_fork.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import @@ -95,3 +95,12 @@ suite "EF - Fulu - Fork " & preset(): for kind, path in walkDir(OpForkDir, relative = true, checkDir = true): runTest(electra.BeaconState, fulu.BeaconState, "Fulu", OpForkDir, upgrade_to_fulu, suiteName, path) + +from ../../beacon_chain/spec/datatypes/gloas import BeaconState + +suite "EF - Gloas - Fork " & preset(): + const OpForkDir = + SszTestsDir/const_preset/"gloas"/"fork"/"fork"/"pyspec_tests" + for kind, path in walkDir(OpForkDir, relative = true, checkDir = true): + runTest(fulu.BeaconState, gloas.BeaconState, "Gloas", OpForkDir, + upgrade_to_gloas, suiteName, path) diff --git a/tests/consensus_spec/test_fixture_fork_choice.nim b/tests/consensus_spec/test_fixture_fork_choice.nim index b2ab66ced1..890fc2443c 100644 --- a/tests/consensus_spec/test_fixture_fork_choice.nim +++ b/tests/consensus_spec/test_fixture_fork_choice.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import @@ -27,12 +27,21 @@ import from std/json import JsonNode, getBool, getInt, getStr, hasKey, items, len, pairs, `$`, `[]` from std/sequtils import mapIt, toSeq -from std/strutils import contains +from std/strutils import contains, rsplit from stew/byteutils import fromHex from ../testbcutil import addHeadBlock +from ../../beacon_chain/spec/peerdas_helpers import + verify_data_column_sidecar_inclusion_proof, + verify_data_column_sidecar_kzg_proofs from ../../beacon_chain/spec/state_transition_block import check_attester_slashing, validate_blobs +block: + template sourceDir: string = currentSourcePath.rsplit(io2.DirSep, 1)[0] + doAssert loadTrustedSetup( + sourceDir & + "/../../vendor/nim-kzg4844/kzg4844/csources/src/trusted_setup.txt", 0).isOk + # Test format described at https://github.com/ethereum/consensus-specs/tree/v1.3.0/tests/formats/fork_choice # Note that our implementation has been optimized with "ProtoArray" # instead of following the spec (in particular the "store"). 
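The hunks below extend the runner for Fulu data columns: when a step lists columns, loadOps verifies each referenced sidecar up front and folds the outcome into a single columnsValid flag, and stepOnBlock later rejects the block if that flag is false. A hedged sketch of the per-step check, where columns stands for the decoded DataColumnSidecar values of one on_block step:

# Hedged sketch of the per-step column validation performed while loading ops;
# `columns` is a placeholder for the sidecars referenced by the step.
var columnsValid = true
for column in columns:
  columnsValid = columnsValid and
    verify_data_column_sidecar_inclusion_proof(column).isOk and
    verify_data_column_sidecar_kzg_proofs(column).isOk
  if not columnsValid:
    break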
@@ -66,6 +75,7 @@ type of opOnBlock: blck: ForkedSignedBeaconBlock blobData: Opt[BlobData] + columnsValid: bool of opOnMergeBlock: powBlock: PowBlock of opOnPhase0AttesterSlashing: @@ -81,8 +91,7 @@ type proc initialLoad( path: string, db: BeaconChainDB, StateType, BlockType: typedesc -): tuple[dag: ChainDAGRef, fkChoice: ref ForkChoice] {.raises: [ - IOError, UnconsumedInput].} = +): tuple[dag: ChainDAGRef, fkChoice: ref ForkChoice] = let forkedState = loadForkedState( path/"anchor_state.ssz_snappy", @@ -91,9 +100,9 @@ proc initialLoad( ChainDAGRef.preInit(db, forkedState[]) let - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = ChainDAGRef.init( - forkedState[].kind.genesisTestRuntimeConfig, db, validatorMonitor, {}) + cfg = forkedState[].kind.genesisTestRuntimeConfig + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) + dag = ChainDAGRef.init(cfg, db, validatorMonitor, {}) fkChoice = newClone(ForkChoice.init( dag.getFinalizedEpochRef(), dag.finalizedHead.blck)) @@ -102,9 +111,7 @@ proc initialLoad( proc loadOps( path: string, fork: ConsensusFork -): seq[Operation] {.raises: [ - IOError, KeyError, UnconsumedInput, ValueError, - YamlConstructionError, YamlParserError].} = +): seq[Operation] {.raises: [KeyError, ValueError].} = let stepsYAML = os_ops.readFile(path/"steps.yaml") let steps = loadToJson(stepsYAML) @@ -131,7 +138,8 @@ proc loadOps( let blck = loadBlock(path/filename & ".ssz_snappy", consensusFork) blobData = - when consensusFork >= ConsensusFork.Deneb: + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + doAssert not step.hasKey"columns" if step.hasKey"blobs": numExtraFields += 2 Opt.some BlobData( @@ -146,9 +154,29 @@ proc loadOps( doAssert not step.hasKey"blobs" Opt.none(BlobData) + var columnsValid = true + when consensusFork >= ConsensusFork.Fulu: + doAssert not step.hasKey"blobs" + if step.hasKey"columns": + numExtraFields += 1 + if step["columns"].len < 64: + columnsValid = false + for column_name in step["columns"]: + let column = parseTest( + path/(column_name.getStr()) & ".ssz_snappy", SSZ, + fulu.DataColumnSidecar) + columnsValid = columnsValid and + verify_data_column_sidecar_inclusion_proof(column).isOk and + verify_data_column_sidecar_kzg_proofs(column).isOk + if not columnsValid: + break + else: + doAssert not step.hasKey"columns" + result.add Operation(kind: opOnBlock, blck: ForkedSignedBeaconBlock.init(blck), - blobData: blobData) + blobData: blobData, + columnsValid: columnsValid) elif step.hasKey"attester_slashing": let filename = step["attester_slashing"].getStr() if fork >= ConsensusFork.Electra: @@ -187,11 +215,12 @@ proc stepOnBlock( stateCache: var StateCache, signedBlock: ForkySignedBeaconBlock, blobData: Opt[BlobData], + columnsValid: bool, time: BeaconTime, invalidatedHashes: Table[Eth2Digest, Eth2Digest]): Result[BlockRef, VerifierError] = - # 1. Validate blobs - when typeof(signedBlock).kind >= ConsensusFork.Deneb: + # 1. Validate blobs and columns + when typeof(signedBlock).kind in [ConsensusFork.Deneb, ConsensusFork.Electra]: let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq if kzgCommits.len > 0 or blobData.isSome: if blobData.isNone or kzgCommits.validate_blobs( @@ -200,6 +229,9 @@ proc stepOnBlock( else: doAssert blobData.isNone, "Pre-Deneb test with specified blob data" + if not columnsValid: + return err(VerifierError.Invalid) + # 2. 
Move state to proper slot doAssert dag.updateState( state, @@ -218,7 +250,8 @@ proc stepOnBlock( # this wouldn't be part of this check, presumably, their FC test vector step # would also have `true` validity because it'd not be known they weren't, so # adding this mock of the block processor is realistic and sufficient. - when consensusFork >= ConsensusFork.Bellatrix: + when consensusFork >= ConsensusFork.Bellatrix and consensusFork != ConsensusFork.Gloas: + debugGloasComment "skip execution payload for Gloas?" let executionBlockHash = signedBlock.message.body.execution_payload.block_hash if executionBlockHash in invalidatedHashes: @@ -236,6 +269,7 @@ proc stepOnBlock( let blockAdded = dag.addHeadBlock(verifier, signedBlock) do ( blckRef: BlockRef, signedBlock: consensusFork.TrustedSignedBeaconBlock, + state: consensusFork.Beaconstate, epochRef: EpochRef, unrealized: FinalityCheckpoints): # 4. Update fork choice if valid @@ -244,7 +278,7 @@ proc stepOnBlock( doAssert status.isOk() # 5. Update DAG with new head - var quarantine = Quarantine.init() + var quarantine = Quarantine.init(dag.cfg) let newHead = fkChoice[].get_head(dag, time).get() dag.updateHead(dag.getBlockRef(newHead).get(), quarantine, []) if dag.needStateCachesAndForkChoicePruning(): @@ -297,12 +331,10 @@ proc stepChecks( raiseAssert "Unsupported check '" & $check & "'" proc doRunTest( - path: string, - fork: ConsensusFork -) {.raises: [ - IOError, KeyError, UnconsumedInput, ValueError, - YamlConstructionError, YamlParserError].} = - let db = BeaconChainDB.new("", inMemory = true) + path: string, fork: ConsensusFork) {.raises: [KeyError, ValueError].} = + let db = withConsensusFork(fork): + BeaconChainDB.new( + "", consensusFork.genesisTestRuntimeConfig, inMemory = true) defer: db.close() @@ -350,7 +382,7 @@ proc doRunTest( let status = stepOnBlock( stores.dag, stores.fkChoice, verifier, state[], stateCache, - forkyBlck, step.blobData, time, invalidatedHashes) + forkyBlck, step.blobData, step.columnsValid, time, invalidatedHashes) doAssert status.isOk == step.valid of opOnPhase0AttesterSlashing: let indices = check_attester_slashing( @@ -411,6 +443,8 @@ template fcSuite(suiteName: static[string], testPathElem: static[string]) = let testsPath = presetPath/path/testPathElem if kind != pcDir or not os_ops.dirExists(testsPath): continue + if path.contains("eip7732") or path.contains("eip7805") or path.contains("gloas"): + continue let fork = forkForPathComponent(path).valueOr: raiseAssert "Unknown test fork: " & testsPath for kind, path in walkDir(testsPath, relative = true, checkDir = true): diff --git a/tests/consensus_spec/test_fixture_fork_digest.nim b/tests/consensus_spec/test_fixture_fork_digest.nim new file mode 100644 index 0000000000..a96dd0668a --- /dev/null +++ b/tests/consensus_spec/test_fixture_fork_digest.nim @@ -0,0 +1,87 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
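The suite below covers Fulu's blob-parameter-only (BPO) fork digests: with the fork version and genesis validators root held fixed, crossing a BLOB_SCHEDULE boundary alone changes the expected digest. A hedged helper showing how the active entry would be picked, assuming the schedule is ordered by descending EPOCH as in the test data:

# Hedged, illustrative helper; the descending-EPOCH ordering is an assumption
# taken from the schedules used in the suite below.
func activeBlobParameters(
    schedule: openArray[BlobParameters], epoch: Epoch): BlobParameters =
  for entry in schedule:
    if epoch >= entry.EPOCH:
      return entry
  raiseAssert "epoch precedes the first scheduled blob parameters"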
+ +# https://github.com/ethereum/consensus-specs/blob/18387696969c0bb34e96164434a3a36edca296c9/tests/core/pyspec/eth2spec/test/fulu/validator/test_compute_fork_digest.py + +{.push raises: [].} +{.used.} + +import + unittest2, + ../../beacon_chain/spec/forks + +var cfg = defaultRuntimeConfig +cfg.ALTAIR_FORK_EPOCH = GENESIS_EPOCH +cfg.BELLATRIX_FORK_EPOCH = GENESIS_EPOCH +cfg.CAPELLA_FORK_EPOCH = GENESIS_EPOCH +cfg.DENEB_FORK_EPOCH = GENESIS_EPOCH +cfg.ELECTRA_FORK_EPOCH = 9.Epoch +cfg.FULU_FORK_EPOCH = 100.Epoch +cfg.BLOB_SCHEDULE = @[ + BlobParameters(EPOCH: 300.Epoch, MAX_BLOBS_PER_BLOCK: 300), + BlobParameters(EPOCH: 250.Epoch, MAX_BLOBS_PER_BLOCK: 275), + BlobParameters(EPOCH: 200.Epoch, MAX_BLOBS_PER_BLOCK: 200), + BlobParameters(EPOCH: 150.Epoch, MAX_BLOBS_PER_BLOCK: 175), + BlobParameters(EPOCH: 100.Epoch, MAX_BLOBS_PER_BLOCK: 100), + BlobParameters(EPOCH: 9.Epoch, MAX_BLOBS_PER_BLOCK: 9)] + +proc cfd( + cfg: RuntimeConfig, epoch: uint64, genesis_validators_root: Eth2Digest, + fork_version: array[4, byte], expected: array[4, byte]) = + var cfg = cfg + cfg.FULU_FORK_VERSION = Version(fork_version) + check: + ForkDigest(expected) == atEpoch( + ForkDigests.init(cfg, genesis_validators_root), epoch.Epoch, cfg) + ForkDigest(expected) == compute_fork_digest_fulu( + cfg, genesis_validators_root, epoch.Epoch) + +func getGvr(filling: uint8): Eth2Digest = + var res: Eth2Digest + for i in 0 ..< res.data.len: + res.data[i] = filling + res + +suite "EF - Fulu - BPO forkdigests": + test "Different lengths and blob limits": + cfg.cfd(100, getGvr(0), [6'u8, 0, 0, 0], [0xdf'u8, 0x67, 0x55, 0x7b]) + cfg.cfd(101, getGvr(0), [6'u8, 0, 0, 0], [0xdf'u8, 0x67, 0x55, 0x7b]) + cfg.cfd(150, getGvr(0), [6'u8, 0, 0, 0], [0x8a'u8, 0xb3, 0x8b, 0x59]) + cfg.cfd(199, getGvr(0), [6'u8, 0, 0, 0], [0x8a'u8, 0xb3, 0x8b, 0x59]) + cfg.cfd(200, getGvr(0), [6'u8, 0, 0, 0], [0xd9'u8, 0xb8, 0x14, 0x38]) + cfg.cfd(201, getGvr(0), [6'u8, 0, 0, 0], [0xd9'u8, 0xb8, 0x14, 0x38]) + cfg.cfd(250, getGvr(0), [6'u8, 0, 0, 0], [0x4e'u8, 0xf3, 0x2a, 0x62]) + cfg.cfd(299, getGvr(0), [6'u8, 0, 0, 0], [0x4e'u8, 0xf3, 0x2a, 0x62]) + cfg.cfd(300, getGvr(0), [6'u8, 0, 0, 0], [0xca'u8, 0x10, 0x0d, 0x64]) + cfg.cfd(301, getGvr(0), [6'u8, 0, 0, 0], [0xca'u8, 0x10, 0x0d, 0x64]) + + test "Different genesis validators roots": + cfg.cfd(100, getGvr(1), [6'u8, 0, 0, 0], [0xfd'u8, 0x3a, 0xa2, 0xa2]) + cfg.cfd(100, getGvr(2), [6'u8, 0, 0, 0], [0x80'u8, 0xc6, 0xbd, 0x97]) + cfg.cfd(100, getGvr(3), [6'u8, 0, 0, 0], [0xf2'u8, 0x09, 0xfd, 0xfc]) + + test "Different fork versions": + cfg.cfd(100, getGvr(0), [6'u8, 0, 0, 1], [0x44'u8, 0xa5, 0x71, 0xe8]) + cfg.cfd(100, getGvr(0), [7'u8, 0, 0, 0], [0x70'u8, 0x6f, 0x46, 0x1a]) + cfg.cfd(100, getGvr(0), [7'u8, 0, 0, 1], [0x1a'u8, 0x34, 0x15, 0xc2]) + + test "Fusaka devnet-2": + var cfg = cfg + cfg.ELECTRA_FORK_EPOCH = GENESIS_EPOCH + cfg.ELECTRA_FORK_VERSION = Version([0x60'u8, 0x93, 0x75, 0x44]) + cfg.FULU_FORK_EPOCH = 256.Epoch + cfg.BLOB_SCHEDULE = @[ + BlobParameters(EPOCH: 1584.Epoch, MAX_BLOBS_PER_BLOCK: 20), + BlobParameters(EPOCH: 1280.Epoch, MAX_BLOBS_PER_BLOCK: 9), + BlobParameters(EPOCH: 1024.Epoch, MAX_BLOBS_PER_BLOCK: 18), + BlobParameters(EPOCH: 768.Epoch, MAX_BLOBS_PER_BLOCK: 15), + BlobParameters(EPOCH: 512.Epoch, MAX_BLOBS_PER_BLOCK: 12)] + + cfg.cfd( + 256, + Eth2Digest.fromHex("0xd9d36cce7e1e5b021676d15cbc674ec2e02183a98373ca191a3cbcefca479f9b"), + [0x70'u8, 0x93, 0x75, 0x44], [0x36'u8, 0x9f, 0x89, 0xf7]) diff --git a/tests/consensus_spec/test_fixture_kzg.nim 
b/tests/consensus_spec/test_fixture_kzg.nim index 9a2f51c6e5..ecacd1a55b 100644 --- a/tests/consensus_spec/test_fixture_kzg.nim +++ b/tests/consensus_spec/test_fixture_kzg.nim @@ -5,19 +5,23 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import std/json, yaml/tojson, kzg4844/[kzg, kzg_abi], + taskpools, ../testutil, ./fixtures_utils, ./os_ops -from std/sequtils import anyIt, mapIt, toSeq +from std/algorithm import sorted +from std/sequtils import anyIt, filterIt, mapIt, toSeq from std/strutils import rsplit from stew/byteutils import fromHex +from ../../beacon_chain/spec/peerdas_helpers import + recover_matrix, recover_cells_and_proofs_parallel func toUInt64(s: int): Opt[uint64] = if s < 0: @@ -76,7 +80,7 @@ proc runVerifyKzgProofTest(suiteName, suitePath, path: string) = y = fromHex[32](data["input"]["y"].getStr) proof = fromHex[48](data["input"]["proof"].getStr) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/tests/formats/kzg_4844/verify_kzg_proof.md#condition + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/tests/formats/kzg_4844/verify_kzg_proof.md#condition # "If the commitment or proof is invalid (e.g. not on the curve or not in # the G1 subgroup of the BLS curve) or `z` or `y` are not a valid BLS # field element, it should error, i.e. the output should be `null`." @@ -209,7 +213,7 @@ proc runComputeCellsTest(suiteName, suitePath, path: string) = output = data["output"] blob = fromHex[131072](data["input"]["blob"].getStr) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/tests/formats/kzg_7594/compute_cells.md#condition + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/tests/formats/kzg_7594/compute_cells.md#condition if blob.isNone: check output.kind == JNull else: @@ -256,7 +260,7 @@ proc runVerifyCellKzgProofBatchTest(suiteName, suitePath, path: string) = cells = data["input"]["cells"].mapIt(fromHex[2048](it.getStr)) proofs = data["input"]["proofs"].mapIt(fromHex[48](it.getStr)) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/tests/formats/kzg_7594/verify_cell_kzg_proof_batch.md#condition + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/tests/formats/kzg_7594/verify_cell_kzg_proof_batch.md#condition # If the blob is invalid (e.g. incorrect length or one of the 32-byte # blocks does not represent a BLS field element) it should error, i.e. the # the output should be `null`. 
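That null-output convention applies across these KZG fixtures: every input field is parsed with an Opt-returning fromHex helper, and if any field fails to parse, the vector's expected output must be JSON null. A minimal sketch of the gate, with data and output as loaded from a case's data.yaml:

# Hedged sketch of the invalid-input gate used by the KZG fixture runners;
# `data` and `output` are the JSON nodes loaded from data.yaml.
let
  blob = fromHex[131072](data["input"]["blob"].getStr)
  proof = fromHex[48](data["input"]["proof"].getStr)
if blob.isNone or proof.isNone:
  check output.kind == JNull  # malformed input means the expected output is null
else:
  discard  # otherwise the decoded bytes are compared against the expected output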
@@ -305,7 +309,152 @@ proc runRecoverCellsAndKzgProofsTest(suiteName, suitePath, path: string) = check val.cells[i].bytes == fromHex[2048](output[0][i].getStr).get check val.proofs[i].bytes == fromHex[48](output[1][i].getStr).get -from std/algorithm import sorted +proc loadCellsAndKzgProofsValidCases( + suitePath: string): seq[MatrixEntry] + {.raises: [KeyError, OSError, YamlParserError, YamlConstructionError].} = + var + data: seq[MatrixEntry] + rowCount = 0 + for kind, path in walkDir(suitePath, relative = true, checkDir = true): + let + rowData = loadToJson(os_ops.readFile(suitePath/path/"data.yaml"))[0] + output = rowData["output"] + + # As per + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/tests/formats/kzg_7594/recover_cells_and_kzg_proofs.md#condition + # ensuring it is valid case + if output.kind == JNull: + continue + + for i in 0..= length of cells for each row + if invalidCells.len > invalidIndices.len: + continue + + var + shouldSkip = false + colInput = newSeq[ref fulu.DataColumnSidecar](invalidIndices.len) + for i in 0 ..< colInput.lenu64: + let cIdx = invalidIndices[i.int].getInt.toUInt64.get + var cells: seq[Cell] + + # insert rows from data of valid cases if it is a valid index + if cIdx < NUMBER_OF_COLUMNS: + for j in 0 ..< validRowCount: + let vIdx = NUMBER_OF_COLUMNS * j + cIdx + cells.add(Cell(bytes: validData[vIdx].cell.bytes)) + + # insert the invalid data as the last cell + if i < invalidCells.lenu64: + let cellBytes = fromHex[2048](invalidCells[i.int].getStr).valueOr: + # As per + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/tests/formats/kzg_7594/recover_cells_and_kzg_proofs.md#condition + # this is an invalid case. However, this is a limitation by design that + # when the cell is not in 2048-length, it will be in default value and + # recover without any failures + check invalidData["output"].kind == JNull + shouldSkip = true + break + cells.add(Cell(bytes: cellBytes)) + + # set data column + colInput[i] = (ref fulu.DataColumnSidecar)( + index: ColumnIndex(cIdx), + column: DataColumn(cells)) + + if shouldSkip: + continue + + # check error + var tp = Taskpool.new() + let v = tp.recover_cells_and_proofs_parallel(colInput) + check v.isErr var suiteName = "EF - KZG" @@ -313,11 +462,12 @@ suite suiteName: const suitePath = SszTestsDir/"general"/"deneb"/"kzg" # TODO also check that the only direct subdirectory of each is kzg-mainnet + # TODO `compute_challenge` isn't provided by nim-kzg4844 yet doAssert sorted(mapIt( toSeq(walkDir(suitePath, relative = true, checkDir = true)), it.path)) == - ["blob_to_kzg_commitment", "compute_blob_kzg_proof", "compute_kzg_proof", - "verify_blob_kzg_proof", "verify_blob_kzg_proof_batch", - "verify_kzg_proof"] + ["blob_to_kzg_commitment", "compute_blob_kzg_proof", "compute_challenge", + "compute_kzg_proof", "verify_blob_kzg_proof", + "verify_blob_kzg_proof_batch", "verify_kzg_proof"] block: let testsDir = suitePath/"blob_to_kzg_commitment"/"kzg-mainnet" @@ -355,9 +505,12 @@ suite suiteName: const suitePath = SszTestsDir/"general"/"fulu"/"kzg" # TODO also check that the only direct subdirectory of each is kzg-mainnet + # TODO `compute_verify_cell_kzg_proof_batch_challenge` isn't provided by + # nim-kzg4844 yet doAssert sorted(mapIt( toSeq(walkDir(suitePath, relative = true, checkDir = true)), it.path)) == ["compute_cells", "compute_cells_and_kzg_proofs", + "compute_verify_cell_kzg_proof_batch_challenge", "recover_cells_and_kzg_proofs", "verify_cell_kzg_proof_batch"] block: @@ -375,6 +528,11 @@ 
suite suiteName: for kind, path in walkDir(testsDir, relative = true, checkDir = true): runRecoverCellsAndKzgProofsTest(suiteName, testsDir, testsDir/path) + block: + let testsDir = suitePath/"recover_cells_and_kzg_proofs"/"kzg-mainnet" + runRecoverCellsAndKzgProofsParallelValidTest(suiteName, testsDir) + runRecoverCellsAndKzgProofsParallelInvalidTest(suiteName, testsDir) + block: let testsDir = suitePath/"verify_cell_kzg_proof_batch"/"kzg-mainnet" for kind, path in walkDir(testsDir, relative = true, checkDir = true): diff --git a/tests/consensus_spec/test_fixture_light_client_data_collection.nim b/tests/consensus_spec/test_fixture_light_client_data_collection.nim index 544d2f808b..ed087f65bd 100644 --- a/tests/consensus_spec/test_fixture_light_client_data_collection.nim +++ b/tests/consensus_spec/test_fixture_light_client_data_collection.nim @@ -82,8 +82,7 @@ proc loadForked[T: not Opt]( proc loadSteps( path: string, fork_digests: ForkDigests -): seq[TestStep] {.raises: [ - IOError, KeyError, ValueError, YamlConstructionError, YamlParserError].} = +): seq[TestStep] {.raises: [KeyError, ValueError].} = template loadForked[T](t: typedesc[T], s: JsonNode): T = loadForked(t, s, path, fork_digests) @@ -134,12 +133,12 @@ proc runTest(suiteName, path: string, consensusFork: static ConsensusFork) = (cfg, _) = readRuntimeConfig(path/"config.yaml") initial_state = loadForkedState( path/"initial_state.ssz_snappy", consensusFork) - db = BeaconChainDB.new("", cfg = cfg, inMemory = true) + db = BeaconChainDB.new("", cfg, inMemory = true) defer: db.close() ChainDAGRef.preInit(db, initial_state[]) let - validatorMonitor = newClone(ValidatorMonitor.init(false, false)) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time, false, false)) dag = ChainDAGRef.init(cfg, db, validatorMonitor, {}, lcDataConfig = LightClientDataConfig( serve: true, importMode: LightClientDataImportMode.Full)) @@ -147,7 +146,7 @@ proc runTest(suiteName, path: string, consensusFork: static ConsensusFork) = taskpool = Taskpool.new() var verifier = BatchVerifier.init(rng, taskpool) - quarantine = newClone(Quarantine.init()) + quarantine = newClone(Quarantine.init(cfg)) let steps = loadSteps(path, dag.forkDigests[]) for i, step in steps: @@ -155,7 +154,7 @@ proc runTest(suiteName, path: string, consensusFork: static ConsensusFork) = of TestStepKind.NewBlock: checkpoint $i & " new_block: " & $shortLog(step.blck.toBlockId()) let added = withBlck(step.blck): - const nilCallback = (consensusFork.OnBlockAddedCallback)(nil) + const nilCallback = OnBlockAdded[consensusFork](nil) dag.addHeadBlock(verifier, forkyBlck, nilCallback) check: added.isOk() of TestStepKind.NewHead: diff --git a/tests/consensus_spec/test_fixture_light_client_sync.nim b/tests/consensus_spec/test_fixture_light_client_sync.nim index a92ba73ee6..9a8f1f91e5 100644 --- a/tests/consensus_spec/test_fixture_light_client_sync.nim +++ b/tests/consensus_spec/test_fixture_light_client_sync.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
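A recurring detail in the light-client and fork-choice changes above is that the harness constructors now receive the runtime config (or its time section) explicitly instead of defaulting it. A hedged recap of the wiring as it appears in the data collection test, with cfg read from the vector's config.yaml:

# Hedged recap of the updated harness wiring shown above; cfg comes from
# readRuntimeConfig(path/"config.yaml").
let
  db = BeaconChainDB.new("", cfg, inMemory = true)
  validatorMonitor = newClone(ValidatorMonitor.init(cfg.time, false, false))
  dag = ChainDAGRef.init(cfg, db, validatorMonitor, {})
  quarantine = newClone(Quarantine.init(cfg))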
@@ -56,8 +56,7 @@ type proc loadSteps( path: string, fork_digests: ForkDigests -): seq[TestStep] {.raises: [ - KeyError, ValueError, YamlConstructionError, YamlParserError].} = +): seq[TestStep] {.raises: [KeyError, ValueError].} = let stepsYAML = os_ops.readFile(path/"steps.yaml") let steps = loadToJson(stepsYAML) @@ -130,8 +129,10 @@ proc runTest(suiteName, path: string) = let relativePathComponent = path.relativeTestPathComponent() test "Light client - Sync - " & relativePathComponent: # Reduce stack size by making this a `proc` - proc loadTestMeta(): (RuntimeConfig, TestMeta) {.raises: [ - Exception, IOError, PresetFileError, PresetIncompatibleError].} = + proc loadTestMeta(): (RuntimeConfig, TestMeta) + {.raises: [IOError, OSError, PresetFileError, + PresetIncompatibleError, ValueError, + YamlConstructionError, YamlParserError].} = let (cfg, _) = readRuntimeConfig(path/"config.yaml") type TestMetaYaml {.sparse.} = object diff --git a/tests/consensus_spec/test_fixture_merkle_proof.nim b/tests/consensus_spec/test_fixture_merkle_proof.nim index d19010469c..b790b521f5 100644 --- a/tests/consensus_spec/test_fixture_merkle_proof.nim +++ b/tests/consensus_spec/test_fixture_merkle_proof.nim @@ -1,11 +1,11 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import @@ -69,8 +69,14 @@ suite "EF - Merkle proof" & preset(): let objName = path withConsensusFork(fork): for kind, path in walkDir(suitePath, relative = true, checkDir = true): + let testPath = suitePath/path + if not fileExists(testPath/"proof.yaml"): + debugGloasComment "proof.yaml missing for Gloas" + test "Merkle proof - Single merkle proof - " & path: + skip() + continue case objName of "BeaconBlockBody": - runTest(suiteName, suitePath/path, consensusFork.BeaconBlockBody) + runTest(suiteName, testPath, consensusFork.BeaconBlockBody) else: - raiseAssert "Unknown test object: " & suitePath/path + raiseAssert "Unknown test object: " & testPath diff --git a/tests/consensus_spec/test_fixture_networking.nim b/tests/consensus_spec/test_fixture_networking.nim index 95dbadd010..8c0f774508 100644 --- a/tests/consensus_spec/test_fixture_networking.nim +++ b/tests/consensus_spec/test_fixture_networking.nim @@ -11,9 +11,8 @@ import std/[json, streams], yaml, - kzg4844/[kzg, kzg_abi], stint, - eth/p2p/discoveryv5/[node], + eth/p2p/discoveryv5/node, ../../beacon_chain/spec/peerdas_helpers, ../testutil, ./fixtures_utils, ./os_ops @@ -35,7 +34,8 @@ proc runComputeForCustodyGroup(suiteName, path: string) = custody_group = meta.custody_group var counter = 0 - for column in compute_columns_for_custody_group(custody_group): + for column in compute_columns_for_custody_group( + defaultRuntimeConfig, custody_group): check column == meta.result[counter] inc counter @@ -56,7 +56,8 @@ proc runGetCustodyGroups(suiteName, path: string) = node_id = UInt256.fromDecimal(meta.node_id) custody_group_count = meta.custody_group_count - let columns = get_custody_groups(node_id, custody_group_count) + let columns = defaultRuntimeConfig.get_custody_groups( + node_id, 
custody_group_count) for i in 0..= ConsensusFork.Electra: + pool.getAttestationsForBlock(forkyState, cache) + else: + raiseAssert "invalid fork" + +suite "Attestation pool electra processing" & preset(): ## For now just test that we can compile and execute block processing with ## mock data. setup: - # Genesis state that results in 6 members per committee + # Genesis state that results in 6 members per committee (2 committees total) let rng = HmacDrbgContext.new() + const TOTAL_COMMITTEES = 2 var - validatorMonitor = newClone(ValidatorMonitor.init()) + cfg = genesisTestRuntimeConfig(ConsensusFork.Electra) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) dag = init( - ChainDAGRef, defaultRuntimeConfig, makeTestDB(SLOTS_PER_EPOCH * 6), + ChainDAGRef, cfg, + cfg.makeTestDB( + TOTAL_COMMITTEES * TARGET_COMMITTEE_SIZE * SLOTS_PER_EPOCH), validatorMonitor, {}) taskpool = Taskpool.new() - verifier = BatchVerifier.init(rng, taskpool) - quarantine = newClone(Quarantine.init()) + verifier {.used.} = BatchVerifier.init(rng, taskpool) + quarantine = newClone(Quarantine.init(dag.cfg)) pool = newClone(AttestationPool.init(dag, quarantine)) state = newClone(dag.headState) cache = StateCache() @@ -87,7 +99,11 @@ suite "Attestation pool processing" & preset(): # Slot 0 is a finalized slot - won't be making attestations for it.. check: process_slots( - dag.cfg, state[], getStateField(state[], slot) + 1, cache, info, + dag.cfg, + state[], + getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, + cache, + info, {}).isOk() test "Attestation from different branch" & preset(): @@ -110,7 +126,7 @@ suite "Attestation pool processing" & preset(): blck = addTestBlock( state[], cache, attestations = attestations, cfg = dag.cfg) check dag.addHeadBlock( - verifier, blck.phase0Data, OnPhase0BlockAdded(nil)).isOk + verifier, blck.electraData, OnBlockAdded[ConsensusFork.Electra](nil)).isOk # History 1 contains all odd blocks state.fillToEpoch(cache) @@ -136,13 +152,13 @@ suite "Attestation pool processing" & preset(): slot = getStateField(state[], slot) parent_root = withState(state[]): forkyState.latest_block_root committee = get_beacon_committee(state[], slot, cIndex, cache) - makeAttestation(state[], parent_root, committee[0], cache) + makeElectraAttestation(state[], parent_root, committee[0], cache) att2 = block: let slot = getStateField(state2[], slot) parent_root = withState(state2[]): forkyState.latest_block_root committee = get_beacon_committee(state2[], slot, cIndex, cache2) - makeAttestation(state2[], parent_root, committee[0], cache2) + makeElectraAttestation(state2[], parent_root, committee[0], cache2) maxSlot = max(att1.data.slot, att2.data.slot) # Advance time so attestations become valid @@ -156,24 +172,28 @@ suite "Attestation pool processing" & preset(): # They should remain valid only within a compatible state withState(state[]): - check: - check_attestation(forkyState.data, att1, {}, cache).isOk - check_attestation(forkyState.data, att2, {}, cache).isErr + when consensusFork >= ConsensusFork.Electra: + check: + check_attestation(forkyState.data, att1, {}, cache, true).isOk + check_attestation(forkyState.data, att2, {}, cache, true).isErr withState(state2[]): - check: - check_attestation(forkyState.data, att1, {}, cache2).isErr - check_attestation(forkyState.data, att2, {}, cache2).isOk + when consensusFork >= ConsensusFork.Electra: + check: + check_attestation(forkyState.data, att1, {}, cache2, true).isErr + check_attestation(forkyState.data, att2, {}, cache2, true).isOk # If 
signature checks are skipped, state incompatibility is not detected - let flags = {skipBlsValidation} + const flags = {skipBlsValidation} withState(state[]): - check: - check_attestation(forkyState.data, att1, flags, cache).isOk - check_attestation(forkyState.data, att2, flags, cache).isOk + when consensusFork >= ConsensusFork.Electra: + check: + check_attestation(forkyState.data, att1, flags, cache, true).isOk + check_attestation(forkyState.data, att2, flags, cache, true).isOk withState(state2[]): - check: - check_attestation(forkyState.data, att1, flags, cache2).isOk - check_attestation(forkyState.data, att2, flags, cache2).isOk + when consensusFork >= ConsensusFork.Electra: + check: + check_attestation(forkyState.data, att1, flags, cache2, true).isOk + check_attestation(forkyState.data, att2, flags, cache2, true).isOk # An additional compatibility check catches that (used in block production) withState(state[]): @@ -185,67 +205,47 @@ suite "Attestation pool processing" & preset(): check_attestation_compatible(dag, forkyState, att1).isErr check_attestation_compatible(dag, forkyState, att2).isOk - test "Can add and retrieve simple attestations" & preset(): + test "Can add and retrieve simple electra attestations" & preset(): let # Create an attestation for slot 1! bc0 = get_beacon_committee( state[], getStateField(state[], slot), 0.CommitteeIndex, cache) - attestation = makeAttestation( + attestation = makeElectraAttestation( state[], state[].latest_block_root, bc0[0], cache) pool[].addAttestation( attestation, @[bc0[0]], attestation.aggregation_bits.len, attestation.loadSig, attestation.data.slot.start_beacon_time) - check: - # Added attestation, should get it back - toSeq(pool[].attestations(Opt.none(Slot), Opt.none(CommitteeIndex))) == - @[attestation] - toSeq(pool[].attestations( - Opt.some(attestation.data.slot), Opt.none(CommitteeIndex))) == - @[attestation] - toSeq(pool[].attestations( - Opt.some(attestation.data.slot), Opt.some(attestation.data.index.CommitteeIndex))) == - @[attestation] - toSeq(pool[].attestations(Opt.none(Slot), Opt.some(attestation.data.index.CommitteeIndex))) == - @[attestation] - toSeq(pool[].attestations(Opt.some( - attestation.data.slot + 1), Opt.none(CommitteeIndex))) == [] - toSeq(pool[].attestations( - Opt.none(Slot), Opt.some(CommitteeIndex(attestation.data.index + 1)))) == [] + check cfg.process_slots( + state[], getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, + cache, info, {}).isOk() - process_slots( - defaultRuntimeConfig, state[], - getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, - info, {}).isOk() - - let attestations = pool[].getAttestationsForBlock(state[], cache) + let attestations = pool[].getElectraAttestationsForBlock(state[], cache) check: attestations.len == 1 - pool[].getPhase0AggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome() let root1 = addTestBlock( - state[], cache, attestations = attestations, - nextSlot = false).phase0Data.root + state[], cache, electraAttestations = attestations, + nextSlot = false).electraData.root bc1 = get_beacon_committee( state[], getStateField(state[], slot), 0.CommitteeIndex, cache) - att1 = makeAttestation(state[], root1, bc1[0], cache) + att1 = makeElectraAttestation(state[], root1, bc1[0], cache) check: withState(state[]): forkyState.latest_block_root == root1 - process_slots( - defaultRuntimeConfig, state[], - getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, - info, {}).isOk() + cfg.process_slots( + state[], getStateField(state[], slot) + 
MIN_ATTESTATION_INCLUSION_DELAY, + cache, info, {}).isOk() withState(state[]): forkyState.latest_block_root == root1 check: # shouldn't include already-included attestations - pool[].getAttestationsForBlock(state[], cache) == [] + pool[].getElectraAttestationsForBlock(state[], cache) == [] pool[].addAttestation( att1, @[bc1[0]], att1.aggregation_bits.len, att1.loadSig, @@ -253,16 +253,16 @@ suite "Attestation pool processing" & preset(): check: # but new ones should go in - pool[].getAttestationsForBlock(state[], cache).len() == 1 + pool[].getElectraAttestationsForBlock(state[], cache).len() == 1 let - att2 = makeAttestation(state[], root1, bc1[1], cache) + att2 = makeElectraAttestation(state[], root1, bc1[1], cache) pool[].addAttestation( att2, @[bc1[1]], att2.aggregation_bits.len, att2.loadSig, att2.data.slot.start_beacon_time) let - combined = pool[].getAttestationsForBlock(state[], cache) + combined = pool[].getElectraAttestationsForBlock(state[], cache) check: # New attestations should be combined with old attestations @@ -275,11 +275,11 @@ suite "Attestation pool processing" & preset(): check: # readding the combined attestation shouldn't have an effect - pool[].getAttestationsForBlock(state[], cache).len() == 1 + pool[].getElectraAttestationsForBlock(state[], cache).len() == 1 let # Someone votes for a different root - att3 = makeAttestation(state[], ZERO_HASH, bc1[2], cache) + att3 = makeElectraAttestation(state[], ZERO_HASH, bc1[2], cache) pool[].addAttestation( att3, @[bc1[2]], att3.aggregation_bits.len, att3.loadSig, att3.data.slot.start_beacon_time) @@ -287,119 +287,149 @@ suite "Attestation pool processing" & preset(): check: # We should now get both attestations for the block, but the aggregate # should be the one with the most votes - pool[].getAttestationsForBlock(state[], cache).len() == 2 - pool[].getPhase0AggregatedAttestation(2.Slot, 0.CommitteeIndex). - get().aggregation_bits.countOnes() == 2 - pool[].getPhase0AggregatedAttestation(2.Slot, hash_tree_root(att2.data)). - get().aggregation_bits.countOnes() == 2 + pool[].getElectraAttestationsForBlock(state[], cache).len() == 2 + pool[].getElectraAggregatedAttestation(2.Slot, hash_tree_root(combined[0].data), + 0.CommitteeIndex).get().aggregation_bits.countOnes() == 2 + pool[].getElectraAggregatedAttestation(2.Slot, hash_tree_root(att2.data), 0.CommitteeIndex). 
+ get().aggregation_bits.countOnes() == 2 + # requests to get and aggregate from different committees should be empty + pool[].getElectraAggregatedAttestation( + 2.Slot, hash_tree_root(combined[0].data), 1.CommitteeIndex).isNone() + test "Attestations with disjoint committee bits and equal data into single on-chain aggregate" & preset(): + let - # Someone votes for a different root - att4 = makeAttestation(state[], ZERO_HASH, bc1[2], cache) + bc0 = get_beacon_committee( + state[], getStateField(state[], slot), 0.CommitteeIndex, cache) + + bc1 = get_beacon_committee( + state[], getStateField(state[], slot), 1.CommitteeIndex, cache) + + # attestation from committee 0 + attestation_1 = makeElectraAttestation( + state[], state[].latest_block_root, bc0[0], cache) + + # attestation from different committee with same data as + # attestation 1 + attestation_2 = makeElectraAttestation( + state[], state[].latest_block_root, bc1[1], cache) + pool[].addAttestation( - att4, @[bc1[2]], att4.aggregation_bits.len, att3.loadSig, - att3.data.slot.start_beacon_time) + attestation_1, @[bc0[0]], attestation_1.aggregation_bits.len, + attestation_1.loadSig, attestation_1.data.slot.start_beacon_time) + + pool[].addAttestation( + attestation_2, @[bc0[1]], attestation_2.aggregation_bits.len, + attestation_2.loadSig, attestation_2.data.slot.start_beacon_time) + + check cfg.process_slots( + state[], getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, + cache, info, {}).isOk() + + let attestations = pool[].getElectraAttestationsForBlock(state[], cache) + + check: + # A single final chain aggregated attestation should be created + # with same data and joint committee/aggregation bits + attestations.len == 1 + attestations[0].aggregation_bits.countOnes() == 2 + attestations[0].committee_bits.countOnes() == 2 + + test "Aggregated attestations with disjoint committee bits into a single on-chain aggregate" & preset(): + proc verifyAttestationSignature(attestation: electra.Attestation): bool = + withState(state[]): + when consensusFork == ConsensusFork.Electra: + let + fork = pool.dag.cfg.forkAtEpoch(forkyState.data.slot.epoch) + attesting_indices = get_attesting_indices( + forkyState.data, attestation.data, attestation.aggregation_bits, + attestation.committee_bits, cache) + verify_attestation_signature( + fork, pool.dag.genesis_validators_root, attestation.data, + attesting_indices.mapIt(forkyState.data.validators.item(it).pubkey), + attestation.signature) + else: + raiseAssert "must be electra" - test "Working with aggregates" & preset(): let - # Create an attestation for slot 1!
bc0 = get_beacon_committee( state[], getStateField(state[], slot), 0.CommitteeIndex, cache) - var - att0 = makeAttestation( + bc1 = get_beacon_committee( + state[], getStateField(state[], slot), 1.CommitteeIndex, cache) + + # attestation from first committee + attestation_1 = makeElectraAttestation( state[], state[].latest_block_root, bc0[0], cache) - att0x = att0 - att1 = makeAttestation( + + # another attestation from first committee with same data + attestation_2 = makeElectraAttestation( state[], state[].latest_block_root, bc0[1], cache) - att2 = makeAttestation( - state[], state[].latest_block_root, bc0[2], cache) - att3 = makeAttestation( - state[], state[].latest_block_root, bc0[3], cache) - # Both attestations include member 2 but neither is a subset of the other - att0.combine(att2) - att1.combine(att2) + # attestation from different committee with same data as + # attestation 1 + attestation_3 = makeElectraAttestation( + state[], state[].latest_block_root, bc1[1], cache) check: - not pool[].covers(att0.data, att0.aggregation_bits) - not pool[].covers(att1.data, att1.aggregation_bits) + verifyAttestationSignature(attestation_1) + verifyAttestationSignature(attestation_2) + verifyAttestationSignature(attestation_3) pool[].addAttestation( - att0, @[bc0[0], bc0[2]], att0.aggregation_bits.len, att0.loadSig, - att0.data.slot.start_beacon_time) - pool[].addAttestation( - att1, @[bc0[1], bc0[2]], att1.aggregation_bits.len, att1.loadSig, - att1.data.slot.start_beacon_time) - - check: - process_slots( - defaultRuntimeConfig, state[], - getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, - info, {}).isOk() + attestation_1, @[bc0[0]], attestation_1.aggregation_bits.len, + attestation_1.loadSig, attestation_1.data.slot.start_beacon_time) - check: - pool[].covers(att0.data, att0.aggregation_bits) - pool[].covers(att1.data, att1.aggregation_bits) - pool[].getAttestationsForBlock(state[], cache).len() == 2 - # Can get either aggregate here, random! - pool[].getPhase0AggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome() + pool[].addAttestation( + attestation_2, @[bc0[1]], attestation_2.aggregation_bits.len, + attestation_2.loadSig, attestation_2.data.slot.start_beacon_time) - # Add in attestation 3 - both aggregates should now have it added pool[].addAttestation( - att3, @[bc0[3]], att3.aggregation_bits.len, att3.loadSig, - att3.data.slot.start_beacon_time) + attestation_3, @[bc1[1]], attestation_3.aggregation_bits.len, + attestation_3.loadSig, attestation_3.data.slot.start_beacon_time) - block: - let attestations = pool[].getAttestationsForBlock(state[], cache) - check: - attestations.len() == 2 - attestations[0].aggregation_bits.countOnes() == 3 - # Can get either aggregate here, random! 
- pool[].getPhase0AggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome() + check cfg.process_slots( + state[], getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, + cache, info, {}).isOk() - # Add in attestation 0 as single - attestation 1 is now a superset of the - # aggregates in the pool, so everything else should be removed - pool[].addAttestation( - att0x, @[bc0[0]], att0x.aggregation_bits.len, att0x.loadSig, - att0x.data.slot.start_beacon_time) + let attestations = pool[].getElectraAttestationsForBlock(state[], cache) - block: - let attestations = pool[].getAttestationsForBlock(state[], cache) - check: - attestations.len() == 1 - attestations[0].aggregation_bits.countOnes() == 4 - pool[].getPhase0AggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome() + check: + verifyAttestationSignature(attestations[0]) + check_attestation( + state[].electraData.data, attestations[0], {}, cache, true).isOk + + # A single final chain aggregated attestation should be created + # with same data, 2 committee bits and 3 aggregation bits + attestations.len == 1 + attestations[0].aggregation_bits.countOnes() == 3 + attestations[0].committee_bits.countOnes() == 2 test "Everyone voting for something different" & preset(): var attestations: int for i in 0.. MAX_ATTESTATIONS, - "6*SLOTS_PER_EPOCH validators > 128 mainnet MAX_ATTESTATIONS" + doAssert attestations.uint64 > MAX_ATTESTATIONS_ELECTRA, + "6*SLOTS_PER_EPOCH validators > 8 mainnet MAX_ATTESTATIONS_ELECTRA" check: # Fill block with attestations - pool[].getAttestationsForBlock(state[], cache).lenu64() == - MAX_ATTESTATIONS - pool[].getPhase0AggregatedAttestation( + pool[].getElectraAttestationsForBlock(state[], cache).lenu64() == + MAX_ATTESTATIONS_ELECTRA + pool[].getElectraAggregatedAttestation( getStateField(state[], slot) - 1, 0.CommitteeIndex).isSome() test "Attestations may arrive in any order" & preset(): @@ -408,18 +438,16 @@ suite "Attestation pool processing" & preset(): # Create an attestation for slot 1! 
bc0 = get_beacon_committee( state[], getStateField(state[], slot), 0.CommitteeIndex, cache) - attestation0 = makeAttestation( + attestation0 = makeElectraAttestation( state[], state[].latest_block_root, bc0[0], cache) - check: - process_slots( - defaultRuntimeConfig, state[], getStateField(state[], slot) + 1, - cache, info, {}).isOk() + check cfg.process_slots( + state[], getStateField(state[], slot) + 1, cache, info, {}).isOk() let bc1 = get_beacon_committee(state[], getStateField(state[], slot), 0.CommitteeIndex, cache) - attestation1 = makeAttestation( + attestation1 = makeElectraAttestation( state[], state[].latest_block_root, bc1[0], cache) # test reverse order @@ -430,7 +458,7 @@ suite "Attestation pool processing" & preset(): attestation0, @[bc0[0]], attestation0.aggregation_bits.len, attestation0.loadSig, attestation0.data.slot.start_beacon_time) - let attestations = pool[].getAttestationsForBlock(state[], cache) + let attestations = pool[].getElectraAttestationsForBlock(state[], cache) check: attestations.len == 1 @@ -442,9 +470,9 @@ suite "Attestation pool processing" & preset(): bc0 = get_beacon_committee( state[], getStateField(state[], slot), 0.CommitteeIndex, cache) attestation0 = - makeAttestation(state[], state[].latest_block_root, bc0[0], cache) + makeElectraAttestation(state[], state[].latest_block_root, bc0[0], cache) attestation1 = - makeAttestation(state[], state[].latest_block_root, bc0[1], cache) + makeElectraAttestation(state[], state[].latest_block_root, bc0[1], cache) pool[].addAttestation( attestation0, @[bc0[0]], attestation0.aggregation_bits.len, @@ -453,12 +481,11 @@ suite "Attestation pool processing" & preset(): attestation1, @[bc0[1]], attestation1.aggregation_bits.len, attestation1.loadSig, attestation1.data.slot.start_beacon_time) - check: - process_slots( - defaultRuntimeConfig, state[], - MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {}).isOk() + check cfg.process_slots( + state[], MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, + cache, info, {}).isOk() - let attestations = pool[].getAttestationsForBlock(state[], cache) + let attestations = pool[].getElectraAttestationsForBlock(state[], cache) check: attestations.len == 1 @@ -470,9 +497,9 @@ suite "Attestation pool processing" & preset(): # Create an attestation for slot 1! bc0 = get_beacon_committee( state[], getStateField(state[], slot), 0.CommitteeIndex, cache) - attestation0 = makeAttestation( + attestation0 = makeElectraAttestation( state[], state[].latest_block_root, bc0[0], cache) - attestation1 = makeAttestation( + attestation1 = makeElectraAttestation( state[], state[].latest_block_root, bc0[1], cache) attestation0.combine(attestation1) @@ -484,12 +511,11 @@ suite "Attestation pool processing" & preset(): attestation1, @[bc0[1]], attestation1.aggregation_bits.len, attestation1.loadSig, attestation1.data.slot.start_beacon_time) - check: - process_slots( - defaultRuntimeConfig, state[], - MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {}).isOk() + check cfg.process_slots( + state[], MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, + cache, info, {}).isOk() - let attestations = pool[].getAttestationsForBlock(state[], cache) + let attestations = pool[].getElectraAttestationsForBlock(state[], cache) check: attestations.len == 1 @@ -500,9 +526,9 @@ suite "Attestation pool processing" & preset(): # Create an attestation for slot 1! 
bc0 = get_beacon_committee(state[], getStateField(state[], slot), 0.CommitteeIndex, cache) - attestation0 = makeAttestation( + attestation0 = makeElectraAttestation( state[], state[].latest_block_root, bc0[0], cache) - attestation1 = makeAttestation( + attestation1 = makeElectraAttestation( state[], state[].latest_block_root, bc0[1], cache) attestation0.combine(attestation1) @@ -514,12 +540,11 @@ suite "Attestation pool processing" & preset(): attestation0, @[bc0[0]], attestation0.aggregation_bits.len, attestation0.loadSig, attestation0.data.slot.start_beacon_time) - check: - process_slots( - defaultRuntimeConfig, state[], - MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {}).isOk() + check cfg.process_slots( + state[], MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, + cache, info, {}).isOk() - let attestations = pool[].getAttestationsForBlock(state[], cache) + let attestations = pool[].getElectraAttestationsForBlock(state[], cache) check: attestations.len == 1 @@ -527,9 +552,10 @@ suite "Attestation pool processing" & preset(): test "Fork choice returns latest block with no attestations": var cache = StateCache() let - b1 = addTestBlock(state[], cache).phase0Data + b1 = addTestBlock(state[], cache).electraData b1Add = dag.addHeadBlock(verifier, b1) do ( - blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + blckRef: BlockRef, signedBlock: electra.TrustedSignedBeaconBlock, + state: electra.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -542,9 +568,10 @@ suite "Attestation pool processing" & preset(): head == b1Add[] let - b2 = addTestBlock(state[], cache).phase0Data + b2 = addTestBlock(state[], cache).electraData b2Add = dag.addHeadBlock(verifier, b2) do ( - blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + blckRef: BlockRef, signedBlock: electra.TrustedSignedBeaconBlock, + state: electra.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -560,9 +587,10 @@ suite "Attestation pool processing" & preset(): test "Fork choice returns block with attestation": var cache = StateCache() let - b10 = makeTestBlock(state[], cache).phase0Data + b10 = makeTestBlock(state[], cache).electraData b10Add = dag.addHeadBlock(verifier, b10) do ( - blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + blckRef: BlockRef, signedBlock: electra.TrustedSignedBeaconBlock, + state: electra.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -580,9 +608,10 @@ suite "Attestation pool processing" & preset(): let b11 = makeTestBlock(state[], cache, graffiti = GraffitiBytes [1'u8, 0, 0, 0 ,0 ,0 ,0 ,0 ,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ).phase0Data + ).electraData b11Add = dag.addHeadBlock(verifier, b11) do ( - blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + blckRef: BlockRef, signedBlock: electra.TrustedSignedBeaconBlock, + state: electra.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -592,7 +621,7 @@ suite "Attestation pool processing" & preset(): bc1 = get_beacon_committee( state[], getStateField(state[], slot) - 1, 1.CommitteeIndex, cache) - attestation0 = makeAttestation(state[], b10.root, bc1[0], cache) + attestation0 = makeElectraAttestation(state[], b10.root, bc1[0], cache) 
pool[].addAttestation( attestation0, @[bc1[0]], attestation0.aggregation_bits.len, @@ -606,8 +635,8 @@ suite "Attestation pool processing" & preset(): head2 == b10Add[] let - attestation1 = makeAttestation(state[], b11.root, bc1[1], cache) - attestation2 = makeAttestation(state[], b11.root, bc1[2], cache) + attestation1 = makeElectraAttestation(state[], b11.root, bc1[1], cache) + attestation2 = makeElectraAttestation(state[], b11.root, bc1[2], cache) pool[].addAttestation( attestation1, @[bc1[1]], attestation1.aggregation_bits.len, attestation1.loadSig, attestation1.data.slot.start_beacon_time) @@ -634,9 +663,10 @@ suite "Attestation pool processing" & preset(): test "Trying to add a block twice tags the second as an error": var cache = StateCache() let - b10 = makeTestBlock(state[], cache).phase0Data + b10 = makeTestBlock(state[], cache).electraData b10Add = dag.addHeadBlock(verifier, b10) do ( - blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + blckRef: BlockRef, signedBlock: electra.TrustedSignedBeaconBlock, + state: electra.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -653,7 +683,8 @@ suite "Attestation pool processing" & preset(): # Add back the old block to ensure we have a duplicate error let b10_clone = b10 # Assumes deep copy let b10Add_clone = dag.addHeadBlock(verifier, b10_clone) do ( - blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + blckRef: BlockRef, signedBlock: electra.TrustedSignedBeaconBlock, + state: electra.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -668,9 +699,10 @@ suite "Attestation pool processing" & preset(): dag.updateFlags.incl {skipBlsValidation} var cache = StateCache() let - b10 = addTestBlock(state[], cache).phase0Data + b10 = addTestBlock(state[], cache).electraData b10Add = dag.addHeadBlock(verifier, b10) do ( - blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + blckRef: BlockRef, signedBlock: electra.TrustedSignedBeaconBlock, + state: electra.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -687,7 +719,7 @@ suite "Attestation pool processing" & preset(): # ------------------------------------------------------------- # Pass an epoch - var attestations: seq[phase0.Attestation] + var attestations: seq[electra.Attestation] for epoch in 0 ..< 5: let start_slot = start_slot(Epoch epoch) @@ -695,10 +727,11 @@ suite "Attestation pool processing" & preset(): get_committee_count_per_slot(state[], Epoch epoch, cache) for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH: let new_block = addTestBlock( - state[], cache, attestations = attestations).phase0Data + state[], cache, electraAttestations = attestations).electraData let blockRef = dag.addHeadBlock(verifier, new_block) do ( - blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + blckRef: BlockRef, signedBlock: electra.TrustedSignedBeaconBlock, + state: electra.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -720,14 +753,18 @@ suite "Attestation pool processing" & preset(): # Create a bitfield filled with the given count per attestation, # exactly on the right-most part of the committee field. 
- var aggregation_bits = init(CommitteeValidatorsBits, committee.len) + var aggregation_bits = init(ElectraCommitteeValidatorsBits, committee.len) for v in 0 ..< committee.len * 2 div 3 + 1: aggregation_bits[v] = true - attestations.add phase0.Attestation( + var committee_bits: AttestationCommitteeBits + committee_bits[committee_index.int] = true + + attestations.add electra.Attestation( + committee_bits: committee_bits, aggregation_bits: aggregation_bits, data: makeAttestationData(state[], getStateField(state[], slot), - committee_index, blockRef.get().root) + 0.CommitteeIndex, blockRef.get().root) # signature: ValidatorSig() ) @@ -743,7 +780,8 @@ suite "Attestation pool processing" & preset(): # Add back the old block to ensure we have a duplicate error let b10Add_clone = dag.addHeadBlock(verifier, b10_clone) do ( - blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + blckRef: BlockRef, signedBlock: electra.TrustedSignedBeaconBlock, + state: electra.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -752,245 +790,6 @@ suite "Attestation pool processing" & preset(): doAssert: b10Add_clone.error == VerifierError.Duplicate -suite "Attestation pool electra processing" & preset(): - ## For now just test that we can compile and execute block processing with - ## mock data. - - setup: - # Genesis state that results in 6 members per committee (2 committees total) - const TOTAL_COMMITTEES = 2 - let rng = HmacDrbgContext.new() - var - validatorMonitor = newClone(ValidatorMonitor.init()) - cfg = genesisTestRuntimeConfig(ConsensusFork.Electra) - dag = init( - ChainDAGRef, cfg, - makeTestDB( - TOTAL_COMMITTEES * TARGET_COMMITTEE_SIZE * SLOTS_PER_EPOCH, cfg = cfg), - validatorMonitor, {}) - taskpool = Taskpool.new() - verifier = BatchVerifier.init(rng, taskpool) - quarantine = newClone(Quarantine.init()) - pool = newClone(AttestationPool.init(dag, quarantine)) - state = newClone(dag.headState) - cache = StateCache() - info = ForkedEpochInfo() - # Slot 0 is a finalized slot - won't be making attestations for it.. - check: - process_slots( - dag.cfg, - state[], - getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, - cache, - info, - {}).isOk() - - test "Can add and retrieve simple electra attestations" & preset(): - let - # Create an attestation for slot 1! 
- bc0 = get_beacon_committee( - state[], getStateField(state[], slot), 0.CommitteeIndex, cache) - attestation = makeElectraAttestation( - state[], state[].latest_block_root, bc0[0], cache) - - pool[].addAttestation( - attestation, @[bc0[0]], attestation.aggregation_bits.len, - attestation.loadSig, attestation.data.slot.start_beacon_time) - - check: - process_slots( - defaultRuntimeConfig, state[], - getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, - info, {}).isOk() - - let attestations = pool[].getElectraAttestationsForBlock(state[], cache) - - check: - attestations.len == 1 - - let - root1 = addTestBlock( - state[], cache, electraAttestations = attestations, - nextSlot = false).electraData.root - bc1 = get_beacon_committee( - state[], getStateField(state[], slot), 0.CommitteeIndex, cache) - att1 = makeElectraAttestation(state[], root1, bc1[0], cache) - - check: - withState(state[]): forkyState.latest_block_root == root1 - - process_slots( - defaultRuntimeConfig, state[], - getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, - info, {}).isOk() - - withState(state[]): forkyState.latest_block_root == root1 - - check: - # shouldn't include already-included attestations - pool[].getElectraAttestationsForBlock(state[], cache) == [] - - pool[].addAttestation( - att1, @[bc1[0]], att1.aggregation_bits.len, att1.loadSig, - att1.data.slot.start_beacon_time) - - check: - # but new ones should go in - pool[].getElectraAttestationsForBlock(state[], cache).len() == 1 - - let - att2 = makeElectraAttestation(state[], root1, bc1[1], cache) - pool[].addAttestation( - att2, @[bc1[1]], att2.aggregation_bits.len, att2.loadSig, - att2.data.slot.start_beacon_time) - - let - combined = pool[].getElectraAttestationsForBlock(state[], cache) - - check: - # New attestations should be combined with old attestations - combined.len() == 1 - combined[0].aggregation_bits.countOnes() == 2 - - pool[].addAttestation( - combined[0], @[bc1[1], bc1[0]], combined[0].aggregation_bits.len, - combined[0].loadSig, combined[0].data.slot.start_beacon_time) - - check: - # readding the combined attestation shouldn't have an effect - pool[].getElectraAttestationsForBlock(state[], cache).len() == 1 - - let - # Someone votes for a different root - att3 = makeElectraAttestation(state[], ZERO_HASH, bc1[2], cache) - pool[].addAttestation( - att3, @[bc1[2]], att3.aggregation_bits.len, att3.loadSig, - att3.data.slot.start_beacon_time) - - check: - # We should now get both attestations for the block, but the aggregate - # should be the one with the most votes - pool[].getElectraAttestationsForBlock(state[], cache).len() == 2 - pool[].getElectraAggregatedAttestation(2.Slot, hash_tree_root(combined[0].data), - 0.CommitteeIndex).get().aggregation_bits.countOnes() == 2 - pool[].getElectraAggregatedAttestation(2.Slot, hash_tree_root(att2.data), 0.CommitteeIndex). 
- get().aggregation_bits.countOnes() == 2 - # requests to get and aggregate from different committees should be empty - pool[].getElectraAggregatedAttestation( - 2.Slot, hash_tree_root(combined[0].data), 1.CommitteeIndex).isNone() - - test "Attestations with disjoint comittee bits and equal data into single on-chain aggregate" & preset(): - let - bc0 = get_beacon_committee( - state[], getStateField(state[], slot), 0.CommitteeIndex, cache) - - bc1 = get_beacon_committee( - state[], getStateField(state[], slot), 1.CommitteeIndex, cache) - - # atestation from committee 1 - attestation_1 = makeElectraAttestation( - state[], state[].latest_block_root, bc0[0], cache) - - # atestation from different committee with same data as - # attestaton 1 - attestation_2 = makeElectraAttestation( - state[], state[].latest_block_root, bc1[1], cache) - - pool[].addAttestation( - attestation_1, @[bc0[0]], attestation_1.aggregation_bits.len, - attestation_1.loadSig, attestation_1.data.slot.start_beacon_time) - - pool[].addAttestation( - attestation_2, @[bc0[1]], attestation_2.aggregation_bits.len, - attestation_2.loadSig, attestation_2.data.slot.start_beacon_time) - - check: - process_slots( - defaultRuntimeConfig, state[], - getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, - info, {}).isOk() - - let attestations = pool[].getElectraAttestationsForBlock(state[], cache) - - check: - # A single inal chain aggregated attestation should be created - # with same data and joint committee,aggregation bits - attestations.len == 1 - attestations[0].aggregation_bits.countOnes() == 2 - attestations[0].committee_bits.countOnes() == 2 - - test "Aggregated attestations with disjoint comittee bits into a single on-chain aggregate" & preset(): - proc verifyAttestationSignature(attestation: electra.Attestation): bool = - withState(state[]): - when consensusFork == ConsensusFork.Electra: - let - fork = pool.dag.cfg.forkAtEpoch(forkyState.data.slot.epoch) - attesting_indices = get_attesting_indices( - forkyState.data, attestation.data, attestation.aggregation_bits, - attestation.committee_bits, cache) - verify_attestation_signature( - fork, pool.dag.genesis_validators_root, attestation.data, - attesting_indices.mapIt(forkyState.data.validators.item(it).pubkey), - attestation.signature) - else: - raiseAssert "must be electra" - - let - bc0 = get_beacon_committee( - state[], getStateField(state[], slot), 0.CommitteeIndex, cache) - - bc1 = get_beacon_committee( - state[], getStateField(state[], slot), 1.CommitteeIndex, cache) - - # attestation from first committee - attestation_1 = makeElectraAttestation( - state[], state[].latest_block_root, bc0[0], cache) - - # another attestation from first committee with same data - attestation_2 = makeElectraAttestation( - state[], state[].latest_block_root, bc0[1], cache) - - # attestation from different committee with same data as - # attestation 1 - attestation_3 = makeElectraAttestation( - state[], state[].latest_block_root, bc1[1], cache) - - check: - verifyAttestationSignature(attestation_1) - verifyAttestationSignature(attestation_2) - verifyAttestationSignature(attestation_3) - - pool[].addAttestation( - attestation_1, @[bc0[0]], attestation_1.aggregation_bits.len, - attestation_1.loadSig, attestation_1.data.slot.start_beacon_time) - - pool[].addAttestation( - attestation_2, @[bc0[1]], attestation_2.aggregation_bits.len, - attestation_2.loadSig, attestation_2.data.slot.start_beacon_time) - - pool[].addAttestation( - attestation_3, @[bc1[1]], 
attestation_3.aggregation_bits.len, - attestation_3.loadSig, attestation_3.data.slot.start_beacon_time) - - check: - process_slots( - defaultRuntimeConfig, state[], - getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, - info, {}).isOk() - - let attestations = pool[].getElectraAttestationsForBlock(state[], cache) - - check: - verifyAttestationSignature(attestations[0]) - check_attestation( - state[].electraData.data, attestations[0], {}, cache, true).isOk - - # A single final chain aggregated attestation should be created - # with same data, 2 committee bits and 3 aggregation bits - attestations.len == 1 - attestations[0].aggregation_bits.countOnes() == 3 - attestations[0].committee_bits.countOnes() == 2 - test "Working with electra aggregates" & preset(): let # Create an attestation for slot 1! @@ -1056,10 +855,9 @@ suite "Attestation pool electra processing" & preset(): pool[].covers(att1.data, att1.aggregation_bits, att1.committee_bits) pool[].covers(att2.data, att2.aggregation_bits, att2.committee_bits) - process_slots( - defaultRuntimeConfig, state[], - getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, - info, {}).isOk() + cfg.process_slots( + state[], getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, + cache, info, {}).isOk() for att in pool[].electraAttestations(Opt.none Slot, Opt.none CommitteeIndex): check: verifyAttestationSignature(att) @@ -1167,10 +965,9 @@ suite "Attestation pool electra processing" & preset(): if att.data.slot > maxSlot: maxSlot = att.data.slot - check process_slots( - defaultRuntimeConfig, state[], - maxSlot + MIN_ATTESTATION_INCLUSION_DELAY, cache, - info, {}).isOk() + check cfg.process_slots( + state[], maxSlot + MIN_ATTESTATION_INCLUSION_DELAY, + cache, info, {}).isOk() let attestations = pool[].getElectraAttestationsForBlock(state[], cache) check: @@ -1187,89 +984,86 @@ suite "Attestation pool electra processing" & preset(): pool[].verifyAttestationSignature(state, cache, attestations[1]) test "Simple add and get with electra nonzero committee" & preset(): - let - bc0 = get_beacon_committee( - state[], getStateField(state[], slot), 0.CommitteeIndex, cache) + let + bc0 = get_beacon_committee( + state[], getStateField(state[], slot), 0.CommitteeIndex, cache) - bc1 = get_beacon_committee( - state[], getStateField(state[], slot), 1.CommitteeIndex, cache) + bc1 = get_beacon_committee( + state[], getStateField(state[], slot), 1.CommitteeIndex, cache) - attestation_1 = makeElectraAttestation( - state[], state[].latest_block_root, bc0[0], cache) + attestation_1 = makeElectraAttestation( + state[], state[].latest_block_root, bc0[0], cache) - attestation_2 = makeElectraAttestation( - state[], state[].latest_block_root, bc1[0], cache) + attestation_2 = makeElectraAttestation( + state[], state[].latest_block_root, bc1[0], cache) - pool[].addAttestation( - attestation_1, @[bc0[0]], attestation_1.aggregation_bits.len, - attestation_1.loadSig, attestation_1.data.slot.start_beacon_time) + pool[].addAttestation( + attestation_1, @[bc0[0]], attestation_1.aggregation_bits.len, + attestation_1.loadSig, attestation_1.data.slot.start_beacon_time) - pool[].addAttestation( - attestation_2, @[bc1[0]], attestation_2.aggregation_bits.len, - attestation_2.loadSig, attestation_2.data.slot.start_beacon_time) + pool[].addAttestation( + attestation_2, @[bc1[0]], attestation_2.aggregation_bits.len, + attestation_2.loadSig, attestation_2.data.slot.start_beacon_time) - check: - process_slots( - defaultRuntimeConfig, state[], - 
getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, - info, {}).isOk() + check: + cfg.process_slots( + state[], getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, + cache, info, {}).isOk() - check: - pool[].getElectraAggregatedAttestation(1.Slot, hash_tree_root(attestation_1.data), - 0.CommitteeIndex).isOk - pool[].getElectraAggregatedAttestation(1.Slot, hash_tree_root(attestation_2.data), - 1.CommitteeIndex).isOk + pool[].getElectraAggregatedAttestation(1.Slot, hash_tree_root(attestation_1.data), + 0.CommitteeIndex).isOk + pool[].getElectraAggregatedAttestation(1.Slot, hash_tree_root(attestation_2.data), + 1.CommitteeIndex).isOk test "Cache coherence on chain aggregates" & preset(): - # Add attestation from different committee - var maxSlot = 0.Slot + # Add attestation from different committee + var maxSlot = 0.Slot - for i in 0 ..< 4: - let - bc = get_beacon_committee( - state[], getStateField(state[], slot), i.CommitteeIndex, cache) - att = makeElectraAttestation( - state[], state[].latest_block_root, bc[0], cache) - var att2 = makeElectraAttestation( - state[], state[].latest_block_root, bc[1], cache) + for i in 0 ..< 4: + let + bc = get_beacon_committee( + state[], getStateField(state[], slot), i.CommitteeIndex, cache) + att = makeElectraAttestation( + state[], state[].latest_block_root, bc[0], cache) + var att2 = makeElectraAttestation( + state[], state[].latest_block_root, bc[1], cache) - pool[].addAttestation( - att, @[bc[0]], att.aggregation_bits.len, att.loadSig, - att.data.slot.start_beacon_time) + pool[].addAttestation( + att, @[bc[0]], att.aggregation_bits.len, att.loadSig, + att.data.slot.start_beacon_time) - if att.data.slot < 2: - pool[].addAttestation( - att2, @[bc[1]], att2.aggregation_bits.len, att2.loadSig, - att2.data.slot.start_beacon_time) + if att.data.slot < 2: + pool[].addAttestation( + att2, @[bc[1]], att2.aggregation_bits.len, att2.loadSig, + att2.data.slot.start_beacon_time) - if att.data.slot > maxSlot: - maxSlot = att.data.slot + if att.data.slot > maxSlot: + maxSlot = att.data.slot - check process_slots( - defaultRuntimeConfig, state[], - maxSlot + MIN_ATTESTATION_INCLUSION_DELAY, cache, - info, {}).isOk() + check cfg.process_slots( + state[], maxSlot + MIN_ATTESTATION_INCLUSION_DELAY, + cache, info, {}).isOk() - let attestations = pool[].getElectraAttestationsForBlock(state[], cache) - check: - ## Considering that all structures in getElectraAttestationsForBlock - ## are sorted, the most relevant should be at sequence head. - ## Given the attestations added, the most "scored" is on - ## slot 1 - attestations.len() == 2 + let attestations = pool[].getElectraAttestationsForBlock(state[], cache) + check: + ## Considering that all structures in getElectraAttestationsForBlock + ## are sorted, the most relevant should be at sequence head. 
+ ## Given the attestations added, the most "scored" is on + ## slot 1 + attestations.len() == 2 - attestations[0].aggregation_bits.countOnes() == 4 - attestations[0].committee_bits.countOnes() == 2 - attestations[0].data.slot == 1.Slot + attestations[0].aggregation_bits.countOnes() == 4 + attestations[0].committee_bits.countOnes() == 2 + attestations[0].data.slot == 1.Slot - attestations[1].aggregation_bits.countOnes() == 2 - attestations[1].committee_bits.countOnes() == 2 - attestations[1].data.slot == 2.Slot + attestations[1].aggregation_bits.countOnes() == 2 + attestations[1].committee_bits.countOnes() == 2 + attestations[1].data.slot == 2.Slot - check_attestation( - state[].electraData.data, attestations[0], {}, cache, true).isOk - check_attestation( - state[].electraData.data, attestations[1], {}, cache, true).isOk - pool[].verifyAttestationSignature(state, cache, attestations[0]) - pool[].verifyAttestationSignature(state, cache, attestations[1]) + check_attestation( + state[].electraData.data, attestations[0], {}, cache, true).isOk + check_attestation( + state[].electraData.data, attestations[1], {}, cache, true).isOk + pool[].verifyAttestationSignature(state, cache, attestations[0]) + pool[].verifyAttestationSignature(state, cache, attestations[1]) diff --git a/tests/test_beacon_chain_db.nim b/tests/test_beacon_chain_db.nim index cfa0e8f95c..b39c929075 100644 --- a/tests/test_beacon_chain_db.nim +++ b/tests/test_beacon_chain_db.nim @@ -10,13 +10,13 @@ import unittest2, - ../beacon_chain/beacon_chain_db, + ../beacon_chain/[beacon_chain_db, beacon_chain_db_quarantine], ../beacon_chain/consensus_object_pools/block_dag, ../beacon_chain/spec/forks, ./testutil from std/algorithm import sort -from std/sequtils import toSeq +from std/sequtils import allIt, toSeq from snappy import encodeFramed, uncompressedLenFramed from ../beacon_chain/consensus_object_pools/block_pools_types import ChainDAGRef @@ -25,6 +25,7 @@ from ../beacon_chain/spec/beaconstate import initialize_hashed_beacon_state_from_eth1 from ../beacon_chain/spec/state_transition import noRollback from ../beacon_chain/validators/validator_monitor import ValidatorMonitor +from ./consensus_spec/fixtures_utils import genesisTestruntimeConfig from ./mocking/mock_genesis import mockEth1BlockHash from ./testblockutil import makeInitialDeposits from ./testdbutil import makeTestDB @@ -33,109 +34,89 @@ from ./teststateutil import getTestStates when isMainModule: import chronicles # or some random compile error happens... 
-proc getPhase0StateRef(db: BeaconChainDB, root: Eth2Digest): - phase0.NilableBeaconStateRef = +template BeaconStateRef(kind: static ConsensusFork): typedesc = + when kind == ConsensusFork.Gloas: + gloas.BeaconStateRef + elif kind == ConsensusFork.Fulu: + fulu.BeaconStateRef + elif kind == ConsensusFork.Electra: + electra.BeaconStateRef + elif kind == ConsensusFork.Deneb: + deneb.BeaconStateRef + elif kind == ConsensusFork.Capella: + capella.BeaconStateRef + elif kind == ConsensusFork.Bellatrix: + bellatrix.BeaconStateRef + elif kind == ConsensusFork.Altair: + altair.BeaconStateRef + elif kind == ConsensusFork.Phase0: + phase0.BeaconStateRef + else: + {.error: "BeaconStateRef unsupported in " & $kind.} + +template NilableBeaconStateRef(kind: static ConsensusFork): typedesc = + when kind == ConsensusFork.Gloas: + gloas.NilableBeaconStateRef + elif kind == ConsensusFork.Fulu: + fulu.NilableBeaconStateRef + elif kind == ConsensusFork.Electra: + electra.NilableBeaconStateRef + elif kind == ConsensusFork.Deneb: + deneb.NilableBeaconStateRef + elif kind == ConsensusFork.Capella: + capella.NilableBeaconStateRef + elif kind == ConsensusFork.Bellatrix: + bellatrix.NilableBeaconStateRef + elif kind == ConsensusFork.Altair: + altair.NilableBeaconStateRef + elif kind == ConsensusFork.Phase0: + phase0.NilableBeaconStateRef + else: + {.error: "NilableBeaconStateRef unsupported in " & $kind.} + +template TrustedBeaconBlock(kind: static ConsensusFork): typedesc = + when kind == ConsensusFork.Gloas: + gloas.TrustedBeaconBlock + elif kind == ConsensusFork.Fulu: + fulu.TrustedBeaconBlock + elif kind == ConsensusFork.Electra: + electra.TrustedBeaconBlock + elif kind == ConsensusFork.Deneb: + deneb.TrustedBeaconBlock + elif kind == ConsensusFork.Capella: + capella.TrustedBeaconBlock + elif kind == ConsensusFork.Bellatrix: + bellatrix.TrustedBeaconBlock + elif kind == ConsensusFork.Altair: + altair.TrustedBeaconBlock + elif kind == ConsensusFork.Phase0: + phase0.TrustedBeaconBlock + else: + {.error: "TrustedBeaconBlock unsupported in " & $kind.} + +proc getStateRef( + db: BeaconChainDB, + consensusFork: static ConsensusFork, + root: Eth2Digest): auto = # load beaconstate the way the block pool does it - into an existing instance - let res = (phase0.BeaconStateRef)() - if db.getState(root, res[], noRollback): - return res - -proc getAltairStateRef(db: BeaconChainDB, root: Eth2Digest): - altair.NilableBeaconStateRef = - # load beaconstate the way the block pool does it - into an existing instance - let res = (altair.BeaconStateRef)() - if db.getState(root, res[], noRollback): - return res - -proc getBellatrixStateRef(db: BeaconChainDB, root: Eth2Digest): - bellatrix.NilableBeaconStateRef = - # load beaconstate the way the block pool does it - into an existing instance - let res = (bellatrix.BeaconStateRef)() - if db.getState(root, res[], noRollback): - return res - -proc getCapellaStateRef(db: BeaconChainDB, root: Eth2Digest): - capella.NilableBeaconStateRef = - # load beaconstate the way the block pool does it - into an existing instance - let res = (capella.BeaconStateRef)() - if db.getState(root, res[], noRollback): - return res - -proc getDenebStateRef(db: BeaconChainDB, root: Eth2Digest): - deneb.NilableBeaconStateRef = - # load beaconstate the way the block pool does it - into an existing instance - let res = (deneb.BeaconStateRef)() - if db.getState(root, res[], noRollback): - return res - -proc getElectraStateRef(db: BeaconChainDB, root: Eth2Digest): - electra.NilableBeaconStateRef = - # load beaconstate 
the way the block pool does it - into an existing instance - let res = (electra.BeaconStateRef)() - if db.getState(root, res[], noRollback): - return res - -proc getFuluStateRef(db: BeaconChainDB, root: Eth2Digest): - fulu.NilableBeaconStateRef = - # load beaconstate the way the block pool does it - into an existence instance - let res = (fulu.BeaconStateRef)() - if db.getState(root, res[], noRollback): - return res - -func withDigest(blck: phase0.TrustedBeaconBlock): - phase0.TrustedSignedBeaconBlock = - phase0.TrustedSignedBeaconBlock( - message: blck, - root: hash_tree_root(blck) - ) - -func withDigest(blck: altair.TrustedBeaconBlock): - altair.TrustedSignedBeaconBlock = - altair.TrustedSignedBeaconBlock( - message: blck, - root: hash_tree_root(blck) - ) - -func withDigest(blck: bellatrix.TrustedBeaconBlock): - bellatrix.TrustedSignedBeaconBlock = - bellatrix.TrustedSignedBeaconBlock( - message: blck, - root: hash_tree_root(blck) - ) - -func withDigest(blck: capella.TrustedBeaconBlock): - capella.TrustedSignedBeaconBlock = - capella.TrustedSignedBeaconBlock( - message: blck, - root: hash_tree_root(blck) - ) - -func withDigest(blck: deneb.TrustedBeaconBlock): - deneb.TrustedSignedBeaconBlock = - deneb.TrustedSignedBeaconBlock( + var res: consensusFork.NilableBeaconStateRef = + (consensusFork.BeaconStateRef)() + if not db.getState(root, res[], noRollback): + res = nil + res + +func withDigest(blck: ForkyTrustedBeaconBlock): auto = + typeof(blck).kind.TrustedSignedBeaconBlock( message: blck, - root: hash_tree_root(blck) - ) + root: hash_tree_root(blck)) -func withDigest(blck: electra.TrustedBeaconBlock): - electra.TrustedSignedBeaconBlock = - electra.TrustedSignedBeaconBlock( - message: blck, - root: hash_tree_root(blck) - ) - -func withDigest(blck: fulu.TrustedBeaconBlock): - fulu.TrustedSignedBeaconBlock = - fulu.TrustedSignedBeaconBlock( - message: blck, - root: hash_tree_root(blck) - ) - -proc getTestStates(consensusFork: ConsensusFork): auto = +proc getTestStates( + cfg: RuntimeConfig, + consensusFork: ConsensusFork): seq[ref ForkedHashedBeaconState] = let - db = makeTestDB(SLOTS_PER_EPOCH) - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) + db = cfg.makeTestDB(SLOTS_PER_EPOCH) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) + dag = init(ChainDAGRef, cfg, db, validatorMonitor, {}) var testStates = getTestStates(dag.headState, consensusFork) # Ensure transitions beyond just adding validators and increasing slots @@ -146,830 +127,172 @@ proc getTestStates(consensusFork: ConsensusFork): auto = # Each set of states gets used twice, so scope them to module let - testStatesPhase0 = getTestStates(ConsensusFork.Phase0) - testStatesAltair = getTestStates(ConsensusFork.Altair) - testStatesBellatrix = getTestStates(ConsensusFork.Bellatrix) - testStatesCapella = getTestStates(ConsensusFork.Capella) - testStatesDeneb = getTestStates(ConsensusFork.Deneb) - testStatesElectra = getTestStates(ConsensusFork.Electra) - testStatesFulu = getTestStates(ConsensusFork.Fulu) - -doAssert len(testStatesPhase0) > 8 -doAssert len(testStatesAltair) > 8 -doAssert len(testStatesBellatrix) > 8 -doAssert len(testStatesCapella) > 8 -doAssert len(testStatesDeneb) > 8 -doAssert len(testStatesElectra) > 8 -doAssert len(testStatesFulu) > 8 + cfg = defaultRuntimeConfig + testStates = block: + var res: array[ConsensusFork, seq[ref ForkedHashedBeaconState]] + for consensusFork in ConsensusFork: + res[consensusFork] = 
cfg.getTestStates(consensusFork) + res +doAssert testStates.allIt(it.len > 8) suite "Beacon chain DB" & preset(): test "empty database" & preset(): - var - db = BeaconChainDB.new("", inMemory = true) + var db = BeaconChainDB.new("", cfg, inMemory = true) check: - db.getPhase0StateRef(ZERO_HASH).isNil + db.getStateRef(ConsensusFork.Phase0, ZERO_HASH).isNil db.getBlock(ZERO_HASH, phase0.TrustedSignedBeaconBlock).isNone - test "sanity check phase 0 blocks" & preset(): - let db = BeaconChainDB.new("", inMemory = true) - - let - signedBlock = withDigest((phase0.TrustedBeaconBlock)()) - root = hash_tree_root(signedBlock.message) - - db.putBlock(signedBlock) - - var tmp, tmp2: seq[byte] - check: - db.containsBlock(root) - db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - not db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - db.getBlock(root, phase0.TrustedSignedBeaconBlock).get() == signedBlock - db.getBlockSSZ(root, tmp, phase0.TrustedSignedBeaconBlock) - db.getBlockSZ(root, tmp2, phase0.TrustedSignedBeaconBlock) - tmp == SSZ.encode(signedBlock) - tmp2 == encodeFramed(tmp) - uncompressedLenFramed(tmp2).isSome - - check: - db.delBlock(ConsensusFork.Phase0, root) - not db.containsBlock(root) - not db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - not db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - not db.containsBlock(root, fulu.TrustedSignedBeaconBlock) - db.getBlock(root, phase0.TrustedSignedBeaconBlock).isErr() - not db.getBlockSSZ(root, tmp, phase0.TrustedSignedBeaconBlock) - not db.getBlockSZ(root, tmp2, phase0.TrustedSignedBeaconBlock) - - db.putStateRoot(root, signedBlock.message.slot, root) - var root2 = root - root2.data[0] = root.data[0] + 1 - db.putStateRoot(root, signedBlock.message.slot + 1, root2) - - check: - db.getStateRoot(root, signedBlock.message.slot).get() == root - db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2 - - db.close() - - test "sanity check Altair blocks" & preset(): - let db = BeaconChainDB.new("", inMemory = true) - - let - signedBlock = withDigest((altair.TrustedBeaconBlock)()) - root = hash_tree_root(signedBlock.message) - - db.putBlock(signedBlock) - - var tmp, tmp2: seq[byte] - check: - db.containsBlock(root) - not db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - not db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - not db.containsBlock(root, fulu.TrustedSignedBeaconBlock) - db.getBlock(root, altair.TrustedSignedBeaconBlock).get() == signedBlock - db.getBlockSSZ(root, tmp, altair.TrustedSignedBeaconBlock) - db.getBlockSZ(root, tmp2, altair.TrustedSignedBeaconBlock) - tmp == SSZ.encode(signedBlock) - tmp2 == encodeFramed(tmp) - uncompressedLenFramed(tmp2).isSome - - check: - db.delBlock(ConsensusFork.Altair, root) - not db.containsBlock(root) - not 
db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - not db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - not db.containsBlock(root, fulu.TrustedSignedBeaconBlock) - db.getBlock(root, altair.TrustedSignedBeaconBlock).isErr() - not db.getBlockSSZ(root, tmp, altair.TrustedSignedBeaconBlock) - not db.getBlockSZ(root, tmp2, altair.TrustedSignedBeaconBlock) - - db.putStateRoot(root, signedBlock.message.slot, root) - var root2 = root - root2.data[0] = root.data[0] + 1 - db.putStateRoot(root, signedBlock.message.slot + 1, root2) - - check: - db.getStateRoot(root, signedBlock.message.slot).get() == root - db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2 - - db.close() - - test "sanity check Bellatrix blocks" & preset(): - let db = BeaconChainDB.new("", inMemory = true) - - let - signedBlock = withDigest((bellatrix.TrustedBeaconBlock)()) - root = hash_tree_root(signedBlock.message) - - db.putBlock(signedBlock) - - var tmp, tmp2: seq[byte] - check: - db.containsBlock(root) - not db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - not db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - not db.containsBlock(root, fulu.TrustedSignedBeaconBlock) - db.getBlock(root, bellatrix.TrustedSignedBeaconBlock).get() == signedBlock - db.getBlockSSZ(root, tmp, bellatrix.TrustedSignedBeaconBlock) - db.getBlockSZ(root, tmp2, bellatrix.TrustedSignedBeaconBlock) - tmp == SSZ.encode(signedBlock) - tmp2 == encodeFramed(tmp) - uncompressedLenFramed(tmp2).isSome - - check: - db.delBlock(ConsensusFork.Bellatrix, root) - not db.containsBlock(root) - not db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - not db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - not db.containsBlock(root, fulu.TrustedSignedBeaconBlock) - db.getBlock(root, bellatrix.TrustedSignedBeaconBlock).isErr() - not db.getBlockSSZ(root, tmp, bellatrix.TrustedSignedBeaconBlock) - not db.getBlockSZ(root, tmp2, bellatrix.TrustedSignedBeaconBlock) - - db.putStateRoot(root, signedBlock.message.slot, root) - var root2 = root - root2.data[0] = root.data[0] + 1 - db.putStateRoot(root, signedBlock.message.slot + 1, root2) - - check: - db.getStateRoot(root, signedBlock.message.slot).get() == root - db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2 - - db.close() - - test "sanity check Capella blocks" & preset(): - let db = BeaconChainDB.new("", inMemory = true) - - let - signedBlock = withDigest((capella.TrustedBeaconBlock)()) - root = hash_tree_root(signedBlock.message) - - db.putBlock(signedBlock) - - var tmp, tmp2: seq[byte] - check: - db.containsBlock(root) - not db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not 
db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - not db.containsBlock(root, fulu.TrustedSignedBeaconBlock) - db.containsBlock(root, capella.TrustedSignedBeaconBlock) - db.getBlock(root, capella.TrustedSignedBeaconBlock).get() == signedBlock - db.getBlockSSZ(root, tmp, capella.TrustedSignedBeaconBlock) - db.getBlockSZ(root, tmp2, capella.TrustedSignedBeaconBlock) - tmp == SSZ.encode(signedBlock) - tmp2 == encodeFramed(tmp) - uncompressedLenFramed(tmp2).isSome - - check: - db.delBlock(ConsensusFork.Capella, root) - not db.containsBlock(root) - not db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - not db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - not db.containsBlock(root, fulu.TrustedSignedBeaconBlock) - db.getBlock(root, capella.TrustedSignedBeaconBlock).isErr() - not db.getBlockSSZ(root, tmp, capella.TrustedSignedBeaconBlock) - not db.getBlockSZ(root, tmp2, capella.TrustedSignedBeaconBlock) - - db.putStateRoot(root, signedBlock.message.slot, root) - var root2 = root - root2.data[0] = root.data[0] + 1 - db.putStateRoot(root, signedBlock.message.slot + 1, root2) - - check: - db.getStateRoot(root, signedBlock.message.slot).get() == root - db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2 - - db.close() - - test "sanity check Deneb blocks" & preset(): - let db = BeaconChainDB.new("", inMemory = true) - - let - signedBlock = withDigest((deneb.TrustedBeaconBlock)()) - root = hash_tree_root(signedBlock.message) - - db.putBlock(signedBlock) - - var tmp, tmp2: seq[byte] - check: - db.containsBlock(root) - not db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - not db.containsBlock(root, fulu.TrustedSignedBeaconBlock) - db.getBlock(root, deneb.TrustedSignedBeaconBlock).get() == signedBlock - db.getBlockSSZ(root, tmp, deneb.TrustedSignedBeaconBlock) - db.getBlockSZ(root, tmp2, deneb.TrustedSignedBeaconBlock) - tmp == SSZ.encode(signedBlock) - tmp2 == encodeFramed(tmp) - uncompressedLenFramed(tmp2).isSome - - check: - db.delBlock(ConsensusFork.Deneb, root) - not db.containsBlock(root) - not db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - not db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - db.getBlock(root, deneb.TrustedSignedBeaconBlock).isErr() - not db.getBlockSSZ(root, tmp, deneb.TrustedSignedBeaconBlock) - not db.getBlockSZ(root, tmp2, deneb.TrustedSignedBeaconBlock) - - db.putStateRoot(root, signedBlock.message.slot, root) - var root2 = root - root2.data[0] = root.data[0] + 1 - db.putStateRoot(root, signedBlock.message.slot + 1, root2) - - check: - db.getStateRoot(root, signedBlock.message.slot).get() == root - db.getStateRoot(root, signedBlock.message.slot + 
1).get() == root2 - - db.close() - - test "sanity check Electra blocks" & preset(): - let db = BeaconChainDB.new("", inMemory = true) - - let - signedBlock = withDigest((electra.TrustedBeaconBlock)()) - root = hash_tree_root(signedBlock.message) - - db.putBlock(signedBlock) - - var tmp, tmp2: seq[byte] - check: - db.containsBlock(root) - not db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - not db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - db.containsBlock(root, electra.TrustedSignedBeaconBlock) - db.getBlock(root, electra.TrustedSignedBeaconBlock).get() == signedBlock - db.getBlockSSZ(root, tmp, electra.TrustedSignedBeaconBlock) - db.getBlockSZ(root, tmp2, electra.TrustedSignedBeaconBlock) - tmp == SSZ.encode(signedBlock) - tmp2 == encodeFramed(tmp) - uncompressedLenFramed(tmp2).isSome - - check: - db.delBlock(ConsensusFork.Electra, root) - not db.containsBlock(root) - not db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - not db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - db.getBlock(root, electra.TrustedSignedBeaconBlock).isErr() - not db.getBlockSSZ(root, tmp, electra.TrustedSignedBeaconBlock) - not db.getBlockSZ(root, tmp2, electra.TrustedSignedBeaconBlock) - - db.putStateRoot(root, signedBlock.message.slot, root) - var root2 = root - root2.data[0] = root.data[0] + 1 - db.putStateRoot(root, signedBlock.message.slot + 1, root2) - - check: - db.getStateRoot(root, signedBlock.message.slot).get() == root - db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2 - - db.close() - - test "sanity check Fulu blocks" & preset(): - let db = BeaconChainDB.new("", inMemory = true) - - let - signedBlock = withDigest((fulu.TrustedBeaconBlock)()) - root = hash_tree_root(signedBlock.message) - - db.putBlock(signedBlock) - - var tmp, tmp2: seq[byte] - check: - db.containsBlock(root) - not db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - not db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - db.containsBlock(root, fulu.TrustedSignedBeaconBlock) - db.getBlock(root, fulu.TrustedSignedBeaconBlock).get() == signedBlock - db.getBlockSSZ(root, tmp, fulu.TrustedSignedBeaconBlock) - db.getBlockSZ(root, tmp2, fulu.TrustedSignedBeaconBlock) - tmp == SSZ.encode(signedBlock) - tmp2 == encodeFramed(tmp) - uncompressedLenFramed(tmp2).isSome - - check: - db.delBlock(ConsensusFork.Fulu, root) - not db.containsBlock(root) - not db.containsBlock(root, phase0.TrustedSignedBeaconBlock) - not db.containsBlock(root, altair.TrustedSignedBeaconBlock) - not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock) - not db.containsBlock(root, capella.TrustedSignedBeaconBlock) - not db.containsBlock(root, deneb.TrustedSignedBeaconBlock) - not db.containsBlock(root, electra.TrustedSignedBeaconBlock) - not db.containsBlock(root, fulu.TrustedSignedBeaconBlock) - db.getBlock(root, 
fulu.TrustedSignedBeaconBlock).isErr() - not db.getBlockSSZ(root, tmp, fulu.TrustedSignedBeaconBlock) - not db.getBlockSZ(root, tmp2, fulu.TrustedSignedBeaconBlock) - - db.putStateRoot(root, signedBlock.message.slot, root) - var root2 = root - root2.data[0] = root.data[0] + 1 - db.putStateRoot(root, signedBlock.message.slot + 1, root2) - - check: - db.getStateRoot(root, signedBlock.message.slot).get() == root - db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2 - - db.close() - - test "sanity check phase 0 states" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - - for state in testStatesPhase0: - let root = state[].phase0Data.root - db.putState(root, state[].phase0Data.data) + template doBlockTest(consensusFork: static ConsensusFork): untyped = + block: + let db = BeaconChainDB.new( + "", consensusFork.genesisTestRuntimeConfig, inMemory = true) - check: - db.containsState(root) - hash_tree_root(db.getPhase0StateRef(root)[]) == root - - db.delState(ConsensusFork.Phase0, root) - check: - not db.containsState(root) - db.getPhase0StateRef(root).isNil - - db.close() - - test "sanity check Altair states" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - - for state in testStatesAltair: - let root = state[].altairData.root - db.putState(root, state[].altairData.data) - - check: - db.containsState(root) - hash_tree_root(db.getAltairStateRef(root)[]) == root - - db.delState(ConsensusFork.Altair, root) - check: - not db.containsState(root) - db.getAltairStateRef(root).isNil - - db.close() - - test "sanity check Bellatrix states" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - - for state in testStatesBellatrix: - let root = state[].bellatrixData.root - db.putState(root, state[].bellatrixData.data) - - check: - db.containsState(root) - hash_tree_root(db.getBellatrixStateRef(root)[]) == root - - db.delState(ConsensusFork.Bellatrix, root) - check: - not db.containsState(root) - db.getBellatrixStateRef(root).isNil - - db.close() - - test "sanity check Capella states" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - - for state in testStatesCapella: - let root = state[].capellaData.root - db.putState(root, state[].capellaData.data) - - check: - db.containsState(root) - hash_tree_root(db.getCapellaStateRef(root)[]) == root - - db.delState(ConsensusFork.Capella, root) - check: - not db.containsState(root) - db.getCapellaStateRef(root).isNil + let + signedBlock = withDigest((consensusFork.TrustedBeaconBlock)()) + root = hash_tree_root(signedBlock.message) - db.close() - - test "sanity check Deneb states" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - - for state in testStatesDeneb: - let root = state[].denebData.root - db.putState(root, state[].denebData.data) + db.putBlock(signedBlock) + var tmp, tmp2: seq[byte] + check db.containsBlock(root) + const fork = consensusFork + withAll(ConsensusFork): + let ok = db.containsBlock(root, consensusFork.TrustedSignedBeaconBlock) + check ok == (consensusFork == fork) check: - db.containsState(root) - hash_tree_root(db.getDenebStateRef(root)[]) == root - - db.delState(ConsensusFork.Deneb, root) + db.getBlock( + root, consensusFork.TrustedSignedBeaconBlock).get() == signedBlock + db.getBlockSSZ(root, tmp, consensusFork.TrustedSignedBeaconBlock) + db.getBlockSZ(root, tmp2, consensusFork.TrustedSignedBeaconBlock) + tmp == SSZ.encode(signedBlock) + tmp2 == encodeFramed(tmp) + uncompressedLenFramed(tmp2).isSome + + db.delBlock(consensusFork, root) + not db.containsBlock(root) + withAll(ConsensusFork): + check not db.containsBlock(root, 
consensusFork.TrustedSignedBeaconBlock) check: - not db.containsState(root) - db.getDenebStateRef(root).isNil + db.getBlock(root, consensusFork.TrustedSignedBeaconBlock).isErr() + not db.getBlockSSZ(root, tmp, consensusFork.TrustedSignedBeaconBlock) + not db.getBlockSZ(root, tmp2, consensusFork.TrustedSignedBeaconBlock) - db.close() - - test "sanity check Electra states" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - - for state in testStatesElectra: - let root = state[].electraData.root - db.putState(root, state[].electraData.data) - - check: - db.containsState(root) - hash_tree_root(db.getElectraStateRef(root)[]) == root + db.putStateRoot(root, signedBlock.message.slot, root) + var root2 = root + root2.data[0] = root.data[0] + 1 + db.putStateRoot(root, signedBlock.message.slot + 1, root2) - db.delState(ConsensusFork.Electra, root) check: - not db.containsState(root) - db.getElectraStateRef(root).isNil - - db.close() - - test "sanity check Fulu states" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - - for state in testStatesFulu: - let root = state[].fuluData.root - db.putState(root, state[].fuluData.data) - - check: - db.containsState(root) - hash_tree_root(db.getFuluStateRef(root)[]) == root - - db.delState(ConsensusFork.Fulu, root) - check: - not db.containsState(root) - db.getFuluStateRef(root).isNil - - db.close() - - test "sanity check phase 0 states, reusing buffers" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - let stateBuffer = (phase0.BeaconStateRef)() - - for state in testStatesPhase0: - let root = state[].phase0Data.root - db.putState(root, state[].phase0Data.data) - - check: - db.getState(root, stateBuffer[], noRollback) - db.containsState(root) - hash_tree_root(stateBuffer[]) == root - - db.delState(ConsensusFork.Phase0, root) - check: - not db.containsState(root) - not db.getState(root, stateBuffer[], noRollback) - - db.close() - - test "sanity check Altair states, reusing buffers" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - let stateBuffer = (altair.BeaconStateRef)() - - for state in testStatesAltair: - let root = state[].altairData.root - db.putState(root, state[].altairData.data) - - check: - db.getState(root, stateBuffer[], noRollback) - db.containsState(root) - hash_tree_root(stateBuffer[]) == root - - db.delState(ConsensusFork.Altair, root) - check: - not db.containsState(root) - not db.getState(root, stateBuffer[], noRollback) - - db.close() - - test "sanity check Bellatrix states, reusing buffers" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - let stateBuffer = (bellatrix.BeaconStateRef)() - - for state in testStatesBellatrix: - let root = state[].bellatrixData.root - db.putState(root, state[].bellatrixData.data) - - check: - db.getState(root, stateBuffer[], noRollback) - db.containsState(root) - hash_tree_root(stateBuffer[]) == root - - db.delState(ConsensusFork.Bellatrix, root) - check: - not db.containsState(root) - not db.getState(root, stateBuffer[], noRollback) - - db.close() - - test "sanity check Capella states, reusing buffers" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - let stateBuffer = (capella.BeaconStateRef)() - - for state in testStatesCapella: - let root = state[].capellaData.root - db.putState(root, state[].capellaData.data) - - check: - db.getState(root, stateBuffer[], noRollback) - db.containsState(root) - hash_tree_root(stateBuffer[]) == root - - db.delState(ConsensusFork.Capella, root) - check: - not db.containsState(root) - not db.getState(root, stateBuffer[], noRollback) - - db.close() - - test "sanity 
check Deneb states, reusing buffers" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - let stateBuffer = (deneb.BeaconStateRef)() - - for state in testStatesDeneb: - let root = state[].denebData.root - db.putState(root, state[].denebData.data) - - check: - db.getState(root, stateBuffer[], noRollback) - db.containsState(root) - hash_tree_root(stateBuffer[]) == root - - db.delState(ConsensusFork.Deneb, root) - check: - not db.containsState(root) - not db.getState(root, stateBuffer[], noRollback) - - db.close() - - test "sanity check Electra states, reusing buffers" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - let stateBuffer = (electra.BeaconStateRef)() - - for state in testStatesElectra: - let root = state[].electraData.root - db.putState(root, state[].electraData.data) - - check: - db.getState(root, stateBuffer[], noRollback) - db.containsState(root) - hash_tree_root(stateBuffer[]) == root - - db.delState(ConsensusFork.Electra, root) - check: - not db.containsState(root) - not db.getState(root, stateBuffer[], noRollback) - - db.close() - - test "sanity check Fulu states, reusing buffers" & preset(): - let db = makeTestDB(SLOTS_PER_EPOCH) - let stateBuffer = (fulu.BeaconStateRef)() - - for state in testStatesFulu: - let root = state[].fuluData.root - db.putState(root, state[].fuluData.data) - - check: - db.getState(root, stateBuffer[], noRollback) - db.containsState(root) - hash_tree_root(stateBuffer[]) == root - - db.delState(ConsensusFork.Fulu, root) - check: - not db.containsState(root) - not db.getState(root, stateBuffer[], noRollback) - - db.close() - - test "sanity check phase 0 getState rollback" & preset(): - var - db = makeTestDB(SLOTS_PER_EPOCH) - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) - state = (ref ForkedHashedBeaconState)( - kind: ConsensusFork.Phase0, - phase0Data: phase0.HashedBeaconState(data: phase0.BeaconState( - slot: 10.Slot))) - root = Eth2Digest() - - db.putCorruptState(ConsensusFork.Phase0, root) - - let restoreAddr = addr dag.headState - - func restore() = - assign(state[], restoreAddr[]) - - check: - state[].phase0Data.data.slot == 10.Slot - not db.getState(root, state[].phase0Data.data, restore) - state[].phase0Data.data.slot != 10.Slot - - test "sanity check Altair and cross-fork getState rollback" & preset(): - var - db = makeTestDB(SLOTS_PER_EPOCH) - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) - state = (ref ForkedHashedBeaconState)( - kind: ConsensusFork.Altair, - altairData: altair.HashedBeaconState(data: altair.BeaconState( - slot: 10.Slot))) - root = Eth2Digest() - - db.putCorruptState(ConsensusFork.Altair, root) - - let restoreAddr = addr dag.headState - - func restore() = - assign(state[], restoreAddr[]) - - check: - state[].altairData.data.slot == 10.Slot - not db.getState(root, state[].altairData.data, restore) - - # assign() has switched the case object fork - state[].kind == ConsensusFork.Phase0 - state[].phase0Data.data.slot != 10.Slot - - test "sanity check Bellatrix and cross-fork getState rollback" & preset(): - var - db = makeTestDB(SLOTS_PER_EPOCH) - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) - state = (ref ForkedHashedBeaconState)( - kind: ConsensusFork.Bellatrix, - bellatrixData: bellatrix.HashedBeaconState(data: bellatrix.BeaconState( - slot: 10.Slot))) - root = Eth2Digest() - - 
db.putCorruptState(ConsensusFork.Bellatrix, root) - - let restoreAddr = addr dag.headState - - func restore() = - assign(state[], restoreAddr[]) - - check: - state[].bellatrixData.data.slot == 10.Slot - not db.getState(root, state[].bellatrixData.data, restore) - - # assign() has switched the case object fork - state[].kind == ConsensusFork.Phase0 - state[].phase0Data.data.slot != 10.Slot - - test "sanity check Capella and cross-fork getState rollback" & preset(): - var - db = makeTestDB(SLOTS_PER_EPOCH) - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) - state = (ref ForkedHashedBeaconState)( - kind: ConsensusFork.Capella, - capellaData: capella.HashedBeaconState(data: capella.BeaconState( - slot: 10.Slot))) - root = Eth2Digest() - - db.putCorruptState(ConsensusFork.Capella, root) - - let restoreAddr = addr dag.headState - - func restore() = - assign(state[], restoreAddr[]) - - check: - state[].capellaData.data.slot == 10.Slot - not db.getState(root, state[].capellaData.data, restore) - - # assign() has switched the case object fork - state[].kind == ConsensusFork.Phase0 - state[].phase0Data.data.slot != 10.Slot - - test "sanity check Deneb and cross-fork getState rollback" & preset(): - var - db = makeTestDB(SLOTS_PER_EPOCH) - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) - state = (ref ForkedHashedBeaconState)( - kind: ConsensusFork.Deneb, - denebData: deneb.HashedBeaconState(data: deneb.BeaconState( - slot: 10.Slot))) - root = Eth2Digest() - - db.putCorruptState(ConsensusFork.Deneb, root) - - let restoreAddr = addr dag.headState - - func restore() = - assign(state[], restoreAddr[]) - - check: - state[].denebData.data.slot == 10.Slot - not db.getState(root, state[].denebData.data, restore) + db.getStateRoot(root, signedBlock.message.slot).get() == root + db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2 + + db.close() + + withAll(ConsensusFork): + let name = "sanity check " & $consensusFork & " blocks" + test name & preset(): + when consensusFork >= ConsensusFork.Gloas: + skip() + else: + consensusFork.doBlockTest() + + template doStateTest(consensusFork: static ConsensusFork): untyped = + block: + let db = cfg.makeTestDB(SLOTS_PER_EPOCH) + + for state in testStates[consensusFork]: + let root = state[].forky(consensusFork).root + db.putState(root, state[].forky(consensusFork).data) + + check: + db.containsState(root) + hash_tree_root(db.getStateRef(consensusFork, root)[]) == root + + db.delState(consensusFork, root) + check: + not db.containsState(root) + db.getStateRef(consensusFork, root).isNil + + db.close() + + withAll(ConsensusFork): + let name = "sanity check " & $consensusFork & " states" + test name & preset(): + when consensusFork >= ConsensusFork.Gloas: + skip() + else: + consensusFork.doStateTest() + + template doStateTestReusingBuffers( + consensusFork: static ConsensusFork): untyped = + block: + let + db = cfg.makeTestDB(SLOTS_PER_EPOCH) + stateBuffer = (consensusFork.BeaconStateRef)() + + for state in testStates[consensusFork]: + let root = state[].forky(consensusFork).root + db.putState(root, state[].forky(consensusFork).data) + + check: + db.getState(root, stateBuffer[], noRollback) + db.containsState(root) + hash_tree_root(stateBuffer[]) == root + + db.delState(consensusFork, root) + check: + not db.containsState(root) + not db.getState(root, stateBuffer[], noRollback) + + db.close() + + 
withAll(ConsensusFork): + let name = "sanity check " & $consensusFork & " states, reusing buffers" + test name & preset(): + when consensusFork >= ConsensusFork.Gloas: + skip() + else: + consensusFork.doStateTestReusingBuffers() + + template doRollbackTest(consensusFork: static ConsensusFork): untyped = + block: + var + db = cfg.makeTestDB(SLOTS_PER_EPOCH) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) + dag = init(ChainDAGRef, cfg, db, validatorMonitor, {}) + state = ForkedHashedBeaconState.new( + (ref consensusFork.BeaconState)(slot: 10.Slot)[]) + root = Eth2Digest() + + db.putCorruptState(consensusFork, root) + + let restoreAddr = addr dag.headState + + func restore() = + assign(state[], restoreAddr[]) + + withState(state[]): + check: + forkyState.data.slot == 10.Slot + not db.getState(root, forkyState.data, restore) # assign() has switched the case object fork - state[].kind == ConsensusFork.Phase0 - state[].phase0Data.data.slot != 10.Slot - - test "sanity check Electra and cross-fork getState rollback" & preset(): - var - db = makeTestDB(SLOTS_PER_EPOCH) - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) - state = (ref ForkedHashedBeaconState)( - kind: ConsensusFork.Electra, - electraData: electra.HashedBeaconState(data: electra.BeaconState( - slot: 10.Slot))) - root = Eth2Digest() - - db.putCorruptState(ConsensusFork.Electra, root) - - let restoreAddr = addr dag.headState - - func restore() = - assign(state[], restoreAddr[]) - - check: - state[].electraData.data.slot == 10.Slot - not db.getState(root, state[].electraData.data, restore) - - # assign() has switched the case object fork - state[].kind == ConsensusFork.Phase0 - state[].phase0Data.data.slot != 10.Slot - - test "sanity check Fulu and cross-fork getState rollback" & preset(): - var - db = makeTestDB(SLOTS_PER_EPOCH) - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) - state = (ref ForkedHashedBeaconState)( - kind: ConsensusFork.Fulu, - fuluData: fulu.HashedBeaconState(data: fulu.BeaconState( - slot: 10.Slot))) - root = Eth2Digest() - - db.putCorruptState(ConsensusFork.Fulu, root) - - let restoreAddr = addr dag.headState - - func restore() = - assign(state[], restoreAddr[]) - - check: - state[].fuluData.data.slot == 10.Slot - not db.getState(root, state[].fuluData.data, restore) - - # assign() has switched the case object fork - state[].kind == ConsensusFork.Phase0 - state[].phase0Data.data.slot != 10.Slot + check: + state[].kind == ConsensusFork.Phase0 + state[].phase0Data.data.slot != 10.Slot + + withAll(ConsensusFork): + let name = "sanity check " & $consensusFork & + (if consensusFork > ConsensusFork.Phase0: " and cross-fork" else: "") & + " getState rollback" + test name & preset(): + when consensusFork >= ConsensusFork.Gloas: + skip() + else: + consensusFork.doRollbackTest() test "find ancestors" & preset(): - var db = BeaconChainDB.new("", inMemory = true) + var db = BeaconChainDB.new("", cfg, inMemory = true) let a0 = withDigest( @@ -1004,17 +327,17 @@ suite "Beacon chain DB" & preset(): # state. We've been bit by this because we've had a bug in the BLS # serialization where an all-zero default-initialized bls signature could # not be deserialized because the deserialization was too strict. 
- var db = BeaconChainDB.new("", inMemory = true) + var db = BeaconChainDB.new("", cfg, inMemory = true) let state = newClone(initialize_hashed_beacon_state_from_eth1( - defaultRuntimeConfig, mockEth1BlockHash, 0, + cfg, mockEth1BlockHash, 0, makeInitialDeposits(SLOTS_PER_EPOCH), {skipBlsValidation})) db.putState(state[].root, state[].data) check db.containsState(state[].root) - let state2 = db.getPhase0StateRef(state[].root) + let state2 = db.getStateRef(ConsensusFork.Phase0, state[].root) db.delState(ConsensusFork.Phase0, state[].root) check not db.containsState(state[].root) db.close() @@ -1023,7 +346,7 @@ suite "Beacon chain DB" & preset(): hash_tree_root(state2[]) == state[].root test "sanity check state diff roundtrip" & preset(): - var db = BeaconChainDB.new("", inMemory = true) + var db = BeaconChainDB.new("", cfg, inMemory = true) # TODO htr(diff) probably not interesting/useful, but stand-in let @@ -1057,7 +380,7 @@ suite "Beacon chain DB" & preset(): blobSidecar1 = BlobSidecar(signed_block_header: blockHeader0, index: 2) blobSidecar2 = BlobSidecar(signed_block_header: blockHeader1, index: 2) - db = makeTestDB(SLOTS_PER_EPOCH) + db = cfg.makeTestDB(SLOTS_PER_EPOCH) var buf: seq[byte] @@ -1151,17 +474,17 @@ suite "Beacon chain DB" & preset(): blockRoot0 = hash_tree_root(blockHeader0.message) blockRoot1 = hash_tree_root(blockHeader1.message) - # Ensure minimal-difference pairs on both block root and + # Ensure minimal-difference pairs on both block root and # data column index to verify that the columnkey uses both - dataColumnSidecar0 = DataColumnSidecar(signed_block_header: blockHeader0, index: 3) - dataColumnSidecar1 = DataColumnSidecar(signed_block_header: blockHeader0, index: 2) - dataColumnSidecar2 = DataColumnSidecar(signed_block_header: blockHeader1, index: 2) + dataColumnSidecar0 = fulu.DataColumnSidecar(signed_block_header: blockHeader0, index: 3) + dataColumnSidecar1 = fulu.DataColumnSidecar(signed_block_header: blockHeader0, index: 2) + dataColumnSidecar2 = fulu.DataColumnSidecar(signed_block_header: blockHeader1, index: 2) - db = makeTestDB(SLOTS_PER_EPOCH) + db = cfg.makeTestDB(SLOTS_PER_EPOCH) var buf: seq[byte] - dataColumnSidecar: DataColumnSidecar + dataColumnSidecar: fulu.DataColumnSidecar check: not db.getDataColumnSidecar(blockRoot0, 3, dataColumnSidecar) @@ -1172,7 +495,7 @@ suite "Beacon chain DB" & preset(): not db.getDataColumnSidecarSZ(blockRoot1, 2, buf) db.putDataColumnSidecar(dataColumnSidecar0) - + check: db.getDataColumnSidecar(blockRoot0, 3, dataColumnSidecar) dataColumnSidecar == dataColumnSidecar0 @@ -1240,6 +563,221 @@ suite "Beacon chain DB" & preset(): db.close() +suite "Quarantine" & preset(): + setup: + let + db = BeaconChainDB.new("", cfg, inMemory = true) + quarantine = db.getQuarantineDB() + + teardown: + db.close() + + func genBlockRoot(index: int): Eth2Digest = + var res: Eth2Digest + let tmp = uint64(index).toBytesLE() + copyMem(addr res.data[0], unsafeAddr tmp[0], sizeof(uint64)) + res + + func genKzgCommitment(index: int): KzgCommitment = + var res: KzgCommitment + let tmp = uint64(index).toBytesLE() + copyMem(addr res.bytes[0], unsafeAddr tmp[0], sizeof(uint64)) + res + + func genBlobSidecar( + index: int, + slot: int, + kzg_commitment: int, + proposer_index: int + ): BlobSidecar = + BlobSidecar( + index: BlobIndex(index), + kzg_commitment: genKzgCommitment(kzg_commitment), + signed_block_header: SignedBeaconBlockHeader( + message: BeaconBlockHeader( + slot: Slot(slot), + proposer_index: uint64(proposer_index)))) + + func 
genDataColumnSidecar( + index: int, + slot: int, + proposer_index: int + ): fulu.DataColumnSidecar = + fulu.DataColumnSidecar( + index: ColumnIndex(index), + signed_block_header: SignedBeaconBlockHeader( + message: BeaconBlockHeader( + slot: Slot(slot), + proposer_index: uint64(proposer_index)))) + + proc cmp( + a: openArray[ref BlobSidecar|ref fulu.DataColumnSidecar], + b: openArray[ref BlobSidecar|ref fulu.DataColumnSidecar] + ): bool = + if len(a) != len(b): + return false + for index in 0 ..< len(a): + if a[index][] != b[index][]: + return false + true + + proc generateBlobSidecars(): seq[ref BlobSidecar] = + @[ + newClone(genBlobSidecar(0, 100, 10, 24)), + newClone(genBlobSidecar(1, 100, 11, 24)), + newClone(genBlobSidecar(2, 100, 12, 24)), + newClone(genBlobSidecar(3, 100, 13, 24)), + newClone(genBlobSidecar(4, 100, 14, 24)), + newClone(genBlobSidecar(5, 100, 15, 24)), + newClone(genBlobSidecar(6, 100, 16, 24)), + newClone(genBlobSidecar(7, 100, 17, 24)), + newClone(genBlobSidecar(8, 100, 18, 24)) + ] + + proc generateDataColumnSidecars(): seq[ref fulu.DataColumnSidecar] = + @[ + newClone(genDataColumnSidecar(0, 200, 100234)), + newClone(genDataColumnSidecar(7, 200, 100234)), + newClone(genDataColumnSidecar(14, 200, 100234)), + newClone(genDataColumnSidecar(21, 200, 100234)), + newClone(genDataColumnSidecar(28, 200, 100234)), + newClone(genDataColumnSidecar(35, 200, 100234)), + newClone(genDataColumnSidecar(42, 200, 100234)), + newClone(genDataColumnSidecar(49, 200, 100234)), + newClone(genDataColumnSidecar(56, 200, 100234)), + newClone(genDataColumnSidecar(63, 200, 100234)), + newClone(genDataColumnSidecar(70, 200, 100234)), + newClone(genDataColumnSidecar(77, 200, 100234)), + newClone(genDataColumnSidecar(84, 200, 100234)), + newClone(genDataColumnSidecar(91, 200, 100234)), + newClone(genDataColumnSidecar(98, 200, 100234)), + newClone(genDataColumnSidecar(127, 200, 100234)), + ] + + proc getSidecars( + quarantine: QuarantineDB, + T: typedesc[BlobSidecar|fulu.DataColumnSidecar], + blockRoot: Eth2Digest + ): seq[ref T] = + var res: seq[ref T] + for item in quarantine.sidecars(T, blockRoot): + res.add(newClone(item)) + res + + proc runDataSidecarTest( + quarantine: QuarantineDB, + T: typedesc[ForkyDataSidecar] + ) = + let + broots = @[ + genBlockRoot(100), genBlockRoot(200), genBlockRoot(300) + ] + sidecars = + when T is deneb.BlobSidecar: + generateBlobSidecars() + else: + generateDataColumnSidecars() + offsets = + when T is deneb.BlobSidecar: + @[(0, 8), (0, 3), (0, 5)] + else: + @[(0, 15), (4, 11), (0, 7)] + + check: + len(quarantine.getSidecars(T, broots[0])) == 0 + len(quarantine.getSidecars(T, broots[1])) == 0 + len(quarantine.getSidecars(T, broots[2])) == 0 + quarantine.sidecarsCount(T) == 0 + + quarantine.removeDataSidecars(T, broots[0]) + quarantine.removeDataSidecars(T, broots[1]) + quarantine.removeDataSidecars(T, broots[2]) + + quarantine.putDataSidecars(broots[0], + sidecars.toOpenArray(offsets[0][0], offsets[0][1])) + + block: + let + res1 = quarantine.getSidecars(T, broots[0]) + check: + quarantine.sidecarsCount(T) == len(res1) + len(res1) == (offsets[0][1] - offsets[0][0] + 1) + cmp(res1, sidecars.toOpenArray(offsets[0][0], offsets[0][1])) == true + len(quarantine.getSidecars(T, broots[1])) == 0 + len(quarantine.getSidecars(T, broots[2])) == 0 + + quarantine.putDataSidecars(broots[1], + sidecars.toOpenArray(offsets[1][0], offsets[1][1])) + + block: + let + res1 = quarantine.getSidecars(T, broots[0]) + res2 = quarantine.getSidecars(T, broots[1]) + check: + 
quarantine.sidecarsCount(T) == len(res1) + len(res2) + len(res1) == (offsets[0][1] - offsets[0][0] + 1) + len(res2) == (offsets[1][1] - offsets[1][0] + 1) + cmp(res1, sidecars.toOpenArray(offsets[0][0], offsets[0][1])) == true + cmp(res2, sidecars.toOpenArray(offsets[1][0], offsets[1][1])) == true + len(quarantine.getSidecars(T, broots[2])) == 0 + + quarantine.putDataSidecars(broots[2], + sidecars.toOpenArray(offsets[2][0], offsets[2][1])) + + block: + let + res1 = quarantine.getSidecars(T, broots[0]) + res2 = quarantine.getSidecars(T, broots[1]) + res3 = quarantine.getSidecars(T, broots[2]) + check: + len(res1) == (offsets[0][1] - offsets[0][0] + 1) + len(res2) == (offsets[1][1] - offsets[1][0] + 1) + len(res3) == (offsets[2][1] - offsets[2][0] + 1) + quarantine.sidecarsCount(T) == len(res1) + len(res2) + len(res3) + cmp(res1, sidecars.toOpenArray(offsets[0][0], offsets[0][1])) == true + cmp(res2, sidecars.toOpenArray(offsets[1][0], offsets[1][1])) == true + cmp(res3, sidecars.toOpenArray(offsets[2][0], offsets[2][1])) == true + + quarantine.removeDataSidecars(T, broots[1]) + + block: + let + res1 = quarantine.getSidecars(T, broots[0]) + res3 = quarantine.getSidecars(T, broots[2]) + check: + len(res1) == (offsets[0][1] - offsets[0][0] + 1) + cmp(res1, sidecars.toOpenArray(offsets[0][0], offsets[0][1])) == true + len(quarantine.getSidecars(T, broots[1])) == 0 + len(res3) == (offsets[2][1] - offsets[2][0] + 1) + cmp(res3, sidecars.toOpenArray(offsets[2][0], offsets[2][1])) == true + quarantine.sidecarsCount(T) == len(res1) + len(res3) + + quarantine.removeDataSidecars(T, broots[0]) + + block: + let + res3 = quarantine.getSidecars(T, broots[2]) + check: + len(quarantine.getSidecars(T, broots[0])) == 0 + len(quarantine.getSidecars(T, broots[1])) == 0 + len(res3) == (offsets[2][1] - offsets[2][0] + 1) + cmp(res3, sidecars.toOpenArray(offsets[2][0], offsets[2][1])) == true + quarantine.sidecarsCount(T) == len(res3) + + quarantine.removeDataSidecars(T, broots[2]) + + check: + len(quarantine.getSidecars(T, broots[0])) == 0 + len(quarantine.getSidecars(T, broots[1])) == 0 + len(quarantine.getSidecars(T, broots[2])) == 0 + quarantine.sidecarsCount(T) == 0 + + test "put/iterate/remove test [BlobSidecars]": + quarantine.runDataSidecarTest(deneb.BlobSidecar) + + test "put/iterate/remove test [DataColumnSidecar]": + quarantine.runDataSidecarTest(fulu.DataColumnSidecar) + suite "FinalizedBlocks" & preset(): test "Basic ops" & preset(): var @@ -1267,4 +805,4 @@ suite "FinalizedBlocks" & preset(): check: k in [Slot 0, Slot 5] items += 1 - check: items == 2 \ No newline at end of file + check: items == 2 diff --git a/tests/test_block_dag.nim b/tests/test_block_dag.nim index 457bb063fa..bd12fd865f 100644 --- a/tests/test_block_dag.nim +++ b/tests/test_block_dag.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -59,8 +59,6 @@ suite "BlockSlot and helpers": s1 = BlockRef(bid: BlockId(slot: Slot(1)), parent: s0) s2 = BlockRef(bid: BlockId(slot: Slot(2)), parent: s1) s4 = BlockRef(bid: BlockId(slot: Slot(4)), parent: s2) - se1 = BlockRef(bid: - BlockId(slot: Epoch(1).start_slot()), parent: s2) check: s0.atSlot(Slot(0)).blck == s0 diff --git a/tests/test_beacon_validators.nim b/tests/test_block_payloads.nim similarity index 94% rename from tests/test_beacon_validators.nim rename to tests/test_block_payloads.nim index 9d72b60f34..1e43bf639e 100644 --- a/tests/test_beacon_validators.nim +++ b/tests/test_block_payloads.nim @@ -8,10 +8,10 @@ {.push raises: [].} {.used.} -import unittest2, results, chronos, stint -import ../beacon_chain/validators/beacon_validators, - ../beacon_chain/spec/datatypes/base, - ../beacon_chain/spec/eth2_apis/eth2_rest_serialization +import + unittest2, + ../beacon_chain/spec/eth2_apis/eth2_rest_serialization, + ../beacon_chain/validators/block_payloads suite "Beacon validators test suite": test "builderBetterBid(builderBoostFactor) test": diff --git a/tests/test_block_processor.nim b/tests/test_block_processor.nim index 67a392c666..89cc4d8d90 100644 --- a/tests/test_block_processor.nim +++ b/tests/test_block_processor.nim @@ -1,11 +1,11 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import @@ -15,7 +15,7 @@ import taskpools, ../beacon_chain/conf, ../beacon_chain/spec/[beaconstate, forks, helpers, state_transition], - ../beacon_chain/spec/datatypes/deneb, + ../beacon_chain/spec/datatypes/[deneb, fulu], ../beacon_chain/gossip_processing/block_processor, ../beacon_chain/consensus_object_pools/[ attestation_pool, blockchain_dag, blob_quarantine, block_quarantine, @@ -35,14 +35,21 @@ proc pruneAtFinalization(dag: ChainDAGRef) = suite "Block processor" & preset(): setup: - let rng = HmacDrbgContext.new() + let + rng = HmacDrbgContext.new() + cfg = block: + var res = defaultRuntimeConfig + res.ALTAIR_FORK_EPOCH = GENESIS_EPOCH + res.BELLATRIX_FORK_EPOCH = GENESIS_EPOCH + res + db = cfg.makeTestDB(SLOTS_PER_EPOCH) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) + dag = init(ChainDAGRef, cfg, db, validatorMonitor, {}) var - db = makeTestDB(SLOTS_PER_EPOCH) - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) taskpool = Taskpool.new() - quarantine = newClone(Quarantine.init()) + quarantine = newClone(Quarantine.init(cfg)) blobQuarantine = newClone(BlobQuarantine()) + dataColumnQuarantine = newClone(ColumnQuarantine()) attestationPool = newClone(AttestationPool.init(dag, quarantine)) elManager = new ELManager # TODO: initialise this properly actionTracker: ActionTracker @@ -51,21 +58,21 @@ suite "Block processor" & preset(): newClone(DynamicFeeRecipientsStore.init()), "", Opt.some default(Eth1Address), defaultGasLimit) state = newClone(dag.headState) - cache = StateCache() - b1 = addTestBlock(state[], cache).phase0Data - b2 = addTestBlock(state[], cache).phase0Data + cache: StateCache + info: 
ForkedEpochInfo + cfg.process_slots( + state[], cfg.lastPremergeSlotInTestCfg, cache, info, {}).expect("OK") + var + b1 = addTestBlock(state[], cache, cfg = cfg).bellatrixData + b2 = addTestBlock(state[], cache, cfg = cfg).bellatrixData getTimeFn = proc(): BeaconTime = b2.message.slot.start_beacon_time() batchVerifier = BatchVerifier.new(rng, taskpool) processor = BlockProcessor.new( false, "", "", batchVerifier, consensusManager, - validatorMonitor, blobQuarantine, getTimeFn) - processorFut = processor.runQueueProcessingLoop() + validatorMonitor, blobQuarantine, dataColumnQuarantine, getTimeFn) asyncTest "Reverse order block add & get" & preset(): - let - missing = await processor[].addBlock( - MsgSource.gossip, ForkedSignedBeaconBlock.init(b2), - Opt.none(BlobSidecars)) + let missing = await processor.addBlock(MsgSource.gossip, b2, noSidecars) check: missing.error == VerifierError.MissingParent @@ -75,9 +82,7 @@ suite "Block processor" & preset(): FetchRecord(root: b1.root) in quarantine[].checkMissing(32) let - status = await processor[].addBlock( - MsgSource.gossip, ForkedSignedBeaconBlock.init(b1), - Opt.none(BlobSidecars)) + status = await processor.addBlock(MsgSource.gossip, b1, noSidecars) b1Get = dag.getBlockRef(b1.root) check: @@ -107,8 +112,8 @@ suite "Block processor" & preset(): # check that init also reloads block graph var - validatorMonitor2 = newClone(ValidatorMonitor.init()) - dag2 = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor2, {}) + validatorMonitor2 = newClone(ValidatorMonitor.init(cfg.time)) + dag2 = init(ChainDAGRef, cfg, db, validatorMonitor2, {}) check: # ensure we loaded the correct head state @@ -118,3 +123,36 @@ suite "Block processor" & preset(): dag2.getBlockRef(b2.root).isSome() dag2.heads.len == 1 dag2.heads[0].root == b2.root + + asyncTest "Invalidate block root" & preset(): + let + processor = BlockProcessor.new( + false, "", "", batchVerifier, consensusManager, + validatorMonitor, blobQuarantine, dataColumnQuarantine, + getTimeFn, invalidBlockRoots = @[b2.root]) + + block: + let res = await processor.addBlock(MsgSource.gossip, b2, noSidecars) + check: + res.isErr + not dag.containsForkBlock(b1.root) + not dag.containsForkBlock(b2.root) + + block: + let res = await processor.addBlock(MsgSource.gossip, b1, noSidecars) + check: + res.isOk + dag.containsForkBlock(b1.root) + not dag.containsForkBlock(b2.root) + while processor[].hasBlocks(): + poll() + check: + dag.containsForkBlock(b1.root) + not dag.containsForkBlock(b2.root) + + block: + let res = await processor.addBlock(MsgSource.gossip, b2, noSidecars) + check: + res == Result[void, VerifierError].err VerifierError.Invalid + dag.containsForkBlock(b1.root) + not dag.containsForkBlock(b2.root) diff --git a/tests/test_block_quarantine.nim b/tests/test_block_quarantine.nim index 85b6c140b7..ee16204a28 100644 --- a/tests/test_block_quarantine.nim +++ b/tests/test_block_quarantine.nim @@ -10,8 +10,7 @@ import unittest2, - chronicles, - ../beacon_chain/spec/forks, + ../beacon_chain/spec/[forks, presets], ../beacon_chain/spec/datatypes/[phase0, deneb], ../beacon_chain/consensus_object_pools/block_quarantine @@ -40,7 +39,7 @@ suite "Block quarantine": b5 = makeBlobbyBlock(Slot 4, b3.root) b6 = makeBlobbyBlock(Slot 4, b4.root) - var quarantine: Quarantine + var quarantine = Quarantine.init(defaultRuntimeConfig) quarantine.addMissing(b1.root) check: @@ -54,20 +53,20 @@ suite "Block quarantine": quarantine.addOrphan(Slot 0, b3).isOk quarantine.addOrphan(Slot 0, b4).isOk - quarantine.addBlobless(Slot 
0, b5) - quarantine.addBlobless(Slot 0, b6) + quarantine.addSidecarless(Slot 0, b5) + quarantine.addSidecarless(Slot 0, b6) (b4.root, ValidatorSig()) in quarantine.orphans - b5.root in quarantine.blobless - b6.root in quarantine.blobless + b5.root in quarantine.sidecarless + b6.root in quarantine.sidecarless quarantine.addUnviable(b4.root) check: (b4.root, ValidatorSig()) notin quarantine.orphans - b5.root in quarantine.blobless - b6.root notin quarantine.blobless + b5.root in quarantine.sidecarless + b6.root notin quarantine.sidecarless quarantine.addUnviable(b1.root) @@ -76,8 +75,8 @@ suite "Block quarantine": (b2.root, ValidatorSig()) notin quarantine.orphans (b3.root, ValidatorSig()) notin quarantine.orphans - b5.root notin quarantine.blobless - b6.root notin quarantine.blobless + b5.root notin quarantine.sidecarless + b6.root notin quarantine.sidecarless test "Recursive missing parent": let @@ -85,7 +84,7 @@ suite "Block quarantine": b1 = makeBlock(Slot 1, b0.root) b2 = makeBlock(Slot 2, b1.root) - var quarantine: Quarantine + var quarantine = Quarantine.init(defaultRuntimeConfig) check: b0.root notin quarantine.missing b1.root notin quarantine.missing @@ -121,7 +120,7 @@ suite "Block quarantine": b2.root notin quarantine.missing test "Keep downloading parent chain even if we hit missing limit": - var quarantine: Quarantine + var quarantine = Quarantine.init(defaultRuntimeConfig) var blocks = @[makeBlock(Slot 0, ZERO_HASH)] for i in 0.. 1: @@ -1024,30 +1027,31 @@ suite "Backfill": block: let - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) + dag = init(ChainDAGRef, cfg, db, validatorMonitor, {}) genBlock = get_initial_beacon_block(genState[]) check: dag.addBackfillBlock(genBlock.phase0Data.asSigned()).isOk() dag.backfill == default(BeaconBlockSummary) let - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) + dag = init(ChainDAGRef, cfg, db, validatorMonitor, {}) check dag.backfill == default(BeaconBlockSummary) suite "Starting states": setup: let + cfg = defaultRuntimeConfig genState = (ref ForkedHashedBeaconState)( kind: ConsensusFork.Phase0, phase0Data: initialize_hashed_beacon_state_from_eth1( - defaultRuntimeConfig, ZERO_HASH, 0, + cfg, ZERO_HASH, 0, makeInitialDeposits(SLOTS_PER_EPOCH.uint64, flags = {skipBlsValidation}), {skipBlsValidation})) tailState = assignClone(genState[]) - db = BeaconChainDB.new("", inMemory = true) - quarantine = newClone(Quarantine.init()) + db = BeaconChainDB.new("", cfg, inMemory = true) + quarantine = newClone(Quarantine.init(cfg)) test "Starting state without block": var @@ -1062,15 +1066,14 @@ suite "Starting states": blocks tailBlock = blocks[^1] - check process_slots( - defaultRuntimeConfig, tailState[], Slot(SLOTS_PER_EPOCH), cache, info, - {}).isOk() + check cfg.process_slots( + tailState[], Slot(SLOTS_PER_EPOCH), cache, info, {}).isOk() ChainDAGRef.preInit(db, tailState[]) let - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) + dag = init(ChainDAGRef, cfg, db, validatorMonitor, {}) # check that we can update head to itself dag.updateHead(dag.head, quarantine[], []) @@ -1162,36 +1165,36 @@ suite "Starting states": suite "Latest 
valid hash" & preset(): setup: - let rng = HmacDrbgContext.new() - - var runtimeConfig = defaultRuntimeConfig - runtimeConfig.ALTAIR_FORK_EPOCH = 1.Epoch - runtimeConfig.BELLATRIX_FORK_EPOCH = 2.Epoch - + let + rng = HmacDrbgContext.new() + cfg = block: + var res = defaultRuntimeConfig + res.ALTAIR_FORK_EPOCH = 1.Epoch + res.BELLATRIX_FORK_EPOCH = 2.Epoch + res var - db = makeTestDB(SLOTS_PER_EPOCH) - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, runtimeConfig, db, validatorMonitor, {}) + db = cfg.makeTestDB(SLOTS_PER_EPOCH) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) + dag = init(ChainDAGRef, cfg, db, validatorMonitor, {}) taskpool = Taskpool.new() verifier = BatchVerifier.init(rng, taskpool) - quarantine = newClone(Quarantine.init()) + quarantine = newClone(Quarantine.init(dag.cfg)) cache = StateCache() info = ForkedEpochInfo() state = newClone(dag.headState) test "LVH searching": # Reach Bellatrix, where execution payloads exist - check process_slots( - runtimeConfig, state[], - getStateField(state[], slot) + (3 * SLOTS_PER_EPOCH).uint64, + check cfg.process_slots( + state[], getStateField(state[], slot) + (3 * SLOTS_PER_EPOCH), cache, info, {}).isOk() var - b1 = addTestBlock(state[], cache, cfg = runtimeConfig).bellatrixData + b1 = addTestBlock(state[], cache, cfg = cfg).bellatrixData b1Add = dag.addHeadBlock(verifier, b1, nilBellatrixCallback) - b2 = addTestBlock(state[], cache, cfg = runtimeConfig).bellatrixData + b2 = addTestBlock(state[], cache, cfg = cfg).bellatrixData b2Add = dag.addHeadBlock(verifier, b2, nilBellatrixCallback) - b3 = addTestBlock(state[], cache, cfg = runtimeConfig).bellatrixData + b3 = addTestBlock(state[], cache, cfg = cfg).bellatrixData b3Add = dag.addHeadBlock(verifier, b3, nilBellatrixCallback) dag.updateHead(b3Add[], quarantine[], []) @@ -1237,15 +1240,15 @@ suite "Pruning": res.MIN_EPOCHS_FOR_BLOCK_REQUESTS = res.safeMinEpochsForBlockRequests() doAssert res.MIN_EPOCHS_FOR_BLOCK_REQUESTS == 4 res - db = makeTestDB(SLOTS_PER_EPOCH) - validatorMonitor = newClone(ValidatorMonitor.init()) + db = cfg.makeTestDB(SLOTS_PER_EPOCH) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) dag = init(ChainDAGRef, cfg, db, validatorMonitor, {}) tmpState = assignClone(dag.headState) var taskpool = Taskpool.new() verifier = BatchVerifier.init(rng, taskpool) - quarantine = Quarantine.init() + quarantine = Quarantine.init(dag.cfg) cache = StateCache() blocks = @[dag.head] @@ -1290,11 +1293,11 @@ suite "State history": const numValidators = SLOTS_PER_EPOCH let cfg = defaultRuntimeConfig - validatorMonitor = newClone(ValidatorMonitor.init()) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) dag = ChainDAGRef.init( - cfg, makeTestDB(numValidators, cfg = cfg), + cfg, cfg.makeTestDB(numValidators), validatorMonitor, {}) - quarantine = newClone(Quarantine.init()) + quarantine = newClone(Quarantine.init(dag.cfg)) rng = HmacDrbgContext.new() taskpool = Taskpool.new() var verifier = BatchVerifier.init(rng, taskpool) @@ -1411,11 +1414,11 @@ suite "Ancestry": const numValidators = SLOTS_PER_EPOCH let cfg = defaultRuntimeConfig - validatorMonitor = newClone(ValidatorMonitor.init()) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) dag = ChainDAGRef.init( - cfg, makeTestDB(numValidators, cfg = cfg), + cfg, cfg.makeTestDB(numValidators), validatorMonitor, {}) - quarantine = newClone(Quarantine.init()) + quarantine = newClone(Quarantine.init(dag.cfg)) rng = HmacDrbgContext.new() taskpool = Taskpool.new() 
@@ -1704,13 +1707,12 @@ template runShufflingTests(cfg: RuntimeConfig, numRandomTests: int) = eth1Data = Eth1Data( deposit_root: deposits.attachMerkleProofs(), deposit_count: deposits.lenu64) - validatorMonitor = newClone(ValidatorMonitor.init()) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) dag = ChainDAGRef.init( - cfg, makeTestDB( - numValidators, eth1Data = Opt.some(eth1Data), - flags = {}, cfg = cfg), + cfg, cfg.makeTestDB( + numValidators, eth1Data = Opt.some(eth1Data), flags = {}), validatorMonitor, {}) - quarantine = newClone(Quarantine.init()) + quarantine = newClone(Quarantine.init(dag.cfg)) rng = HmacDrbgContext.new() taskpool = Taskpool.new() @@ -1724,7 +1726,7 @@ template runShufflingTests(cfg: RuntimeConfig, numRandomTests: int) = attested = attested, allDeposits = deposits, graffiti = graffiti, cfg = cfg): let added = withBlck(forkedBlck): - const nilCallback = (consensusFork.OnBlockAddedCallback)(nil) + const nilCallback = OnBlockAdded[consensusFork](nil) dag.addHeadBlock(verifier, forkyBlck, nilCallback) check added.isOk() dag.updateHead(added[], quarantine[], []) diff --git a/tests/test_column_map.nim b/tests/test_column_map.nim new file mode 100644 index 0000000000..d6c6eccba1 --- /dev/null +++ b/tests/test_column_map.nim @@ -0,0 +1,118 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} +{.used.} + +import + std/[strutils, sequtils], unittest2, + ../beacon_chain/spec/column_map, + ../beacon_chain/spec/datatypes/fulu + +suite "ColumnMap test suite": + test "ColumnMap test": + # Filling columns of different sizes with all bits [8, 128) + for columnSize in 8 .. 
128: + let + columnsCount = 128 div columnSize + lastColumnSize = 128 mod columnSize + + for i in 0 ..< columnsCount: + let + start = i * columnSize + finish = start + columnSize + var + columns: seq[ColumnIndex] + numbers: seq[int] + for k in start ..< finish: + columns.add(ColumnIndex(k)) + numbers.add(k) + + check: + $ColumnMap.init(columns) == + "[" & $ numbers.mapIt($it).join(", ") & "]" + + if lastColumnSize > 0: + let + start = columnsCount * columnSize + finish = start + lastColumnSize + var + columns: seq[ColumnIndex] + numbers: seq[int] + for k in start ..< finish: + columns.add(ColumnIndex(k)) + numbers.add(k) + + check: + $ColumnMap.init(columns) == + "[" & $ numbers.mapIt($it).join(", ") & "]" + + # Verify `and` operation is correct + const TestVectors = [ + ( + [1, 2, 3, 4, 5, 6, 7, 8], + [5, 6, 7, 8, 9, 10, 11, 12], + "[5, 6, 7, 8]" + ), + ( + [56, 57, 58, 59, 60, 61, 62, 63], + [60, 61, 62, 63, 64, 65, 66, 67], + "[60, 61, 62, 63]" + ), + ( + [1, 5, 10, 15, 20, 25, 64, 65], + [1, 5, 6, 7, 8, 9, 64, 65], + "[1, 5, 64, 65]" + ), + ( + [60, 61, 62, 63, 124, 125, 126, 127], + [60, 61, 62, 63, 124, 125, 126, 127], + "[60, 61, 62, 63, 124, 125, 126, 127]" + ), + ( + [0, 1, 63, 64, 65, 93, 126, 127], + [0, 2, 63, 64, 67, 94, 126, 127], + "[0, 63, 64, 126, 127]" + ) + ] + + for vector in TestVectors: + let + map1 = ColumnMap.init(vector[0].mapIt(ColumnIndex(it))) + map2 = ColumnMap.init(vector[1].mapIt(ColumnIndex(it))) + check: + $(map1 and map2) == vector[2] + + for vector in TestVectors: + let + map1 = ColumnMap.init(vector[0].mapIt(ColumnIndex(it))) + map2 = ColumnMap.init(vector[1].mapIt(ColumnIndex(it))) + map3 = map1 and map2 + + check: + map1.items().toSeq().mapIt($int(it)).join(", ") == + vector[0].mapIt($it).join(", ") + map2.items().toSeq().mapIt($int(it)).join(", ") == + vector[1].mapIt($it).join(", ") + "[" & map3.items().toSeq().mapIt($int(it)).join(", ") & "]" == + vector[2] + + var columns: seq[ColumnIndex] + for i in 0 ..< NUMBER_OF_COLUMNS: + columns.add(ColumnIndex(i)) + let map = ColumnMap.init(columns) + check: + map.items().toSeq().mapIt($int(it)).join(", ") == + columns.mapIt($it).join(", ") + + for i in 0 ..< NUMBER_OF_COLUMNS: + let testMap = ColumnMap.init([ColumnIndex(i)]) + for k in 0 ..< NUMBER_OF_COLUMNS: + if k == i: + check ColumnIndex(k) in testMap == true + else: + check ColumnIndex(k) in testMap == false diff --git a/tests/test_deposit_snapshots.nim b/tests/test_deposit_snapshots.nim deleted file mode 100644 index d7b1df40fe..0000000000 --- a/tests/test_deposit_snapshots.nim +++ /dev/null @@ -1,311 +0,0 @@ -# beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. 
- -{.push raises: [].} -{.used.} - -import - std/[json, os, random, sequtils, strutils, times], - chronos, - stew/base10, chronicles, unittest2, - yaml/tojson, - ../beacon_chain/beacon_chain_db, - ../beacon_chain/spec/deposit_snapshots, - ./consensus_spec/os_ops - -from eth/db/kvstore import kvStore -from nimcrypto import toDigest -from snappy import encode -from stew/byteutils import hexToSeqByte - -const ROOT = "342cecb5a18945fbbda7c62ede3016f3" - -template databaseRoot: string = getTempDir().joinPath(ROOT) -template key1: array[1, byte] = [byte(kOldDepositContractSnapshot)] - -type - DepositSnapshotUpgradeProc = proc( - old: OldDepositContractSnapshot - ): DepositContractSnapshot {.gcsafe, raises: [].} - -proc ifNecessaryMigrateDCS(db: BeaconChainDB, - upgradeProc: DepositSnapshotUpgradeProc) = - if not db.hasDepositContractSnapshot(): - let oldSnapshot = db.getUpgradableDepositSnapshot() - if oldSnapshot.isSome: - db.putDepositContractSnapshot upgradeProc(oldSnapshot.get) - -# Hexlified copy of -# mainnet/metadata/genesis_deposit_contract_snapshot.ssz -let ds1: seq[byte] = hexToSeqByte( - """ - eeea1373d4aa9e099d7c9deddb694db9aeb4577755ef83f9b6345ce4357d9abfca3bfce2c - 304c4f52e0c83f96daf8c98a05f80281b62cf08f6be9c1bc10c0adbabcf2f74605a9eb36c - f243bb5009259a3717d44df3caf02acc53ab49cfd2eeb6d4079d31e57638b3a6928ff3940 - d0d06545ae164278597bb8d46053084c335eaf9585ef52fc5eaf1f11718df7988d3f414d8 - b0be2e56e15d7ade9f5ee4cc7ee4a4c96f16c3a300034788ba8bf79c3125a697488006a4a - 4288c38fdc4e9891891cae036d14b83ff1523749d4fabf5c91e8d455dce2f14eae3408dce - 22f901efc7858ccad1a32af9e9796d3026ba18925103cad44cba4bdc1f3d3c23be125bba1 - 811f1e08405d5d180444147397ea0d4aebf12edff5cebc52cb05983c8d4bd2d4a93d66676 - 459ab2c5ca9d553a5c5599cc6992ed90edc939c51cc99d1820b5691914bfcab6eb8016c51 - 77e9e8f006e7893ea46b232b91b1f923b05273a927cd6d0aa14720bc149ce68f20809d6fe - 55816acf09e72c14b54637dea24eb961558a7ac726d03ced287a817fa8fea71c90bd89955 - b093d7c5908305177efa8289457190435298b2d5b2b67543e4dceaf2c8b7fdbdac12836a7 - 0ed910c34abcd10b3ddf53f640c85e35fef7e7ba4ab8c561fe9f1d763a32c65a1fbad5756 - 6bda135236257aa502116cb72c9347d10dca1b64a342b41a829cc7ba95e71499f57be2be3 - cd00000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000 - 00000000000000000000000000000000000000000000000000000005251 - """.replace(" ", "").replace("\n", "") -) - -const - ds1Root = toDigest("1a4c3cce02935defd159e4e207890ae26a325bf03e205c9ee94ca040ecce008a") - -proc fixture1() = - ## 
Inserts a OldDepositContractSnapshot fixture. - let - compressed = snappy.encode(ds1) - db = SqStoreRef.init(databaseRoot, "nbc").expect("") - kv = kvStore(db.openKvStore("key_values", true).expect("")) - kv.put(key1, compressed).expect("") - db.close() - -proc inspectDCS( - snapshot: OldDepositContractSnapshot | DepositContractSnapshot) = - ## Inspects a DCS and checks if all of its data corresponds to - ## what's encoded in ds1. - const zero = toDigest("0000000000000000000000000000000000000000000000000000000000000000") - const root = toDigest("1a4c3cce02935defd159e4e207890ae26a325bf03e205c9ee94ca040ecce008a") - const want = [ - "ca3bfce2c304c4f52e0c83f96daf8c98a05f80281b62cf08f6be9c1bc10c0adb", - "abcf2f74605a9eb36cf243bb5009259a3717d44df3caf02acc53ab49cfd2eeb6", - "d4079d31e57638b3a6928ff3940d0d06545ae164278597bb8d46053084c335ea", - "f9585ef52fc5eaf1f11718df7988d3f414d8b0be2e56e15d7ade9f5ee4cc7ee4", - "a4c96f16c3a300034788ba8bf79c3125a697488006a4a4288c38fdc4e9891891", - "cae036d14b83ff1523749d4fabf5c91e8d455dce2f14eae3408dce22f901efc7", - "858ccad1a32af9e9796d3026ba18925103cad44cba4bdc1f3d3c23be125bba18", - "11f1e08405d5d180444147397ea0d4aebf12edff5cebc52cb05983c8d4bd2d4a", - "93d66676459ab2c5ca9d553a5c5599cc6992ed90edc939c51cc99d1820b56919", - "14bfcab6eb8016c5177e9e8f006e7893ea46b232b91b1f923b05273a927cd6d0", - "aa14720bc149ce68f20809d6fe55816acf09e72c14b54637dea24eb961558a7a", - "c726d03ced287a817fa8fea71c90bd89955b093d7c5908305177efa828945719", - "0435298b2d5b2b67543e4dceaf2c8b7fdbdac12836a70ed910c34abcd10b3ddf", - "53f640c85e35fef7e7ba4ab8c561fe9f1d763a32c65a1fbad57566bda1352362", - "57aa502116cb72c9347d10dca1b64a342b41a829cc7ba95e71499f57be2be3cd", - ] - # Check eth1Block. - check($snapshot.eth1Block == "eeea1373d4aa9e099d7c9deddb694db9aeb4577755ef83f9b6345ce4357d9abf") - # Check branch. - for i in 0..want.high(): - check($snapshot.depositContractState.branch[i] == want[i]) - for i in (want.high() + 1)..31: - check(snapshot.depositContractState.branch[i] == zero) - # Check deposit_count. - check(snapshot.getDepositCountU64() == 21073) - # Check deposit root. - check(snapshot.getDepositRoot == root) - -proc inspectDCS(snapshot: DepositContractSnapshot, wantedBlockHeight: uint64) = - inspectDCS(snapshot) - check(snapshot.blockHeight == wantedBlockHeight) - -suite "DepositContractSnapshot": - setup: - randomize() - - teardown: - # removeDir(databaseRoot) - discard - - test "SSZ": - var snapshot = OldDepositContractSnapshot() - check(decodeSSZ(ds1, snapshot)) - inspectDCS(snapshot) - - test "Migration": - # Start with a fresh database. - removeDir(databaseRoot) - createDir(databaseRoot) - # Make sure there's no DepositContractSnapshot yet. - let db = BeaconChainDB.new(databaseRoot, inMemory=false) - check(db.getDepositContractSnapshot().isErr()) - # Setup fixture. - fixture1() - # Make sure there's still no DepositContractSnapshot as - # BeaconChainDB::getDepositContractSnapshot() checks only for DCSv2. - check(db.getDepositContractSnapshot().isErr()) - # Migrate DB. - db.ifNecessaryMigrateDCS do ( - d: OldDepositContractSnapshot) -> DepositContractSnapshot: - d.toDepositContractSnapshot(11052984) - # Make sure now there actually is a snapshot. - check(db.getDepositContractSnapshot().isOk()) - # Inspect content. 
- let snapshot = db.getDepositContractSnapshot().expect("") - inspectDCS(snapshot, 11052984) - - test "depositCount": - var rand = initRand(12345678) - for i in 1..1000: - let n = rand.next() - let m = n mod 4294967296'u64 - check(depositCountU64(depositCountBytes(m)) == m) - - test "isValid": - const ZERO = toDigest("0000000000000000000000000000000000000000000000000000000000000000") - # Use our hard-coded ds1 as a model. - var model: OldDepositContractSnapshot - check(decodeSSZ(ds1, model)) - # Check initialization. blockHeight cannot be validated and may be 0. - var dcs = model.toDepositContractSnapshot(11052984) - check(dcs.isValid(ds1Root)) - # Check eth1Block. - dcs.eth1Block = ZERO - check(not dcs.isValid(ds1Root)) - dcs.eth1Block = model.eth1Block - check(dcs.isValid(ds1Root)) - # Check branch. - for i in 0..len(dcs.depositContractState.branch)-1: - dcs.depositContractState.branch[i] = ZERO - check(not dcs.isValid(ds1Root)) - dcs.depositContractState.branch = model.depositContractState.branch - check(dcs.isValid(ds1Root)) - # Check deposit count. - for i in 0..len(dcs.depositContractState.deposit_count)-1: - dcs.depositContractState.deposit_count[i] = 0 - check(not dcs.isValid(ds1Root)) - dcs.depositContractState.deposit_count = - model.depositContractState.deposit_count - check(dcs.isValid(ds1Root)) - -suite "EIP-4881": - type DepositTestCase = object - deposit_data: DepositData - deposit_data_root: Eth2Digest - eth1_data: Eth1Data - block_height: uint64 - snapshot: DepositTreeSnapshot - - proc loadTestCases( - path: string - ): seq[DepositTestCase] {.raises: [ - IOError, KeyError, ValueError, YamlConstructionError, YamlParserError].} = - loadToJson(os_ops.readFile(path))[0].mapIt: - DepositTestCase( - deposit_data: DepositData( - pubkey: ValidatorPubKey.fromHex( - it["deposit_data"]["pubkey"].getStr()).expect("valid"), - withdrawal_credentials: Eth2Digest.fromHex( - it["deposit_data"]["withdrawal_credentials"].getStr()), - amount: Gwei(Base10.decode(uint64, - it["deposit_data"]["amount"].getStr()).expect("valid")), - signature: ValidatorSig.fromHex( - it["deposit_data"]["signature"].getStr()).expect("valid")), - deposit_data_root: Eth2Digest.fromHex(it["deposit_data_root"].getStr()), - eth1_data: Eth1Data( - deposit_root: Eth2Digest.fromHex( - it["eth1_data"]["deposit_root"].getStr()), - deposit_count: Base10.decode(uint64, - it["eth1_data"]["deposit_count"].getStr()).expect("valid"), - block_hash: Eth2Digest.fromHex( - it["eth1_data"]["block_hash"].getStr())), - block_height: uint64(it["block_height"].getInt()), - snapshot: DepositTreeSnapshot( - finalized: it["snapshot"]["finalized"].foldl((block: - check: a[].add Eth2Digest.fromHex(b.getStr()) - a), newClone default(List[ - Eth2Digest, Limit DEPOSIT_CONTRACT_TREE_DEPTH]))[], - deposit_root: Eth2Digest.fromHex( - it["snapshot"]["deposit_root"].getStr()), - deposit_count: uint64( - it["snapshot"]["deposit_count"].getInt()), - execution_block_hash: Eth2Digest.fromHex( - it["snapshot"]["execution_block_hash"].getStr()), - execution_block_height: uint64( - it["snapshot"]["execution_block_height"].getInt()))) - - const path = currentSourcePath.rsplit(DirSep, 1)[0]/ - ".."/"vendor"/"EIPs"/"assets"/"eip-4881"/"test_cases.yaml" - let testCases = loadTestCases(path) - for testCase in testCases: - check testCase.deposit_data_root == hash_tree_root(testCase.deposit_data) - - test "empty_root": - var empty = DepositsMerkleizer.init() - check empty.getDepositsRoot() == Eth2Digest.fromHex( - 
"0xd70a234731285c6804c2a4f56711ddb8c82c99740f207854891028af34e27e5e") - - test "deposit_cases": - var tree = DepositsMerkleizer.init() - for testCase in testCases: - tree.addChunk testCase.deposit_data_root.data - var snapshot = DepositsMerkleizer.init(tree.toDepositContractState()) - let expected = testCase.eth1_data.deposit_root - check: - snapshot.getDepositsRoot() == expected - tree.getDepositsRoot() == expected - - test "finalization": - var tree = DepositsMerkleizer.init() - for testCase in testCases[0 ..< 128]: - tree.addChunk testCase.deposit_data_root.data - let originalRoot = tree.getDepositsRoot() - check originalRoot == testCases[127].eth1_data.deposit_root - var finalized = DepositsMerkleizer.init() - for testCase in testCases[0 .. 100]: - finalized.addChunk testCase.deposit_data_root.data - var snapshot = finalized.getTreeSnapshot( - testCases[100].eth1_data.block_hash, testCases[100].block_height) - check snapshot == testCases[100].snapshot - var copy = DepositsMerkleizer.init(snapshot).expect("just produced") - for testCase in testCases[101 ..< 128]: - copy.addChunk testCase.deposit_data_root.data - check tree.getDepositsRoot() == copy.getDepositsRoot() - for testCase in testCases[101 .. 105]: - finalized.addChunk testCase.deposit_data_root.data - snapshot = finalized.getTreeSnapshot( - testCases[105].eth1_data.block_hash, testCases[105].block_height) - copy = DepositsMerkleizer.init(snapshot).expect("just produced") - var fullTreeCopy = DepositsMerkleizer.init() - for testCase in testCases[0 .. 105]: - fullTreeCopy.addChunk testCase.deposit_data_root.data - let - depositRoots = testCases[106 ..< 128].mapIt(it.deposit_data_root) - proofs1 = copy.addChunksAndGenMerkleProofs(depositRoots) - proofs2 = fullTreeCopy.addChunksAndGenMerkleProofs(depositRoots) - check proofs1 == proofs2 - - test "snapshot_cases": - var tree = DepositsMerkleizer.init() - for testCase in testCases: - tree.addChunk testCase.deposit_data_root.data - let snapshot = tree.getTreeSnapshot( - testCase.eth1_data.block_hash, testCase.block_height) - check snapshot == testCase.snapshot - - test "invalid_snapshot": - let invalidSnapshot = DepositTreeSnapshot( - finalized: default(FinalizedDepositTreeBranch), - deposit_root: ZERO_HASH, - deposit_count: 0, - execution_block_hash: ZERO_HASH, - execution_block_height: 0) - check DepositsMerkleizer.init(invalidSnapshot).isNone() diff --git a/tests/test_discovery.nim b/tests/test_discovery.nim index ea640f8283..d4b0162445 100644 --- a/tests/test_discovery.nim +++ b/tests/test_discovery.nim @@ -5,12 +5,12 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import - testutils/unittests, - chronos, eth/p2p/discoveryv5/enr, + chronos/unittest2/asynctests, + chronos, eth/enr/enr, ../beacon_chain/spec/[forks, network], ../beacon_chain/networking/[eth2_network, eth2_discovery], ./testutil @@ -38,13 +38,14 @@ proc generateNode(rng: ref HmacDrbgContext, port: Port, const noSyncnetsPreference = SyncnetBits() const noCgcnetsPreference = CgcBits() -procSuite "Eth2 specific discovery tests": - let - rng = HmacDrbgContext.new() - enrForkId = ENRForkID( - fork_digest: ForkDigest([byte 0, 1, 2, 3]), - next_fork_version: Version([byte 0, 0, 0, 0]), - next_fork_epoch: Epoch(0)) +suite "Eth2 specific discovery tests": + setup: + let + rng = HmacDrbgContext.new() + enrForkId = ENRForkID( + fork_digest: ForkDigest([byte 0, 1, 2, 3]), + next_fork_version: Version([byte 0, 0, 0, 0]), + next_fork_epoch: Epoch(0)) asyncTest "Subnet query": var attnets: AttnetBits @@ -234,7 +235,7 @@ suite "Discovery fork ID": forkId = ENRForkID( fork_digest: fork_digest, next_fork_version: next_fork_version, - next_fork_epoch: FAR_FUTURE_EPOCH) + next_fork_epoch: cfg.ALTAIR_FORK_EPOCH) for epoch in GENESIS_EPOCH ..< cfg.ALTAIR_FORK_EPOCH - 1: check cfg.getDiscoveryForkID(epoch, genesis_validators_root) == forkId forkId @@ -264,7 +265,7 @@ suite "Discovery fork ID": forkId = ENRForkID( fork_digest: fork_digest, next_fork_version: next_fork_version, - next_fork_epoch: FAR_FUTURE_EPOCH) + next_fork_epoch: cfg.BELLATRIX_FORK_EPOCH) for epoch in cfg.ALTAIR_FORK_EPOCH ..< cfg.BELLATRIX_FORK_EPOCH - 1: check cfg.getDiscoveryForkID(epoch, genesis_validators_root) == forkId forkId diff --git a/tests/test_el_conf.nim b/tests/test_el_conf.nim index fc8e948e61..ee4c836710 100644 --- a/tests/test_el_conf.nim +++ b/tests/test_el_conf.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -22,7 +22,7 @@ type proc loadExampleConfig( content: string, cmdLine = newSeq[string]() -): ExampleConfigFile {.raises: [ConfigurationError, OSError].} = +): ExampleConfigFile {.raises: [ConfigurationError].} = ExampleConfigFile.load( cmdLine = cmdLine, secondarySources = proc ( diff --git a/tests/test_el_manager.nim b/tests/test_el_manager.nim index 7845597c7d..6e84cc2612 100644 --- a/tests/test_el_manager.nim +++ b/tests/test_el_manager.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -10,7 +10,7 @@ import unittest2, - ../beacon_chain/el/[el_conf, el_manager], + ../beacon_chain/el/el_conf, ./testutil suite "Eth1 monitor": @@ -32,31 +32,3 @@ suite "Eth1 monitor": unspecifiedProtocolUrl == "ws://localhost:8545" gethWsUrl == "ws://localhost:8545" - - test "Deposits chain": - var - chain = Eth1Chain() - depositIndex = 0.uint64 - for i in 0 ..< (MAX_DEPOSITS + 1) * 3: - var deposits = newSeqOfCap[DepositData](i) - for _ in 0 ..< i mod (MAX_DEPOSITS + 1): - deposits.add DepositData(amount: depositIndex.Gwei) - inc depositIndex - - const interval = defaultRuntimeConfig.SECONDS_PER_ETH1_BLOCK - chain.blocks.addLast Eth1Block( - number: i.Eth1BlockNumber, - timestamp: i.Eth1BlockTimestamp * interval, - deposits: deposits, - depositCount: depositIndex) - - proc doTest(first, last: uint64) = - var idx = first - for data in chain.getDepositsRange(first, last): - check data.amount == idx.Gwei - inc idx - check idx == last - - for i in 0 .. depositIndex: - for j in i .. depositIndex: - doTest(i, j) \ No newline at end of file diff --git a/tests/test_engine_api_conversions.nim b/tests/test_engine_api_conversions.nim index 49352d0dc4..77afeaf82d 100644 --- a/tests/test_engine_api_conversions.nim +++ b/tests/test_engine_api_conversions.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -22,7 +22,7 @@ from ../beacon_chain/spec/datatypes/bellatrix import from ../beacon_chain/spec/datatypes/capella import ExecutionPayload from ../beacon_chain/spec/datatypes/deneb import ExecutionPayload from ../beacon_chain/spec/datatypes/electra import - ConsolidationRequest, DepositRequest, ExecutionPayload, WithdrawalRequest + ConsolidationRequest, DepositRequest, WithdrawalRequest from ../beacon_chain/spec/digest import Eth2Digest from ../beacon_chain/spec/presets import MAX_BYTES_PER_TRANSACTION, MAX_EXTRA_DATA_BYTES, MAX_TRANSACTIONS_PER_PAYLOAD @@ -464,7 +464,7 @@ suite "Engine API conversions": for blockBody in blockBodies: check: blockBody.execution_payload == asConsensusType( - asEngineExecutionPayload(blockBody)) + asEngineExecutionPayload(blockBody.execution_payload)) test "Roundtrip engine RPC V2 and capella ExecutionPayload representations": # Each Eth2Digest field is chosen randomly. Each uint64 field is random, @@ -1009,7 +1009,7 @@ suite "Engine API conversions": for blockBody in blockBodies: check: blockBody.execution_payload == asConsensusType( - asEngineExecutionPayload(blockBody)) + asEngineExecutionPayload(blockBody.execution_payload)) test "Roundtrip engine RPC V3 and deneb ExecutionPayload representations": # Each Eth2Digest field is chosen randomly. 
Each uint64 field is random, @@ -1604,4 +1604,4 @@ suite "Engine API conversions": for blockBody in blockBodies: check: blockBody.execution_payload == asConsensusType( - asEngineExecutionPayload(blockBody)) + asEngineExecutionPayload(blockBody.execution_payload)) diff --git a/tests/test_serialization.nim b/tests/test_eth2_rest_serialization.nim similarity index 63% rename from tests/test_serialization.nim rename to tests/test_eth2_rest_serialization.nim index 234a459a0d..e05943e793 100644 --- a/tests/test_serialization.nim +++ b/tests/test_eth2_rest_serialization.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -9,12 +9,140 @@ {.used.} import - presto/client, - testutils/unittests, chronicles, - ../beacon_chain/spec/eth2_apis/[eth2_rest_serialization, rest_types], - ./testutil + std/strutils, + unittest2, + stew/byteutils, + json_serialization/std/tables, + ../beacon_chain/spec/eth2_apis/eth2_rest_serialization + +template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0] + +const denebSignedContents = staticRead(sourceDir & "/test_files/denebSignedContents.json") + +# Examples from: +# https://github.com/ethereum/remote-signing-api/blob/87a392deb4e43209ca896dde6b4ec40bef7ee02c/signing/paths/sign.yaml +# via https://jsonformatter.org/yaml-to-json with pre-bellatrix removed +const Web3SignerExamples = staticRead(sourceDir & "/test_files/web3signer.examples.json") + +# Can't be in same namespace as some other KZG-related fromHex overloads due to +# https://github.com/nim-lang/Nim/issues/22861 +func fromHex(T: typedesc[KzgCommitment], s: string): T {. + raises: [ValueError].} = + var res: T + hexToByteArray(s, res.bytes) + res + +suite "REST encoding and decoding": + test "DenebSignedBlockContents decoding": + let blck = RestJson.decode(denebSignedContents, DenebSignedBlockContents) + check: + hash_tree_root(blck.signed_block.message) == Eth2Digest.fromHex( + "0xc67166e600d76d9d129244d10e4f35279d75d800fb39a5ce35e98328d53939da") + blck.signed_block.root == Eth2Digest.fromHex( + "0xc67166e600d76d9d129244d10e4f35279d75d800fb39a5ce35e98328d53939da") + blck.signed_block.signature == ValidatorSig.fromHex( + "0x8e2cd6cf4457825818eb380f1ea74f2fc99665041194ab5bcbdbf96f2e22bad4376d2a94f69d762c999ffd500e2525ab0561b01a79158456c83cf5bf0f2104e26f7b0d22f41dcc8f49a0e1cc29bb09aee1c548903fa04bdfcd20603c400d948d")[] + blck.kzg_proofs.len == 0 + blck.blobs.len == 0 + blck == RestJson.decode(RestJson.encode(blck), DenebSignedBlockContents) + # SSZ encoding is also used in rest! 
+ blck == SSZ.decode(SSZ.encode(blck), DenebSignedBlockContents) + + test "KzgCommitment": + let + zeroString = + "\"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"" + randString = + "\"0xe2822fdd03685968091c79b1f81d17ed646196c920baecf927a6abbe45cd2d930a692e85ff5d96ebe36d99a57c74d5cb\"" + zeroKzgCommitment = KzgCommitment.fromHex( + "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") + randKzgCommitment = KzgCommitment.fromHex( + "0xe2822fdd03685968091c79b1f81d17ed646196c920baecf927a6abbe45cd2d930a692e85ff5d96ebe36d99a57c74d5cb") + + check: + RestJson.decode(zeroString, KzgCommitment) == zeroKzgCommitment + RestJson.decode(zeroString, KzgCommitment) != randKzgCommitment + RestJson.decode(randString, KzgCommitment) != zeroKzgCommitment + RestJson.decode(randString, KzgCommitment) == randKzgCommitment + + RestJson.encode(zeroKzgCommitment) == zeroString + RestJson.encode(zeroKzgCommitment) != randString + RestJson.encode(randKzgCommitment) != zeroString + RestJson.encode(randKzgCommitment) == randString + + test "KzgProof": + let + zeroString = + "\"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"" + randString = + "\"0xe2822fdd03685968091c79b1f81d17ed646196c920baecf927a6abbe45cd2d930a692e85ff5d96ebe36d99a57c74d5cb\"" + zeroKzgProof = KzgProof.fromHex( + "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") + randKzgProof = KzgProof.fromHex( + "0xe2822fdd03685968091c79b1f81d17ed646196c920baecf927a6abbe45cd2d930a692e85ff5d96ebe36d99a57c74d5cb") + + check: + RestJson.decode(zeroString, KzgProof) == zeroKzgProof + RestJson.decode(zeroString, KzgProof) != randKzgProof + RestJson.decode(randString, KzgProof) != zeroKzgProof + RestJson.decode(randString, KzgProof) == randKzgProof + + RestJson.encode(zeroKzgProof) == zeroString + RestJson.encode(zeroKzgProof) != randString + RestJson.encode(randKzgProof) != zeroString + RestJson.encode(randKzgProof) == randString + + test "Blob": + let + zeroBlob = new Blob + nonzeroBlob = new Blob + blobLen = distinctBase(nonzeroBlob[]).lenu64 + + for i in 0 ..< blobLen: + nonzeroBlob[i] = 17.byte + + let + zeroString = newClone(RestJson.encode(zeroBlob[])) + nonzeroString = newClone(RestJson.encode(nonzeroBlob[])) + + let + zeroBlobRoundTrip = + newClone(RestJson.decode(zeroString[], Blob)) + nonzeroBlobRoundTrip = + newClone(RestJson.decode(nonzeroString[], Blob)) + + check: + zeroString[].startsWith "\"0x0000000000000000000000000000000000000000000000000" + nonzeroString[].startsWith "\"0x111111111111111111111111111111111111111111111111" + zeroString[].endsWith "0000000000000000000000000000000000000000000000\"" + nonzeroString[].endsWith "1111111111111111111111111111111111111111111111\"" + zeroString[].lenu64 == 2*blobLen + 4 # quotation marks and 0x prefix + nonzeroString[].lenu64 == 2*blobLen + 4 # quotation marks and 0x prefix + zeroBlob[] == zeroBlobRoundTrip[] + nonzeroBlob[] == nonzeroBlobRoundTrip[] + zeroBlob[] != nonzeroBlob[] + + test "Validator pubkey hack": + + let + encoded = """ + { + "pubkey": "0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95", + "withdrawal_credentials": "0x00f50428677c60f997aadeab24aabf7fceaef491c96a52b463ae91f95611cf71", + "effective_balance": "32000000000", + "slashed": false, + "activation_eligibility_epoch": "0", + "activation_epoch": "0", + "exit_epoch": 
"18446744073709551615", + "withdrawable_epoch": "18446744073709551615" + }""" + + let validator = RestJson.decode(encoded, Validator) + check: + validator.pubkey == ValidatorPubKey.fromHex( + "0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95")[] + validator.exit_epoch == FAR_FUTURE_EPOCH -suite "Serialization/deserialization test suite": test "RestErrorMessage parser tests": const GoodTestVectors = [ ( @@ -130,7 +258,7 @@ suite "Serialization/deserialization test suite": proc `==`(a: RestApiResponse, b: string): bool = case a.kind of RestApiResponseKind.Content: - a.content.data.bytesToString() == b + string.fromBytes(a.content.data) == b of RestApiResponseKind.Error: a.errobj.message == b else: @@ -272,3 +400,15 @@ suite "Serialization/deserialization test suite": for vector in InvalidCharsVectors2: let res = strictParse(vector, UInt256, 2) check res.isErr() + + let examples = Json.decode(Web3SignerExamples, Table[string, Table[string, JsonString]]) + + for name, example in examples: + test "remote signing example " & name: + let + decoded = RestJson.decode(string(example["value"]), Web3SignerRequest) + encoded = RestJson.encode(decoded) + recoded = RestJson.encode(RestJson.decode(encoded, Web3SignerRequest)) + + check: + encoded == recoded diff --git a/tests/test_eth2_ssz_serialization.nim b/tests/test_eth2_ssz_serialization.nim index 682f3e2a0b..8bfe9eb8a8 100644 --- a/tests/test_eth2_ssz_serialization.nim +++ b/tests/test_eth2_ssz_serialization.nim @@ -11,8 +11,7 @@ import std/strutils, unittest2, - ../beacon_chain/spec/datatypes/[phase0, altair, bellatrix, deneb], - ../beacon_chain/spec/eth2_ssz_serialization, + ../beacon_chain/spec/[eth2_ssz_serialization, forks], ./consensus_spec/os_ops static: @@ -47,10 +46,9 @@ suite "Specific field types": check: t.root.isZero - testit(phase0.SignedBeaconBlock) - testit(phase0.TrustedSignedBeaconBlock) - testit(altair.SignedBeaconBlock) - testit(altair.TrustedSignedBeaconBlock) + ConsensusFork.withAll: + testit(consensusFork.SignedBeaconBlock) + testit(consensusFork.TrustedSignedBeaconBlock) suite "Size bounds": test "SignedBeaconBlockDeneb": @@ -68,7 +66,12 @@ suite "Size bounds": "{min=" & $T.minSize & ", max=" & $T.maxSize & "}\n" loc[^1].add "[element]" byte.record() - elif T is ExecutionAddress|BloomLogs: + elif T is ExecutionAddress: + res.add loc.join(".") & "[" & $sizeof(T) & "]: SszLengthBounds" & + "{min=" & $T.minSize & ", max=" & $T.maxSize & "}\n" + loc[^1].add "[element]" + byte.record() + elif T is BloomLogs: res.add loc.join(".") & "[" & $T.data.len & "]: SszLengthBounds" & "{min=" & $T.minSize & ", max=" & $T.maxSize & "}\n" loc[^1].add "[element]" diff --git a/tests/test_rest_json_serialization.nim b/tests/test_files/denebSignedContents.json similarity index 71% rename from tests/test_rest_json_serialization.nim rename to tests/test_files/denebSignedContents.json index 698f39fc5e..1ebafdce5b 100644 --- a/tests/test_rest_json_serialization.nim +++ b/tests/test_files/denebSignedContents.json @@ -1,20 +1,3 @@ -# beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. 
- -{.push raises: [].} -{.used.} - -import - unittest2, - ../beacon_chain/spec/eth2_apis/eth2_rest_serialization - -from std/strutils import endsWith, startsWith - -const denebSignedContents = """ { "signed_block": { "message": { @@ -205,140 +188,3 @@ const denebSignedContents = """ "kzg_proofs": [], "blobs": [] } -""" - -# Can't be in same namespace as some other KZG-related fromHex overloads due to -# https://github.com/nim-lang/Nim/issues/22861 -from stew/byteutils import hexToByteArray -func fromHex(T: typedesc[KzgCommitment], s: string): T {. - raises: [ValueError].} = - var res: T - hexToByteArray(s, res.bytes) - res - -suite "REST JSON encoding and decoding": - test "DenebSignedBlockContents decoding": - let blck = RestJson.decode( - denebSignedContents, DenebSignedBlockContents, requireAllFields = true, - allowUnknownFields = true) - check: - hash_tree_root(blck.signed_block.message) == Eth2Digest.fromHex( - "0xc67166e600d76d9d129244d10e4f35279d75d800fb39a5ce35e98328d53939da") - blck.signed_block.signature == ValidatorSig.fromHex( - "0x8e2cd6cf4457825818eb380f1ea74f2fc99665041194ab5bcbdbf96f2e22bad4376d2a94f69d762c999ffd500e2525ab0561b01a79158456c83cf5bf0f2104e26f7b0d22f41dcc8f49a0e1cc29bb09aee1c548903fa04bdfcd20603c400d948d")[] - blck.kzg_proofs.len == 0 - blck.blobs.len == 0 - - test "KzgCommitment": - let - zeroString = - "\"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"" - randString = - "\"0xe2822fdd03685968091c79b1f81d17ed646196c920baecf927a6abbe45cd2d930a692e85ff5d96ebe36d99a57c74d5cb\"" - zeroKzgCommitment = KzgCommitment.fromHex( - "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") - randKzgCommitment = KzgCommitment.fromHex( - "0xe2822fdd03685968091c79b1f81d17ed646196c920baecf927a6abbe45cd2d930a692e85ff5d96ebe36d99a57c74d5cb") - - check: - RestJson.decode( - zeroString, KzgCommitment, requireAllFields = true, - allowUnknownFields = true) == zeroKzgCommitment - RestJson.decode( - zeroString, KzgCommitment, requireAllFields = true, - allowUnknownFields = true) != randKzgCommitment - RestJson.decode( - randString, KzgCommitment, requireAllFields = true, - allowUnknownFields = true) != zeroKzgCommitment - RestJson.decode( - randString, KzgCommitment, requireAllFields = true, - allowUnknownFields = true) == randKzgCommitment - - RestJson.encode(zeroKzgCommitment) == zeroString - RestJson.encode(zeroKzgCommitment) != randString - RestJson.encode(randKzgCommitment) != zeroString - RestJson.encode(randKzgCommitment) == randString - - test "KzgProof": - let - zeroString = - "\"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"" - randString = - "\"0xe2822fdd03685968091c79b1f81d17ed646196c920baecf927a6abbe45cd2d930a692e85ff5d96ebe36d99a57c74d5cb\"" - zeroKzgProof = KzgProof.fromHex( - "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") - randKzgProof = KzgProof.fromHex( - "0xe2822fdd03685968091c79b1f81d17ed646196c920baecf927a6abbe45cd2d930a692e85ff5d96ebe36d99a57c74d5cb") - - check: - RestJson.decode( - zeroString, KzgProof, requireAllFields = true, - allowUnknownFields = true) == zeroKzgProof - RestJson.decode( - zeroString, KzgProof, requireAllFields = true, - allowUnknownFields = true) != randKzgProof - RestJson.decode( - randString, KzgProof, requireAllFields = true, - allowUnknownFields = true) != zeroKzgProof - RestJson.decode( - randString, KzgProof, requireAllFields = 
true, - allowUnknownFields = true) == randKzgProof - - RestJson.encode(zeroKzgProof) == zeroString - RestJson.encode(zeroKzgProof) != randString - RestJson.encode(randKzgProof) != zeroString - RestJson.encode(randKzgProof) == randString - - test "Blob": - let - zeroBlob = new Blob - nonzeroBlob = new Blob - blobLen = distinctBase(nonzeroBlob[]).lenu64 - - for i in 0 ..< blobLen: - nonzeroBlob[i] = 17.byte - - let - zeroString = newClone(RestJson.encode(zeroBlob[])) - nonzeroString = newClone(RestJson.encode(nonzeroBlob[])) - - let - zeroBlobRoundTrip = - newClone(RestJson.decode( - zeroString[], Blob, requireAllFields = true, allowUnknownFields = true)) - nonzeroBlobRoundTrip = - newClone(RestJson.decode( - nonzeroString[], Blob, requireAllFields = true, - allowUnknownFields = true)) - - check: - zeroString[].startsWith "\"0x0000000000000000000000000000000000000000000000000" - nonzeroString[].startsWith "\"0x111111111111111111111111111111111111111111111111" - zeroString[].endsWith "0000000000000000000000000000000000000000000000\"" - nonzeroString[].endsWith "1111111111111111111111111111111111111111111111\"" - zeroString[].lenu64 == 2*blobLen + 4 # quotation marks and 0x prefix - nonzeroString[].lenu64 == 2*blobLen + 4 # quotation marks and 0x prefix - zeroBlob[] == zeroBlobRoundTrip[] - nonzeroBlob[] == nonzeroBlobRoundTrip[] - zeroBlob[] != nonzeroBlob[] - - test "Validator pubkey hack": - - let - encoded = """ - { - "pubkey": "0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95", - "withdrawal_credentials": "0x00f50428677c60f997aadeab24aabf7fceaef491c96a52b463ae91f95611cf71", - "effective_balance": "32000000000", - "slashed": false, - "activation_eligibility_epoch": "0", - "activation_epoch": "0", - "exit_epoch": "18446744073709551615", - "withdrawable_epoch": "18446744073709551615" - }""" - - let validator = RestJson.decode(encoded, Validator) - check: - validator.pubkey == ValidatorPubKey.fromHex( - "0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95")[] - validator.exit_epoch == FAR_FUTURE_EPOCH diff --git a/tests/test_files/web3signer.examples.json b/tests/test_files/web3signer.examples.json new file mode 100644 index 0000000000..3154055a17 --- /dev/null +++ b/tests/test_files/web3signer.examples.json @@ -0,0 +1,526 @@ +{ + "BLOCK_V2 (ELECTRA)": { + "value": { + "type": "BLOCK_V2", + "signingRoot": "0xaa2e0c465c1a45d7b6637fcce4ad6ceb71fc12064b548078d619a411f0de8adc", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "beacon_block": { + "version": "ELECTRA", + "block_header": { + "slot": "0", + "proposer_index": "4666673844721362956", + "parent_root": "0x367cbd40ac7318427aadb97345a91fa2e965daf3158d7f1846f1306305f41bef", + "state_root": "0xfd18cf40cc907a739be483f1ca0ee23ad65cdd3df23205eabc6d660a75d1f54e", + "body_root": "0xa759d8029a69d4fdd8b3996086e9722983977e4efc1f12f4098ea3d93e868a6b" + } + } + } + }, + "BLOCK_V2 (DENEB)": { + "value": { + "type": "BLOCK_V2", + "signingRoot": "0xaa2e0c465c1a45d7b6637fcce4ad6ceb71fc12064b548078d619a411f0de8adc", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "beacon_block": { + "version": "DENEB", + 
"block_header": { + "slot": "0", + "proposer_index": "4666673844721362956", + "parent_root": "0x367cbd40ac7318427aadb97345a91fa2e965daf3158d7f1846f1306305f41bef", + "state_root": "0xfd18cf40cc907a739be483f1ca0ee23ad65cdd3df23205eabc6d660a75d1f54e", + "body_root": "0xa759d8029a69d4fdd8b3996086e9722983977e4efc1f12f4098ea3d93e868a6b" + } + } + } + }, + "BLOCK_V2 (CAPELLA)": { + "value": { + "type": "BLOCK_V2", + "signingRoot": "0xaa2e0c465c1a45d7b6637fcce4ad6ceb71fc12064b548078d619a411f0de8adc", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "beacon_block": { + "version": "CAPELLA", + "block_header": { + "slot": "0", + "proposer_index": "4666673844721362956", + "parent_root": "0x367cbd40ac7318427aadb97345a91fa2e965daf3158d7f1846f1306305f41bef", + "state_root": "0xfd18cf40cc907a739be483f1ca0ee23ad65cdd3df23205eabc6d660a75d1f54e", + "body_root": "0xa759d8029a69d4fdd8b3996086e9722983977e4efc1f12f4098ea3d93e868a6b" + } + } + } + }, + "BLOCK_V2 (BELLATRIX)": { + "value": { + "type": "BLOCK_V2", + "signingRoot": "0x26d0ee0b6c2261cd6010112a024de4f3d2e1e9844d11d60b057fac344c745464", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "beacon_block": { + "version": "BELLATRIX", + "block_header": { + "slot": "0", + "proposer_index": "4666673844721362956", + "parent_root": "0x367cbd40ac7318427aadb97345a91fa2e965daf3158d7f1846f1306305f41bef", + "state_root": "0xfd18cf40cc907a739be483f1ca0ee23ad65cdd3df23205eabc6d660a75d1f54e", + "body_root": "0xe74b0fc13f19ae2077403afa03fdc155484f22d05d93eb084473951bb3a8d1ae" + } + } + } + }, + "ATTESTATION": { + "value": { + "type": "ATTESTATION", + "signingRoot": "0x548c9a015f4c96cb8b1ddbbdfca85846f85bf9f344a434c140f378cdfb5341f0", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "attestation": { + "slot": "32", + "index": "0", + "beacon_block_root": "0xb2eedb01adbd02c828d5eec09b4c70cbba12ffffba525ebf48aca33028e8ad89", + "source": { + "epoch": "0", + "root": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "target": { + "epoch": "0", + "root": "0xb2eedb01adbd02c828d5eec09b4c70cbba12ffffba525ebf48aca33028e8ad89" + } + } + } + }, + "AGGREGATION_SLOT": { + "value": { + "type": "AGGREGATION_SLOT", + "signingRoot": "0x1fb90dd6e8b2670e6949347bc4eaacd37f9b6cc6e42c559973e362c800e853b9", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "aggregation_slot": { + "slot": "119" + } + } + }, + "AGGREGATE_AND_PROOF_V2 (ELECTRA)": { + "value": { + "type": "AGGREGATE_AND_PROOF_V2", + "signingRoot": "0x247535806f76143fe4798427b2a79b85340c1a029a9e08581995b60e4e45c9e0", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "aggregate_and_proof": { + "version": "ELECTRA", + "data": { + "aggregator_index": 
"1", + "aggregate": { + "aggregation_bits": "0x0000000000000000000000000000000000000000000101", + "data": { + "slot": "0", + "index": "0", + "beacon_block_root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd", + "source": { + "epoch": "0", + "root": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "target": { + "epoch": "0", + "root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd" + } + }, + "signature": "0xa627242e4a5853708f4ebf923960fb8192f93f2233cd347e05239d86dd9fb66b721ceec1baeae6647f498c9126074f1101a87854d674b6eebc220fd8c3d8405bdfd8e286b707975d9e00a56ec6cbbf762f23607d490f0bbb16c3e0e483d51875", + "committee_bits": "0x0000000000000001" + }, + "selection_proof": "0xa63f73a03f1f42b1fd0a988b614d511eb346d0a91c809694ef76df5ae021f0f144d64e612d735bc8820950cf6f7f84cd0ae194bfe3d4242fe79688f83462e3f69d9d33de71aab0721b7dab9d6960875e5fdfd26b171a75fb51af822043820c47" + } + } + } + }, + "AGGREGATE_AND_PROOF_V2 (DENEB)": { + "value": { + "type": "AGGREGATE_AND_PROOF_V2", + "signingRoot": "0x8d777156899cb02e0e66217afd832886239752a59a393218f6c603bcf615b4f8", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "aggregate_and_proof": { + "version": "DENEB", + "data": { + "aggregator_index": "1", + "aggregate": { + "aggregation_bits": "0x00000101", + "data": { + "slot": "0", + "index": "0", + "beacon_block_root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd", + "source": { + "epoch": "0", + "root": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "target": { + "epoch": "0", + "root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd" + } + }, + "signature": "0xa627242e4a5853708f4ebf923960fb8192f93f2233cd347e05239d86dd9fb66b721ceec1baeae6647f498c9126074f1101a87854d674b6eebc220fd8c3d8405bdfd8e286b707975d9e00a56ec6cbbf762f23607d490f0bbb16c3e0e483d51875" + }, + "selection_proof": "0xa63f73a03f1f42b1fd0a988b614d511eb346d0a91c809694ef76df5ae021f0f144d64e612d735bc8820950cf6f7f84cd0ae194bfe3d4242fe79688f83462e3f69d9d33de71aab0721b7dab9d6960875e5fdfd26b171a75fb51af822043820c47" + } + } + } + }, + "AGGREGATE_AND_PROOF_V2 (CAPELLA)": { + "value": { + "type": "AGGREGATE_AND_PROOF_V2", + "signingRoot": "0x8d777156899cb02e0e66217afd832886239752a59a393218f6c603bcf615b4f8", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "aggregate_and_proof": { + "version": "CAPELLA", + "data": { + "aggregator_index": "1", + "aggregate": { + "aggregation_bits": "0x00000101", + "data": { + "slot": "0", + "index": "0", + "beacon_block_root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd", + "source": { + "epoch": "0", + "root": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "target": { + "epoch": "0", + "root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd" + } + }, + "signature": "0xa627242e4a5853708f4ebf923960fb8192f93f2233cd347e05239d86dd9fb66b721ceec1baeae6647f498c9126074f1101a87854d674b6eebc220fd8c3d8405bdfd8e286b707975d9e00a56ec6cbbf762f23607d490f0bbb16c3e0e483d51875" + }, + "selection_proof": 
"0xa63f73a03f1f42b1fd0a988b614d511eb346d0a91c809694ef76df5ae021f0f144d64e612d735bc8820950cf6f7f84cd0ae194bfe3d4242fe79688f83462e3f69d9d33de71aab0721b7dab9d6960875e5fdfd26b171a75fb51af822043820c47" + } + } + } + }, + "AGGREGATE_AND_PROOF_V2 (BELLATRIX)": { + "value": { + "type": "AGGREGATE_AND_PROOF_V2", + "signingRoot": "0x8d777156899cb02e0e66217afd832886239752a59a393218f6c603bcf615b4f8", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "aggregate_and_proof": { + "version": "BELLATRIX", + "data": { + "aggregator_index": "1", + "aggregate": { + "aggregation_bits": "0x00000101", + "data": { + "slot": "0", + "index": "0", + "beacon_block_root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd", + "source": { + "epoch": "0", + "root": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "target": { + "epoch": "0", + "root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd" + } + }, + "signature": "0xa627242e4a5853708f4ebf923960fb8192f93f2233cd347e05239d86dd9fb66b721ceec1baeae6647f498c9126074f1101a87854d674b6eebc220fd8c3d8405bdfd8e286b707975d9e00a56ec6cbbf762f23607d490f0bbb16c3e0e483d51875" + }, + "selection_proof": "0xa63f73a03f1f42b1fd0a988b614d511eb346d0a91c809694ef76df5ae021f0f144d64e612d735bc8820950cf6f7f84cd0ae194bfe3d4242fe79688f83462e3f69d9d33de71aab0721b7dab9d6960875e5fdfd26b171a75fb51af822043820c47" + } + } + } + }, + "AGGREGATE_AND_PROOF_V2 (ALTAIR)": { + "value": { + "type": "AGGREGATE_AND_PROOF_V2", + "signingRoot": "0x8d777156899cb02e0e66217afd832886239752a59a393218f6c603bcf615b4f8", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "aggregate_and_proof": { + "version": "ALTAIR", + "data": { + "aggregator_index": "1", + "aggregate": { + "aggregation_bits": "0x00000101", + "data": { + "slot": "0", + "index": "0", + "beacon_block_root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd", + "source": { + "epoch": "0", + "root": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "target": { + "epoch": "0", + "root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd" + } + }, + "signature": "0xa627242e4a5853708f4ebf923960fb8192f93f2233cd347e05239d86dd9fb66b721ceec1baeae6647f498c9126074f1101a87854d674b6eebc220fd8c3d8405bdfd8e286b707975d9e00a56ec6cbbf762f23607d490f0bbb16c3e0e483d51875" + }, + "selection_proof": "0xa63f73a03f1f42b1fd0a988b614d511eb346d0a91c809694ef76df5ae021f0f144d64e612d735bc8820950cf6f7f84cd0ae194bfe3d4242fe79688f83462e3f69d9d33de71aab0721b7dab9d6960875e5fdfd26b171a75fb51af822043820c47" + } + } + } + }, + "AGGREGATE_AND_PROOF_V2 (PHASE 0)": { + "value": { + "type": "AGGREGATE_AND_PROOF_V2", + "signingRoot": "0x8d777156899cb02e0e66217afd832886239752a59a393218f6c603bcf615b4f8", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "aggregate_and_proof": { + "version": "PHASE0", + "data": { + "aggregator_index": "1", + "aggregate": { + "aggregation_bits": "0x00000101", + "data": { + "slot": "0", + "index": "0", + "beacon_block_root": 
"0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd", + "source": { + "epoch": "0", + "root": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "target": { + "epoch": "0", + "root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd" + } + }, + "signature": "0xa627242e4a5853708f4ebf923960fb8192f93f2233cd347e05239d86dd9fb66b721ceec1baeae6647f498c9126074f1101a87854d674b6eebc220fd8c3d8405bdfd8e286b707975d9e00a56ec6cbbf762f23607d490f0bbb16c3e0e483d51875" + }, + "selection_proof": "0xa63f73a03f1f42b1fd0a988b614d511eb346d0a91c809694ef76df5ae021f0f144d64e612d735bc8820950cf6f7f84cd0ae194bfe3d4242fe79688f83462e3f69d9d33de71aab0721b7dab9d6960875e5fdfd26b171a75fb51af822043820c47" + } + } + } + }, + "AGGREGATE_AND_PROOF (DEPRECATED)": { + "value": { + "type": "AGGREGATE_AND_PROOF", + "signingRoot": "0x8d777156899cb02e0e66217afd832886239752a59a393218f6c603bcf615b4f8", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "aggregate_and_proof": { + "aggregator_index": "1", + "aggregate": { + "aggregation_bits": "0x00000101", + "data": { + "slot": "0", + "index": "0", + "beacon_block_root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd", + "source": { + "epoch": "0", + "root": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "target": { + "epoch": "0", + "root": "0x100814c335d0ced5014cfa9d2e375e6d9b4e197381f8ce8af0473200fdc917fd" + } + }, + "signature": "0xa627242e4a5853708f4ebf923960fb8192f93f2233cd347e05239d86dd9fb66b721ceec1baeae6647f498c9126074f1101a87854d674b6eebc220fd8c3d8405bdfd8e286b707975d9e00a56ec6cbbf762f23607d490f0bbb16c3e0e483d51875" + }, + "selection_proof": "0xa63f73a03f1f42b1fd0a988b614d511eb346d0a91c809694ef76df5ae021f0f144d64e612d735bc8820950cf6f7f84cd0ae194bfe3d4242fe79688f83462e3f69d9d33de71aab0721b7dab9d6960875e5fdfd26b171a75fb51af822043820c47" + } + } + }, + "RANDAO_REVEAL": { + "value": { + "type": "RANDAO_REVEAL", + "signingRoot": "0x3d047c51a8b03630781dc4c5519c17f7de87174246ff2deed0f195c6c775f91e", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "randao_reveal": { + "epoch": "3" + } + } + }, + "VOLUNTARY_EXIT": { + "value": { + "type": "VOLUNTARY_EXIT", + "signingRoot": "0x38e9f1cfe7926ce5366b633b7fc7113129025737394002d2637faaeefc56913d", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "voluntary_exit": { + "epoch": "119", + "validator_index": "0" + } + } + }, + "SYNC_COMMITTEE_MESSAGE": { + "value": { + "type": "SYNC_COMMITTEE_MESSAGE", + "signingRoot": "0xa6f60df2817ea5b52eed1fefebbad746ef64c6249fc05c90c9e0f520cc75bb95", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "sync_committee_message": { + "beacon_block_root": "0x235bc3400c2839fd856a524871200bd5e362db615fc4565e1870ed9a2a936464", + "slot": "0" + } + } + }, + "SYNC_COMMITTEE_SELECTION_PROOF": { + "value": { + "type": 
"SYNC_COMMITTEE_SELECTION_PROOF", + "signingRoot": "0x50d85c783ab27c1eb3f3efa914b91cb93ffd677137b15c27ba5bb548306e6963", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "sync_aggregator_selection_data": { + "slot": "0", + "subcommittee_index": "0" + } + } + }, + "SYNC_COMMITTEE_CONTRIBUTION_AND_PROOF": { + "value": { + "type": "SYNC_COMMITTEE_CONTRIBUTION_AND_PROOF", + "signingRoot": "0xae94702468b584a3b1c422bc1b39cc523d9175ba3b9ac1cccb699c00507cc1a5", + "fork_info": { + "fork": { + "previous_version": "0x00000001", + "current_version": "0x00000001", + "epoch": "1" + }, + "genesis_validators_root": "0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + }, + "contribution_and_proof": { + "aggregator_index": "11", + "selection_proof": "0x8f5c34de9e22ceaa7e8d165fc0553b32f02188539e89e2cc91e2eb9077645986550d872ee3403204ae5d554eae3cac12124e18d2324bccc814775316aaef352abc0450812b3ca9fde96ecafa911b3b8bfddca8db4027f08e29c22a9c370ad933", + "contribution": { + "slot": "0", + "beacon_block_root": "0x235bc3400c2839fd856a524871200bd5e362db615fc4565e1870ed9a2a936464", + "subcommittee_index": "1", + "aggregation_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + "signature": "0x9005ed0936f527d416609285b355fe6b9610d730c18b9d2f4942ba7d0eb95ba304ff46b6a2fb86f0c756bf09274db8e11399b7642f9fc5ae50b5bd9c1d87654277a19bfc3df78d36da16f44a48630d9550774a4ca9f3a5b55bbf33345ad2ec71" + } + } + } + }, + "VALIDATOR_REGISTRATION": { + "value": { + "type": "VALIDATOR_REGISTRATION", + "signingRoot": "0xe4d2b3dd1e23807b90af0b1768cc7de12d4353320adb486f1bdaeed6b67009ea", + "validator_registration": { + "fee_recipient": "0x6fdfab408c56b6105a76eff5c0435d09fc6ed7a9", + "gas_limit": "4658411424342975020", + "timestamp": "4663368873993027404", + "pubkey": "0x8f82597c919c056571a05dfe83e6a7d32acf9ad8931be04d11384e95468cd68b40129864ae12745f774654bbac09b057" + } + } + }, + "DEPOSIT": { + "value": { + "type": "DEPOSIT", + "signingRoot": "0x3a49cdd70862ee95fed10e7494a8caa16af1be2f53612fc74dad27260bb2d711", + "deposit": { + "pubkey": "0x8f82597c919c056571a05dfe83e6a7d32acf9ad8931be04d11384e95468cd68b40129864ae12745f774654bbac09b057", + "withdrawal_credentials": "0x39722cbbf8b91a4b9045c5e6175f1001eac32f7fcd5eccda5c6e62fc4e638508", + "amount": "32", + "genesis_fork_version": "0x00000001" + } + } + } +} \ No newline at end of file diff --git a/tests/test_gossip_transition.nim b/tests/test_gossip_transition.nim index d8094fe62f..8c95189585 100644 --- a/tests/test_gossip_transition.nim +++ b/tests/test_gossip_transition.nim @@ -13,498 +13,524 @@ import ./testutil, ../beacon_chain/spec/[forks, network] -template getTargetGossipState(a, b, c, d, e: int, isBehind: bool): auto = +from ../beacon_chain/spec/presets import RuntimeConfig + +type TargetFork {.pure.} = enum + Capella + Deneb + Electra + Fulu + BPO1 + +template checkTargetGossipState( + targetEpoch, denebForkEpoch, electraForkEpoch, fuluForkEpoch, + bpo1ForkEpoch: int, isBehind: bool, targetForks: set[TargetFork]): + auto = + var targetGossipState: GossipState + for targetFork in targetForks: + case targetFork + of TargetFork.Capella: targetGossipState.incl GENESIS_EPOCH + of TargetFork.Deneb: targetGossipState.incl denebForkEpoch.Epoch + of TargetFork.Electra: targetGossipState.incl electraForkEpoch.Epoch + of 
TargetFork.Fulu: targetGossipState.incl fuluForkEpoch.Epoch + of TargetFork.BPO1: targetGossipState.incl bpo1ForkEpoch.Epoch getTargetGossipState( - a.Epoch, GENESIS_EPOCH, GENESIS_EPOCH, b.Epoch, c.Epoch, d.Epoch, e.Epoch, - isBehind) + targetEpoch.Epoch, + RuntimeConfig( + ALTAIR_FORK_EPOCH: GENESIS_EPOCH, BELLATRIX_FORK_EPOCH: GENESIS_EPOCH, + CAPELLA_FORK_EPOCH: GENESIS_EPOCH, + DENEB_FORK_EPOCH: denebForkEpoch.Epoch, + ELECTRA_FORK_EPOCH: electraForkEpoch.Epoch, + FULU_FORK_EPOCH: fuluForkEpoch.Epoch, + BLOB_SCHEDULE: @[BlobParameters(EPOCH: bpo1ForkEpoch.Epoch)]), isBehind) == targetGossipState suite "Gossip fork transition": test "Gossip fork transition": check: - getTargetGossipState( 5, 0, 1, 8, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 1, 4, 7, 9, 11, true) == {} - getTargetGossipState( 3, 0, 5, 6, 10, false) == {ConsensusFork.Capella} - getTargetGossipState(11, 2, 6, 10, 11, false) == {ConsensusFork.Fulu} - getTargetGossipState( 8, 4, 6, 10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 9, 2, 4, 9, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 7, 2, 3, 5, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 9, 0, 4, 8, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState( 7, 1, 2, 3, 10, false) == {ConsensusFork.Electra} - getTargetGossipState(11, 3, 4, 5, 11, false) == {ConsensusFork.Fulu} - getTargetGossipState( 0, 0, 1, 2, 3, true) == {} - getTargetGossipState(10, 0, 6, 7, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState( 5, 1, 3, 4, 7, false) == {ConsensusFork.Electra} - getTargetGossipState( 0, 3, 7, 10, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState(10, 0, 5, 8, 11, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 8, 1, 3, 6, 10, false) == {ConsensusFork.Electra} - getTargetGossipState( 6, 1, 4, 10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 3, 0, 5, 7, 8, false) == {ConsensusFork.Capella} - getTargetGossipState( 3, 2, 3, 4, 7, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 4, 3, 6, 7, 8, false) == {ConsensusFork.Capella} - getTargetGossipState( 8, 2, 6, 7, 9, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState(11, 1, 5, 7, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 1, 2, 7, 10, true) == {} - getTargetGossipState( 2, 1, 2, 3, 9, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 4, 0, 4, 8, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 4, 4, 5, 7, 9, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 8, 0, 0, 4, 7, false) == {ConsensusFork.Fulu} - getTargetGossipState( 6, 5, 7, 8, 10, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 6, 0, 2, 10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 2, 1, 2, 10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 5, 0, 2, 3, 6, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 2, 2, 6, 8, 11, false) == {ConsensusFork.Capella} - getTargetGossipState(10, 0, 6, 8, 11, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 8, 0, 0, 2, 3, true) == {} - getTargetGossipState( 4, 3, 7, 8, 9, true) == {} - getTargetGossipState( 0, 0, 2, 5, 10, false) == {ConsensusFork.Capella} - getTargetGossipState( 1, 3, 4, 5, 9, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 5, 2, 6, 8, 10, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 
7, 0, 1, 4, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 3, 2, 3, 9, 10, false) == {ConsensusFork.Deneb} - getTargetGossipState( 8, 3, 5, 9, 11, true) == {} - getTargetGossipState( 9, 3, 6, 7, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 0, 1, 2, 8, 9, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 6, 2, 7, 8, 11, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 2, 3, 4, 6, 7, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 1, 3, 5, 8, 10, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 1, 2, 5, 8, 10, true) == {} - getTargetGossipState( 9, 4, 5, 7, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState(11, 0, 5, 9, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 0, 1, 5, 7, 9, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 0, 2, 6, 8, 10, true) == {} - getTargetGossipState( 8, 0, 5, 8, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 7, 4, 7, 10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 6, 2, 3, 5, 7, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState(11, 0, 1, 3, 7, false) == {ConsensusFork.Fulu} - getTargetGossipState( 6, 5, 6, 8, 9, false) == {ConsensusFork.Deneb} - getTargetGossipState( 1, 2, 5, 8, 11, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState(11, 1, 5, 9, 11, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 3, 6, 7, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState( 3, 0, 1, 6, 8, false) == {ConsensusFork.Deneb} - getTargetGossipState( 5, 1, 6, 8, 10, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 5, 2, 3, 8, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 4, 2, 8, 9, 10, false) == {ConsensusFork.Capella} - getTargetGossipState( 3, 1, 3, 4, 5, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState(11, 2, 7, 10, 11, false) == {ConsensusFork.Fulu} - getTargetGossipState(10, 3, 5, 10, 11, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 5, 2, 6, 8, 11, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 4, 2, 6, 8, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 2, 0, 1, 4, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 1, 1, 4, 6, 10, true) == {} - getTargetGossipState( 4, 0, 6, 7, 10, false) == {ConsensusFork.Capella} - getTargetGossipState( 7, 3, 4, 7, 8, true) == {} - getTargetGossipState( 1, 0, 0, 6, 8, false) == {ConsensusFork.Deneb} - getTargetGossipState( 2, 1, 3, 6, 10, true) == {} - getTargetGossipState( 7, 2, 3, 7, 8, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 3, 2, 4, 5, 11, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 1, 4, 5, 7, 8, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 1, 3, 7, 8, 10, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 8, 0, 1, 6, 10, false) == {ConsensusFork.Electra} - getTargetGossipState( 7, 1, 5, 7, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 1, 6, 8, 10, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 0, 1, 2, 3, 7, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState(11, 2, 4, 5, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 6, 1, 5, 7, 9, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 7, 0, 3, 5, 11, 
false) == {ConsensusFork.Electra} - getTargetGossipState( 6, 0, 3, 9, 10, false) == {ConsensusFork.Deneb} - getTargetGossipState( 8, 2, 7, 8, 10, false) == {ConsensusFork.Electra} - getTargetGossipState( 7, 3, 5, 6, 9, false) == {ConsensusFork.Electra} - getTargetGossipState( 8, 0, 4, 7, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 8, 0, 3, 8, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 4, 1, 2, 4, 6, false) == {ConsensusFork.Electra} - getTargetGossipState(10, 0, 2, 4, 11, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 1, 0, 2, 7, 11, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 6, 1, 2, 5, 7, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState(10, 0, 1, 8, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState( 0, 1, 3, 4, 5, true) == {} - getTargetGossipState(11, 0, 2, 9, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 7, 2, 7, 8, 9, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 0, 0, 5, 6, 9, false) == {ConsensusFork.Capella} - getTargetGossipState( 4, 2, 6, 8, 9, false) == {ConsensusFork.Capella} - getTargetGossipState( 2, 2, 6, 7, 8, false) == {ConsensusFork.Capella} - getTargetGossipState(11, 2, 8, 9, 10, true) == {} - getTargetGossipState( 8, 1, 2, 8, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 7, 0, 1, 2, 8, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 7, 0, 1, 8, 9, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 3, 1, 7, 9, 10, false) == {ConsensusFork.Capella} - getTargetGossipState( 6, 2, 6, 7, 11, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 2, 3, 5, 7, 10, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState(10, 4, 5, 7, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState( 8, 1, 4, 5, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 0, 2, 7, 10, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 8, 1, 5, 7, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 8, 1, 3, 7, 9, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 4, 0, 3, 4, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 0, 1, 5, 9, 10, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 3, 4, 6, 7, 8, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState(11, 4, 5, 6, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 4, 2, 4, 9, 10, false) == {ConsensusFork.Deneb} - getTargetGossipState(11, 0, 3, 5, 7, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 0, 3, 7, 11, false) == {ConsensusFork.Electra} - getTargetGossipState(11, 1, 7, 8, 11, false) == {ConsensusFork.Fulu} - getTargetGossipState( 0, 0, 1, 3, 4, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 3, 4, 5, 7, 9, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState(10, 3, 6, 8, 9, true) == {} - getTargetGossipState( 6, 7, 9, 10, 11, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 7, 2, 4, 5, 7, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 0, 0, 6, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 4, 2, 3, 7, 10, true) == {} - getTargetGossipState( 3, 0, 2, 3, 5, false) == {ConsensusFork.Electra} - getTargetGossipState(10, 4, 6, 7, 8, false) == {ConsensusFork.Fulu} - 
getTargetGossipState( 1, 2, 6, 9, 11, true) == {} - getTargetGossipState( 7, 0, 1, 3, 7, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 0, 6, 8, 11, true) == {} - getTargetGossipState( 6, 2, 4, 10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 8, 1, 3, 7, 11, false) == {ConsensusFork.Electra} - getTargetGossipState(10, 0, 5, 7, 11, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 4, 0, 0, 4, 7, true) == {} - getTargetGossipState( 0, 1, 2, 5, 9, true) == {} - getTargetGossipState( 6, 2, 3, 10, 11, true) == {} - getTargetGossipState( 5, 1, 5, 9, 10, false) == {ConsensusFork.Deneb} - getTargetGossipState(10, 3, 5, 7, 11, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 3, 0, 1, 2, 5, false) == {ConsensusFork.Electra} - getTargetGossipState( 8, 0, 1, 7, 10, false) == {ConsensusFork.Electra} - getTargetGossipState( 2, 0, 5, 7, 11, false) == {ConsensusFork.Capella} - getTargetGossipState(11, 0, 1, 3, 11, false) == {ConsensusFork.Fulu} - getTargetGossipState( 8, 5, 7, 8, 9, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 0, 0, 0, 1, 11, true) == {} - getTargetGossipState( 6, 1, 4, 5, 9, false) == {ConsensusFork.Electra} - getTargetGossipState( 8, 2, 4, 5, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 1, 5, 8, 9, 10, false) == {ConsensusFork.Bellatrix} - getTargetGossipState(10, 2, 5, 6, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState( 5, 1, 2, 5, 9, false) == {ConsensusFork.Electra} - getTargetGossipState(10, 1, 5, 6, 11, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 5, 0, 0, 1, 4, false) == {ConsensusFork.Fulu} - getTargetGossipState( 8, 0, 2, 5, 7, false) == {ConsensusFork.Fulu} - getTargetGossipState( 7, 3, 4, 8, 10, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 9, 1, 6, 9, 10, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 5, 4, 5, 7, 10, false) == {ConsensusFork.Deneb} - getTargetGossipState( 5, 2, 8, 9, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 0, 2, 4, 7, 10, false) == {ConsensusFork.Bellatrix} - getTargetGossipState(11, 1, 4, 9, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 4, 0, 6, 8, 10, false) == {ConsensusFork.Capella} - getTargetGossipState(10, 0, 1, 6, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 2, 0, 1, 3, 11, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 2, 2, 5, 6, 10, false) == {ConsensusFork.Capella} - getTargetGossipState( 1, 0, 4, 5, 8, false) == {ConsensusFork.Capella} - getTargetGossipState( 5, 0, 2, 3, 8, false) == {ConsensusFork.Electra} - getTargetGossipState( 2, 6, 7, 8, 9, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 2, 2, 4, 6, 7, false) == {ConsensusFork.Capella} - getTargetGossipState( 8, 2, 5, 6, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 4, 5, 8, 9, 10, true) == {} - getTargetGossipState( 0, 0, 3, 5, 10, false) == {ConsensusFork.Capella} - getTargetGossipState( 8, 0, 1, 2, 4, false) == {ConsensusFork.Fulu} - getTargetGossipState( 0, 5, 7, 9, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 8, 1, 3, 6, 9, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 1, 5, 6, 7, 8, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 5, 0, 5, 8, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 0, 0, 2, 9, 11, false) == {ConsensusFork.Capella} - getTargetGossipState(10, 4, 6, 9, 10, 
false) == {ConsensusFork.Fulu} - getTargetGossipState( 1, 5, 9, 10, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState(10, 3, 5, 9, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 6, 0, 1, 2, 10, false) == {ConsensusFork.Electra} - getTargetGossipState( 2, 0, 5, 8, 9, false) == {ConsensusFork.Capella} - getTargetGossipState( 1, 1, 2, 8, 9, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 6, 4, 7, 9, 10, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState(10, 0, 1, 6, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 3, 5, 6, 9, 10, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 4, 0, 1, 6, 7, false) == {ConsensusFork.Deneb} - getTargetGossipState( 4, 1, 2, 5, 6, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 9, 0, 6, 9, 10, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState(11, 0, 2, 5, 11, false) == {ConsensusFork.Fulu} - getTargetGossipState( 2, 0, 2, 6, 9, true) == {} - getTargetGossipState( 5, 1, 5, 10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 0, 0, 1, 5, 7, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 9, 4, 5, 6, 7, true) == {} - getTargetGossipState( 6, 1, 2, 9, 10, false) == {ConsensusFork.Deneb} - getTargetGossipState( 6, 0, 3, 4, 8, false) == {ConsensusFork.Electra} - getTargetGossipState( 3, 1, 3, 10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 8, 0, 3, 5, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 2, 1, 2, 5, 9, false) == {ConsensusFork.Deneb} - getTargetGossipState( 2, 1, 3, 8, 10, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 2, 0, 3, 4, 7, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState(11, 4, 7, 8, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState( 4, 0, 2, 3, 9, false) == {ConsensusFork.Electra} - getTargetGossipState( 4, 1, 2, 8, 10, true) == {} - getTargetGossipState( 6, 3, 5, 6, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 1, 3, 4, 9, 10, true) == {} - getTargetGossipState( 7, 0, 4, 7, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 1, 0, 5, 7, 10, false) == {ConsensusFork.Capella} - getTargetGossipState( 7, 3, 4, 7, 8, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState(10, 5, 7, 8, 9, true) == {} - getTargetGossipState( 9, 1, 3, 4, 6, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 0, 1, 8, 10, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 4, 0, 4, 7, 11, true) == {} - getTargetGossipState( 3, 0, 2, 5, 11, true) == {} - getTargetGossipState( 5, 1, 3, 7, 9, false) == {ConsensusFork.Deneb} - getTargetGossipState( 1, 2, 3, 8, 10, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 6, 0, 4, 5, 10, true) == {} - getTargetGossipState( 9, 0, 0, 4, 7, false) == {ConsensusFork.Fulu} - getTargetGossipState( 7, 2, 3, 9, 10, false) == {ConsensusFork.Deneb} - getTargetGossipState( 2, 2, 4, 7, 9, false) == {ConsensusFork.Capella} - getTargetGossipState( 4, 1, 6, 9, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 8, 1, 2, 9, 11, true) == {} - getTargetGossipState( 6, 1, 6, 7, 9, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 2, 2, 6, 9, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 6, 0, 1, 6, 10, false) == {ConsensusFork.Electra} - getTargetGossipState( 9, 1, 2, 5, 10, false) == 
{ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 6, 1, 2, 5, 11, true) == {} - getTargetGossipState( 5, 3, 4, 8, 9, false) == {ConsensusFork.Deneb} - getTargetGossipState( 1, 0, 2, 5, 9, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 7, 1, 4, 9, 10, false) == {ConsensusFork.Deneb} - getTargetGossipState( 5, 0, 4, 7, 10, false) == {ConsensusFork.Deneb} - getTargetGossipState( 1, 4, 6, 8, 10, false) == {ConsensusFork.Bellatrix} - getTargetGossipState(11, 1, 2, 4, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 2, 6, 8, 9, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 1, 0, 9, 10, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 9, 1, 2, 6, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState( 2, 1, 6, 8, 11, true) == {} - getTargetGossipState( 6, 1, 4, 8, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 5, 2, 3, 5, 8, false) == {ConsensusFork.Electra} - getTargetGossipState( 0, 1, 2, 3, 11, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 4, 3, 6, 8, 9, false) == {ConsensusFork.Capella} - getTargetGossipState( 8, 0, 2, 4, 6, false) == {ConsensusFork.Fulu} - getTargetGossipState(10, 0, 1, 6, 10, true) == {} - getTargetGossipState( 3, 1, 9, 10, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 7, 2, 6, 8, 10, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 7, 2, 3, 7, 10, false) == {ConsensusFork.Electra} - getTargetGossipState(10, 5, 8, 9, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState(11, 3, 6, 9, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 8, 0, 0, 5, 11, false) == {ConsensusFork.Electra} - getTargetGossipState(10, 2, 6, 7, 11, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 2, 2, 3, 5, 7, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState(10, 2, 7, 8, 10, true) == {} - getTargetGossipState( 5, 0, 4, 5, 9, true) == {} - getTargetGossipState( 5, 0, 1, 2, 3, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 1, 2, 3, 7, false) == {ConsensusFork.Fulu} - getTargetGossipState( 1, 1, 3, 4, 10, false) == {ConsensusFork.Capella} - getTargetGossipState( 5, 0, 1, 3, 9, false) == {ConsensusFork.Electra} - getTargetGossipState( 9, 0, 4, 10, 11, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 4, 1, 5, 7, 11, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 4, 0, 1, 4, 7, false) == {ConsensusFork.Electra} - getTargetGossipState( 8, 5, 8, 10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState(11, 2, 3, 5, 7, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 6, 7, 9, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 8, 3, 4, 7, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 0, 1, 2, 3, 6, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 0, 3, 4, 5, 9, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 5, 3, 6, 7, 9, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 5, 5, 7, 8, 11, false) == {ConsensusFork.Capella} - getTargetGossipState(10, 1, 3, 7, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState(11, 0, 3, 9, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 2, 0, 2, 3, 4, true) == {} - getTargetGossipState( 1, 0, 1, 8, 9, false) == {ConsensusFork.Deneb} - getTargetGossipState( 6, 0, 6, 7, 8, true) == {} - getTargetGossipState( 4, 0, 1, 4, 10, 
false) == {ConsensusFork.Electra} - getTargetGossipState(11, 4, 5, 7, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 1, 1, 4, 5, 9, false) == {ConsensusFork.Capella} - getTargetGossipState( 6, 0, 2, 4, 7, true) == {} - getTargetGossipState( 6, 3, 8, 10, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 3, 0, 1, 7, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 1, 0, 6, 9, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 7, 2, 4, 6, 10, false) == {ConsensusFork.Electra} - getTargetGossipState(10, 0, 3, 5, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState(10, 0, 5, 7, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 0, 2, 8, 9, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 3, 0, 1, 5, 8, false) == {ConsensusFork.Deneb} - getTargetGossipState( 1, 0, 0, 3, 4, false) == {ConsensusFork.Deneb} - getTargetGossipState( 2, 2, 4, 5, 8, false) == {ConsensusFork.Capella} - getTargetGossipState( 6, 0, 0, 3, 8, false) == {ConsensusFork.Electra} - getTargetGossipState( 4, 0, 2, 5, 6, true) == {} - getTargetGossipState( 2, 0, 2, 3, 5, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 8, 0, 5, 6, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 7, 0, 2, 5, 7, false) == {ConsensusFork.Fulu} - getTargetGossipState(11, 1, 2, 5, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState(10, 0, 3, 6, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 0, 0, 2, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 1, 1, 2, 7, 8, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 5, 0, 1, 2, 8, false) == {ConsensusFork.Electra} - getTargetGossipState( 5, 3, 6, 9, 11, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 4, 4, 5, 9, 10, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 1, 1, 5, 6, 7, false) == {ConsensusFork.Capella} - getTargetGossipState( 3, 0, 0, 4, 8, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 3, 0, 0, 1, 4, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 2, 0, 2, 5, 8, false) == {ConsensusFork.Deneb} - getTargetGossipState( 1, 4, 7, 8, 10, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 6, 0, 3, 8, 9, false) == {ConsensusFork.Deneb} - getTargetGossipState( 9, 2, 3, 5, 6, false) == {ConsensusFork.Fulu} - getTargetGossipState(11, 1, 6, 10, 11, false) == {ConsensusFork.Fulu} - getTargetGossipState( 2, 1, 4, 7, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 8, 4, 5, 7, 9, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 9, 1, 8, 10, 11, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 5, 0, 1, 4, 5, false) == {ConsensusFork.Fulu} - getTargetGossipState( 0, 2, 7, 8, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 7, 3, 6, 7, 8, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState(10, 3, 4, 7, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 4, 2, 3, 5, 8, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState(11, 1, 2, 3, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState( 1, 2, 8, 9, 11, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 1, 1, 5, 10, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 2, 2, 9, 10, 11, false) == {ConsensusFork.Capella} - getTargetGossipState(11, 0, 0, 1, 11, false) == 
{ConsensusFork.Fulu} - getTargetGossipState( 8, 2, 4, 6, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 1, 0, 3, 5, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 2, 3, 4, 9, 11, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 8, 1, 2, 4, 7, false) == {ConsensusFork.Fulu} - getTargetGossipState( 0, 4, 5, 6, 7, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 5, 3, 7, 9, 10, false) == {ConsensusFork.Capella} - getTargetGossipState( 3, 0, 5, 6, 9, false) == {ConsensusFork.Capella} - getTargetGossipState(11, 4, 6, 9, 11, false) == {ConsensusFork.Fulu} - getTargetGossipState( 6, 5, 8, 10, 11, false) == {ConsensusFork.Capella} - getTargetGossipState(11, 2, 3, 4, 5, false) == {ConsensusFork.Fulu} - getTargetGossipState( 1, 3, 5, 6, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState(11, 5, 6, 7, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 5, 2, 6, 8, 9, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 1, 1, 3, 6, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 6, 0, 0, 1, 3, false) == {ConsensusFork.Fulu} - getTargetGossipState( 6, 2, 6, 8, 9, false) == {ConsensusFork.Deneb} - getTargetGossipState( 1, 3, 5, 6, 7, false) == {ConsensusFork.Bellatrix} - getTargetGossipState(10, 3, 4, 5, 8, true) == {} - getTargetGossipState( 8, 3, 7, 8, 10, false) == {ConsensusFork.Electra} - getTargetGossipState( 5, 1, 3, 6, 10, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 1, 0, 1, 2, 7, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 0, 0, 5, 6, 8, false) == {ConsensusFork.Capella} - getTargetGossipState( 9, 2, 4, 5, 8, true) == {} - getTargetGossipState( 1, 0, 0, 2, 10, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 3, 0, 3, 7, 8, false) == {ConsensusFork.Deneb} - getTargetGossipState(10, 2, 3, 5, 6, false) == {ConsensusFork.Fulu} - getTargetGossipState( 5, 3, 6, 9, 10, true) == {} - getTargetGossipState( 4, 2, 3, 8, 10, false) == {ConsensusFork.Deneb} - getTargetGossipState( 2, 1, 3, 10, 11, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 4, 0, 5, 7, 11, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 8, 2, 3, 7, 8, true) == {} - getTargetGossipState( 8, 2, 4, 7, 9, true) == {} - getTargetGossipState(10, 4, 5, 6, 8, true) == {} - getTargetGossipState( 1, 1, 2, 8, 10, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 3, 5, 6, 8, 9, true) == {} - getTargetGossipState( 7, 0, 0, 1, 6, true) == {} - getTargetGossipState( 8, 0, 4, 5, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 2, 0, 6, 8, 9, false) == {ConsensusFork.Capella} - getTargetGossipState( 0, 1, 4, 8, 10, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState(10, 0, 0, 0, 4, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 1, 3, 5, 9, false) == {ConsensusFork.Fulu} - getTargetGossipState( 1, 1, 4, 10, 11, true) == {} - getTargetGossipState(11, 1, 8, 9, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 2, 0, 1, 4, 5, false) == {ConsensusFork.Deneb} - getTargetGossipState( 3, 4, 8, 10, 11, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 5, 7, 8, 9, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 6, 0, 1, 3, 6, false) == {ConsensusFork.Fulu} - getTargetGossipState(10, 0, 2, 6, 7, false) == {ConsensusFork.Fulu} - 
getTargetGossipState( 3, 0, 5, 9, 10, false) == {ConsensusFork.Capella} - getTargetGossipState( 5, 0, 6, 7, 9, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 8, 6, 7, 8, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 2, 3, 4, 8, 11, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState(10, 6, 7, 9, 11, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 5, 1, 2, 8, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState(10, 4, 7, 9, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 2, 0, 0, 2, 3, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 6, 0, 5, 7, 8, true) == {} - getTargetGossipState(10, 1, 2, 3, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 3, 3, 5, 7, 8, true) == {} - getTargetGossipState( 7, 1, 2, 3, 6, false) == {ConsensusFork.Fulu} - getTargetGossipState( 6, 3, 4, 7, 9, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 4, 0, 3, 10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 9, 0, 0, 0, 2, false) == {ConsensusFork.Fulu} - getTargetGossipState( 5, 3, 6, 7, 11, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 9, 0, 2, 4, 6, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 1, 4, 9, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 6, 3, 5, 6, 10, false) == {ConsensusFork.Electra} - getTargetGossipState( 0, 4, 7, 9, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 9, 0, 5, 8, 10, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 5, 4, 5, 7, 9, true) == {} - getTargetGossipState( 4, 0, 1, 2, 3, false) == {ConsensusFork.Fulu} - getTargetGossipState( 4, 0, 2, 8, 10, false) == {ConsensusFork.Deneb} - getTargetGossipState(10, 0, 1, 4, 5, false) == {ConsensusFork.Fulu} - getTargetGossipState( 0, 4, 5, 8, 9, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 7, 1, 3, 7, 11, true) == {} - getTargetGossipState(11, 0, 1, 2, 10, true) == {} - getTargetGossipState( 8, 1, 4, 8, 10, true) == {} - getTargetGossipState( 2, 2, 3, 9, 11, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 3, 2, 6, 8, 9, false) == {ConsensusFork.Capella} - getTargetGossipState( 7, 0, 7, 8, 9, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 7, 0, 5, 6, 8, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 9, 1, 9, 10, 11, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 9, 4, 5, 6, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 9, 1, 4, 6, 8, true) == {} - getTargetGossipState( 4, 0, 3, 8, 9, false) == {ConsensusFork.Deneb} - getTargetGossipState( 0, 2, 3, 6, 10, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 9, 2, 4, 7, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 3, 0, 3, 7, 9, false) == {ConsensusFork.Deneb} - getTargetGossipState( 9, 1, 2, 3, 8, true) == {} - getTargetGossipState( 0, 2, 3, 6, 8, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 7, 6, 9, 10, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 5, 1, 5, 8, 11, true) == {} - getTargetGossipState( 4, 1, 2, 5, 11, true) == {} - getTargetGossipState( 0, 4, 6, 10, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 7, 3, 6, 9, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 2, 0, 4, 6, 7, false) == {ConsensusFork.Capella} - getTargetGossipState( 8, 2, 4, 
10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 4, 6, 7, 8, 10, true) == {} - getTargetGossipState(11, 0, 1, 7, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 0, 1, 2, 3, 4, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 3, 5, 6, 7, 9, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 8, 0, 2, 3, 5, false) == {ConsensusFork.Fulu} - getTargetGossipState(11, 0, 6, 7, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState(10, 1, 2, 3, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 9, 1, 3, 6, 10, true) == {} - getTargetGossipState( 0, 2, 7, 8, 9, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 2, 1, 2, 4, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 6, 0, 2, 7, 8, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 0, 1, 6, 7, 9, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 1, 5, 7, 9, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 1, 1, 8, 9, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 9, 2, 6, 8, 11, true) == {} - getTargetGossipState( 3, 0, 4, 8, 10, false) == {ConsensusFork.Capella, ConsensusFork.Deneb} - getTargetGossipState( 9, 2, 3, 9, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 6, 0, 1, 2, 6, false) == {ConsensusFork.Fulu} - getTargetGossipState(11, 0, 3, 8, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 2, 2, 4, 5, 6, false) == {ConsensusFork.Capella} - getTargetGossipState( 1, 1, 3, 6, 8, false) == {ConsensusFork.Capella} - getTargetGossipState( 5, 1, 3, 6, 9, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 5, 1, 3, 6, 10, true) == {} - getTargetGossipState( 3, 2, 3, 8, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 2, 1, 2, 3, 10, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 3, 0, 1, 9, 10, false) == {ConsensusFork.Deneb} - getTargetGossipState( 0, 1, 3, 7, 9, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 2, 3, 6, 10, 11, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 2, 0, 4, 6, 9, false) == {ConsensusFork.Capella} - getTargetGossipState( 8, 0, 8, 9, 11, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 0, 0, 4, 6, 9, false) == {ConsensusFork.Capella} - getTargetGossipState(10, 1, 2, 3, 4, false) == {ConsensusFork.Fulu} - getTargetGossipState( 4, 0, 3, 4, 7, false) == {ConsensusFork.Electra} - getTargetGossipState( 0, 2, 3, 8, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 0, 3, 5, 7, 10, true) == {} - getTargetGossipState( 9, 0, 0, 3, 7, false) == {ConsensusFork.Fulu} - getTargetGossipState( 2, 1, 5, 6, 11, false) == {ConsensusFork.Capella} - getTargetGossipState(10, 2, 3, 6, 10, false) == {ConsensusFork.Fulu} - getTargetGossipState( 5, 0, 4, 6, 11, true) == {} - getTargetGossipState( 1, 1, 3, 4, 5, false) == {ConsensusFork.Capella} - getTargetGossipState(11, 1, 7, 8, 11, true) == {} - getTargetGossipState( 3, 1, 5, 7, 9, true) == {} - getTargetGossipState( 6, 0, 2, 5, 9, false) == {ConsensusFork.Electra} - getTargetGossipState( 4, 0, 1, 4, 9, false) == {ConsensusFork.Electra} - getTargetGossipState( 6, 4, 8, 9, 11, true) == {} - getTargetGossipState(10, 0, 1, 2, 6, false) == {ConsensusFork.Fulu} - getTargetGossipState(10, 1, 3, 7, 11, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 3, 2, 5, 6, 7, false) == 
{ConsensusFork.Capella} - getTargetGossipState( 9, 4, 9, 10, 11, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 2, 2, 4, 9, 10, false) == {ConsensusFork.Capella} - getTargetGossipState( 8, 2, 4, 10, 11, true) == {} - getTargetGossipState(11, 0, 8, 10, 11, false) == {ConsensusFork.Fulu} - getTargetGossipState( 7, 0, 1, 7, 8, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 4, 2, 8, 10, 11, false) == {ConsensusFork.Capella} - getTargetGossipState( 8, 5, 6, 8, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 8, 1, 5, 8, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 6, 1, 6, 7, 11, false) == {ConsensusFork.Deneb, ConsensusFork.Electra} - getTargetGossipState( 9, 3, 4, 5, 6, false) == {ConsensusFork.Fulu} - getTargetGossipState( 1, 0, 3, 4, 7, false) == {ConsensusFork.Capella} - getTargetGossipState( 6, 1, 2, 3, 11, false) == {ConsensusFork.Electra} - getTargetGossipState( 1, 2, 5, 9, 10, false) == {ConsensusFork.Bellatrix, ConsensusFork.Capella} - getTargetGossipState( 5, 0, 5, 7, 8, false) == {ConsensusFork.Deneb} - getTargetGossipState( 8, 0, 3, 10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState(11, 3, 6, 7, 8, false) == {ConsensusFork.Fulu} - getTargetGossipState( 3, 6, 7, 9, 10, true) == {} - getTargetGossipState( 7, 1, 6, 10, 11, false) == {ConsensusFork.Deneb} - getTargetGossipState( 0, 6, 9, 10, 11, false) == {ConsensusFork.Bellatrix} - getTargetGossipState( 4, 1, 2, 3, 5, false) == {ConsensusFork.Electra, ConsensusFork.Fulu} - getTargetGossipState( 9, 1, 2, 7, 8, false) == {ConsensusFork.Fulu} \ No newline at end of file + checkTargetGossipState( 5, 0, 1, 8, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 1, 4, 7, 9, 11, true, default(set[TargetFork])) + checkTargetGossipState( 3, 0, 5, 6, 10, false, {TargetFork.Deneb}) + checkTargetGossipState(11, 2, 6, 10, 11, false, {TargetFork.BPO1}) + checkTargetGossipState( 8, 4, 6, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 9, 2, 4, 9, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 7, 2, 3, 5, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 9, 0, 4, 8, 9, false, {TargetFork.BPO1}) + checkTargetGossipState( 7, 1, 2, 3, 10, false, {TargetFork.Fulu}) + checkTargetGossipState(11, 3, 4, 5, 11, false, {TargetFork.BPO1}) + checkTargetGossipState( 0, 0, 1, 2, 3, true, default(set[TargetFork])) + checkTargetGossipState(10, 0, 6, 7, 9, false, {TargetFork.BPO1}) + checkTargetGossipState( 5, 1, 3, 4, 7, false, {TargetFork.Fulu}) + checkTargetGossipState( 0, 3, 7, 10, 11, false, {TargetFork.Capella}) + checkTargetGossipState(10, 0, 5, 8, 11, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 8, 1, 3, 6, 10, false, {TargetFork.Fulu}) + checkTargetGossipState( 6, 1, 4, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 3, 0, 5, 7, 8, false, {TargetFork.Deneb}) + checkTargetGossipState( 3, 2, 3, 4, 7, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 4, 3, 6, 7, 8, false, {TargetFork.Deneb}) + checkTargetGossipState( 8, 2, 6, 7, 9, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState(11, 1, 5, 7, 9, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 1, 2, 7, 10, true, default(set[TargetFork])) + checkTargetGossipState( 2, 1, 2, 3, 9, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 4, 0, 4, 8, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 4, 4, 5, 7, 9, false, {TargetFork.Deneb, TargetFork.Electra}) + 
checkTargetGossipState( 8, 0, 0, 4, 7, false, {TargetFork.BPO1}) + checkTargetGossipState( 6, 5, 7, 8, 10, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 6, 0, 2, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 2, 1, 2, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 5, 0, 2, 3, 6, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 2, 2, 6, 8, 11, false, {TargetFork.Deneb}) + checkTargetGossipState(10, 0, 6, 8, 11, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 8, 0, 0, 2, 3, true, default(set[TargetFork])) + checkTargetGossipState( 4, 3, 7, 8, 9, true, default(set[TargetFork])) + checkTargetGossipState( 0, 0, 2, 5, 10, false, {TargetFork.Deneb}) + checkTargetGossipState( 1, 3, 4, 5, 9, false, {TargetFork.Capella}) + checkTargetGossipState( 5, 2, 6, 8, 10, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 7, 0, 1, 4, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 3, 2, 3, 9, 10, false, {TargetFork.Electra}) + checkTargetGossipState( 8, 3, 5, 9, 11, true, default(set[TargetFork])) + checkTargetGossipState( 9, 3, 6, 7, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 0, 1, 2, 8, 9, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 6, 2, 7, 8, 11, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 2, 3, 4, 6, 7, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 1, 3, 5, 8, 10, false, {TargetFork.Capella}) + checkTargetGossipState( 1, 2, 5, 8, 10, true, default(set[TargetFork])) + checkTargetGossipState( 9, 4, 5, 7, 9, false, {TargetFork.BPO1}) + checkTargetGossipState(11, 0, 5, 9, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 0, 1, 5, 7, 9, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 0, 2, 6, 8, 10, true, default(set[TargetFork])) + checkTargetGossipState( 8, 0, 5, 8, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 7, 4, 7, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 6, 2, 3, 5, 7, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState(11, 0, 1, 3, 7, false, {TargetFork.BPO1}) + checkTargetGossipState( 6, 5, 6, 8, 9, false, {TargetFork.Electra}) + checkTargetGossipState( 1, 2, 5, 8, 11, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState(11, 1, 5, 9, 11, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 3, 6, 7, 9, false, {TargetFork.BPO1}) + checkTargetGossipState( 3, 0, 1, 6, 8, false, {TargetFork.Electra}) + checkTargetGossipState( 5, 1, 6, 8, 10, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 5, 2, 3, 8, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 4, 2, 8, 9, 10, false, {TargetFork.Deneb}) + checkTargetGossipState( 3, 1, 3, 4, 5, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState(11, 2, 7, 10, 11, false, {TargetFork.BPO1}) + checkTargetGossipState(10, 3, 5, 10, 11, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 5, 2, 6, 8, 11, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 4, 2, 6, 8, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 2, 0, 1, 4, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 1, 1, 4, 6, 10, true, default(set[TargetFork])) + checkTargetGossipState( 4, 0, 6, 7, 10, false, {TargetFork.Deneb}) + checkTargetGossipState( 7, 3, 4, 7, 8, true, default(set[TargetFork])) + checkTargetGossipState( 1, 0, 0, 6, 8, false, {TargetFork.Electra}) + 
checkTargetGossipState( 2, 1, 3, 6, 10, true, default(set[TargetFork])) + checkTargetGossipState( 7, 2, 3, 7, 8, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 3, 2, 4, 5, 11, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 1, 4, 5, 7, 8, false, {TargetFork.Capella}) + checkTargetGossipState( 1, 3, 7, 8, 10, false, {TargetFork.Capella}) + checkTargetGossipState( 8, 0, 1, 6, 10, false, {TargetFork.Fulu}) + checkTargetGossipState( 7, 1, 5, 7, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 1, 6, 8, 10, 11, false, {TargetFork.Capella}) + checkTargetGossipState( 0, 1, 2, 3, 7, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState(11, 2, 4, 5, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 6, 1, 5, 7, 9, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 7, 0, 3, 5, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 6, 0, 3, 9, 10, false, {TargetFork.Electra}) + checkTargetGossipState( 8, 2, 7, 8, 10, false, {TargetFork.Fulu}) + checkTargetGossipState( 7, 3, 5, 6, 9, false, {TargetFork.Fulu}) + checkTargetGossipState( 8, 0, 4, 7, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 8, 0, 3, 8, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 4, 1, 2, 4, 6, false, {TargetFork.Fulu}) + checkTargetGossipState(10, 0, 2, 4, 11, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 1, 0, 2, 7, 11, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 6, 1, 2, 5, 7, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState(10, 0, 1, 8, 9, false, {TargetFork.BPO1}) + checkTargetGossipState( 0, 1, 3, 4, 5, true, default(set[TargetFork])) + checkTargetGossipState(11, 0, 2, 9, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 7, 2, 7, 8, 9, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 0, 0, 5, 6, 9, false, {TargetFork.Deneb}) + checkTargetGossipState( 4, 2, 6, 8, 9, false, {TargetFork.Deneb}) + checkTargetGossipState( 2, 2, 6, 7, 8, false, {TargetFork.Deneb}) + checkTargetGossipState(11, 2, 8, 9, 10, true, default(set[TargetFork])) + checkTargetGossipState( 8, 1, 2, 8, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 7, 0, 1, 2, 8, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 7, 0, 1, 8, 9, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 3, 1, 7, 9, 10, false, {TargetFork.Deneb}) + checkTargetGossipState( 6, 2, 6, 7, 11, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 2, 3, 5, 7, 10, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState(10, 4, 5, 7, 9, false, {TargetFork.BPO1}) + checkTargetGossipState( 8, 1, 4, 5, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 0, 2, 7, 10, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 8, 1, 5, 7, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 8, 1, 3, 7, 9, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 4, 0, 3, 4, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 0, 1, 5, 9, 10, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 3, 4, 6, 7, 8, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState(11, 4, 5, 6, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 4, 2, 4, 9, 10, false, {TargetFork.Electra}) + checkTargetGossipState(11, 0, 3, 5, 7, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 0, 3, 7, 11, false, {TargetFork.Fulu}) + checkTargetGossipState(11, 1, 7, 
8, 11, false, {TargetFork.BPO1}) + checkTargetGossipState( 0, 0, 1, 3, 4, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 3, 4, 5, 7, 9, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState(10, 3, 6, 8, 9, true, default(set[TargetFork])) + checkTargetGossipState( 6, 7, 9, 10, 11, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 7, 2, 4, 5, 7, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 0, 0, 6, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 4, 2, 3, 7, 10, true, default(set[TargetFork])) + checkTargetGossipState( 3, 0, 2, 3, 5, false, {TargetFork.Fulu}) + checkTargetGossipState(10, 4, 6, 7, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 1, 2, 6, 9, 11, true, default(set[TargetFork])) + checkTargetGossipState( 7, 0, 1, 3, 7, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 0, 6, 8, 11, true, default(set[TargetFork])) + checkTargetGossipState( 6, 2, 4, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 8, 1, 3, 7, 11, false, {TargetFork.Fulu}) + checkTargetGossipState(10, 0, 5, 7, 11, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 4, 0, 0, 4, 7, true, default(set[TargetFork])) + checkTargetGossipState( 0, 1, 2, 5, 9, true, default(set[TargetFork])) + checkTargetGossipState( 6, 2, 3, 10, 11, true, default(set[TargetFork])) + checkTargetGossipState( 5, 1, 5, 9, 10, false, {TargetFork.Electra}) + checkTargetGossipState(10, 3, 5, 7, 11, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 3, 0, 1, 2, 5, false, {TargetFork.Fulu}) + checkTargetGossipState( 8, 0, 1, 7, 10, false, {TargetFork.Fulu}) + checkTargetGossipState( 2, 0, 5, 7, 11, false, {TargetFork.Deneb}) + checkTargetGossipState(11, 0, 1, 3, 11, false, {TargetFork.BPO1}) + checkTargetGossipState( 8, 5, 7, 8, 9, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 0, 0, 0, 1, 11, true, default(set[TargetFork])) + checkTargetGossipState( 6, 1, 4, 5, 9, false, {TargetFork.Fulu}) + checkTargetGossipState( 8, 2, 4, 5, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 1, 5, 8, 9, 10, false, {TargetFork.Capella}) + checkTargetGossipState(10, 2, 5, 6, 9, false, {TargetFork.BPO1}) + checkTargetGossipState( 5, 1, 2, 5, 9, false, {TargetFork.Fulu}) + checkTargetGossipState(10, 1, 5, 6, 11, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 5, 0, 0, 1, 4, false, {TargetFork.BPO1}) + checkTargetGossipState( 8, 0, 2, 5, 7, false, {TargetFork.BPO1}) + checkTargetGossipState( 7, 3, 4, 8, 10, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 9, 1, 6, 9, 10, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 5, 4, 5, 7, 10, false, {TargetFork.Electra}) + checkTargetGossipState( 5, 2, 8, 9, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 0, 2, 4, 7, 10, false, {TargetFork.Capella}) + checkTargetGossipState(11, 1, 4, 9, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 4, 0, 6, 8, 10, false, {TargetFork.Deneb}) + checkTargetGossipState(10, 0, 1, 6, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 2, 0, 1, 3, 11, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 2, 2, 5, 6, 10, false, {TargetFork.Deneb}) + checkTargetGossipState( 1, 0, 4, 5, 8, false, {TargetFork.Deneb}) + checkTargetGossipState( 5, 0, 2, 3, 8, false, {TargetFork.Fulu}) + checkTargetGossipState( 2, 6, 7, 8, 9, false, {TargetFork.Capella}) + checkTargetGossipState( 2, 2, 4, 6, 7, false, {TargetFork.Deneb}) + 
checkTargetGossipState( 8, 2, 5, 6, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 4, 5, 8, 9, 10, true, default(set[TargetFork])) + checkTargetGossipState( 0, 0, 3, 5, 10, false, {TargetFork.Deneb}) + checkTargetGossipState( 8, 0, 1, 2, 4, false, {TargetFork.BPO1}) + checkTargetGossipState( 0, 5, 7, 9, 11, false, {TargetFork.Capella}) + checkTargetGossipState( 8, 1, 3, 6, 9, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 1, 5, 6, 7, 8, false, {TargetFork.Capella}) + checkTargetGossipState( 5, 0, 5, 8, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 0, 0, 2, 9, 11, false, {TargetFork.Deneb}) + checkTargetGossipState(10, 4, 6, 9, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 1, 5, 9, 10, 11, false, {TargetFork.Capella}) + checkTargetGossipState(10, 3, 5, 9, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 6, 0, 1, 2, 10, false, {TargetFork.Fulu}) + checkTargetGossipState( 2, 0, 5, 8, 9, false, {TargetFork.Deneb}) + checkTargetGossipState( 1, 1, 2, 8, 9, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 6, 4, 7, 9, 10, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState(10, 0, 1, 6, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 3, 5, 6, 9, 10, false, {TargetFork.Capella}) + checkTargetGossipState( 4, 0, 1, 6, 7, false, {TargetFork.Electra}) + checkTargetGossipState( 4, 1, 2, 5, 6, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 9, 0, 6, 9, 10, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState(11, 0, 2, 5, 11, false, {TargetFork.BPO1}) + checkTargetGossipState( 2, 0, 2, 6, 9, true, default(set[TargetFork])) + checkTargetGossipState( 5, 1, 5, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 0, 0, 1, 5, 7, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 9, 4, 5, 6, 7, true, default(set[TargetFork])) + checkTargetGossipState( 6, 1, 2, 9, 10, false, {TargetFork.Electra}) + checkTargetGossipState( 6, 0, 3, 4, 8, false, {TargetFork.Fulu}) + checkTargetGossipState( 3, 1, 3, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 8, 0, 3, 5, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 2, 1, 2, 5, 9, false, {TargetFork.Electra}) + checkTargetGossipState( 2, 1, 3, 8, 10, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 2, 0, 3, 4, 7, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState(11, 4, 7, 8, 9, false, {TargetFork.BPO1}) + checkTargetGossipState( 4, 0, 2, 3, 9, false, {TargetFork.Fulu}) + checkTargetGossipState( 4, 1, 2, 8, 10, true, default(set[TargetFork])) + checkTargetGossipState( 6, 3, 5, 6, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 1, 3, 4, 9, 10, true, default(set[TargetFork])) + checkTargetGossipState( 7, 0, 4, 7, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 1, 0, 5, 7, 10, false, {TargetFork.Deneb}) + checkTargetGossipState( 7, 3, 4, 7, 8, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState(10, 5, 7, 8, 9, true, default(set[TargetFork])) + checkTargetGossipState( 9, 1, 3, 4, 6, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 0, 1, 8, 10, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 4, 0, 4, 7, 11, true, default(set[TargetFork])) + checkTargetGossipState( 3, 0, 2, 5, 11, true, default(set[TargetFork])) + checkTargetGossipState( 5, 1, 3, 7, 9, false, {TargetFork.Electra}) + checkTargetGossipState( 1, 2, 3, 8, 10, false, {TargetFork.Capella, TargetFork.Deneb}) + 
checkTargetGossipState( 6, 0, 4, 5, 10, true, default(set[TargetFork])) + checkTargetGossipState( 9, 0, 0, 4, 7, false, {TargetFork.BPO1}) + checkTargetGossipState( 7, 2, 3, 9, 10, false, {TargetFork.Electra}) + checkTargetGossipState( 2, 2, 4, 7, 9, false, {TargetFork.Deneb}) + checkTargetGossipState( 4, 1, 6, 9, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 8, 1, 2, 9, 11, true, default(set[TargetFork])) + checkTargetGossipState( 6, 1, 6, 7, 9, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 2, 2, 6, 9, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 6, 0, 1, 6, 10, false, {TargetFork.Fulu}) + checkTargetGossipState( 9, 1, 2, 5, 10, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 6, 1, 2, 5, 11, true, default(set[TargetFork])) + checkTargetGossipState( 5, 3, 4, 8, 9, false, {TargetFork.Electra}) + checkTargetGossipState( 1, 0, 2, 5, 9, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 7, 1, 4, 9, 10, false, {TargetFork.Electra}) + checkTargetGossipState( 5, 0, 4, 7, 10, false, {TargetFork.Electra}) + checkTargetGossipState( 1, 4, 6, 8, 10, false, {TargetFork.Capella}) + checkTargetGossipState(11, 1, 2, 4, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 2, 6, 8, 9, 11, false, {TargetFork.Capella}) + checkTargetGossipState( 1, 0, 9, 10, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 9, 1, 2, 6, 9, false, {TargetFork.BPO1}) + checkTargetGossipState( 2, 1, 6, 8, 11, true, default(set[TargetFork])) + checkTargetGossipState( 6, 1, 4, 8, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 5, 2, 3, 5, 8, false, {TargetFork.Fulu}) + checkTargetGossipState( 0, 1, 2, 3, 11, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 4, 3, 6, 8, 9, false, {TargetFork.Deneb}) + checkTargetGossipState( 8, 0, 2, 4, 6, false, {TargetFork.BPO1}) + checkTargetGossipState(10, 0, 1, 6, 10, true, default(set[TargetFork])) + checkTargetGossipState( 3, 1, 9, 10, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 7, 2, 6, 8, 10, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 7, 2, 3, 7, 10, false, {TargetFork.Fulu}) + checkTargetGossipState(10, 5, 8, 9, 10, false, {TargetFork.BPO1}) + checkTargetGossipState(11, 3, 6, 9, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 8, 0, 0, 5, 11, false, {TargetFork.Fulu}) + checkTargetGossipState(10, 2, 6, 7, 11, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 2, 2, 3, 5, 7, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState(10, 2, 7, 8, 10, true, default(set[TargetFork])) + checkTargetGossipState( 5, 0, 4, 5, 9, true, default(set[TargetFork])) + checkTargetGossipState( 5, 0, 1, 2, 3, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 1, 2, 3, 7, false, {TargetFork.BPO1}) + checkTargetGossipState( 1, 1, 3, 4, 10, false, {TargetFork.Deneb}) + checkTargetGossipState( 5, 0, 1, 3, 9, false, {TargetFork.Fulu}) + checkTargetGossipState( 9, 0, 4, 10, 11, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 4, 1, 5, 7, 11, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 4, 0, 1, 4, 7, false, {TargetFork.Fulu}) + checkTargetGossipState( 8, 5, 8, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState(11, 2, 3, 5, 7, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 6, 7, 9, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 8, 3, 4, 7, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 0, 1, 2, 3, 6, false, 
{TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 0, 3, 4, 5, 9, false, {TargetFork.Capella}) + checkTargetGossipState( 5, 3, 6, 7, 9, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 5, 5, 7, 8, 11, false, {TargetFork.Deneb}) + checkTargetGossipState(10, 1, 3, 7, 9, false, {TargetFork.BPO1}) + checkTargetGossipState(11, 0, 3, 9, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 2, 0, 2, 3, 4, true, default(set[TargetFork])) + checkTargetGossipState( 1, 0, 1, 8, 9, false, {TargetFork.Electra}) + checkTargetGossipState( 6, 0, 6, 7, 8, true, default(set[TargetFork])) + checkTargetGossipState( 4, 0, 1, 4, 10, false, {TargetFork.Fulu}) + checkTargetGossipState(11, 4, 5, 7, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 1, 1, 4, 5, 9, false, {TargetFork.Deneb}) + checkTargetGossipState( 6, 0, 2, 4, 7, true, default(set[TargetFork])) + checkTargetGossipState( 6, 3, 8, 10, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 3, 0, 1, 7, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 1, 0, 6, 9, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 7, 2, 4, 6, 10, false, {TargetFork.Fulu}) + checkTargetGossipState(10, 0, 3, 5, 8, false, {TargetFork.BPO1}) + checkTargetGossipState(10, 0, 5, 7, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 0, 2, 8, 9, 11, false, {TargetFork.Capella}) + checkTargetGossipState( 3, 0, 1, 5, 8, false, {TargetFork.Electra}) + checkTargetGossipState( 1, 0, 0, 3, 4, false, {TargetFork.Electra}) + checkTargetGossipState( 2, 2, 4, 5, 8, false, {TargetFork.Deneb}) + checkTargetGossipState( 6, 0, 0, 3, 8, false, {TargetFork.Fulu}) + checkTargetGossipState( 4, 0, 2, 5, 6, true, default(set[TargetFork])) + checkTargetGossipState( 2, 0, 2, 3, 5, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 8, 0, 5, 6, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 7, 0, 2, 5, 7, false, {TargetFork.BPO1}) + checkTargetGossipState(11, 1, 2, 5, 8, false, {TargetFork.BPO1}) + checkTargetGossipState(10, 0, 3, 6, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 0, 0, 2, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 1, 1, 2, 7, 8, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 5, 0, 1, 2, 8, false, {TargetFork.Fulu}) + checkTargetGossipState( 5, 3, 6, 9, 11, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 4, 4, 5, 9, 10, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 1, 1, 5, 6, 7, false, {TargetFork.Deneb}) + checkTargetGossipState( 3, 0, 0, 4, 8, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 3, 0, 0, 1, 4, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 2, 0, 2, 5, 8, false, {TargetFork.Electra}) + checkTargetGossipState( 1, 4, 7, 8, 10, false, {TargetFork.Capella}) + checkTargetGossipState( 6, 0, 3, 8, 9, false, {TargetFork.Electra}) + checkTargetGossipState( 9, 2, 3, 5, 6, false, {TargetFork.BPO1}) + checkTargetGossipState(11, 1, 6, 10, 11, false, {TargetFork.BPO1}) + checkTargetGossipState( 2, 1, 4, 7, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 8, 4, 5, 7, 9, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 9, 1, 8, 10, 11, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 5, 0, 1, 4, 5, false, {TargetFork.BPO1}) + checkTargetGossipState( 0, 2, 7, 8, 11, false, {TargetFork.Capella}) + checkTargetGossipState( 7, 3, 6, 7, 8, false, {TargetFork.Fulu, TargetFork.BPO1}) + 
checkTargetGossipState(10, 3, 4, 7, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 4, 2, 3, 5, 8, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState(11, 1, 2, 3, 9, false, {TargetFork.BPO1}) + checkTargetGossipState( 1, 2, 8, 9, 11, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 1, 1, 5, 10, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 2, 2, 9, 10, 11, false, {TargetFork.Deneb}) + checkTargetGossipState(11, 0, 0, 1, 11, false, {TargetFork.BPO1}) + checkTargetGossipState( 8, 2, 4, 6, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 1, 0, 3, 5, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 2, 3, 4, 9, 11, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 8, 1, 2, 4, 7, false, {TargetFork.BPO1}) + checkTargetGossipState( 0, 4, 5, 6, 7, false, {TargetFork.Capella}) + checkTargetGossipState( 5, 3, 7, 9, 10, false, {TargetFork.Deneb}) + checkTargetGossipState( 3, 0, 5, 6, 9, false, {TargetFork.Deneb}) + checkTargetGossipState(11, 4, 6, 9, 11, false, {TargetFork.BPO1}) + checkTargetGossipState( 6, 5, 8, 10, 11, false, {TargetFork.Deneb}) + checkTargetGossipState(11, 2, 3, 4, 5, false, {TargetFork.BPO1}) + checkTargetGossipState( 1, 3, 5, 6, 11, false, {TargetFork.Capella}) + checkTargetGossipState(11, 5, 6, 7, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 5, 2, 6, 8, 9, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 1, 1, 3, 6, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 6, 0, 0, 1, 3, false, {TargetFork.BPO1}) + checkTargetGossipState( 6, 2, 6, 8, 9, false, {TargetFork.Electra}) + checkTargetGossipState( 1, 3, 5, 6, 7, false, {TargetFork.Capella}) + checkTargetGossipState(10, 3, 4, 5, 8, true, default(set[TargetFork])) + checkTargetGossipState( 8, 3, 7, 8, 10, false, {TargetFork.Fulu}) + checkTargetGossipState( 5, 1, 3, 6, 10, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 1, 0, 1, 2, 7, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 0, 0, 5, 6, 8, false, {TargetFork.Deneb}) + checkTargetGossipState( 9, 2, 4, 5, 8, true, default(set[TargetFork])) + checkTargetGossipState( 1, 0, 0, 2, 10, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 3, 0, 3, 7, 8, false, {TargetFork.Electra}) + checkTargetGossipState(10, 2, 3, 5, 6, false, {TargetFork.BPO1}) + checkTargetGossipState( 5, 3, 6, 9, 10, true, default(set[TargetFork])) + checkTargetGossipState( 4, 2, 3, 8, 10, false, {TargetFork.Electra}) + checkTargetGossipState( 2, 1, 3, 10, 11, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 4, 0, 5, 7, 11, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 8, 2, 3, 7, 8, true, default(set[TargetFork])) + checkTargetGossipState( 8, 2, 4, 7, 9, true, default(set[TargetFork])) + checkTargetGossipState(10, 4, 5, 6, 8, true, default(set[TargetFork])) + checkTargetGossipState( 1, 1, 2, 8, 10, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 3, 5, 6, 8, 9, true, default(set[TargetFork])) + checkTargetGossipState( 7, 0, 0, 1, 6, true, default(set[TargetFork])) + checkTargetGossipState( 8, 0, 4, 5, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 2, 0, 6, 8, 9, false, {TargetFork.Deneb}) + checkTargetGossipState( 0, 1, 4, 8, 10, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState(10, 0, 0, 0, 4, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 1, 3, 5, 9, false, {TargetFork.BPO1}) + 
checkTargetGossipState( 1, 1, 4, 10, 11, true, default(set[TargetFork])) + checkTargetGossipState(11, 1, 8, 9, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 2, 0, 1, 4, 5, false, {TargetFork.Electra}) + checkTargetGossipState( 3, 4, 8, 10, 11, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 5, 7, 8, 9, 11, false, {TargetFork.Capella}) + checkTargetGossipState( 6, 0, 1, 3, 6, false, {TargetFork.BPO1}) + checkTargetGossipState(10, 0, 2, 6, 7, false, {TargetFork.BPO1}) + checkTargetGossipState( 3, 0, 5, 9, 10, false, {TargetFork.Deneb}) + checkTargetGossipState( 5, 0, 6, 7, 9, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 8, 6, 7, 8, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 2, 3, 4, 8, 11, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState(10, 6, 7, 9, 11, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 5, 1, 2, 8, 11, false, {TargetFork.Electra}) + checkTargetGossipState(10, 4, 7, 9, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 2, 0, 0, 2, 3, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 6, 0, 5, 7, 8, true, default(set[TargetFork])) + checkTargetGossipState(10, 1, 2, 3, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 3, 3, 5, 7, 8, true, default(set[TargetFork])) + checkTargetGossipState( 7, 1, 2, 3, 6, false, {TargetFork.BPO1}) + checkTargetGossipState( 6, 3, 4, 7, 9, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 4, 0, 3, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 9, 0, 0, 0, 2, false, {TargetFork.BPO1}) + checkTargetGossipState( 5, 3, 6, 7, 11, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 9, 0, 2, 4, 6, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 1, 4, 9, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 6, 3, 5, 6, 10, false, {TargetFork.Fulu}) + checkTargetGossipState( 0, 4, 7, 9, 11, false, {TargetFork.Capella}) + checkTargetGossipState( 9, 0, 5, 8, 10, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 5, 4, 5, 7, 9, true, default(set[TargetFork])) + checkTargetGossipState( 4, 0, 1, 2, 3, false, {TargetFork.BPO1}) + checkTargetGossipState( 4, 0, 2, 8, 10, false, {TargetFork.Electra}) + checkTargetGossipState(10, 0, 1, 4, 5, false, {TargetFork.BPO1}) + checkTargetGossipState( 0, 4, 5, 8, 9, false, {TargetFork.Capella}) + checkTargetGossipState( 7, 1, 3, 7, 11, true, default(set[TargetFork])) + checkTargetGossipState(11, 0, 1, 2, 10, true, default(set[TargetFork])) + checkTargetGossipState( 8, 1, 4, 8, 10, true, default(set[TargetFork])) + checkTargetGossipState( 2, 2, 3, 9, 11, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 3, 2, 6, 8, 9, false, {TargetFork.Deneb}) + checkTargetGossipState( 7, 0, 7, 8, 9, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 7, 0, 5, 6, 8, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 9, 1, 9, 10, 11, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 9, 4, 5, 6, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 9, 1, 4, 6, 8, true, default(set[TargetFork])) + checkTargetGossipState( 4, 0, 3, 8, 9, false, {TargetFork.Electra}) + checkTargetGossipState( 0, 2, 3, 6, 10, false, {TargetFork.Capella}) + checkTargetGossipState( 9, 2, 4, 7, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 3, 0, 3, 7, 9, false, {TargetFork.Electra}) + checkTargetGossipState( 9, 1, 2, 3, 8, true, 
default(set[TargetFork])) + checkTargetGossipState( 0, 2, 3, 6, 8, false, {TargetFork.Capella}) + checkTargetGossipState( 7, 6, 9, 10, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 5, 1, 5, 8, 11, true, default(set[TargetFork])) + checkTargetGossipState( 4, 1, 2, 5, 11, true, default(set[TargetFork])) + checkTargetGossipState( 0, 4, 6, 10, 11, false, {TargetFork.Capella}) + checkTargetGossipState( 7, 3, 6, 9, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 2, 0, 4, 6, 7, false, {TargetFork.Deneb}) + checkTargetGossipState( 8, 2, 4, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 4, 6, 7, 8, 10, true, default(set[TargetFork])) + checkTargetGossipState(11, 0, 1, 7, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 0, 1, 2, 3, 4, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 3, 5, 6, 7, 9, false, {TargetFork.Capella}) + checkTargetGossipState( 8, 0, 2, 3, 5, false, {TargetFork.BPO1}) + checkTargetGossipState(11, 0, 6, 7, 8, false, {TargetFork.BPO1}) + checkTargetGossipState(10, 1, 2, 3, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 9, 1, 3, 6, 10, true, default(set[TargetFork])) + checkTargetGossipState( 0, 2, 7, 8, 9, false, {TargetFork.Capella}) + checkTargetGossipState( 2, 1, 2, 4, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 6, 0, 2, 7, 8, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 0, 1, 6, 7, 9, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 1, 5, 7, 9, 11, false, {TargetFork.Capella}) + checkTargetGossipState( 1, 1, 8, 9, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 9, 2, 6, 8, 11, true, default(set[TargetFork])) + checkTargetGossipState( 3, 0, 4, 8, 10, false, {TargetFork.Deneb, TargetFork.Electra}) + checkTargetGossipState( 9, 2, 3, 9, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 6, 0, 1, 2, 6, false, {TargetFork.BPO1}) + checkTargetGossipState(11, 0, 3, 8, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 2, 2, 4, 5, 6, false, {TargetFork.Deneb}) + checkTargetGossipState( 1, 1, 3, 6, 8, false, {TargetFork.Deneb}) + checkTargetGossipState( 5, 1, 3, 6, 9, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 5, 1, 3, 6, 10, true, default(set[TargetFork])) + checkTargetGossipState( 3, 2, 3, 8, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 2, 1, 2, 3, 10, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 3, 0, 1, 9, 10, false, {TargetFork.Electra}) + checkTargetGossipState( 0, 1, 3, 7, 9, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 2, 3, 6, 10, 11, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 2, 0, 4, 6, 9, false, {TargetFork.Deneb}) + checkTargetGossipState( 8, 0, 8, 9, 11, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 0, 0, 4, 6, 9, false, {TargetFork.Deneb}) + checkTargetGossipState(10, 1, 2, 3, 4, false, {TargetFork.BPO1}) + checkTargetGossipState( 4, 0, 3, 4, 7, false, {TargetFork.Fulu}) + checkTargetGossipState( 0, 2, 3, 8, 11, false, {TargetFork.Capella}) + checkTargetGossipState( 0, 3, 5, 7, 10, true, default(set[TargetFork])) + checkTargetGossipState( 9, 0, 0, 3, 7, false, {TargetFork.BPO1}) + checkTargetGossipState( 2, 1, 5, 6, 11, false, {TargetFork.Deneb}) + checkTargetGossipState(10, 2, 3, 6, 10, false, {TargetFork.BPO1}) + checkTargetGossipState( 5, 0, 4, 6, 11, true, default(set[TargetFork])) + checkTargetGossipState( 1, 1, 3, 4, 5, false, {TargetFork.Deneb}) + 
checkTargetGossipState(11, 1, 7, 8, 11, true, default(set[TargetFork])) + checkTargetGossipState( 3, 1, 5, 7, 9, true, default(set[TargetFork])) + checkTargetGossipState( 6, 0, 2, 5, 9, false, {TargetFork.Fulu}) + checkTargetGossipState( 4, 0, 1, 4, 9, false, {TargetFork.Fulu}) + checkTargetGossipState( 6, 4, 8, 9, 11, true, default(set[TargetFork])) + checkTargetGossipState(10, 0, 1, 2, 6, false, {TargetFork.BPO1}) + checkTargetGossipState(10, 1, 3, 7, 11, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 3, 2, 5, 6, 7, false, {TargetFork.Deneb}) + checkTargetGossipState( 9, 4, 9, 10, 11, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 2, 2, 4, 9, 10, false, {TargetFork.Deneb}) + checkTargetGossipState( 8, 2, 4, 10, 11, true, default(set[TargetFork])) + checkTargetGossipState(11, 0, 8, 10, 11, false, {TargetFork.BPO1}) + checkTargetGossipState( 7, 0, 1, 7, 8, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 4, 2, 8, 10, 11, false, {TargetFork.Deneb}) + checkTargetGossipState( 8, 5, 6, 8, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 8, 1, 5, 8, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 6, 1, 6, 7, 11, false, {TargetFork.Electra, TargetFork.Fulu}) + checkTargetGossipState( 9, 3, 4, 5, 6, false, {TargetFork.BPO1}) + checkTargetGossipState( 1, 0, 3, 4, 7, false, {TargetFork.Deneb}) + checkTargetGossipState( 6, 1, 2, 3, 11, false, {TargetFork.Fulu}) + checkTargetGossipState( 1, 2, 5, 9, 10, false, {TargetFork.Capella, TargetFork.Deneb}) + checkTargetGossipState( 5, 0, 5, 7, 8, false, {TargetFork.Electra}) + checkTargetGossipState( 8, 0, 3, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState(11, 3, 6, 7, 8, false, {TargetFork.BPO1}) + checkTargetGossipState( 3, 6, 7, 9, 10, true, default(set[TargetFork])) + checkTargetGossipState( 7, 1, 6, 10, 11, false, {TargetFork.Electra}) + checkTargetGossipState( 0, 6, 9, 10, 11, false, {TargetFork.Capella}) + checkTargetGossipState( 4, 1, 2, 3, 5, false, {TargetFork.Fulu, TargetFork.BPO1}) + checkTargetGossipState( 9, 1, 2, 7, 8, false, {TargetFork.BPO1}) diff --git a/tests/test_gossip_validation.nim b/tests/test_gossip_validation.nim index c6084fd067..7224d515e0 100644 --- a/tests/test_gossip_validation.nim +++ b/tests/test_gossip_validation.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -38,28 +38,27 @@ proc pruneAtFinalization(dag: ChainDAGRef, attPool: AttestationPool) = suite "Gossip validation " & preset(): setup: # Genesis state that results in 3 members per committee - let rng = HmacDrbgContext.new() + let + rng = HmacDrbgContext.new() + cfg = defaultRuntimeConfig var - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init( - ChainDAGRef, defaultRuntimeConfig, makeTestDB(SLOTS_PER_EPOCH * 3), - validatorMonitor, {}) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) + dag = ChainDAGRef.init( + cfg, cfg.makeTestDB(SLOTS_PER_EPOCH * 3), validatorMonitor, {}) taskpool = Taskpool.new() - verifier = BatchVerifier.init(rng, taskpool) - quarantine = newClone(Quarantine.init()) - pool = newClone(AttestationPool.init(dag, quarantine)) + verifier {.used.} = BatchVerifier.init(rng, taskpool) + quarantine = newClone(Quarantine.init(dag.cfg)) + pool {.used.} = newClone(AttestationPool.init(dag, quarantine)) state = newClone(dag.headState) cache = StateCache() info = ForkedEpochInfo() - batchCrypto = BatchCrypto.new( + batchCrypto {.used.} = BatchCrypto.new( rng, eager = proc(): bool = false, genesis_validators_root = dag.genesis_validators_root, taskpool).expect( "working batcher") # Slot 0 is a finalized slot - won't be making attestations for it.. - check: - process_slots( - defaultRuntimeConfig, state[], getStateField(state[], slot) + 1, - cache, info, {}).isOk() + check cfg.process_slots( + state[], getStateField(state[], slot) + 1, cache, info, {}).isOk() test "Empty committee when no committee for slot": template committee(idx: uint64): untyped = @@ -84,6 +83,7 @@ suite "Gossip validation " & preset(): dag.headState, cache, int(SLOTS_PER_EPOCH * 5), attested = false): let added = dag.addHeadBlock(verifier, blck.phase0Data) do ( blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + state: phase0.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -218,7 +218,7 @@ suite "Gossip validation - Altair": dag.headState, cache, blocks = 1, attested = false, cfg = cfg): let added = withBlck(blck): - const nilCallback = (consensusFork.OnBlockAddedCallback)(nil) + const nilCallback = OnBlockAdded[consensusFork](nil) dag.addHeadBlock(verifier, forkyBlck, nilCallback) check: added.isOk() dag.updateHead(added[], quarantine, []) @@ -296,8 +296,8 @@ suite "Gossip validation - Altair": setup: let - validatorMonitor = newClone(ValidatorMonitor.init()) - quarantine = newClone(Quarantine.init()) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) + quarantine = newClone(Quarantine.init(cfg)) rng = HmacDrbgContext.new() syncCommitteePool = newClone(SyncCommitteeMsgPool.init(rng, cfg)) var @@ -307,7 +307,7 @@ suite "Gossip validation - Altair": template prepare(numValidators: Natural): untyped {.dirty.} = let dag = ChainDAGRef.init( - cfg, makeTestDB(numValidators, cfg = cfg), validatorMonitor, {}) + cfg, cfg.makeTestDB(numValidators), validatorMonitor, {}) batchCrypto = BatchCrypto.new( rng, eager = proc(): bool = false, genesis_validators_root = dag.genesis_validators_root, taskpool).expect( diff --git a/tests/test_helpers.nim b/tests/test_helpers.nim index 7c59d789bc..051bd16fce 100644 --- a/tests/test_helpers.nim +++ b/tests/test_helpers.nim @@ -1,18 +1,16 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license 
terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import # Status libraries stew/bitops2, - eth/common/eth_types as commonEthTypes, eth/common/eth_types_rlp, - web3/primitives, # Beacon chain internals ../beacon_chain/spec/[forks, helpers, state_transition], ../beacon_chain/spec/datatypes/[bellatrix, capella], @@ -67,190 +65,3 @@ suite "Spec helpers": process(fieldVar, i shl childDepth) i += 1 process(state, state.numLeaves) - - test "hypergeom_cdf": - # Generated with SciPy's hypergeom.cdf() function - const tests = [ - ( 0, 2, 1, 1, 0.5), - ( 8, 200, 162, 9, 0.85631007588636132), - ( 2, 20, 11, 5, 0.39551083591331271), - ( 2, 5, 4, 3, 0.59999999999999987), - ( 16, 100, 71, 28, 0.050496322336354399), - ( 1, 5, 2, 2, 0.90000000000000002), - ( 0, 5, 4, 1, 0.20000000000000004), - ( 27, 200, 110, 54, 0.24032479119039216), - ( 0, 10, 2, 5, 0.22222222222222224), - ( 3, 50, 27, 5, 0.77138514980460271), - ( 2, 50, 24, 8, 0.15067269856977925), - ( 4, 20, 16, 7, 0.10113519091847264), - ( 13, 500, 408, 15, 0.79686197891279686), - ( 0, 5, 3, 1, 0.40000000000000008), - ( 0, 20, 14, 2, 0.078947368421052627), - ( 49, 100, 62, 79, 0.6077614986362827), - ( 2, 10, 3, 6, 0.83333333333333337), - ( 0, 50, 31, 2, 0.13959183673469389), - ( 2, 5, 4, 3, 0.59999999999999987), - ( 4, 50, 21, 8, 0.81380887468704521), - ( 0, 10, 7, 2, 0.066666666666666652), - ( 0, 10, 1, 4, 0.59999999999999987), - ( 0, 20, 4, 2, 0.63157894736842102), - ( 0, 3, 2, 1, 0.33333333333333331), - ( 39, 500, 427, 51, 0.05047757656076568), - ( 2, 100, 6, 21, 0.89490672557682871), - ( 5, 20, 11, 9, 0.68904501071683733), - ( 0, 2, 1, 1, 0.5), - ( 0, 3, 1, 1, 0.66666666666666674), - ( 14, 50, 27, 30, 0.16250719969887772), - ( 0, 5, 4, 1, 0.20000000000000004), - ( 0, 5, 4, 1, 0.20000000000000004), - ( 2, 10, 8, 4, 0.13333333333333333), - ( 1, 5, 3, 2, 0.69999999999999996), - ( 25, 100, 77, 31, 0.79699287800204943), - ( 0, 3, 2, 1, 0.33333333333333331), - ( 7, 20, 15, 8, 0.94891640866873062), - ( 3, 50, 26, 7, 0.45339412360688952), - ( 1, 10, 8, 2, 0.37777777777777771), - ( 40, 200, 61, 134, 0.4491054454532335), - ( 1, 5, 2, 4, 0.40000000000000008), - ( 0, 10, 6, 1, 0.39999999999999991), - ( 1, 50, 10, 13, 0.19134773839560071), - ( 0, 2, 1, 1, 0.5), - ( 1, 20, 5, 2, 0.94736842105263153), - ( 7, 50, 12, 30, 0.57532691212157849), - ( 0, 3, 1, 1, 0.66666666666666674), - ( 6, 10, 7, 9, 0.69999999999999996), - ( 0, 20, 2, 1, 0.90000000000000002), - ( 2, 10, 5, 3, 0.91666666666666663), - ( 0, 10, 8, 1, 0.19999999999999998), - (258, 500, 372, 347, 0.53219975096883698), - ( 1, 3, 2, 2, 0.66666666666666674), - ( 45, 200, 129, 68, 0.69415691010446789), - ( 1, 10, 8, 2, 0.37777777777777771), - ( 0, 10, 2, 1, 0.80000000000000004), - ( 1, 10, 4, 5, 0.26190476190476192), - ( 3, 50, 36, 4, 0.74422492401215801), - ( 0, 20, 6, 1, 0.69999999999999996), - ( 0, 5, 2, 3, 0.10000000000000002), - ( 1, 200, 47, 9, 0.33197417194852796), - ( 20, 50, 32, 30, 0.78323921453982637), - ( 16, 50, 21, 34, 0.9149336897131396), - ( 17, 50, 38, 22, 0.69599001425795692), - ( 0, 5, 2, 3, 0.10000000000000002), - ( 1, 5, 3, 2, 0.69999999999999996), - ( 0, 10, 9, 1, 0.10000000000000001), - ( 0, 5, 2, 3, 0.10000000000000002), - ( 2, 10, 5, 6, 0.26190476190476192), - ( 0, 5, 2, 1, 
0.59999999999999987), - ( 7, 20, 16, 9, 0.62538699690402466), - ( 1, 100, 27, 2, 0.92909090909090908), - ( 27, 100, 58, 50, 0.271780848715515), - ( 47, 100, 96, 51, 0.063730084348641039), - ( 1, 20, 6, 2, 0.92105263157894735), - ( 1, 10, 6, 2, 0.66666666666666674), - ( 0, 2, 1, 1, 0.5), - ( 0, 20, 11, 1, 0.45000000000000001), - ( 0, 3, 1, 1, 0.66666666666666674), - ( 0, 2, 1, 1, 0.5), - ( 0, 10, 1, 7, 0.29999999999999999), - ( 0, 2, 1, 1, 0.5), - ( 0, 100, 36, 1, 0.64000000000000001), - ( 1, 100, 68, 2, 0.53979797979797983), - ( 13, 200, 79, 29, 0.80029860188814683), - ( 0, 10, 5, 1, 0.49999999999999994), - ( 0, 3, 2, 1, 0.33333333333333331), - ( 13, 100, 64, 21, 0.5065368728909565), - ( 1, 10, 6, 4, 0.11904761904761905), - ( 0, 2, 1, 1, 0.5), - ( 0, 5, 1, 2, 0.59999999999999987), - ( 0, 2, 1, 1, 0.5), - ( 1, 5, 4, 2, 0.40000000000000008), - ( 14, 50, 41, 17, 0.65850372332742224), - ( 0, 2, 1, 1, 0.5), - ( 0, 3, 1, 1, 0.66666666666666674), - ( 1, 100, 2, 62, 0.61797979797979785), - ( 0, 2, 1, 1, 0.5), - ( 0, 2, 1, 1, 0.5), - ( 12, 500, 312, 16, 0.91020698917397613), - ( 0, 20, 2, 6, 0.47894736842105257), - ( 0, 3, 2, 1, 0.33333333333333331), - ( 1, 10, 3, 4, 0.66666666666666674), - ( 0, 3, 1, 1, 0.66666666666666674), - ( 0, 3, 2, 1, 0.33333333333333331), - ( 6, 50, 20, 14, 0.72026241648862666), - ( 3, 20, 14, 6, 0.22523219814241485), - ( 0, 2, 1, 1, 0.5), - ( 4, 100, 72, 7, 0.30429108474790234), - ( 0, 5, 1, 2, 0.59999999999999987), - ( 0, 10, 4, 1, 0.59999999999999998), - ( 1, 3, 2, 2, 0.66666666666666674), - ( 0, 3, 1, 1, 0.66666666666666674), - ( 22, 50, 46, 24, 0.66413373860182379), - ( 1, 5, 2, 4, 0.40000000000000008), - ( 62, 100, 80, 79, 0.3457586020522983), - ( 0, 3, 2, 1, 0.33333333333333331), - ( 0, 10, 2, 7, 0.066666666666666666), - ( 0, 2, 1, 1, 0.5), - ( 0, 5, 2, 1, 0.59999999999999987), - ( 42, 200, 145, 57, 0.65622325663713577), - ( 1, 20, 12, 3, 0.34385964912280703), - ( 0, 2, 1, 1, 0.5), - ( 2, 10, 4, 7, 0.33333333333333331), - ( 1, 5, 3, 2, 0.69999999999999996), - ( 0, 10, 6, 2, 0.1333333333333333), - ( 2, 10, 6, 5, 0.26190476190476192), - ( 0, 5, 2, 1, 0.59999999999999987), - ( 1, 3, 2, 2, 0.66666666666666674), - ( 0, 50, 25, 2, 0.24489795918367349), - ( 0, 50, 39, 1, 0.22), - ( 2, 5, 3, 3, 0.90000000000000002), - ( 9, 50, 46, 10, 0.60316977854971765), - ( 0, 5, 2, 1, 0.59999999999999987), - ( 72, 500, 324, 112, 0.49074275180525029), - ( 0, 50, 9, 7, 0.22507959200836167), - ( 0, 5, 2, 2, 0.30000000000000004), - ( 17, 100, 35, 60, 0.067474411926413541), - ( 15, 100, 83, 17, 0.83718038506483827), - ( 0, 10, 7, 1, 0.29999999999999999), - ( 28, 200, 87, 77, 0.071226044946921765), - (154, 500, 361, 212, 0.61327756805578304), - ( 1, 10, 2, 3, 0.93333333333333335), - ( 0, 10, 4, 4, 0.071428571428571425), - ( 0, 5, 1, 1, 0.79999999999999993), - ( 2, 5, 3, 4, 0.59999999999999987), - ( 0, 10, 4, 1, 0.59999999999999998), - ( 0, 3, 2, 1, 0.33333333333333331), - ( 0, 10, 3, 1, 0.69999999999999996), - ( 0, 50, 10, 1, 0.80000000000000004), - ( 0, 2, 1, 1, 0.5), - ( 0, 10, 1, 3, 0.69999999999999996), - ( 2, 20, 12, 4, 0.53457172342621262), - ( 0, 5, 4, 1, 0.20000000000000004), - ( 4, 20, 9, 7, 0.89821981424148611), - ( 2, 200, 188, 3, 0.17021775544388609), - (132, 500, 298, 215, 0.78880271135040059), - ( 2, 5, 4, 3, 0.59999999999999987), - ( 0, 2, 1, 1, 0.5), - ( 2, 10, 6, 5, 0.26190476190476192), - ( 0, 3, 1, 1, 0.66666666666666674), - (156, 200, 128, 174, 1), - ( 1, 20, 6, 4, 0.65737874097007221), - ( 0, 5, 0, 0, 1), - (488, 500, 198, 500, 1), - (143, 500, 8, 371, 1), - ( 2, 10, 6, 5, 
0.26190476190476192), - ( 1, 5, 2, 4, 0.40000000000000008), - ( 0, 3, 2, 0, 1), - ( 12, 50, 7, 17, 1), - (129, 200, 43, 133, 1), - ( 0, 5, 3, 0, 1), - ( 0, 2, 1, 1, 0.5), - ( 5, 20, 20, 17, 0), - ( 4, 10, 4, 8, 1), - ( 46, 500, 478, 58, 5.1715118817799218e-07), - ( 0, 3, 2, 3, 0), - ( 0, 3, 1, 1, 0.66666666666666674), - ( 76, 500, 0, 120, 1), - ( 1, 100, 41, 12, 0.011989696504564528), - ] - for (k, population, successes, draws, val) in tests: - check: abs(hypergeom_cdf(k, population, successes, draws) - val) < 1e-11 diff --git a/tests/test_honest_validator.nim b/tests/test_honest_validator.nim index e7fa1b6c39..164039d4f0 100644 --- a/tests/test_honest_validator.nim +++ b/tests/test_honest_validator.nim @@ -326,3 +326,34 @@ suite "Honest validator": compute_inverted_shuffled_index( compute_shuffled_index( index, index_count, seed), index_count, seed) == index + + test "nextForkEpochAtEpoch with BPOs": + var cfg = defaultRuntimeConfig + cfg.ALTAIR_FORK_EPOCH = GENESIS_EPOCH + cfg.BELLATRIX_FORK_EPOCH = GENESIS_EPOCH + cfg.CAPELLA_FORK_EPOCH = GENESIS_EPOCH + cfg.DENEB_FORK_EPOCH = GENESIS_EPOCH + cfg.ELECTRA_FORK_EPOCH = 9.Epoch + cfg.FULU_FORK_EPOCH = 100.Epoch + cfg.BLOB_SCHEDULE = @[ + BlobParameters(EPOCH: 300.Epoch, MAX_BLOBS_PER_BLOCK: 300), + BlobParameters(EPOCH: 250.Epoch, MAX_BLOBS_PER_BLOCK: 275), + BlobParameters(EPOCH: 200.Epoch, MAX_BLOBS_PER_BLOCK: 200), + BlobParameters(EPOCH: 150.Epoch, MAX_BLOBS_PER_BLOCK: 175), + BlobParameters(EPOCH: 100.Epoch, MAX_BLOBS_PER_BLOCK: 100), + BlobParameters(EPOCH: 9.Epoch, MAX_BLOBS_PER_BLOCK: 9)] + check: + cfg.nextForkEpochAtEpoch(9.Epoch) == 100.Epoch + cfg.nextForkEpochAtEpoch(10.Epoch) == 100.Epoch + cfg.nextForkEpochAtEpoch(11.Epoch) == 100.Epoch + cfg.nextForkEpochAtEpoch(99.Epoch) == 100.Epoch + cfg.nextForkEpochAtEpoch(100.Epoch) == 150.Epoch + cfg.nextForkEpochAtEpoch(101.Epoch) == 150.Epoch + cfg.nextForkEpochAtEpoch(150.Epoch) == 200.Epoch + cfg.nextForkEpochAtEpoch(199.Epoch) == 200.Epoch + cfg.nextForkEpochAtEpoch(200.Epoch) == 250.Epoch + cfg.nextForkEpochAtEpoch(201.Epoch) == 250.Epoch + cfg.nextForkEpochAtEpoch(250.Epoch) == 300.Epoch + cfg.nextForkEpochAtEpoch(299.Epoch) == 300.Epoch + cfg.nextForkEpochAtEpoch(300.Epoch) == FAR_FUTURE_EPOCH + cfg.nextForkEpochAtEpoch(301.Epoch) == FAR_FUTURE_EPOCH diff --git a/tests/test_key_splitting.nim b/tests/test_key_splitting.nim index 74c320daf2..fa0aba490a 100644 --- a/tests/test_key_splitting.nim +++ b/tests/test_key_splitting.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -15,16 +15,12 @@ import ./testutil func sign(secrets: seq[SecretShare], message: seq[byte]): seq[SignatureShare] = - let msg = message return secrets.mapIt(it.key.blsSign(message).toSignatureShare(it.id)) suite "Key spliting": let privateKey = ValidatorPrivKey.init("0x25295f0d1d592a90b333e26e85149708208e9f8e8bc18f6c77bd62f8ad7a6866") pubKey = privateKey.toPubKey.toPubKey - password = string.fromBytes hexToSeqByte("7465737470617373776f7264f09f9491") - salt = hexToSeqByte "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" - iv = hexToSeqByte "264daa3f303d7259501c93d997d84fe6" rng = HmacDrbgContext.new() msg = rng[].generateBytes(32) diff --git a/tests/test_keymanager_api.nim b/tests/test_keymanager_api.nim index b2ac8090f6..b643114372 100644 --- a/tests/test_keymanager_api.nim +++ b/tests/test_keymanager_api.nim @@ -12,19 +12,21 @@ import std/[typetraits, os, options, json, sequtils, uri, algorithm], - testutils/unittests, chronicles, stint, json_serialization, confutils, - chronos, blscurve, libp2p/crypto/crypto as lcrypto, + unittest2, + chronicles, + chronos, + chronos/unittest2/asynctests, + confutils, + json_serialization, stew/[byteutils, io2], - ../beacon_chain/spec/[crypto, keystore, eth2_merkleization], ../beacon_chain/spec/datatypes/base, ../beacon_chain/spec/eth2_apis/[rest_keymanager_calls, rest_keymanager_types], - ../beacon_chain/validators/[keystore_management, slashing_protection_common, - validator_pool], + ../beacon_chain/validators/ + [keystore_management, slashing_protection_common, validator_pool], ../beacon_chain/networking/network_metadata, ../beacon_chain/rpc/rest_key_management_api, - ../beacon_chain/[conf, filepath, beacon_node, - nimbus_beacon_node, beacon_node_status], + ../beacon_chain/[conf, filepath, beacon_node, nimbus_beacon_node, process_state], ../beacon_chain/validator_client/common, ../ncli/ncli_testnet, ./testutil @@ -136,9 +138,9 @@ const nodeValidatorsDir = nodeDataDir / "validators" nodeSecretsDir = nodeDataDir / "secrets" - vcDataDir = dataDir / "validator-0" - vcValidatorsDir = vcDataDir / "validators" - vcSecretsDir = vcDataDir / "secrets" + # vcDataDir = dataDir / "validator-0" + # vcValidatorsDir = vcDataDir / "validators" + # vcSecretsDir = vcDataDir / "secrets" func specifiedFeeRecipient(x: int): Eth1Address = copyMem(addr result, unsafeAddr x, sizeof x) @@ -209,7 +211,6 @@ BELLATRIX_FORK_EPOCH: 0 "--total-validators=" & $simulationDepositsCount, "--deposits-file=" & depositsFile, "--output-genesis=" & genesisFile, - "--output-deposit-tree-snapshot=" & depositTreeSnapshotFile, "--output-bootstrap-file=" & bootstrapEnrFile, "--netkey-file=network_key.json", "--insecure-netkey-password=true", @@ -364,9 +365,11 @@ proc initBeaconNode(basePort: int): Future[BeaconNode] {.async: (raises: []).} = raiseAssert exc.msg try: - let metadata = - loadEth2NetworkMetadata(dataDir).expect("Metadata is compatible") - await BeaconNode.init(rng, runNodeConf, metadata) + let + metadata = loadEth2NetworkMetadata(dataDir).expect("Metadata is compatible") + taskpool = Taskpool.new() + + await BeaconNode.init(rng, runNodeConf, metadata, taskpool) except CatchableError as exc: raiseAssert exc.msg @@ -1900,9 +1903,7 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} = decoded = try: RestJson.decode(response.data, - DataEnclosedObject[seq[RemoteKeystoreStatus]], - requireAllFields = true, - allowUnknownFields = true) + DataEnclosedObject[seq[RemoteKeystoreStatus]]) except SerializationError: raiseAssert "Invalid response encoding" check: @@ 
-1932,9 +1933,7 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} = decoded = try: RestJson.decode(response.data, - DataEnclosedObject[seq[RemoteKeystoreStatus]], - requireAllFields = true, - allowUnknownFields = true) + DataEnclosedObject[seq[RemoteKeystoreStatus]]) except SerializationError: raiseAssert "Invalid response encoding" check: @@ -1965,9 +1964,7 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} = decoded = try: RestJson.decode(response.data, - DataEnclosedObject[seq[RemoteKeystoreStatus]], - requireAllFields = true, - allowUnknownFields = true) + DataEnclosedObject[seq[RemoteKeystoreStatus]]) except SerializationError: raiseAssert "Invalid response encoding" check: @@ -1997,9 +1994,7 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} = decoded = try: RestJson.decode(response.data, - DataEnclosedObject[seq[RemoteKeystoreStatus]], - requireAllFields = true, - allowUnknownFields = true) + DataEnclosedObject[seq[RemoteKeystoreStatus]]) except SerializationError: raiseAssert "Invalid response encoding" check: @@ -2022,15 +2017,15 @@ proc delayedTests(basePort: int, pool: ref ValidatorPool, validatorPool: pool, keymanagerHost: host) - validatorClientKeymanager = KeymanagerToTest( - ident: "Validator Client", - port: basePort + PortKind.KeymanagerVC.ord, - validatorsDir: vcValidatorsDir, - secretsDir: vcSecretsDir, - validatorPool: pool, - keymanagerHost: host) + # validatorClientKeymanager = KeymanagerToTest( + # ident: "Validator Client", + # port: basePort + PortKind.KeymanagerVC.ord, + # validatorsDir: vcValidatorsDir, + # secretsDir: vcSecretsDir, + # validatorPool: pool, + # keymanagerHost: host) - while bnStatus != BeaconNodeStatus.Running: + while not ProcessState.running: await sleepAsync(1.seconds) # asyncSpawn startValidatorClient(basePort) @@ -2045,9 +2040,12 @@ proc delayedTests(basePort: int, pool: ref ValidatorPool, # Re-enable it in a follow-up PR # await runTests(validatorClientKeymanager) - bnStatus = BeaconNodeStatus.Stopping + ProcessState.scheduleStop("stop") proc main(basePort: int) {.async.} = + # Overwrite the standard nim stop handlers + ProcessState.setupStopHandlers() + if dirExists(dataDir): os.removeDir dataDir @@ -2058,7 +2056,7 @@ proc main(basePort: int) {.async.} = asyncSpawn delayedTests(basePort, node.attachedValidators, node.keymanagerHost) - node.start() + node.run(nil) let basePortStr = os.getEnv("NIMBUS_TEST_KEYMANAGER_BASE_PORT", $defaultBasePort) diff --git a/tests/test_keystore.nim b/tests/test_keystore.nim index f31f202266..1cef7b8afe 100644 --- a/tests/test_keystore.nim +++ b/tests/test_keystore.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -240,8 +240,8 @@ let suite "KeyStorage testing suite": setup: - let secret = ValidatorPrivKey.fromRaw(secretBytes).get - let nsecret = init(lcrypto.PrivateKey, secretNetBytes).get + let secret {.used.} = ValidatorPrivKey.fromRaw(secretBytes).get + let nsecret {.used.} = init(lcrypto.PrivateKey, secretNetBytes).get test "Load Prysm keystore": let keystore = parseKeystore(prysmKeystore) diff --git a/tests/test_light_client.nim b/tests/test_light_client.nim index 7ad26c1340..bf48f5b8cb 100644 --- a/tests/test_light_client.nim +++ b/tests/test_light_client.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -25,7 +25,7 @@ suite "Light client" & preset(): headPeriod = 4.SyncCommitteePeriod let cfg = block: # Fork schedule so that each `LightClientDataFork` is covered - static: doAssert ConsensusFork.high == ConsensusFork.Fulu + static: doAssert ConsensusFork.high == ConsensusFork.Gloas var res = defaultRuntimeConfig res.ALTAIR_FORK_EPOCH = 1.Epoch res.BELLATRIX_FORK_EPOCH = 2.Epoch @@ -69,7 +69,7 @@ suite "Light client" & preset(): dag.headState, cache, blocks.int, attested = attested, syncCommitteeRatio = syncCommitteeRatio, cfg = cfg): let added = withBlck(blck): - const nilCallback = (consensusFork.OnBlockAddedCallback)(nil) + const nilCallback = OnBlockAdded[consensusFork](nil) dag.addHeadBlock(verifier, forkyBlck, nilCallback) check: added.isOk() dag.updateHead(added[], quarantine, []) @@ -77,13 +77,13 @@ suite "Light client" & preset(): setup: const num_validators = SLOTS_PER_EPOCH let - validatorMonitor = newClone(ValidatorMonitor.init()) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) dag = ChainDAGRef.init( - cfg, makeTestDB(num_validators, cfg = cfg), validatorMonitor, {}, + cfg, cfg.makeTestDB(num_validators), validatorMonitor, {}, lcDataConfig = LightClientDataConfig( serve: true, importMode: LightClientDataImportMode.OnlyNew)) - quarantine = newClone(Quarantine.init()) + quarantine = newClone(Quarantine.init(cfg)) rng = HmacDrbgContext.new() taskpool = Taskpool.new() var @@ -233,7 +233,7 @@ suite "Light client" & preset(): dag.advanceToSlot(finalizedSlot, verifier, quarantine[]) # Initialize new DAG from checkpoint - let cpDb = BeaconChainDB.new("", cfg = cfg, inMemory = true) + let cpDb = BeaconChainDB.new("", cfg, inMemory = true) ChainDAGRef.preInit(cpDb, genesisState[]) ChainDAGRef.preInit(cpDb, dag.headState) # dag.getForkedBlock(dag.head.bid).get) let cpDag = ChainDAGRef.init( diff --git a/tests/test_light_client_processor.nim b/tests/test_light_client_processor.nim index 5f9c060346..aaef841f2c 100644 --- a/tests/test_light_client_processor.nim +++ b/tests/test_light_client_processor.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -26,9 +26,10 @@ suite "Light client processor" & preset(): lowPeriod = 0.SyncCommitteePeriod lastPeriodWithSupermajority = 4.SyncCommitteePeriod highPeriod = 6.SyncCommitteePeriod + debugGloasComment "add res.GLOAS_FORK_EPOCH = ..." let cfg = block: # Fork schedule so that each `LightClientDataFork` is covered - static: doAssert ConsensusFork.high == ConsensusFork.Fulu + static: doAssert ConsensusFork.high == ConsensusFork.Gloas var res = defaultRuntimeConfig res.ALTAIR_FORK_EPOCH = 1.Epoch res.BELLATRIX_FORK_EPOCH = 2.Epoch @@ -40,13 +41,13 @@ suite "Light client processor" & preset(): const numValidators = SLOTS_PER_EPOCH let - validatorMonitor = newClone(ValidatorMonitor.init()) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) dag = ChainDAGRef.init( - cfg, makeTestDB(numValidators, cfg = cfg), validatorMonitor, {}, + cfg, cfg.makeTestDB(numValidators), validatorMonitor, {}, lcDataConfig = LightClientDataConfig( serve: true, importMode: LightClientDataImportMode.OnlyNew)) - quarantine = newClone(Quarantine.init()) + quarantine = newClone(Quarantine.init(dag.cfg)) rng = HmacDrbgContext.new() taskpool = Taskpool.new() var verifier =BatchVerifier.init(rng, taskpool) @@ -57,7 +58,7 @@ suite "Light client processor" & preset(): dag.headState, cache, blocks.int, attested = true, syncCommitteeRatio = syncCommitteeRatio, cfg = cfg): let added = withBlck(blck): - const nilCallback = (consensusFork.OnBlockAddedCallback)(nil) + const nilCallback = OnBlockAdded[consensusFork](nil) dag.addHeadBlock(verifier, forkyBlck, nilCallback) doAssert added.isOk() dag.updateHead(added[], quarantine[], []) @@ -100,7 +101,7 @@ suite "Light client processor" & preset(): processor = LightClientProcessor.new( false, "", "", cfg, genesis_validators_root, finalizationMode, store, getBeaconTime, getTrustedBlockRoot, onStoreInitialized) - res: Result[bool, VerifierError] + res: Result[bool, LightClientVerifierError] test "Sync" & testNameSuffix: var bootstrap = dag.getLightClientBootstrap(trustedBlockRoot) @@ -173,7 +174,7 @@ suite "Light client processor" & preset(): template forkyUpdate: untyped = upgraded[].forky(lcDataFork) check: res.isErr - res.error == VerifierError.Duplicate + res.error == LightClientVerifierError.Duplicate forkyStore.best_valid_update.isSome forkyStore.best_valid_update.get.matches(forkyUpdate) else: @@ -184,7 +185,7 @@ suite "Light client processor" & preset(): template forkyUpdate: untyped = upgraded[].forky(lcDataFork) check: res.isErr - res.error == VerifierError.MissingParent + res.error == LightClientVerifierError.MissingParent forkyStore.best_valid_update.isSome not forkyStore.best_valid_update.get.matches(forkyUpdate) @@ -204,7 +205,7 @@ suite "Light client processor" & preset(): template forkyUpdate: untyped = upgraded[].forky(lcDataFork) check: res.isErr - res.error == VerifierError.Duplicate + res.error == LightClientVerifierError.Duplicate forkyStore.best_valid_update.isSome forkyStore.best_valid_update.get.matches(forkyUpdate) else: @@ -215,7 +216,7 @@ suite "Light client processor" & preset(): template forkyUpdate: untyped = upgraded[].forky(lcDataFork) check: res.isErr - res.error == VerifierError.MissingParent + res.error == LightClientVerifierError.MissingParent forkyStore.best_valid_update.isSome not forkyStore.best_valid_update.get.matches(forkyUpdate) @@ -237,7 +238,7 @@ suite "Light client processor" & preset(): template forkyUpdate: untyped = upgraded[].forky(lcDataFork) check: res.isErr - res.error == VerifierError.Duplicate + res.error == 
LightClientVerifierError.Duplicate forkyStore.best_valid_update.isNone if forkyStore.finalized_header == forkyUpdate.attested_header: break @@ -251,7 +252,7 @@ suite "Light client processor" & preset(): template forkyUpdate: untyped = upgraded[].forky(lcDataFork) check: res.isErr - res.error == VerifierError.Duplicate + res.error == LightClientVerifierError.Duplicate forkyStore.best_valid_update.isSome forkyStore.best_valid_update.get.matches(forkyUpdate) else: @@ -262,7 +263,7 @@ suite "Light client processor" & preset(): template forkyUpdate: untyped = upgraded[].forky(lcDataFork) check: res.isErr - res.error == VerifierError.MissingParent + res.error == LightClientVerifierError.MissingParent forkyStore.best_valid_update.isSome not forkyStore.best_valid_update.get.matches(forkyUpdate) @@ -318,9 +319,9 @@ suite "Light client processor" & preset(): forkyStore.best_valid_update.get.matches(forkyUpdate) forkyStore.optimistic_header == forkyUpdate.attested_header elif finalizationMode == LightClientFinalizationMode.Optimistic: - check res.error == VerifierError.Duplicate + check res.error == LightClientVerifierError.Duplicate else: - check res.error == VerifierError.MissingParent + check res.error == LightClientVerifierError.MissingParent check numOnStoreInitializedCalls == 1 test "Invalid bootstrap" & testNameSuffix: @@ -334,7 +335,7 @@ suite "Light client processor" & preset(): MsgSource.gossip, getBeaconTime(), bootstrap) check: res.isErr - res.error == VerifierError.Invalid + res.error == LightClientVerifierError.Invalid numOnStoreInitializedCalls == 0 test "Duplicate bootstrap" & testNameSuffix: @@ -352,7 +353,7 @@ suite "Light client processor" & preset(): MsgSource.gossip, getBeaconTime(), bootstrap) check: res.isErr - res.error == VerifierError.Duplicate + res.error == LightClientVerifierError.Duplicate numOnStoreInitializedCalls == 1 test "Missing bootstrap (update)" & testNameSuffix: @@ -365,7 +366,7 @@ suite "Light client processor" & preset(): MsgSource.gossip, getBeaconTime(), update) check: res.isErr - res.error == VerifierError.MissingParent + res.error == LightClientVerifierError.MissingParent numOnStoreInitializedCalls == 0 test "Missing bootstrap (finality update)" & testNameSuffix: @@ -378,7 +379,7 @@ suite "Light client processor" & preset(): MsgSource.gossip, getBeaconTime(), finalityUpdate) check: res.isErr - res.error == VerifierError.MissingParent + res.error == LightClientVerifierError.MissingParent numOnStoreInitializedCalls == 0 test "Missing bootstrap (optimistic update)" & testNameSuffix: @@ -391,5 +392,5 @@ suite "Light client processor" & preset(): MsgSource.gossip, getBeaconTime(), optimisticUpdate) check: res.isErr - res.error == VerifierError.MissingParent + res.error == LightClientVerifierError.MissingParent numOnStoreInitializedCalls == 0 diff --git a/tests/test_message_signatures.nim b/tests/test_message_signatures.nim index 86ade21ba8..6afdbac4fb 100644 --- a/tests/test_message_signatures.nim +++ b/tests/test_message_signatures.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -272,4 +272,104 @@ suite "Message signatures": fork0, genesis_validators_root0, slot, subcommittee_index, load(pubkey0).get, get_sync_committee_selection_proof( fork0, genesis_validators_root1, slot, - subcommittee_index, privkey0).toValidatorSig) \ No newline at end of file + subcommittee_index, privkey0).toValidatorSig) + + test "execution payload bid signatures": + let + msg = gloas.SignedExecutionPayloadBid.new() + state = gloas.BeaconState.new() + + check: + # Matching public/private keys and genesis validator roots + verify_execution_payload_bid_signature( + fork0, genesis_validators_root0, msg[], state[], + load(pubkey0).get, get_execution_payload_bid_signature( + fork0, genesis_validators_root0, msg[], + state[], privkey0).toValidatorSig) + + # Mismatched public/private keys + not verify_execution_payload_bid_signature( + fork0, genesis_validators_root0, msg[], state[], + load(pubkey0).get, get_execution_payload_bid_signature( + fork0, genesis_validators_root0, msg[], + state[], privkey1).toValidatorSig) + + # Mismatched forks + not verify_execution_payload_bid_signature( + fork0, genesis_validators_root0, msg[], state[], + load(pubkey0).get, get_execution_payload_bid_signature( + fork1, genesis_validators_root0, msg[], + state[], privkey0).toValidatorSig) + + # Mismatched genesis validator roots + not verify_execution_payload_bid_signature( + fork0, genesis_validators_root0, msg[], state[], + load(pubkey0).get, get_execution_payload_bid_signature( + fork0, genesis_validators_root1, msg[], + state[], privkey0).toValidatorSig) + + test "execution payload envelope signatures": + let + msg = gloas.SignedExecutionPayloadEnvelope.new() + state = gloas.BeaconState.new() + + check: + # Matching public/private keys and genesis validator roots + verify_execution_payload_envelope_signature( + fork0, genesis_validators_root0, msg[], state[], + load(pubkey0).get, get_execution_payload_envelope_signature( + fork0, genesis_validators_root0, msg[], + state[], privkey0).toValidatorSig) + + # Mismatched public/private keys + not verify_execution_payload_envelope_signature( + fork0, genesis_validators_root0, msg[], state[], + load(pubkey0).get, get_execution_payload_envelope_signature( + fork0, genesis_validators_root0, msg[], + state[], privkey1).toValidatorSig) + + # Mismatched forks + not verify_execution_payload_envelope_signature( + fork0, genesis_validators_root0, msg[], state[], + load(pubkey0).get, get_execution_payload_envelope_signature( + fork1, genesis_validators_root0, msg[], + state[], privkey0).toValidatorSig) + + # Mismatched genesis validator roots + not verify_execution_payload_envelope_signature( + fork0, genesis_validators_root0, msg[], state[], + load(pubkey0).get, get_execution_payload_envelope_signature( + fork0, genesis_validators_root1, msg[], + state[], privkey0).toValidatorSig) + + test "payload attestation message signatures": + let msg = default(PayloadAttestationMessage) + + check: + # Matching public/private keys and genesis validator roots + verify_payload_attestation_message_signature( + fork0, genesis_validators_root0, msg, + load(pubkey0).get, get_payload_attestation_message_signature( + fork0, genesis_validators_root0, msg, + privkey0).toValidatorSig) + + # Mismatched public/private keys + not verify_payload_attestation_message_signature( + fork0, genesis_validators_root0, msg, + load(pubkey0).get, get_payload_attestation_message_signature( + fork0, genesis_validators_root0, msg, + privkey1).toValidatorSig) + + # Mismatched forks + not 
verify_payload_attestation_message_signature( + fork0, genesis_validators_root0, msg, + load(pubkey0).get, get_payload_attestation_message_signature( + fork1, genesis_validators_root0, msg, + privkey0).toValidatorSig) + + # Mismatched genesis validator roots + not verify_payload_attestation_message_signature( + fork0, genesis_validators_root0, msg, + load(pubkey0).get, get_payload_attestation_message_signature( + fork0, genesis_validators_root1, msg, + privkey0).toValidatorSig) diff --git a/tests/test_mev_calls.nim b/tests/test_mev_calls.nim index cba58f1124..3fa5ace1ee 100644 --- a/tests/test_mev_calls.nim +++ b/tests/test_mev_calls.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import @@ -15,23 +15,20 @@ import chronos/unittest2/asynctests, ../beacon_chain/spec/[presets, crypto, signatures, eth2_ssz_serialization, helpers, forks], - ../beacon_chain/spec/mev/[deneb_mev, electra_mev, fulu_mev, - rest_deneb_mev_calls, rest_electra_mev_calls, - rest_fulu_mev_calls], + ../beacon_chain/spec/mev/[electra_mev, fulu_mev, rest_mev_calls], ../beacon_chain/rpc/rest_utils from std/times import Time, toUnix, fromUnix, getTime const - DenebSlot = Slot(32000) ElectraSlot = Slot(64000) FuluSlot = Slot(96000) emptyFork = Fork() + emptyVersion = emptyFork.current_version emptyRoot = Eth2Digest() type - MevBlocks = deneb_mev.SignedBlindedBeaconBlock | - electra_mev.SignedBlindedBeaconBlock | + MevBlocks = electra_mev.SignedBlindedBeaconBlock | fulu_mev.SignedBlindedBeaconBlock TestNodeRef* = ref object @@ -53,7 +50,7 @@ func specifiedFeeRecipient(x: int): Eth1Address = copyMem(addr result, unsafeAddr x, sizeof x) proc prepareRegistration( - fork: Fork, + genesis_fork_version: Version, key: ValidatorPrivKey, gas_limit: uint64 = 0'u64, timestamp: Time, @@ -62,12 +59,13 @@ proc prepareRegistration( var msg = SignedValidatorRegistrationV1( message: ValidatorRegistrationV1( - fee_recipient: ExecutionAddress(data: distinctBase(feeRecipient)), + fee_recipient:feeRecipient, gas_limit: gas_limit, timestamp: uint64(timestamp.toUnix()), pubkey: key.toPubKey().toPubKey() )) - msg.signature = get_builder_signature(fork, msg.message, key).toValidatorSig() + msg.signature = + get_builder_signature(genesis_fork_version, msg.message, key).toValidatorSig() msg proc generateRegistrations( @@ -81,7 +79,7 @@ proc generateRegistrations( raiseAssert "Unable to generate private key" feeRecipient = specifiedFeeRecipient(index) res.add(prepareRegistration( - emptyFork, privateKey, 30_000_000'u64, getTime(), feeRecipient)) + emptyVersion, privateKey, 30_000_000'u64, getTime(), feeRecipient)) res proc prepare( @@ -108,8 +106,7 @@ proc prepare( proc jsonResponseSignedBuilderBid( t: typedesc[RestApiResponse], - bid: deneb_mev.SignedBuilderBid | electra_mev.SignedBuilderBid | - fulu_mev.SignedBuilderBid + bid: electra_mev.SignedBuilderBid | fulu_mev.SignedBuilderBid ): RestApiResponse = let consensusFork = typeof(bid).kind() @@ -129,9 +126,7 @@ proc jsonResponseSignedBuilderBid( proc jsonResponseExecutionPayloadAndBlobsBundle( t: typedesc[RestApiResponse], - payload: deneb_mev.ExecutionPayloadAndBlobsBundle | - electra_mev.ExecutionPayloadAndBlobsBundle | - fulu_mev.ExecutionPayloadAndBlobsBundle + payload: electra_mev.ExecutionPayloadAndBlobsBundle ): RestApiResponse = let consensusFork = 
typeof(payload).kind() @@ -151,8 +146,7 @@ proc jsonResponseExecutionPayloadAndBlobsBundle( proc sszResponseSignedBuilderBid*( t: typedesc[RestApiResponse], - bid: deneb_mev.SignedBuilderBid | electra_mev.SignedBuilderBid | - fulu_mev.SignedBuilderBid, + bid: electra_mev.SignedBuilderBid | fulu_mev.SignedBuilderBid, ): RestApiResponse = mixin kind let @@ -171,9 +165,7 @@ proc sszResponseSignedBuilderBid*( proc sszResponseExecutionPayloadAndBlobsBundle*( t: typedesc[RestApiResponse], - payload: deneb_mev.ExecutionPayloadAndBlobsBundle | - electra_mev.ExecutionPayloadAndBlobsBundle | - fulu_mev.ExecutionPayloadAndBlobsBundle, + payload: electra_mev.ExecutionPayloadAndBlobsBundle ): RestApiResponse = mixin kind let @@ -203,7 +195,7 @@ proc setupEngineAPI*(router: var RestRouter, node: TestNodeRef) = return RestApiResponse.jsonError(error) for item in registrations: - if not(verify_builder_signature(emptyFork, item.message, + if not(verify_builder_signature(emptyVersion, item.message, item.message.pubkey, item.signature)): return RestApiResponse.jsonError(Http400, "Signature verification failed") @@ -232,22 +224,16 @@ proc setupEngineAPI*(router: var RestRouter, node: TestNodeRef) = else: RestApiResponse.jsonError(Http415, "Invalid Accept") - if qslot == DenebSlot: - let bid = deneb_mev.SignedBuilderBid( - message: deneb_mev.BuilderBid( - header: deneb.ExecutionPayloadHeader(parent_hash: qhash)) - ) - respondSszOrJson(contentType, bid) - elif qslot == ElectraSlot: + if qslot == ElectraSlot: let bid = electra_mev.SignedBuilderBid( message: electra_mev.BuilderBid( - header: electra.ExecutionPayloadHeader(parent_hash: qhash)) + header: deneb.ExecutionPayloadHeader(parent_hash: qhash)) ) respondSszOrJson(contentType, bid) elif qslot == FuluSlot: let bid = fulu_mev.SignedBuilderBid( message: fulu_mev.BuilderBid( - header: fulu.ExecutionPayloadHeader(parent_hash: qhash)) + header: deneb.ExecutionPayloadHeader(parent_hash: qhash)) ) respondSszOrJson(contentType, bid) else: @@ -267,7 +253,7 @@ proc setupEngineAPI*(router: var RestRouter, node: TestNodeRef) = sszMediaType).valueOr: return RestApiResponse.jsonError(Http406, "Content type not acceptable") - if consensusFork < ConsensusFork.Deneb: + if consensusFork < ConsensusFork.Electra: return RestApiResponse.jsonError(Http400, "Unsupported fork version") template respondSszOrJson(contentType, payload: auto): RestApiResponse = @@ -278,66 +264,49 @@ proc setupEngineAPI*(router: var RestRouter, node: TestNodeRef) = else: RestApiResponse.jsonError(Http415, "Invalid Accept") - if consensusFork == ConsensusFork.Deneb: - let - blck = - decodeBodyJsonOrSsz(deneb_mev.SignedBlindedBeaconBlock, - contentBody.get()).valueOr: - return RestApiResponse.jsonError(error) - proposerKey = - if int(blck.message.proposer_index) < len(node.validators): - node.validators[int(blck.message.proposer_index)] - else: - ValidatorPubKey() - slot = blck.message.slot - blockRoot = hash_tree_root(blck.message) - - if not(verify_block_signature(emptyFork, emptyRoot, slot, blockRoot, - proposerKey, blck.signature)): - return RestApiResponse.jsonError(Http400, "Invalid signature") - - let - payload = deneb_mev.ExecutionPayloadAndBlobsBundle( - execution_payload: deneb.ExecutionPayload( - parent_hash: blck.message.body.execution_payload_header.parent_hash - ), - blobs_bundle: BlobsBundle() - ) - respondSszOrJson(contentType, payload) - elif consensusFork == ConsensusFork.Electra: + if consensusFork == ConsensusFork.Electra: let blck = 
decodeBodyJsonOrSsz(electra_mev.SignedBlindedBeaconBlock, contentBody.get()).valueOr: return RestApiResponse.jsonError(error) payload = electra_mev.ExecutionPayloadAndBlobsBundle( - execution_payload: electra.ExecutionPayload( - parent_hash: blck.message.body.execution_payload_header.parent_hash - ), - blobs_bundle: BlobsBundle() - ) - respondSszOrJson(contentType, payload) - elif consensusFork == ConsensusFork.Fulu: - let - blck = - decodeBodyJsonOrSsz(fulu_mev.SignedBlindedBeaconBlock, - contentBody.get()).valueOr: - return RestApiResponse.jsonError(error) - payload = fulu_mev.ExecutionPayloadAndBlobsBundle( - execution_payload: fulu.ExecutionPayload( + execution_payload: deneb.ExecutionPayload( parent_hash: blck.message.body.execution_payload_header.parent_hash ), - blobs_bundle: BlobsBundle() + blobs_bundle: deneb.BlobsBundle() ) respondSszOrJson(contentType, payload) else: raiseAssert "Unsupported fork version" + router.api2(MethodPost, "/eth/v2/builder/blinded_blocks") do ( + contentBody: Option[ContentBody]) -> RestApiResponse: + + if contentBody.isNone: + return RestApiResponse.jsonError(Http400, EmptyRequestBodyError) + + let + rawVersion = request.headers.getString("eth-consensus-version") + consensusFork = ConsensusFork.decodeString(rawVersion).valueOr: + return RestApiResponse.jsonError(Http400, "Invalid consensus version") + contentType = preferredContentType(jsonMediaType, + sszMediaType).valueOr: + return RestApiResponse.jsonError(Http406, "Content type not acceptable") + + if consensusFork < ConsensusFork.Fulu: + return RestApiResponse.jsonError(Http400, "Unsupported fork version") + + if contentType in [sszMediaType, jsonMediaType]: + RestApiResponse.response( + Http202, headers=[("eth-consensus-version", consensusFork.toString)]) + else: + RestApiResponse.jsonError(Http415, "Invalid Accept") + router.api2(MethodGet, "/eth/v1/builder/status") do () -> RestApiResponse: RestApiResponse.response(Http200) proc testSuite() = - suite "MEV calls serialization/deserialization and behavior test suite": let rng = HmacDrbgContext.new() @@ -399,13 +368,13 @@ proc testSuite() = let response1 = - await client.getHeaderDenebPlain(DenebSlot, parent_hash, + await client.getHeaderPlain(ElectraSlot, parent_hash, publicKey, restAcceptType = restAcceptType1) response2 = - await client.getHeaderElectraPlain(ElectraSlot, parent_hash, + await client.getHeaderPlain(ElectraSlot, parent_hash, publicKey, restAcceptType = restAcceptType2) response3 = - await client.getHeaderFuluPlain(FuluSlot, parent_hash, + await client.getHeaderPlain(FuluSlot, parent_hash, publicKey, restAcceptType = restAcceptType3) check: @@ -425,13 +394,13 @@ proc testSuite() = version3 = response3.headers.getString("eth-consensus-version") check: - version1 == ConsensusFork.Deneb.toString() + version1 == ConsensusFork.Electra.toString() version2 == ConsensusFork.Electra.toString() version3 == ConsensusFork.Fulu.toString() let bid1res = - decodeBytesJsonOrSsz(GetHeaderResponseDeneb, response1.data, + decodeBytesJsonOrSsz(GetHeaderResponseElectra, response1.data, response1.contentType, version1) bid2res = decodeBytesJsonOrSsz(GetHeaderResponseElectra, response2.data, @@ -473,7 +442,7 @@ proc testSuite() = let blck1 = - prepare(deneb_mev.SignedBlindedBeaconBlock, DenebSlot, parent_hash1, + prepare(electra_mev.SignedBlindedBeaconBlock, ElectraSlot, parent_hash1, 0'u64, privateKey1) blck2 = prepare(electra_mev.SignedBlindedBeaconBlock, ElectraSlot, parent_hash2, @@ -509,7 +478,7 @@ proc testSuite() = else: 
("application/json,application/octet-stream;q=0.9", ApplicationJsonMediaType) - (restAcceptType3, responseMediaType3) = + (restAcceptType3, _) = if responseKind == TestKind.Ssz: ("application/json;q=0.5,application/octet-stream;q=1.0", OctetStreamMediaType) @@ -523,7 +492,7 @@ proc testSuite() = restContentType = restContentType1, restAcceptType = restAcceptType1, extraHeaders = @[("eth-consensus-version", - toString(ConsensusFork.Deneb))]) + toString(ConsensusFork.Electra))]) response2 = await client.submitBlindedBlockPlain( blck2, @@ -532,7 +501,7 @@ proc testSuite() = extraHeaders = @[("eth-consensus-version", toString(ConsensusFork.Electra))]) response3 = - await client.submitBlindedBlockPlain( + await client.submitBlindedBlockV2Plain( blck3, restContentType = restContentType3, restAcceptType = restAcceptType3, @@ -541,7 +510,7 @@ proc testSuite() = check: response1.status == 200 response2.status == 200 - response3.status == 200 + response3.status == 202 let version1 = response1.headers.getString("eth-consensus-version") @@ -551,32 +520,25 @@ proc testSuite() = check: response1.contentType.isSome() response2.contentType.isSome() - response3.contentType.isSome() response1.contentType.get().mediaType == responseMediaType1 response2.contentType.get().mediaType == responseMediaType2 - response3.contentType.get().mediaType == responseMediaType3 - version1 == ConsensusFork.Deneb.toString() + version1 == ConsensusFork.Electra.toString() version2 == ConsensusFork.Electra.toString() version3 == ConsensusFork.Fulu.toString() let payload1res = - decodeBytesJsonOrSsz(SubmitBlindedBlockResponseDeneb, + decodeBytesJsonOrSsz(SubmitBlindedBlockResponseElectra, response1.data, response1.contentType, version1) payload2res = decodeBytesJsonOrSsz(SubmitBlindedBlockResponseElectra, response2.data, response2.contentType, version2) - payload3res = - decodeBytesJsonOrSsz(SubmitBlindedBlockResponseFulu, - response3.data, response3.contentType, version3) check: payload1res.isOk() payload2res.isOk() - payload3res.isOk() payload1res.get().data.execution_payload.parent_hash == parent_hash1 payload2res.get().data.execution_payload.parent_hash == parent_hash2 - payload3res.get().data.execution_payload.parent_hash == parent_hash3 asyncTest "/eth/v1/builder/status test": let response = await client.getStatus() diff --git a/tests/test_network_metadata.nim b/tests/test_network_metadata.nim index ef9cb1b07b..aa22674acb 100644 --- a/tests/test_network_metadata.nim +++ b/tests/test_network_metadata.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -17,7 +17,6 @@ import template checkRoot(name, root) = let metadata = getMetadataForNetwork(name) - cfg = metadata.cfg state = newClone(readSszForkedHashedBeaconState( metadata.cfg, metadata.genesis.bakedBytes)) @@ -33,4 +32,4 @@ suite "Network metadata": test "sepolia": checkRoot( "sepolia", - "fb9afe32150fa39f4b346be2519a67e2a4f5efcd50a1dc192c3f6b3d013d2798") \ No newline at end of file + "fb9afe32150fa39f4b346be2519a67e2a4f5efcd50a1dc192c3f6b3d013d2798") diff --git a/tests/test_peerdas_helpers.nim b/tests/test_peerdas_helpers.nim index 1f7e1b97b4..3f03cfed5c 100644 --- a/tests/test_peerdas_helpers.nim +++ b/tests/test_peerdas_helpers.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import @@ -84,7 +84,6 @@ suite "EIP-7594 Unit Tests": var partial_matrix: seq[MatrixEntry] for blob_entries in chunks(extended_matrix.get, kzg_abi.CELLS_PER_EXT_BLOB): var blb_entry = blob_entries - rng.shuffle(blb_entry) partial_matrix.add(blb_entry[0..N_SAMPLES-1]) # Given the partial matrix, recover the missing entries @@ -94,37 +93,4 @@ suite "EIP-7594 Unit Tests": doAssert recovered_matrix.get == extended_matrix.get, "Both matrices don't match!" testRecoverMatrix() -suite "PeerDAS Sampling Tests": - test "PeerDAS: Extended Sample Count": - proc testExtendedSampleCount() = - let samplesPerSlot = 16 - const tests = [ - (0, 16), - (1, 20), - (2, 24), - (3, 27), - (4, 29), - (5, 32), - (6, 35), - (7, 37), - (8, 40), - (9, 42), - (10, 44), - (11, 47), - (12, 49), - (13, 51), - (14, 53), - (15, 55), - (16, 57), - (17, 59), - (18, 61), - (19, 63), - (20, 65) - ] - - for (allowed_failures, extendedSampleCount) in tests: - check: get_extended_sample_count( - samplesPerSlot, allowed_failures) == extendedSampleCount - testExtendedSampleCount() - -doAssert freeTrustedSetup().isOk \ No newline at end of file +doAssert freeTrustedSetup().isOk diff --git a/tests/test_quarantine.nim b/tests/test_quarantine.nim new file mode 100644 index 0000000000..a7571bf6d3 --- /dev/null +++ b/tests/test_quarantine.nim @@ -0,0 +1,2240 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
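+#
+# Unit tests for the `BlobQuarantine` and `ColumnQuarantine` data structures:
+# insertion, lookup and removal of sidecars, overfill protection, duplicate
+# handling, pruning after finalization, and database unload/load behaviour.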
+ +{.push raises: [].} +{.used.} + +import std/sequtils, stew/endians2, + kzg4844/kzg, + unittest2, + ./testutil, + ../beacon_chain/[beacon_chain_db, beacon_chain_db_quarantine], + ../beacon_chain/spec/datatypes/[deneb, electra, fulu], + ../beacon_chain/spec/[presets, helpers], + ../beacon_chain/consensus_object_pools/blob_quarantine + +func genBlockRoot(index: int): Eth2Digest = + var res: Eth2Digest + let tmp = uint64(index).toBytesLE() + copyMem(addr res.data[0], unsafeAddr tmp[0], sizeof(uint64)) + res + +func genKzgCommitment(index: int): KzgCommitment = + var res: KzgCommitment + let tmp = uint64(index).toBytesLE() + copyMem(addr res.bytes[0], unsafeAddr tmp[0], sizeof(uint64)) + res + +func genBlobSidecar( + index: int, + slot: int, + kzg_commitment: int, + proposer_index: int +): BlobSidecar = + BlobSidecar( + index: BlobIndex(index), + kzg_commitment: genKzgCommitment(kzg_commitment), + signed_block_header: SignedBeaconBlockHeader( + message: BeaconBlockHeader( + slot: Slot(slot), + proposer_index: uint64(proposer_index)))) + +func genDataColumnSidecar( + index: int, + slot: int, + proposer_index: int +): fulu.DataColumnSidecar = + fulu.DataColumnSidecar( + index: ColumnIndex(index), + signed_block_header: SignedBeaconBlockHeader( + message: BeaconBlockHeader( + slot: Slot(slot), + proposer_index: uint64(proposer_index)))) + +func genDenebSignedBeaconBlock( + blockRoot: Eth2Digest, + sidecars: openArray[ref BlobSidecar] +): deneb.SignedBeaconBlock = + var res: seq[KzgCommitment] + for sidecar in sidecars: + res.add(sidecar[].kzg_commitment) + deneb.SignedBeaconBlock( + message: deneb.BeaconBlock( + body: deneb.BeaconBlockBody(blob_kzg_commitments: KzgCommitments(res))), + root: blockRoot) + +func genElectraSignedBeaconBlock( + blockRoot: Eth2Digest, + sidecars: openArray[ref BlobSidecar] +): electra.SignedBeaconBlock = + var res: seq[KzgCommitment] + for sidecar in sidecars: + res.add(sidecar[].kzg_commitment) + electra.SignedBeaconBlock( + message: electra.BeaconBlock( + body: electra.BeaconBlockBody(blob_kzg_commitments: KzgCommitments(res))), + root: blockRoot) + +func genFuluSignedBeaconBlock( + blockRoot: Eth2Digest, + commitments: openArray[KzgCommitment] +): fulu.SignedBeaconBlock = + var res = @commitments + fulu.SignedBeaconBlock( + message: fulu.BeaconBlock( + body: fulu.BeaconBlockBody(blob_kzg_commitments: KzgCommitments(res))), + root: blockRoot) + +func compareSidecars( + a, b: openArray[ref BlobSidecar|ref fulu.DataColumnSidecar] +): bool = + if len(a) != len(b): + return false + if len(a) == 0: + return true + for i in 0 ..< len(a): + if cast[uint64](a[i]) != cast[uint64](b[i]): + return false + true + +func compareSidecarsByValue( + a, b: openArray[ref BlobSidecar|ref fulu.DataColumnSidecar] +): bool = + if len(a) != len(b): + return false + if len(a) == 0: + return true + for i in 0 ..< len(a): + if a[i][] != b[i][]: + return false + true + +func compareSidecars( + blockRoot: Eth2Digest, + a: openArray[ref BlobSidecar], + b: openArray[BlobIdentifier] +): bool = + if len(a) != len(b): + return false + if len(a) == 0: + return true + for i in 0 ..< len(a): + if (a[i][].index != b[i].index) or (b[i].block_root != blockRoot): + return false + true + +func compareSidecars( + blockRoot: Eth2Digest, + a: openArray[ref fulu.DataColumnSidecar], + b: DataColumnsByRootIdentifier +): bool = + if len(a) != len(b.indices): + return false + if len(a) == 0: + return true + if b.block_root != blockRoot: + return false + for i in 0 ..< len(a): + if (a[i][].index != b.indices[i]): 
+ return false + true + +func compareIdentifiers( + a, b: DataColumnsByRootIdentifier): bool = + if len(a.indices) != len(b.indices): + return false + if a.block_root != b.block_root: + return false + if len(a.indices) == 0: + return true + for i in 0 ..< len(a.indices): + if (a.indices[i] != b.indices[i]): + return false + true + +func supernodeColumns(): seq[ColumnIndex] = + var res: seq[ColumnIndex] + for i in 0 ..< 128: + res.add(ColumnIndex(i)) + res + +suite "BlobQuarantine data structure test suite " & preset(): + setup: + let + cfg {.used.} = defaultRuntimeConfig + db {.used.} = BeaconChainDB.new("", cfg, inMemory = true) + quarantine {.used.} = db.getQuarantineDB() + + teardown: + db.close() + + test "put()/hasSidecar(index, slot, proposer_index)/remove() test": + var bq = BlobQuarantine.init(cfg, quarantine, 0, nil) + let + broot1 = genBlockRoot(1) + broot2 = genBlockRoot(2) + broot3 = genBlockRoot(3) + broot4 = genBlockRoot(4) + broot5 = genBlockRoot(5) + sidecar1 = + newClone(genBlobSidecar(index = 0, slot = 1, 1, proposer_index = 5)) + sidecar2 = + newClone(genBlobSidecar(index = 1, slot = 1, 2, proposer_index = 5)) + sidecar3 = + newClone(genBlobSidecar(index = 2, slot = 1, 3, proposer_index = 5)) + sidecar4 = + newClone(genBlobSidecar(index = 4, slot = 2, 4, proposer_index = 6)) + sidecar5 = + newClone(genBlobSidecar(index = 5, slot = 3, 5, proposer_index = 7)) + sidecar6 = + newClone(genBlobSidecar(index = 6, slot = 3, 6, proposer_index = 8)) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(0)) == false + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(1)) == false + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(2)) == false + bq.hasSidecar(broot2, Slot(2), uint64(5), BlobIndex(4)) == false + bq.hasSidecar(broot3, Slot(3), uint64(5), BlobIndex(5)) == false + bq.hasSidecar(broot4, Slot(3), uint64(5), BlobIndex(6)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), BlobIndex(3)) == false + + bq.put(broot1, sidecar1) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(1)) == false + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(2)) == false + bq.hasSidecar(broot2, Slot(2), uint64(6), BlobIndex(4)) == false + bq.hasSidecar(broot3, Slot(3), uint64(7), BlobIndex(5)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), BlobIndex(6)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), BlobIndex(3)) == false + + bq.put(broot1, sidecar2) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(1)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(2)) == false + bq.hasSidecar(broot2, Slot(2), uint64(6), BlobIndex(4)) == false + bq.hasSidecar(broot3, Slot(3), uint64(7), BlobIndex(5)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), BlobIndex(6)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), BlobIndex(3)) == false + + bq.put(broot1, sidecar3) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(1)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(2)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), BlobIndex(4)) == false + bq.hasSidecar(broot3, Slot(3), uint64(7), BlobIndex(5)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), BlobIndex(6)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), BlobIndex(3)) == false + + bq.put(broot2, sidecar4) + + check: + 
bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(1)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(2)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), BlobIndex(4)) == true + bq.hasSidecar(broot3, Slot(3), uint64(7), BlobIndex(5)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), BlobIndex(6)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), BlobIndex(3)) == false + + bq.put(broot3, sidecar5) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(1)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(2)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), BlobIndex(4)) == true + bq.hasSidecar(broot3, Slot(3), uint64(7), BlobIndex(5)) == true + bq.hasSidecar(broot4, Slot(3), uint64(8), BlobIndex(6)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), BlobIndex(3)) == false + + bq.put(broot4, sidecar6) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(1)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(2)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), BlobIndex(4)) == true + bq.hasSidecar(broot3, Slot(3), uint64(7), BlobIndex(5)) == true + bq.hasSidecar(broot4, Slot(3), uint64(8), BlobIndex(6)) == true + bq.hasSidecar(broot5, Slot(10), uint64(100), BlobIndex(3)) == false + + bq.remove(broot4) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(1)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(2)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), BlobIndex(4)) == true + bq.hasSidecar(broot3, Slot(3), uint64(7), BlobIndex(5)) == true + bq.hasSidecar(broot4, Slot(3), uint64(8), BlobIndex(6)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), BlobIndex(3)) == false + + bq.remove(broot3) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(1)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(2)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), BlobIndex(4)) == true + bq.hasSidecar(broot3, Slot(3), uint64(7), BlobIndex(5)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), BlobIndex(6)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), BlobIndex(3)) == false + + bq.remove(broot2) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(1)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(2)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), BlobIndex(4)) == false + bq.hasSidecar(broot3, Slot(3), uint64(7), BlobIndex(5)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), BlobIndex(6)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), BlobIndex(3)) == false + + bq.remove(broot1) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(0)) == false + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(1)) == false + bq.hasSidecar(broot1, Slot(1), uint64(5), BlobIndex(2)) == false + bq.hasSidecar(broot2, Slot(2), uint64(6), BlobIndex(4)) == false + bq.hasSidecar(broot3, Slot(3), uint64(7), BlobIndex(5)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), BlobIndex(6)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), BlobIndex(3)) == false + len(bq) == 0 + + test 
"put(sidecar)/put([sidecars])/hasSidecars/popSidecars/remove() test": + var bq = BlobQuarantine.init(cfg, quarantine, 0, nil) + let + broot1 = genBlockRoot(1) + broot2 = genBlockRoot(2) + sidecars1 = + block: + var res: seq[ref BlobSidecar] + for i in 0 ..< cfg.MAX_BLOBS_PER_BLOCK_ELECTRA: + res.add(newClone(genBlobSidecar(index = int(i), slot = 1, + 1 + int(i), proposer_index = 5))) + res + sidecars2 = + block: + var res: seq[ref BlobSidecar] + for i in 0 ..< cfg.MAX_BLOBS_PER_BLOCK_ELECTRA: + res.add(newClone(genBlobSidecar(index = int(i), slot = 1, + 1 + int(i), proposer_index = 50))) + res + denebBlock = genDenebSignedBeaconBlock(broot1, sidecars1) + electraBlock = genElectraSignedBeaconBlock(broot2, sidecars2) + + check: + bq.hasSidecars(denebBlock) == false + bq.popSidecars(denebBlock).isNone() == true + bq.hasSidecars(electraBlock) == false + bq.popSidecars(electraBlock).isNone() == true + + bq.put(broot1, sidecars1) + + check: + len(bq) == len(sidecars1) + + var counter = 0 + for index in 0 ..< len(sidecars2): + if index mod 2 != 1: + bq.put(broot2, sidecars2[index]) + inc(counter) + check len(bq) == len(sidecars1) + counter + + check: + bq.hasSidecars(denebBlock) == true + bq.hasSidecars(electraBlock) == false + bq.popSidecars(electraBlock).isNone() == true + let dres = bq.popSidecars(denebBlock) + check: + dres.isOk() + compareSidecars(dres.get(), sidecars1) == true + len(bq) == counter + + bq.put(broot2, sidecars2[1]) + check: + bq.hasSidecars(electraBlock) == false + bq.popSidecars(electraBlock).isNone() == true + len(bq) == counter + 1 + + bq.put(broot2, sidecars2[3]) + check: + bq.hasSidecars(electraBlock) == false + bq.popSidecars(electraBlock).isNone() == true + len(bq) == counter + 2 + + bq.put(broot2, sidecars2[5]) + check: + bq.hasSidecars(electraBlock) == false + bq.popSidecars(electraBlock).isNone() == true + len(bq) == counter + 3 + + bq.put(broot2, sidecars2[7]) + check: + len(bq) == len(sidecars2) + bq.hasSidecars(electraBlock) == true + let eres = bq.popSidecars(electraBlock) + check: + eres.isOk() + compareSidecars(eres.get(), sidecars2) == true + len(bq) == 0 + + test "put()/fetchMissingSidecars/remove test": + var bq = BlobQuarantine.init(cfg, quarantine, 0, nil) + let + broot1 = genBlockRoot(1) + broot2 = genBlockRoot(2) + sidecars1 = + block: + var res: seq[ref BlobSidecar] + for i in 0 ..< cfg.MAX_BLOBS_PER_BLOCK_ELECTRA: + res.add(newClone(genBlobSidecar(index = int(i), slot = 1, + 1 + int(i), proposer_index = 5))) + res + sidecars2 = + block: + var res: seq[ref BlobSidecar] + for i in 0 ..< cfg.MAX_BLOBS_PER_BLOCK_ELECTRA: + res.add(newClone(genBlobSidecar(index = int(i), slot = 1, + 1 + int(i), proposer_index = 50))) + res + denebBlock = genDenebSignedBeaconBlock(broot1, sidecars1) + electraBlock = genElectraSignedBeaconBlock(broot2, sidecars2) + + for i in 0 ..< len(sidecars1) + 1: + let + missing1 = bq.fetchMissingSidecars(broot1, denebBlock) + missing2 = bq.fetchMissingSidecars(broot2, electraBlock) + + check: + compareSidecars( + broot1, + sidecars1.toOpenArray(i, len(sidecars1) - 1), missing1) == true + compareSidecars( + broot2, + sidecars2.toOpenArray(i, len(sidecars2) - 1), missing2) == true + + if i >= len(sidecars1): + break + + bq.put(broot1, sidecars1[i]) + bq.put(broot2, sidecars2[i]) + + bq.remove(broot1) + bq.remove(broot2) + check len(bq) == 0 + + test "popSidecars()/hasSidecars() return []/true on block without blobs": + var bq = BlobQuarantine.init(cfg, quarantine, 0, nil) + let + blockRoot1 = genBlockRoot(100) + blockRoot2 = 
genBlockRoot(5337) + blockRoot3 = genBlockRoot(191925) + blockRoot4 = genBlockRoot(1294967295) + denebBlock1 = genDenebSignedBeaconBlock(blockRoot1, []) + denebBlock2 = genDenebSignedBeaconBlock(blockRoot2, []) + electraBlock1 = genElectraSignedBeaconBlock(blockRoot3, []) + electraBlock2 = genElectraSignedBeaconBlock(blockRoot4, []) + check: + bq.hasSidecars(denebBlock1.root, denebBlock1) == true + bq.hasSidecars(denebBlock2.root, denebBlock2) == true + bq.hasSidecars(electraBlock1.root, electraBlock1) == true + bq.hasSidecars(electraBlock2.root, electraBlock2) == true + + let + res1 = bq.popSidecars(denebBlock1.root, denebBlock1) + res2 = bq.popSidecars(denebBlock2.root, denebBlock2) + res3 = bq.popSidecars(electraBlock1.root, electraBlock1) + res4 = bq.popSidecars(electraBlock2.root, electraBlock2) + + check: + res1.isOk() + len(res1.get()) == 0 + res2.isOk() + len(res2.get()) == 0 + res3.isOk() + len(res3.get()) == 0 + res4.isOk() + len(res4.get()) == 0 + + test "overfill protection test": + var + bq = BlobQuarantine.init(cfg, quarantine, 0, nil) + sidecars: seq[tuple[sidecar: ref BlobSidecar, blockRoot: Eth2Digest]] + + let maxSidecars = int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA * SLOTS_PER_EPOCH) * 3 + for i in 0 ..< maxSidecars: + let + index = i mod int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + slot = i div int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + 100 + blockRoot = genBlockRoot(slot) + sidecar = newClone(genBlobSidecar(index, slot, i, proposer_index = i)) + sidecars.add((sidecar, blockRoot)) + + for item in sidecars: + bq.put(item.blockRoot, item.sidecar) + + # put(sidecar) test + + check len(bq) == maxSidecars + + for i in 0 ..< int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars[i].sidecar[].index + ) == true + + let + sidecar = newClone(genBlobSidecar(index = 0, slot = 10000, 100000, + proposer_index = 1000000)) + blockRoot = genBlockRoot(10000) + check: + bq.hasSidecar(blockRoot = blockRoot, slot = Slot(10000), + proposer_index = 1000000'u64, index = BlobIndex(0)) == false + bq.put(blockRoot, sidecar) + check: + len(bq) == (len(sidecars) - int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + 1) + bq.hasSidecar(blockRoot = blockRoot, slot = Slot(10000), + proposer_index = 1000000'u64, index = BlobIndex(0)) == true + + for i in 0 ..< int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars[i].sidecar[].index + ) == false + + # put(openArray[sidecar]) test + + let + msidecars = + block: + var res: seq[ref BlobSidecar] + for i in 0 ..< int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + let sidecar = + newClone(genBlobSidecar(index = i, slot = 100_000, 200000, + proposer_index = 2000000)) + res.add(sidecar) + res + mblockRoot = genBlockRoot(20000) + + check: + len(bq) == (len(sidecars) - int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + 1) + + let beforeLength = len(bq) + + for s in msidecars: + check: + bq.hasSidecar(mblockRoot, + s.signed_block_header.message.slot, + s.signed_block_header.message.proposer_index, + s.index) == false + + bq.put(mblockRoot, msidecars) + check len(bq) == 
beforeLength + + for s in msidecars: + check: + bq.hasSidecar(mblockRoot, + s.signed_block_header.message.slot, + s.signed_block_header.message.proposer_index, + s.index) == true + + for i in 0 ..< int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + let j = int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + i + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars[j].sidecar[].signed_block_header.message.slot)), + slot = + sidecars[j].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars[j].sidecar[].signed_block_header.message.proposer_index, + index = sidecars[j].sidecar[].index + ) == false + + test "put() duplicate items should not affect counters": + var + bq = BlobQuarantine.init(cfg, quarantine, 0, nil) + sidecars1: seq[ref BlobSidecar] + sidecars1d: seq[ref BlobSidecar] + sidecars2: seq[ref BlobSidecar] + sidecars2d: seq[ref BlobSidecar] + + for index in 0 ..< cfg.MAX_BLOBS_PER_BLOCK_ELECTRA: + let + sidecar1 = newClone(genBlobSidecar(int(index), 1, int(index), 64)) + sidecar1d = newClone(genBlobSidecar(int(index), 1, int(index), 64)) + sidecar2 = newClone(genBlobSidecar(int(index), 2, 50 + int(index), 65)) + sidecar2d = newClone(genBlobSidecar(int(index), 2, 50 + int(index), 65)) + sidecars1.add(sidecar1) + sidecars1d.add(sidecar1d) + sidecars2.add(sidecar2) + sidecars2d.add(sidecar2d) + + let + broot1 = genBlockRoot(100) + broot2 = genBlockRoot(200) + + electraBlock1 = genElectraSignedBeaconBlock(broot1, sidecars1) + electraBlock2 = genElectraSignedBeaconBlock(broot2, sidecars2) + + check: + len(bq) == 0 + len(bq.fetchMissingSidecars(broot1, electraBlock1)) == + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + len(bq.fetchMissingSidecars(broot2, electraBlock2)) == + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + + for index in 0 ..< int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + bq.put(broot1, sidecars1[index]) + check: + len(bq) == (index + 1) + len(bq.fetchMissingSidecars(broot1, electraBlock1)) == + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) - (index + 1) + bq.put(broot1, sidecars1d[index]) + check: + len(bq) == (index + 1) + len(bq.fetchMissingSidecars(broot1, electraBlock1)) == + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) - (index + 1) + + for index in 0 ..< int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + bq.put(broot2, sidecars2[index]) + check: + len(bq) == int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + (index + 1) + len(bq.fetchMissingSidecars(broot2, electraBlock2)) == + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) - (index + 1) + bq.put(broot2, sidecars2d[index]) + check: + len(bq) == int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + (index + 1) + len(bq.fetchMissingSidecars(broot2, electraBlock2)) == + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) - (index + 1) + + bq.remove(broot2) + check len(bq) == int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + bq.remove(broot1) + check len(bq) == 0 + + test "pruneAfterFinalization() test": + const TestVectors = [ + (root: 1, slot: 1, kzg: 1, index: 0, proposer_index: 20), + (root: 1, slot: 1, kzg: 2, index: 1, proposer_index: 20), + (root: 1, slot: 1, kzg: 3, index: 2, proposer_index: 20), + (root: 1, slot: 1, kzg: 4, index: 3, proposer_index: 20), + (root: 1, slot: 1, kzg: 5, index: 4, proposer_index: 20), + (root: 2, slot: 32, kzg: 6, index: 0, proposer_index: 21), + (root: 2, slot: 32, kzg: 7, index: 1, proposer_index: 21), + (root: 2, slot: 32, kzg: 8, index: 2, proposer_index: 21), + (root: 3, slot: 33, kzg: 9, index: 3, proposer_index: 22), + (root: 3, slot: 33, kzg: 10, index: 4, proposer_index: 22), + (root: 4, slot: 63, kzg: 11, index: 5, proposer_index: 23), + (root: 5, slot: 64, kzg: 12, index: 0, 
proposer_index: 24), + (root: 5, slot: 64, kzg: 13, index: 1, proposer_index: 24), + (root: 5, slot: 64, kzg: 14, index: 2, proposer_index: 24), + (root: 6, slot: 65, kzg: 15, index: 0, proposer_index: 25), + (root: 6, slot: 65, kzg: 16, index: 1, proposer_index: 25), + (root: 7, slot: 67, kzg: 17, index: 0, proposer_index: 26), + (root: 7, slot: 67, kzg: 18, index: 1, proposer_index: 26), + (root: 8, slot: 95, kzg: 19, index: 0, proposer_index: 27), + (root: 8, slot: 95, kzg: 20, index: 1, proposer_index: 27), + (root: 8, slot: 95, kzg: 21, index: 2, proposer_index: 27), + (root: 8, slot: 95, kzg: 22, index: 3, proposer_index: 27), + (root: 8, slot: 95, kzg: 23, index: 4, proposer_index: 27), + (root: 9, slot: 96, kzg: 24, index: 0, proposer_index: 28), + (root: 9, slot: 96, kzg: 25, index: 1, proposer_index: 28), + (root: 9, slot: 96, kzg: 26, index: 2, proposer_index: 28), + (root: 9, slot: 96, kzg: 27, index: 3, proposer_index: 28), + (root: 9, slot: 96, kzg: 28, index: 4, proposer_index: 28), + (root: 9, slot: 96, kzg: 29, index: 5, proposer_index: 28), + (root: 9, slot: 96, kzg: 30, index: 6, proposer_index: 28), + (root: 9, slot: 96, kzg: 31, index: 7, proposer_index: 28), + (root: 9, slot: 96, kzg: 32, index: 8, proposer_index: 28), + (root: 10, slot: 127, kzg: 33, index: 0, proposer_index: 29), + (root: 10, slot: 127, kzg: 34, index: 1, proposer_index: 29), + (root: 10, slot: 127, kzg: 35, index: 2, proposer_index: 29) + ] + + var bq = BlobQuarantine.init(cfg, quarantine, 0, nil) + for item in TestVectors: + let sidecar = + newClone( + genBlobSidecar(index = item.index, slot = item.slot, item.kzg, + proposer_index = item.proposer_index)) + bq.put(genBlockRoot(item.root), sidecar) + + check: + len(bq) == len(TestVectors) + + for item in TestVectors: + check: + bq.hasSidecar( + genBlockRoot(item.root), Slot(item.slot), + uint64(item.proposer_index), BlobIndex(item.index)) == true + + bq.pruneAfterFinalization(Epoch(0), false) + check: + len(bq) == len(TestVectors) - 5 + + for item in TestVectors: + let res = + if item.root == 1: + false + else: + true + check: + bq.hasSidecar( + genBlockRoot(item.root), Slot(item.slot), + uint64(item.proposer_index), BlobIndex(item.index)) == res + + bq.pruneAfterFinalization(Epoch(1), false) + check: + len(bq) == len(TestVectors) - 5 - 6 + + for item in TestVectors: + let res = + if item.root in [1, 2, 3, 4]: + false + else: + true + check: + bq.hasSidecar( + genBlockRoot(item.root), Slot(item.slot), + uint64(item.proposer_index), BlobIndex(item.index)) == res + + bq.pruneAfterFinalization(Epoch(2), false) + check: + len(bq) == len(TestVectors) - 5 - 6 - 12 + + for item in TestVectors: + let res = + if item.root in [1, 2, 3, 4, 5, 6, 7, 8]: + false + else: + true + check: + bq.hasSidecar( + genBlockRoot(item.root), Slot(item.slot), + uint64(item.proposer_index), BlobIndex(item.index)) == res + + bq.pruneAfterFinalization(Epoch(3), false) + check: + len(bq) == 0 + + for item in TestVectors: + check: + bq.hasSidecar( + genBlockRoot(item.root), Slot(item.slot), + uint64(item.proposer_index), BlobIndex(item.index)) == false + + test "database unload/load test": + var + bq = BlobQuarantine.init(cfg, quarantine, 2, nil) + sidecars: seq[tuple[sidecar: ref BlobSidecar, blockRoot: Eth2Digest]] + + let maxSidecars = int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA * SLOTS_PER_EPOCH) * 3 + for i in 0 ..< maxSidecars: + let + index = i mod int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + slot = i div int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + 100 + blockRoot = genBlockRoot(slot) + 
sidecar = newClone(genBlobSidecar(index, slot, i, proposer_index = i)) + sidecars.add((sidecar, blockRoot)) + + for item in sidecars: + bq.put(item.blockRoot, item.sidecar) + + # put(sidecar) test + + check: + len(bq) == maxSidecars + lenMemory(bq) == maxSidecars + lenDisk(bq) == 0 + quarantine.sidecarsCount(typedesc[BlobSidecar]) == 0 + + for i in 0 ..< int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars[i].sidecar[].index + ) == true + + let + sidecar = newClone(genBlobSidecar(index = 0, slot = 10000, 100000, + proposer_index = 1000000)) + blockRoot1 = genBlockRoot(10000) + check: + bq.hasSidecar(blockRoot = blockRoot1, slot = Slot(10000), + proposer_index = 1000000'u64, index = BlobIndex(0)) == false + + bq.put(blockRoot1, sidecar) + + check: + len(bq) == len(sidecars) + 1 + lenDisk(bq) == int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + quarantine.sidecarsCount(typedesc[BlobSidecar]) == + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + lenMemory(bq) == len(sidecars) - int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + 1 + bq.hasSidecar(blockRoot = blockRoot1, slot = Slot(10000), + proposer_index = 1000000'u64, index = BlobIndex(0)) == true + + for i in 0 ..< int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars[i].sidecar[].index + ) == true + + let + blockRoot2 = + genBlockRoot( + int(sidecars[0].sidecar[].signed_block_header.message.slot)) + sidecars2 = + sidecars.toOpenArray(0, int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) - 1). 
+ mapIt(it.sidecar) + blck = genElectraSignedBeaconBlock(blockRoot2, sidecars2) + dres = bq.popSidecars(blockRoot2, blck) + + check: + dres.isOk() + compareSidecarsByValue(dres.get(), sidecars2) == true + len(bq) == len(sidecars) - int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + 1 + lenDisk(bq) == 0 + quarantine.sidecarsCount(typedesc[BlobSidecar]) == 0 + + # put(openArray[sidecar]) test + + let + msidecars = + block: + var res: seq[ref BlobSidecar] + for i in 0 ..< int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + let sidecar = + newClone(genBlobSidecar(index = i, slot = 100_000, 200000, + proposer_index = 2000000)) + res.add(sidecar) + res + mblockRoot = genBlockRoot(20000) + + check: + len(bq) == (len(sidecars) - int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + 1) + + for s in msidecars: + check: + bq.hasSidecar(mblockRoot, + s.signed_block_header.message.slot, + s.signed_block_header.message.proposer_index, + s.index) == false + + bq.put(mblockRoot, msidecars) + + check: + lenDisk(bq) == int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + quarantine.sidecarsCount(typedesc[BlobSidecar]) == + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + len(bq) == len(sidecars) + 1 + + for s in msidecars: + check: + bq.hasSidecar(mblockRoot, + s.signed_block_header.message.slot, + s.signed_block_header.message.proposer_index, + s.index) == true + + for i in 0 ..< int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + let j = int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + i + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars[j].sidecar[].signed_block_header.message.slot)), + slot = + sidecars[j].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars[j].sidecar[].signed_block_header.message.proposer_index, + index = sidecars[j].sidecar[].index + ) == true + + let + i3 = int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + blockRoot3 = + genBlockRoot( + int(sidecars[i3].sidecar[].signed_block_header.message.slot)) + sidecars3 = + sidecars.toOpenArray(i3, i3 + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) - 1). 
+ mapIt(it.sidecar) + blck2 = genElectraSignedBeaconBlock(blockRoot3, sidecars3) + dres2 = bq.popSidecars(blockRoot3, blck2) + + check: + dres2.isOk() + compareSidecarsByValue(dres2.get(), sidecars3) == true + len(bq) == len(sidecars) - int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + 1 + lenDisk(bq) == 0 + quarantine.sidecarsCount(typedesc[BlobSidecar]) == 0 + + test "database and memory overfill protection and pruning test": + var + bq = BlobQuarantine.init(cfg, quarantine, 1, nil) + sidecars1: seq[tuple[sidecar: ref BlobSidecar, blockRoot: Eth2Digest]] + sidecars2: seq[tuple[sidecar: ref BlobSidecar, blockRoot: Eth2Digest]] + epochs1: seq[Epoch] + epochs2: seq[Epoch] + + let maxSidecars = int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA * SLOTS_PER_EPOCH) * 3 + for i in 0 ..< maxSidecars: + let + index = i mod int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + slot1 = i div int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + 100 + slot2 = i div int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + 1000 + epoch1 = Slot(slot1).epoch() + epoch2 = Slot(slot2).epoch() + blockRoot1 = genBlockRoot(slot1) + blockRoot2 = genBlockRoot(slot2) + sidecar1 = newClone(genBlobSidecar(index, slot1, i, proposer_index = i)) + sidecar2 = newClone(genBlobSidecar(index, slot2, i + maxSidecars, + proposer_index = 100 + i)) + sidecars1.add((sidecar1, blockRoot1)) + sidecars2.add((sidecar2, blockRoot2)) + if len(epochs1) == 0 or epochs1[^1] != epoch1: + epochs1.add(epoch1) + if len(epochs2) == 0 or epochs2[^1] != epoch2: + epochs2.add(epoch2) + + for item in sidecars1: + bq.put(item.blockRoot, item.sidecar) + + check: + len(bq) == len(sidecars1) + lenDisk(bq) == 0 + quarantine.sidecarsCount(typedesc[BlobSidecar]) == 0 + + for i in 0 ..< SLOTS_PER_EPOCH * 3: + let + start = int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) * int(i) + finish = start + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) - 1 + blockRoot = sidecars2[start].blockRoot + sidecars = sidecars2.toOpenArray(start, finish).mapIt(it.sidecar) + bq.put(blockRoot, sidecars) + + check: + len(bq) == len(sidecars1) + len(sidecars2) + lenDisk(bq) == len(sidecars1) + quarantine.sidecarsCount(typedesc[BlobSidecar]) == len(sidecars1) + lenMemory(bq) == len(sidecars2) + + for i in 0 ..< len(sidecars1): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars1[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars1[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars1[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars1[i].sidecar[].index + ) == true + + for i in 0 ..< len(sidecars2): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars2[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars2[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars2[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars2[i].sidecar[].index + ) == true + + let + sidecar = newClone(genBlobSidecar(index = 0, slot = 100000, 100000, + proposer_index = 1000000)) + blockRoot = genBlockRoot(100000) + + check: + bq.hasSidecar(blockRoot = blockRoot, slot = Slot(100000), + proposer_index = 1000000'u64, + index = BlobIndex(0)) == false + + bq.put(blockRoot, sidecar) + + check: + len(bq) == len(sidecars1) + len(sidecars2) - + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + 1 + lenDisk(bq) == len(sidecars1) + quarantine.sidecarsCount(typedesc[BlobSidecar]) == len(sidecars1) + lenMemory(bq) == len(sidecars2) - + int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) + 1 + bq.hasSidecar(blockRoot = blockRoot, slot = Slot(100000), + proposer_index = 
1000000'u64, index = BlobIndex(0)) == true + + for i in 0 ..< int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars1[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars1[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars1[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars1[i].sidecar[].index + ) == false + + for i in int(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA) ..< len(sidecars1): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars1[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars1[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars1[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars1[i].sidecar[].index + ) == true + + for i in 0 ..< len(sidecars2): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars2[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars2[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars2[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars2[i].sidecar[].index + ) == true + + # Pruning memory and database + for epoch in epochs1: + bq.pruneAfterFinalization(epoch, false) + for epoch in epochs2: + bq.pruneAfterFinalization(epoch, false) + + check: + len(bq) == 1 + + bq.pruneAfterFinalization(Slot(100000).epoch(), false) + + check: + len(bq) == 0 + +suite "ColumnQuarantine data structure test suite " & preset(): + setup: + let + cfg {.used.} = defaultRuntimeConfig + db {.used.} = BeaconChainDB.new("", cfg, inMemory = true) + quarantine {.used.} = db.getQuarantineDB() + + teardown: + db.close() + + test "put()/hasSidecar(index, slot, proposer_index)/remove() test": + let custodyColumns = + [0, 31, 32, 63, 64, 95, 96, 127].mapIt(ColumnIndex(it)) + var bq = ColumnQuarantine.init(cfg, custodyColumns, quarantine, 0, nil) + let + broot1 = genBlockRoot(1) + broot2 = genBlockRoot(2) + broot3 = genBlockRoot(3) + broot4 = genBlockRoot(4) + broot5 = genBlockRoot(5) + sidecar1 = + newClone(genDataColumnSidecar( + index = 0, slot = 1, proposer_index = 5)) + sidecar2 = + newClone(genDataColumnSidecar( + index = 31, slot = 1, proposer_index = 5)) + sidecar3 = + newClone(genDataColumnSidecar( + index = 32, slot = 1, proposer_index = 5)) + sidecar4 = + newClone(genDataColumnSidecar( + index = 127, slot = 2, proposer_index = 6)) + sidecar5 = + newClone(genDataColumnSidecar( + index = 0, slot = 3, proposer_index = 7)) + sidecar6 = + newClone(genDataColumnSidecar( + index = 31, slot = 3, proposer_index = 8)) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(0)) == false + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(31)) == false + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(32)) == false + bq.hasSidecar(broot2, Slot(2), uint64(5), ColumnIndex(127)) == false + bq.hasSidecar(broot3, Slot(3), uint64(5), ColumnIndex(0)) == false + bq.hasSidecar(broot4, Slot(3), uint64(5), ColumnIndex(31)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), ColumnIndex(3)) == false + + bq.put(broot1, sidecar1) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(31)) == false + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(32)) == false + bq.hasSidecar(broot2, Slot(2), uint64(6), ColumnIndex(127)) == false + bq.hasSidecar(broot3, Slot(3), uint64(7), ColumnIndex(0)) == false + bq.hasSidecar(broot4, Slot(3), 
uint64(8), ColumnIndex(31)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), ColumnIndex(3)) == false + + bq.put(broot1, sidecar2) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(31)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(32)) == false + bq.hasSidecar(broot2, Slot(2), uint64(6), ColumnIndex(127)) == false + bq.hasSidecar(broot3, Slot(3), uint64(7), ColumnIndex(0)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), ColumnIndex(31)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), ColumnIndex(3)) == false + + bq.put(broot1, sidecar3) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(31)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(32)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), ColumnIndex(127)) == false + bq.hasSidecar(broot3, Slot(3), uint64(7), ColumnIndex(0)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), ColumnIndex(31)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), ColumnIndex(3)) == false + + bq.put(broot2, sidecar4) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(31)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(32)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), ColumnIndex(127)) == true + bq.hasSidecar(broot3, Slot(3), uint64(7), ColumnIndex(0)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), ColumnIndex(31)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), ColumnIndex(3)) == false + + bq.put(broot3, sidecar5) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(31)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(32)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), ColumnIndex(127)) == true + bq.hasSidecar(broot3, Slot(3), uint64(7), ColumnIndex(0)) == true + bq.hasSidecar(broot4, Slot(3), uint64(8), ColumnIndex(31)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), ColumnIndex(3)) == false + + bq.put(broot4, sidecar6) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(31)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(32)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), ColumnIndex(127)) == true + bq.hasSidecar(broot3, Slot(3), uint64(7), ColumnIndex(0)) == true + bq.hasSidecar(broot4, Slot(3), uint64(8), ColumnIndex(31)) == true + bq.hasSidecar(broot5, Slot(10), uint64(100), ColumnIndex(3)) == false + + bq.remove(broot4) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(31)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(32)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), ColumnIndex(127)) == true + bq.hasSidecar(broot3, Slot(3), uint64(7), ColumnIndex(0)) == true + bq.hasSidecar(broot4, Slot(3), uint64(8), ColumnIndex(31)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), ColumnIndex(3)) == false + + bq.remove(broot3) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(31)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(32)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), 
ColumnIndex(127)) == true + bq.hasSidecar(broot3, Slot(3), uint64(7), ColumnIndex(0)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), ColumnIndex(31)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), ColumnIndex(3)) == false + + bq.remove(broot2) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(0)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(31)) == true + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(32)) == true + bq.hasSidecar(broot2, Slot(2), uint64(6), ColumnIndex(127)) == false + bq.hasSidecar(broot3, Slot(3), uint64(7), ColumnIndex(0)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), ColumnIndex(31)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), ColumnIndex(3)) == false + + bq.remove(broot1) + + check: + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(0)) == false + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(31)) == false + bq.hasSidecar(broot1, Slot(1), uint64(5), ColumnIndex(32)) == false + bq.hasSidecar(broot2, Slot(2), uint64(6), ColumnIndex(127)) == false + bq.hasSidecar(broot3, Slot(3), uint64(7), ColumnIndex(0)) == false + bq.hasSidecar(broot4, Slot(3), uint64(8), ColumnIndex(31)) == false + bq.hasSidecar(broot5, Slot(10), uint64(100), ColumnIndex(3)) == false + len(bq) == 0 + + test "put(sidecar)/put([sidecars])/hasSidecars/popSidecars/remove() [node] test": + let custodyColumns = + [0, 31, 32, 63, 64, 95, 96, 127].mapIt(ColumnIndex(it)) + var bq = ColumnQuarantine.init(cfg, custodyColumns, quarantine, 0, nil) + let + broot1 = genBlockRoot(1) + broot2 = genBlockRoot(2) + sidecars1 = + block: + var res: seq[ref fulu.DataColumnSidecar] + for i in 0 ..< len(custodyColumns): + res.add(newClone(genDataColumnSidecar( + index = int(custodyColumns[i]), slot = 1, proposer_index = 5))) + res + sidecars2 = + block: + var res: seq[ref fulu.DataColumnSidecar] + for i in 0 ..< len(custodyColumns): + res.add(newClone(genDataColumnSidecar( + index = int(custodyColumns[i]), slot = 1, proposer_index = 6))) + res + commitments1 = [ + genKzgCommitment(1), genKzgCommitment(2), genKzgCommitment(3) + ] + commitments2 = [ + genKzgCommitment(4), genKzgCommitment(5), genKzgCommitment(6) + ] + fuluBlock1 = genFuluSignedBeaconBlock(broot1, commitments1) + fuluBlock2 = genFuluSignedBeaconBlock(broot2, commitments2) + + check: + bq.hasSidecars(fuluBlock1) == false + bq.popSidecars(fuluBlock1).isNone() == true + bq.hasSidecars(fuluBlock2) == false + bq.popSidecars(fuluBlock2).isNone() == true + + bq.put(broot1, sidecars1) + check: + len(bq) == len(sidecars1) + + var counter = 0 + for index in 0 ..< len(sidecars2): + if index notin [1, 3, 5, 7]: + bq.put(broot2, sidecars2[index]) + inc(counter) + check len(bq) == len(sidecars1) + counter + + check: + bq.hasSidecars(fuluBlock1) == true + bq.hasSidecars(fuluBlock2) == false + bq.popSidecars(fuluBlock2).isNone() == true + let dres = bq.popSidecars(fuluBlock1) + check: + dres.isOk() + compareSidecars(dres.get(), sidecars1) == true + len(bq) == counter + + bq.put(broot2, sidecars2[1]) + check: + bq.hasSidecars(fuluBlock2) == false + bq.popSidecars(fuluBlock2).isNone() == true + len(bq) == counter + 1 + + bq.put(broot2, sidecars2[3]) + check: + bq.hasSidecars(fuluBlock2) == false + bq.popSidecars(fuluBlock2).isNone() == true + len(bq) == counter + 2 + + bq.put(broot2, sidecars2[5]) + check: + bq.hasSidecars(fuluBlock2) == false + bq.popSidecars(fuluBlock2).isNone() == true + len(bq) == counter + 3 + + bq.put(broot2, sidecars2[7]) + check: + bq.hasSidecars(fuluBlock2) == true 
+ len(bq) == len(sidecars2) + + let eres = bq.popSidecars(fuluBlock2) + check: + eres.isOk() + compareSidecars(eres.get(), sidecars2) == true + len(bq) == 0 + + test "put(sidecar)/put([sidecars])/hasSidecars/popSidecars/remove() [supernode] test": + let custodyColumns = supernodeColumns() + var bq = ColumnQuarantine.init(cfg, custodyColumns, quarantine, 0, nil) + let + broot1 = genBlockRoot(1) + broot2 = genBlockRoot(2) + sidecars1 = + block: + var res: seq[ref fulu.DataColumnSidecar] + for i in 0 ..< (len(custodyColumns) div 2 + 1): + res.add(newClone(genDataColumnSidecar( + index = int(custodyColumns[i]), slot = 1, proposer_index = 5))) + res + sidecars2 = + block: + var res: seq[ref fulu.DataColumnSidecar] + for i in 0 ..< (len(custodyColumns) div 2 + 1): + res.add(newClone(genDataColumnSidecar( + index = int(custodyColumns[i]), slot = 1, proposer_index = 6))) + res + commitments1 = [ + genKzgCommitment(1), genKzgCommitment(2), genKzgCommitment(3) + ] + commitments2 = [ + genKzgCommitment(4), genKzgCommitment(5), genKzgCommitment(6) + ] + fuluBlock1 = genFuluSignedBeaconBlock(broot1, commitments1) + fuluBlock2 = genFuluSignedBeaconBlock(broot2, commitments2) + + check: + bq.hasSidecars(fuluBlock1) == false + bq.popSidecars(fuluBlock1).isNone() == true + bq.hasSidecars(fuluBlock2) == false + bq.popSidecars(fuluBlock2).isNone() == true + + bq.put(broot1, sidecars1) + + for index in 0 ..< len(sidecars2): + if index notin [1, 3, 5, 7]: + bq.put(broot2, sidecars2[index]) + + check: + bq.hasSidecars(fuluBlock1) == true + bq.hasSidecars(fuluBlock2) == false + bq.popSidecars(fuluBlock2).isNone() == true + let dres = bq.popSidecars(fuluBlock1) + check: + dres.isOk() + compareSidecars(dres.get(), sidecars1) == true + + bq.put(broot2, sidecars2[1]) + check: + bq.hasSidecars(fuluBlock2) == false + bq.popSidecars(fuluBlock2).isNone() == true + + bq.put(broot2, sidecars2[3]) + check: + bq.hasSidecars(fuluBlock2) == false + bq.popSidecars(fuluBlock2).isNone() == true + + bq.put(broot2, sidecars2[5]) + check: + bq.hasSidecars(fuluBlock2) == false + bq.popSidecars(fuluBlock2).isNone() == true + + bq.put(broot2, sidecars2[7]) + check: + bq.hasSidecars(fuluBlock2) == true + + let eres = bq.popSidecars(fuluBlock2) + check: + eres.isOk() + compareSidecars(eres.get(), sidecars2) == true + + bq.remove(broot1) + bq.remove(broot2) + check len(bq) == 0 + + test "put()/fetchMissingSidecars/remove test [node]": + let + custodyColumns = + [0, 31, 32, 63, 64, 95, 96, 127].mapIt(ColumnIndex(it)) + peerCustodyColumns1 = + [63, 64, 65, 66, 95, 96, 97, 98].mapIt(ColumnIndex(it)) + peerCustodyColumns2 = + [1, 2, 3, 4, 5, 6, 7, 8].mapIt(ColumnIndex(it)) + + var bq = ColumnQuarantine.init(cfg, custodyColumns, quarantine, 0, nil) + let + broot1 = genBlockRoot(1) + broot2 = genBlockRoot(2) + expected1 = [ + DataColumnsByRootIdentifier( + block_root: broot1, + indices: DataColumnIndices @[ColumnIndex(63), 64, 95, 96]), + DataColumnsByRootIdentifier( + block_root: broot1, + indices: DataColumnIndices @[ColumnIndex(63), 64, 95, 96]), + DataColumnsByRootIdentifier( + block_root: broot1, + indices: DataColumnIndices @[ColumnIndex(63), 64, 95, 96]), + DataColumnsByRootIdentifier( + block_root: broot1, + indices: DataColumnIndices @[ColumnIndex(63), 64, 95, 96]), + DataColumnsByRootIdentifier( + block_root: broot1, + indices: DataColumnIndices @[ColumnIndex(64), 95, 96]), + DataColumnsByRootIdentifier( + block_root: broot1, + indices: DataColumnIndices @[ColumnIndex(95), 96]), + DataColumnsByRootIdentifier( + block_root: broot1, + 
indices: DataColumnIndices @[ColumnIndex(96)]), + DataColumnsByRootIdentifier( + block_root: broot1, + indices: DataColumnIndices @[]), + DataColumnsByRootIdentifier( + block_root: broot1, + indices: DataColumnIndices @[]) + ] + sidecars1 = + block: + var res: seq[ref fulu.DataColumnSidecar] + for i in 0 ..< len(custodyColumns): + res.add(newClone(genDataColumnSidecar( + index = int(custodyColumns[i]), slot = 1, proposer_index = 5))) + res + sidecars2 = + block: + var res: seq[ref fulu.DataColumnSidecar] + for i in 0 ..< len(custodyColumns): + res.add(newClone(genDataColumnSidecar( + index = int(custodyColumns[i]), slot = 2, proposer_index = 50))) + res + commitments1 = [ + genKzgCommitment(1), genKzgCommitment(2), genKzgCommitment(3) + ] + commitments2 = [ + genKzgCommitment(4), genKzgCommitment(5), genKzgCommitment(6) + ] + fuluBlock1 = genFuluSignedBeaconBlock(broot1, commitments1) + fuluBlock2 = genFuluSignedBeaconBlock(broot2, commitments2) + + for i in 0 ..< len(sidecars1) + 1: + let + missing1 = bq.fetchMissingSidecars(broot1, fuluBlock1) + missing2 = bq.fetchMissingSidecars(broot2, fuluBlock2) + missing3 = + bq.fetchMissingSidecars(broot1, fuluBlock1, peerCustodyColumns1) + missing4 = + bq.fetchMissingSidecars(broot2, fuluBlock2, peerCustodyColumns2) + + check: + compareSidecars( + broot1, + sidecars1.toOpenArray(i, len(sidecars1) - 1), missing1) == true + compareSidecars( + broot2, + sidecars2.toOpenArray(i, len(sidecars2) - 1), missing2) == true + + check: + compareIdentifiers(expected1[i], missing3) + len(missing4.indices) == 0 + + if i >= len(sidecars1): + break + + bq.put(broot1, sidecars1[i]) + bq.put(broot2, sidecars2[i]) + + bq.remove(broot1) + bq.remove(broot2) + check len(bq) == 0 + + test "put()/fetchMissingSidecars/remove test [supernode]": + let + custodyColumns = supernodeColumns() + peerCustodyColumns1 = + [63, 64, 65, 66, 95, 96, 97, 98].mapIt(ColumnIndex(it)) + + var bq = ColumnQuarantine.init(cfg, custodyColumns, quarantine, 0, nil) + let + broot1 = genBlockRoot(1) + broot2 = genBlockRoot(2) + sidecars1 = + block: + var res: seq[ref fulu.DataColumnSidecar] + for i in 0 ..< (len(custodyColumns) div 2 + 1): + res.add(newClone(genDataColumnSidecar( + index = int(custodyColumns[i]), slot = 1, proposer_index = 5))) + res + sidecars2 = + block: + var res: seq[ref fulu.DataColumnSidecar] + for i in 0 ..< (len(custodyColumns) div 2 + 1): + res.add(newClone(genDataColumnSidecar( + index = int(custodyColumns[i]), slot = 2, proposer_index = 50))) + res + commitments1 = [ + genKzgCommitment(1), genKzgCommitment(2), genKzgCommitment(3) + ] + commitments2 = [ + genKzgCommitment(4), genKzgCommitment(5), genKzgCommitment(6) + ] + fuluBlock1 = genFuluSignedBeaconBlock(broot1, commitments1) + fuluBlock2 = genFuluSignedBeaconBlock(broot2, commitments2) + + func checkSupernodeExpected( + root: Eth2Digest, + index: int, + missing: DataColumnsByRootIdentifier + ): bool = + const ExpectedVectors = [ + (@[63, 64, 65, 66, 95, 96, 97, 98], 0 .. 57), + (@[63, 64, 65, 66, 95, 96, 97], 58 .. 58), + (@[63, 64, 65, 66, 95, 96], 59 .. 59), + (@[63, 64, 65, 66, 95], 60 .. 60), + (@[63, 64, 65, 66], 61 .. 61), + (@[63, 64, 65], 62 .. 62), + (@[63, 64], 63 .. 63), + (@[64], 64 .. 64), + (@[], 65 .. 65) + ] + + doAssert(index in 0 .. 
65) + for expect in ExpectedVectors: + if index in expect[1]: + if len(expect[0]) != len(missing.indices): + return false + for i in 0 ..< len(missing.indices): + if missing.block_root != root: + return false + if (int(missing.indices[i]) != expect[0][i]): + return false + return true + false + + for i in 0 ..< len(sidecars1) + 1: + let + missing1 = bq.fetchMissingSidecars(broot1, fuluBlock1) + missing2 = bq.fetchMissingSidecars(broot2, fuluBlock2) + missing3 = + bq.fetchMissingSidecars(broot1, fuluBlock1, peerCustodyColumns1) + check: + compareSidecars( + broot1, + sidecars1.toOpenArray(i, len(sidecars1) - 1), missing1) == true + compareSidecars( + broot2, + sidecars2.toOpenArray(i, len(sidecars2) - 1), missing2) == true + checkSupernodeExpected( + broot1, + i, missing3) == true + + if i >= len(sidecars1): + break + + bq.put(broot1, sidecars1[i]) + bq.put(broot2, sidecars2[i]) + + bq.remove(broot1) + bq.remove(broot2) + check len(bq) == 0 + + test "popSidecars()/hasSidecars() return []/true on block without columns": + let + custodyColumns = + [63, 64, 65, 66, 95, 96, 97, 98].mapIt(ColumnIndex(it)) + var bq = ColumnQuarantine.init(cfg, custodyColumns, quarantine, 0, nil) + let + blockRoot1 = genBlockRoot(100) + blockRoot2 = genBlockRoot(5337) + blockRoot3 = genBlockRoot(1294967295) + fuluBlock1 = genFuluSignedBeaconBlock(blockRoot1, []) + fuluBlock2 = genFuluSignedBeaconBlock(blockRoot2, []) + fuluBlock3 = genFuluSignedBeaconBlock(blockRoot3, []) + + check: + bq.hasSidecars(fuluBlock1.root, fuluBlock1) == true + bq.hasSidecars(fuluBlock2.root, fuluBlock2) == true + bq.hasSidecars(fuluBlock3.root, fuluBlock3) == true + + let + res1 = bq.popSidecars(fuluBlock1.root, fuluBlock1) + res2 = bq.popSidecars(fuluBlock2.root, fuluBlock2) + res3 = bq.popSidecars(fuluBlock3.root, fuluBlock3) + + check: + res1.isOk() + len(res1.get()) == 0 + res2.isOk() + len(res2.get()) == 0 + res3.isOk() + len(res3.get()) == 0 + + test "overfill protection test": + let + custodyColumns = + [63, 64, 65, 66, 95, 96, 97, 98].mapIt(ColumnIndex(it)) + + var + bq = ColumnQuarantine.init(cfg, custodyColumns, quarantine, 0, nil) + sidecars: seq[tuple[sidecar: ref fulu.DataColumnSidecar, + blockRoot: Eth2Digest]] + + let maxSidecars = int(NUMBER_OF_COLUMNS * SLOTS_PER_EPOCH) * 3 + for i in 0 ..< maxSidecars: + let + index = i mod len(custodyColumns) + slot = i div len(custodyColumns) + 100 + blockRoot = genBlockRoot(slot) + sidecar = newClone( + genDataColumnSidecar(index = int(custodyColumns[index]), + slot, proposer_index = i)) + sidecars.add((sidecar, blockRoot)) + + for item in sidecars: + bq.put(item.blockRoot, item.sidecar) + + check len(bq) == maxSidecars + + # put(sidecar) test + + for i in 0 ..< len(custodyColumns): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars[i].sidecar[].index + ) == true + + let + sidecar = newClone( + genDataColumnSidecar(index = int(custodyColumns[0]), + slot = 10000, proposer_index = 1000000)) + blockRoot = genBlockRoot(10000) + check: + bq.hasSidecar(blockRoot = blockRoot, slot = Slot(10000), + proposer_index = 1000000'u64, + index = custodyColumns[0]) == false + bq.put(blockRoot, sidecar) + check: + len(bq) == (len(sidecars) - len(custodyColumns) + 1) + bq.hasSidecar(blockRoot = blockRoot, slot = Slot(10000), + proposer_index = 1000000'u64, + index 
= custodyColumns[0]) == true + + for i in 0 ..< len(custodyColumns): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars[i].sidecar[].index + ) == false + + # put(openArray[sidecar]) test + + let + msidecars = + block: + var res: seq[ref fulu.DataColumnSidecar] + for i in 0 ..< len(custodyColumns): + let sidecar = + newClone(genDataColumnSidecar(index = int(custodyColumns[i]), + slot = 100_000, + proposer_index = 2000000)) + res.add(sidecar) + res + mblockRoot = genBlockRoot(20000) + + check: + len(bq) == (len(sidecars) - len(custodyColumns) + 1) + + let beforeLength = len(bq) + + for s in msidecars: + check: + bq.hasSidecar(mblockRoot, + s.signed_block_header.message.slot, + s.signed_block_header.message.proposer_index, + s.index) == false + + bq.put(mblockRoot, msidecars) + check len(bq) == beforeLength + + for s in msidecars: + check: + bq.hasSidecar(mblockRoot, + s.signed_block_header.message.slot, + s.signed_block_header.message.proposer_index, + s.index) == true + + for i in 0 ..< len(custodyColumns): + let j = len(custodyColumns) + i + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars[j].sidecar[].signed_block_header.message.slot)), + slot = + sidecars[j].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars[j].sidecar[].signed_block_header.message.proposer_index, + index = sidecars[j].sidecar[].index + ) == false + + test "put() duplicate items should not affect counters": + let + custodyColumns = + [63, 64, 65, 66, 95, 96, 97, 98].mapIt(ColumnIndex(it)) + var + bq = ColumnQuarantine.init(cfg, custodyColumns, quarantine, 0, nil) + sidecars1: seq[ref fulu.DataColumnSidecar] + sidecars1d: seq[ref fulu.DataColumnSidecar] + sidecars2: seq[ref fulu.DataColumnSidecar] + sidecars2d: seq[ref fulu.DataColumnSidecar] + + for index in custodyColumns: + let + sidecar1 = newClone(genDataColumnSidecar(int(index), 1, 64)) + sidecar1d = newClone(genDataColumnSidecar(int(index), 1, 64)) + sidecar2 = newClone(genDataColumnSidecar(int(index), 2, 65)) + sidecar2d = newClone(genDataColumnSidecar(int(index), 2, 65)) + sidecars1.add(sidecar1) + sidecars1d.add(sidecar1d) + sidecars2.add(sidecar2) + sidecars2d.add(sidecar2d) + + let + broot1 = genBlockRoot(100) + broot2 = genBlockRoot(200) + fuluBlock1 = genFuluSignedBeaconBlock(broot1, [genKzgCommitment(1)]) + fuluBlock2 = genFuluSignedBeaconBlock(broot2, [genKzgCommitment(2)]) + + check: + len(bq) == 0 + len(bq.fetchMissingSidecars( + broot1, fuluBlock1, custodyColumns).indices) == len(custodyColumns) + len(bq.fetchMissingSidecars( + broot2, fuluBlock2, custodyColumns).indices) == len(custodyColumns) + + for index in 0 ..< len(custodyColumns): + bq.put(broot1, sidecars1[index]) + check: + len(bq) == (index + 1) + len(bq.fetchMissingSidecars( + broot1, fuluBlock1, custodyColumns).indices) == + len(custodyColumns) - (index + 1) + bq.put(broot1, sidecars1d[index]) + check: + len(bq) == (index + 1) + len(bq.fetchMissingSidecars( + broot1, fuluBlock1, custodyColumns).indices) == + len(custodyColumns) - (index + 1) + + for index in 0 ..< len(custodyColumns): + bq.put(broot2, sidecars2[index]) + check: + len(bq) == len(custodyColumns) + (index + 1) + len(bq.fetchMissingSidecars( + broot2, fuluBlock2, custodyColumns).indices) == + len(custodyColumns) - (index + 1) + bq.put(broot2, 
sidecars2d[index]) + check: + len(bq) == len(custodyColumns) + (index + 1) + len(bq.fetchMissingSidecars( + broot2, fuluBlock2, custodyColumns).indices) == + len(custodyColumns) - (index + 1) + + bq.remove(broot2) + check len(bq) == len(custodyColumns) + bq.remove(broot1) + check len(bq) == 0 + + test "pruneAfterFinalization() test": + let + custodyColumns = + [63, 64, 65, 66, 95, 96, 97, 98].mapIt(ColumnIndex(it)) + + const TestVectors = [ + (root: 1, slot: 1, index: 63, proposer_index: 20), + (root: 1, slot: 1, index: 64, proposer_index: 20), + (root: 1, slot: 1, index: 65, proposer_index: 20), + (root: 1, slot: 1, index: 66, proposer_index: 20), + (root: 1, slot: 1, index: 96, proposer_index: 20), + (root: 2, slot: 32, index: 63, proposer_index: 21), + (root: 2, slot: 32, index: 64, proposer_index: 21), + (root: 2, slot: 32, index: 65, proposer_index: 21), + (root: 3, slot: 33, index: 63, proposer_index: 22), + (root: 3, slot: 33, index: 64, proposer_index: 22), + (root: 4, slot: 63, index: 63, proposer_index: 23), + (root: 5, slot: 64, index: 63, proposer_index: 24), + (root: 5, slot: 64, index: 64, proposer_index: 24), + (root: 5, slot: 64, index: 65, proposer_index: 24), + (root: 6, slot: 65, index: 63, proposer_index: 25), + (root: 6, slot: 65, index: 64, proposer_index: 25), + (root: 7, slot: 67, index: 63, proposer_index: 26), + (root: 7, slot: 67, index: 64, proposer_index: 26), + (root: 8, slot: 95, index: 63, proposer_index: 27), + (root: 8, slot: 95, index: 64, proposer_index: 27), + (root: 8, slot: 95, index: 65, proposer_index: 27), + (root: 8, slot: 95, index: 66, proposer_index: 27), + (root: 8, slot: 95, index: 98, proposer_index: 27), + (root: 9, slot: 96, index: 63, proposer_index: 28), + (root: 9, slot: 96, index: 64, proposer_index: 28), + (root: 9, slot: 96, index: 65, proposer_index: 28), + (root: 9, slot: 96, index: 66, proposer_index: 28), + (root: 9, slot: 96, index: 95, proposer_index: 28), + (root: 9, slot: 96, index: 96, proposer_index: 28), + (root: 9, slot: 96, index: 97, proposer_index: 28), + (root: 9, slot: 96, index: 98, proposer_index: 28), + (root: 10, slot: 127, index: 96, proposer_index: 29), + (root: 10, slot: 127, index: 97, proposer_index: 29), + (root: 10, slot: 127, index: 98, proposer_index: 29) + ] + + var bq = ColumnQuarantine.init(cfg, custodyColumns, quarantine, 0, nil) + for item in TestVectors: + let sidecar = + newClone( + genDataColumnSidecar(index = item.index, slot = item.slot, + proposer_index = item.proposer_index)) + bq.put(genBlockRoot(item.root), sidecar) + + check: + len(bq) == len(TestVectors) + + for item in TestVectors: + check: + bq.hasSidecar( + genBlockRoot(item.root), Slot(item.slot), + uint64(item.proposer_index), BlobIndex(item.index)) == true + + bq.pruneAfterFinalization(Epoch(0), false) + check: + len(bq) == len(TestVectors) - 5 + + for item in TestVectors: + let res = + if item.root == 1: + false + else: + true + check: + bq.hasSidecar( + genBlockRoot(item.root), Slot(item.slot), + uint64(item.proposer_index), BlobIndex(item.index)) == res + + bq.pruneAfterFinalization(Epoch(1), false) + check: + len(bq) == len(TestVectors) - 5 - 6 + + for item in TestVectors: + let res = + if item.root in [1, 2, 3, 4]: + false + else: + true + check: + bq.hasSidecar( + genBlockRoot(item.root), Slot(item.slot), + uint64(item.proposer_index), BlobIndex(item.index)) == res + + bq.pruneAfterFinalization(Epoch(2), false) + check: + len(bq) == len(TestVectors) - 5 - 6 - 12 + + for item in TestVectors: + let res = + if item.root in [1, 2, 
3, 4, 5, 6, 7, 8]: + false + else: + true + check: + bq.hasSidecar( + genBlockRoot(item.root), Slot(item.slot), + uint64(item.proposer_index), BlobIndex(item.index)) == res + + bq.pruneAfterFinalization(Epoch(3), false) + check: + len(bq) == 0 + + for item in TestVectors: + check: + bq.hasSidecar( + genBlockRoot(item.root), Slot(item.slot), + uint64(item.proposer_index), BlobIndex(item.index)) == false + + test "database unload/load test": + let + custodyColumns = + [63, 64, 65, 66, 95, 96, 97, 98].mapIt(ColumnIndex(it)) + + var + bq = ColumnQuarantine.init(cfg, custodyColumns, quarantine, 2, nil) + sidecars: seq[tuple[sidecar: ref fulu.DataColumnSidecar, + blockRoot: Eth2Digest]] + + let maxSidecars = int(NUMBER_OF_COLUMNS * SLOTS_PER_EPOCH) * 3 + for i in 0 ..< maxSidecars: + let + index = i mod len(custodyColumns) + slot = i div len(custodyColumns) + 100 + blockRoot = genBlockRoot(slot) + sidecar = newClone( + genDataColumnSidecar(index = int(custodyColumns[index]), + slot, proposer_index = i)) + sidecars.add((sidecar, blockRoot)) + + for item in sidecars: + bq.put(item.blockRoot, item.sidecar) + + # put(sidecar) test + + check: + len(bq) == maxSidecars + lenMemory(bq) == maxSidecars + lenDisk(bq) == 0 + quarantine.sidecarsCount(typedesc[fulu.DataColumnSidecar]) == 0 + + for i in 0 ..< len(custodyColumns): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars[i].sidecar[].index + ) == true + + let + sidecar = newClone( + genDataColumnSidecar(index = int(custodyColumns[0]), slot = 10000, + proposer_index = 1000000)) + blockRoot1 = genBlockRoot(10000) + check: + bq.hasSidecar( + blockRoot = blockRoot1, slot = Slot(10000), + proposer_index = 1000000'u64, index = custodyColumns[0]) == false + + bq.put(blockRoot1, sidecar) + + check: + len(bq) == len(sidecars) + 1 + lenDisk(bq) == len(custodyColumns) + quarantine.sidecarsCount(typedesc[fulu.DataColumnSidecar]) == + len(custodyColumns) + lenMemory(bq) == len(sidecars) - len(custodyColumns) + 1 + bq.hasSidecar( + blockRoot = blockRoot1, slot = Slot(10000), + proposer_index = 1000000'u64, index = custodyColumns[0]) == true + + for i in 0 ..< len(custodyColumns): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars[i].sidecar[].index + ) == true + + let + blockRoot2 = + genBlockRoot( + int(sidecars[0].sidecar[].signed_block_header.message.slot)) + sidecars2 = + sidecars.toOpenArray(0, len(custodyColumns) - 1).mapIt(it.sidecar) + commitments2 = + @[genKzgCommitment(1), genKzgCommitment(2), genKzgCommitment(3)] + blck = genFuluSignedBeaconBlock(blockRoot2, commitments2) + dres = bq.popSidecars(blockRoot2, blck) + + check: + dres.isOk() + compareSidecarsByValue(dres.get(), sidecars2) == true + len(bq) == len(sidecars) - len(custodyColumns) + 1 + lenDisk(bq) == 0 + quarantine.sidecarsCount(typedesc[fulu.DataColumnSidecar]) == 0 + + # put(openArray[sidecar]) test + + let + msidecars = + block: + var res: seq[ref fulu.DataColumnSidecar] + for i in 0 ..< len(custodyColumns): + let sidecar = + newClone( + genDataColumnSidecar( + index = int(custodyColumns[i]), slot = 100_000, + 
proposer_index = 2000000)) + res.add(sidecar) + res + mblockRoot = genBlockRoot(20000) + + check: + len(bq) == len(sidecars) - len(custodyColumns) + 1 + + for s in msidecars: + check: + bq.hasSidecar(mblockRoot, + s.signed_block_header.message.slot, + s.signed_block_header.message.proposer_index, + s.index) == false + + bq.put(mblockRoot, msidecars) + + check: + lenDisk(bq) == len(custodyColumns) + quarantine.sidecarsCount(typedesc[fulu.DataColumnSidecar]) == + len(custodyColumns) + len(bq) == len(sidecars) + 1 + + for s in msidecars: + check: + bq.hasSidecar(mblockRoot, + s.signed_block_header.message.slot, + s.signed_block_header.message.proposer_index, + s.index) == true + + for i in 0 ..< len(custodyColumns): + let j = len(custodyColumns) + i + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars[j].sidecar[].signed_block_header.message.slot)), + slot = + sidecars[j].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars[j].sidecar[].signed_block_header.message.proposer_index, + index = sidecars[j].sidecar[].index + ) == true + + let + i3 = len(custodyColumns) + blockRoot3 = + genBlockRoot( + int(sidecars[i3].sidecar[].signed_block_header.message.slot)) + sidecars3 = + sidecars.toOpenArray(i3, i3 + len(custodyColumns) - 1). + mapIt(it.sidecar) + commitments3 = + @[genKzgCommitment(5), genKzgCommitment(6), genKzgCommitment(7)] + blck3 = genFuluSignedBeaconBlock(blockRoot3, commitments3) + dres3 = bq.popSidecars(blockRoot3, blck3) + + check: + dres3.isOk() + compareSidecarsByValue(dres3.get(), sidecars3) == true + len(bq) == len(sidecars) - len(custodyColumns) + 1 + lenDisk(bq) == 0 + quarantine.sidecarsCount(typedesc[fulu.DataColumnSidecar]) == 0 + + test "database and memory overfill protection and pruning test": + let + custodyColumns = + [63, 64, 65, 66, 95, 96, 97, 98].mapIt(ColumnIndex(it)) + var + bq = ColumnQuarantine.init(cfg, custodyColumns, quarantine, 1, nil) + sidecars1: seq[tuple[sidecar: ref fulu.DataColumnSidecar, + blockRoot: Eth2Digest]] + sidecars2: seq[tuple[sidecar: ref fulu.DataColumnSidecar, + blockRoot: Eth2Digest]] + epochs1: seq[Epoch] + epochs2: seq[Epoch] + + let maxSidecars = int(NUMBER_OF_COLUMNS * SLOTS_PER_EPOCH) * 3 + for i in 0 ..< maxSidecars: + let + index = i mod len(custodyColumns) + slot1 = i div len(custodyColumns) + 100 + slot2 = i div len(custodyColumns) + 100000 + epoch1 = Slot(slot1).epoch() + epoch2 = Slot(slot2).epoch() + blockRoot1 = genBlockRoot(slot1) + blockRoot2 = genBlockRoot(slot2) + sidecar1 = newClone( + genDataColumnSidecar(int(custodyColumns[index]), slot1, + proposer_index = i)) + sidecar2 = newClone( + genDataColumnSidecar(int(custodyColumns[index]), slot2, + proposer_index = 100 + i)) + + sidecars1.add((sidecar1, blockRoot1)) + sidecars2.add((sidecar2, blockRoot2)) + if len(epochs1) == 0 or epochs1[^1] != epoch1: + epochs1.add(epoch1) + if len(epochs2) == 0 or epochs2[^1] != epoch2: + epochs2.add(epoch2) + + for item in sidecars1: + bq.put(item.blockRoot, item.sidecar) + + check: + len(bq) == len(sidecars1) + lenDisk(bq) == 0 + quarantine.sidecarsCount(typedesc[fulu.DataColumnSidecar]) == 0 + + for i in 0 ..< (maxSidecars div len(custodyColumns)): + let + start = len(custodyColumns) * int(i) + finish = start + len(custodyColumns) - 1 + blockRoot = sidecars2[start].blockRoot + sidecars = sidecars2.toOpenArray(start, finish).mapIt(it.sidecar) + bq.put(blockRoot, sidecars) + + check: + len(bq) == len(sidecars1) + len(sidecars2) + lenDisk(bq) == len(sidecars1) + 
quarantine.sidecarsCount(typedesc[fulu.DataColumnSidecar]) == + len(sidecars1) + lenMemory(bq) == len(sidecars2) + + for i in 0 ..< len(sidecars1): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars1[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars1[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars1[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars1[i].sidecar[].index + ) == true + + for i in 0 ..< len(sidecars2): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars2[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars2[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars2[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars2[i].sidecar[].index + ) == true + + let + sidecar = newClone(genDataColumnSidecar( + index = int(custodyColumns[0]), slot = 1000000, + proposer_index = 2000000)) + blockRoot = genBlockRoot(1000000) + + check: + bq.hasSidecar(blockRoot = blockRoot, slot = Slot(1000000), + proposer_index = 2000000'u64, + index = custodyColumns[0]) == false + + bq.put(blockRoot, sidecar) + + check: + len(bq) == len(sidecars1) + len(sidecars2) - len(custodyColumns) + 1 + lenDisk(bq) == len(sidecars1) + quarantine.sidecarsCount(typedesc[fulu.DataColumnSidecar]) == len(sidecars1) + lenMemory(bq) == len(sidecars2) - len(custodyColumns) + 1 + bq.hasSidecar( + blockRoot = blockRoot, slot = Slot(1000000), + proposer_index = 2000000'u64, index = custodyColumns[0]) == true + + for i in 0 ..< len(custodyColumns): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars1[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars1[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars1[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars1[i].sidecar[].index + ) == false + + for i in len(custodyColumns) ..< len(sidecars1): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars1[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars1[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars1[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars1[i].sidecar[].index + ) == true + + for i in 0 ..< len(sidecars2): + check: + bq.hasSidecar( + blockRoot = + genBlockRoot( + int(sidecars2[i].sidecar[].signed_block_header.message.slot)), + slot = + sidecars2[i].sidecar[].signed_block_header.message.slot, + proposer_index = + sidecars2[i].sidecar[].signed_block_header.message.proposer_index, + index = sidecars2[i].sidecar[].index + ) == true + + # Pruning memory and database + for epoch in epochs1: + bq.pruneAfterFinalization(epoch, false) + for epoch in epochs2: + bq.pruneAfterFinalization(epoch, false) + + check: + len(bq) == 1 + + bq.pruneAfterFinalization(Slot(1000000).epoch(), false) + + check: + len(bq) == 0 + quarantine.sidecarsCount(typedesc[fulu.DataColumnSidecar]) == 0 diff --git a/tests/test_remote_keystore.nim b/tests/test_remote_keystore.nim index a635772fdd..333c2a368f 100644 --- a/tests/test_remote_keystore.nim +++ b/tests/test_remote_keystore.nim @@ -1,11 +1,11 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). 
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import @@ -135,10 +135,9 @@ suite "Remove keystore testing suite": check keystore.remotes[0].id == 0 check keystore.remotes[0].pubkey.toHex == "8b9c875fbe539c6429c4fc304675062579ce47fb6b2ac6b6a1ba1188ca123a80affbfe381dbbc8e7f2437709a4c3325c" check keystore.provenBlockProperties.len == 1 - check keystore.provenBlockProperties[0].capellaIndex == some GeneralizedIndex(401) - check keystore.provenBlockProperties[0].denebIndex == some GeneralizedIndex(801) - check keystore.provenBlockProperties[0].electraIndex == some GeneralizedIndex(801) - check keystore.provenBlockProperties[0].fuluIndex == some GeneralizedIndex(801) + check keystore.provenBlockProperties[0].electraIndex == GeneralizedIndex(801) + check keystore.provenBlockProperties[0].fuluIndex == GeneralizedIndex(801) + check keystore.provenBlockProperties[0].gloasIndex == GeneralizedIndex(801) test "Verifying Signer / Many remotes": for version in [3]: @@ -185,7 +184,6 @@ suite "Remove keystore testing suite": check keystore.remotes[2].pubkey.toHex == "8f5f9e305e7fcbde94182747f5ecec573d1786e8320a920347a74c0ff5e70f12ca22607c98fdc8dbe71161db59e0ac9d" check keystore.threshold == 2 check keystore.provenBlockProperties.len == 1 - check keystore.provenBlockProperties[0].capellaIndex == some GeneralizedIndex(401) - check keystore.provenBlockProperties[0].denebIndex == some GeneralizedIndex(801) - check keystore.provenBlockProperties[0].electraIndex == some GeneralizedIndex(801) - check keystore.provenBlockProperties[0].fuluIndex == some GeneralizedIndex(801) \ No newline at end of file + check keystore.provenBlockProperties[0].electraIndex == GeneralizedIndex(801) + check keystore.provenBlockProperties[0].fuluIndex == GeneralizedIndex(801) + check keystore.provenBlockProperties[0].gloasIndex == GeneralizedIndex(801) diff --git a/tests/test_signing_node.nim b/tests/test_signing_node.nim index e0030cc678..08eb2e55c9 100644 --- a/tests/test_signing_node.nim +++ b/tests/test_signing_node.nim @@ -5,22 +5,20 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import unittest2, chronicles, results, chronos/asyncproc, chronos/unittest2/asynctests, - ../beacon_chain/spec/crypto, + ../beacon_chain/spec/[crypto, presets], ../beacon_chain/spec/eth2_apis/rest_remote_signer_calls, ../beacon_chain/validators/validator_pool from std/os import getEnv, osErrorMsg from stew/byteutils import hexToByteArray from ../beacon_chain/filepath import secureCreatePath, secureWriteFile -from ../beacon_chain/spec/signatures import - get_aggregate_and_proof_signature, get_block_signature const TestDirectoryName = "test-signing-node" @@ -65,8 +63,6 @@ const AgAttestationPhase0 = "{\"data\":{\"aggregation_bits\":\"0x01\",\"signature\":\"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505\",\"data\":{\"slot\":\"1\",\"index\":\"1\",\"beacon_block_root\":\"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2\",\"source\":{\"epoch\":\"1\",\"root\":\"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2\"},\"target\":{\"epoch\":\"1\",\"root\":\"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2\"}}}}" AgAttestationElectra = "{\"data\":{\"aggregation_bits\":\"0x01\",\"committee_bits\":\"0x0000000000000001\",\"signature\":\"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505\",\"data\":{\"slot\":\"1\",\"index\":\"1\",\"beacon_block_root\":\"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2\",\"source\":{\"epoch\":\"1\",\"root\":\"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2\"},\"target\":{\"epoch\":\"1\",\"root\":\"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2\"}}}}" - DenebBlockContents = 
"{\"signed_block\":{\"message\":{\"slot\":\"5297696\",\"proposer_index\":\"153094\",\"parent_root\":\"0xe6106533af9be918120ead7440a8006c7f123cc3cb7daf1f11d951864abea014\",\"state_root\":\"0xf86196d34500ca25d1f4e7431d4d52f6f85540bcaf97dd0d2ad9ecdb3eebcdf0\",\"body\":{\"randao_reveal\":\"0xa7efee3d5ddceb60810b23e3b5d39734696418f41dfd13a0851c7be7a72acbdceaa61e1db27513801917d72519d1c1040ccfed829faf06abe06d9964949554bf4369134b66de715ea49eb4fecf3e2b7e646f1764a1993e31e53dbc6557929c12\",\"eth1_data\":{\"deposit_root\":\"0x8ec87d7219a3c873fff3bfe206b4f923d1b471ce4ff9d6d6ecc162ef07825e14\",\"deposit_count\":\"259476\",\"block_hash\":\"0x877b6f8332c7397251ff3f0c5cecec105ff7d4cb78251b47f91fd15a86a565ab\"},\"graffiti\":\"\",\"proposer_slashings\":[],\"attester_slashings\":[],\"attestations\":[],\"deposits\":[],\"voluntary_exits\":[],\"sync_aggregate\":{\"sync_committee_bits\":\"0x733dfda7f5ffde5ade73367fcbf7fffeef7fe43777ffdffab9dbad6f7eed5fff9bfec4affdefbfaddf35bf5efbff9ffff9dfd7dbf97fbfcdfaddfeffbf95f75f\",\"sync_committee_signature\":\"0x81fdf76e797f81b0116a1c1ae5200b613c8041115223cd89e8bd5477aab13de6097a9ebf42b130c59527bbb4c96811b809353a17c717549f82d4bd336068ef0b99b1feebd4d2432a69fa77fac12b78f1fcc9d7b59edbeb381adf10b15bc4a520\"},\"execution_payload\":{\"parent_hash\":\"0x14c2242a8cfbce559e84c391f5f16d10d7719751b8558873012dc88ae5a193e8\",\"fee_recipient\":\"$1\",\"state_root\":\"0xdf8d96b2c292736d39e72e25802c2744d34d3d3c616de5b362425cab01f72fa5\",\"receipts_root\":\"0x4938a2bf640846d213b156a1a853548b369cd02917fa63d8766ab665d7930bac\",\"logs_bloom\":\"0x298610600038408c201080013832408850a00bc8f801920121840030a015310010e2a0e0108628110552062811441c84802f43825c4fc82140b036c58025a28800054c80a44025c052090a0f2c209a0400058040019ea0008e589084078048050880930113a2894082e0112408b088382402a851621042212aa40018a408d07e178c68691486411aa9a2809043b000a04c040000065a030028018540b04b1820271d00821b00c29059095022322c10a530060223240416140190056608200063c82248274ba8f0098e402041cd9f451031481a1010b8220824833520490221071898802d206348449116812280014a10a2d1c210100a30010802490f0a221849\",\"prev_randao\":\"0xc061711e135cd40531ec3ee29d17d3824c0e5f80d07f721e792ab83240aa0ab5\",\"block_number\":\"8737497\",\"gas_limit\":\"30000000\",\"gas_used\":\"16367052\",\"timestamp\":\"1680080352\",\"extra_data\":\"0xd883010b05846765746888676f312e32302e32856c696e7578\",\"base_fee_per_gas\":\"231613172261\",\"block_hash\":\"0x5aa9fd22a9238925adb2b038fd6eafc77adabf554051db5bc16ae5168a52eff6\",\"transactions\":[],\"withdrawals\":[],\"blob_gas_used\":\"2316131761\",\"excess_blob_gas\":\"231613172261\"},\"bls_to_execution_changes\":[],\"blob_kzg_commitments\":[]}},\"signature\":\"$2\"},\"kzg_proofs\":[],\"blobs\":[]}" - ElectraBlockContents = 
"{\"signed_block\":{\"message\":{\"slot\":\"5297696\",\"proposer_index\":\"153094\",\"parent_root\":\"0xe6106533af9be918120ead7440a8006c7f123cc3cb7daf1f11d951864abea014\",\"state_root\":\"0xf86196d34500ca25d1f4e7431d4d52f6f85540bcaf97dd0d2ad9ecdb3eebcdf0\",\"body\":{\"randao_reveal\":\"0xa7efee3d5ddceb60810b23e3b5d39734696418f41dfd13a0851c7be7a72acbdceaa61e1db27513801917d72519d1c1040ccfed829faf06abe06d9964949554bf4369134b66de715ea49eb4fecf3e2b7e646f1764a1993e31e53dbc6557929c12\",\"eth1_data\":{\"deposit_root\":\"0x8ec87d7219a3c873fff3bfe206b4f923d1b471ce4ff9d6d6ecc162ef07825e14\",\"deposit_count\":\"259476\",\"block_hash\":\"0x877b6f8332c7397251ff3f0c5cecec105ff7d4cb78251b47f91fd15a86a565ab\"},\"graffiti\":\"\",\"proposer_slashings\":[],\"attester_slashings\":[],\"attestations\":[],\"deposits\":[],\"voluntary_exits\":[],\"sync_aggregate\":{\"sync_committee_bits\":\"0x733dfda7f5ffde5ade73367fcbf7fffeef7fe43777ffdffab9dbad6f7eed5fff9bfec4affdefbfaddf35bf5efbff9ffff9dfd7dbf97fbfcdfaddfeffbf95f75f\",\"sync_committee_signature\":\"0x81fdf76e797f81b0116a1c1ae5200b613c8041115223cd89e8bd5477aab13de6097a9ebf42b130c59527bbb4c96811b809353a17c717549f82d4bd336068ef0b99b1feebd4d2432a69fa77fac12b78f1fcc9d7b59edbeb381adf10b15bc4a520\"},\"execution_payload\":{\"parent_hash\":\"0x14c2242a8cfbce559e84c391f5f16d10d7719751b8558873012dc88ae5a193e8\",\"fee_recipient\":\"$1\",\"state_root\":\"0xdf8d96b2c292736d39e72e25802c2744d34d3d3c616de5b362425cab01f72fa5\",\"receipts_root\":\"0x4938a2bf640846d213b156a1a853548b369cd02917fa63d8766ab665d7930bac\",\"logs_bloom\":\"0x298610600038408c201080013832408850a00bc8f801920121840030a015310010e2a0e0108628110552062811441c84802f43825c4fc82140b036c58025a28800054c80a44025c052090a0f2c209a0400058040019ea0008e589084078048050880930113a2894082e0112408b088382402a851621042212aa40018a408d07e178c68691486411aa9a2809043b000a04c040000065a030028018540b04b1820271d00821b00c29059095022322c10a530060223240416140190056608200063c82248274ba8f0098e402041cd9f451031481a1010b8220824833520490221071898802d206348449116812280014a10a2d1c210100a30010802490f0a221849\",\"prev_randao\":\"0xc061711e135cd40531ec3ee29d17d3824c0e5f80d07f721e792ab83240aa0ab5\",\"block_number\":\"8737497\",\"gas_limit\":\"30000000\",\"gas_used\":\"16367052\",\"timestamp\":\"1680080352\",\"extra_data\":\"0xd883010b05846765746888676f312e32302e32856c696e7578\",\"base_fee_per_gas\":\"231613172261\",\"block_hash\":\"0x5aa9fd22a9238925adb2b038fd6eafc77adabf554051db5bc16ae5168a52eff6\",\"transactions\":[],\"withdrawals\":[],\"blob_gas_used\":\"2316131761\",\"excess_blob_gas\":\"231613172261\"},\"bls_to_execution_changes\":[],\"blob_kzg_commitments\":[],\"execution_requests\":{\"deposits\":[],\"withdrawals\":[],\"consolidations\":[]}}},\"signature\":\"$2\"},\"kzg_proofs\":[],\"blobs\":[]}" SigningNodeAddress = "127.0.0.1" @@ -92,23 +88,21 @@ func getNodePort(basePort: int, rt: RemoteSignerType): int = proc getBlock( fork: ConsensusFork, - feeRecipient = SigningExpectedFeeRecipient -): ForkedBeaconBlock {.raises: [ResultError[cstring]].} = + feeRecipient = SigningExpectedFeeRecipient): ForkedBeaconBlock = try: case fork - of ConsensusFork.Phase0 .. ConsensusFork.Capella: + of ConsensusFork.Phase0 .. 
ConsensusFork.Deneb: raiseAssert "Unsupported fork" - of ConsensusFork.Deneb: - ForkedBeaconBlock.init(RestJson.decode( - DenebBlockContents % [feeRecipient, SomeSignature], - DenebSignedBlockContents).signed_block.message) of ConsensusFork.Electra: ForkedBeaconBlock.init(RestJson.decode( ElectraBlockContents % [feeRecipient, SomeSignature], ElectraSignedBlockContents).signed_block.message) of ConsensusFork.Fulu: - debugFuluComment "electra test signing node getblock" + debugFuluComment "fulu test signing node getblock" raiseAssert "fulu unsupported" + of ConsensusFork.Gloas: + debugFuluComment "gloas test signing node getblock" + raiseAssert "gloas unsupported" except ValueError: # https://github.com/nim-lang/Nim/pull/23356 raiseAssert "Arguments match the format string" @@ -118,12 +112,8 @@ proc getBlock( func init(t: typedesc[Web3SignerForkedBeaconBlock], forked: ForkedBeaconBlock): Web3SignerForkedBeaconBlock = case forked.kind - of ConsensusFork.Phase0 .. ConsensusFork.Capella: - raiseAssert "supports Deneb and later forks" - of ConsensusFork.Deneb: - Web3SignerForkedBeaconBlock( - kind: ConsensusFork.Deneb, - data: forked.denebData.toBeaconBlockHeader) + of ConsensusFork.Phase0 .. ConsensusFork.Deneb: + raiseAssert "supports Electra and later forks" of ConsensusFork.Electra: Web3SignerForkedBeaconBlock( kind: ConsensusFork.Electra, @@ -132,6 +122,10 @@ func init(t: typedesc[Web3SignerForkedBeaconBlock], Web3SignerForkedBeaconBlock( kind: ConsensusFork.Fulu, data: forked.fuluData.toBeaconBlockHeader) + of ConsensusFork.Gloas: + Web3SignerForkedBeaconBlock( + kind: ConsensusFork.Gloas, + data: forked.gloasData.toBeaconBlockHeader) proc createKeystore(dataDir, pubkey, store, password: string): Result[void, string] = @@ -248,6 +242,7 @@ func getRemoteKeystoreData(data: string, basePort: int, pubkey: publicKey ) + debugGloasComment "presumably gloasIndex shouldn't be 801" ok case rt of RemoteSignerType.Web3Signer: KeystoreData( @@ -263,10 +258,9 @@ func getRemoteKeystoreData(data: string, basePort: int, provenBlockProperties: @[ ProvenProperty( path: ".execution_payload.fee_recipient", - fuluIndex: some GeneralizedIndex(801), - electraIndex: some GeneralizedIndex(801), - denebIndex: some GeneralizedIndex(801), - capellaIndex: some GeneralizedIndex(401) + gloasIndex: GeneralizedIndex(801), + fuluIndex: GeneralizedIndex(801), + electraIndex: GeneralizedIndex(801), ) ], version: uint64(4), @@ -753,14 +747,16 @@ block: sres3.get() == rres3.get() asyncTest "Signing validator registration (getBuilderSignature())": + # mainnet version used by default in nimbus_signing_node + const genesis_fork_version = defaultRuntimeConfig.GENESIS_FORK_VERSION let vdata = default(ValidatorRegistrationV1) - sres1 = await validator1.getBuilderSignature(SigningFork, vdata) - sres2 = await validator2.getBuilderSignature(SigningFork, vdata) - sres3 = await validator3.getBuilderSignature(SigningFork, vdata) - rres1 = await validator4.getBuilderSignature(SigningFork, vdata) - rres2 = await validator5.getBuilderSignature(SigningFork, vdata) - rres3 = await validator6.getBuilderSignature(SigningFork, vdata) + sres1 = await validator1.getBuilderSignature(genesis_fork_version, vdata) + sres2 = await validator2.getBuilderSignature(genesis_fork_version, vdata) + sres3 = await validator3.getBuilderSignature(genesis_fork_version, vdata) + rres1 = await validator4.getBuilderSignature(genesis_fork_version, vdata) + rres2 = await validator5.getBuilderSignature(genesis_fork_version, vdata) + rres3 = await 
validator6.getBuilderSignature(genesis_fork_version, vdata) check: sres1.isOk() @@ -839,41 +835,6 @@ block: sres2.get() == rres2.get() sres3.get() == rres3.get() - asyncTest "Signing BeaconBlock (getBlockSignature(deneb))": - let - forked = getBlock(ConsensusFork.Deneb) - blockRoot = withBlck(forked): hash_tree_root(forkyBlck) - - sres1 = - await validator1.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot, forked) - sres2 = - await validator2.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot, forked) - sres3 = - await validator3.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot, forked) - rres1 = - await validator4.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot, forked) - rres2 = - await validator5.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot, forked) - rres3 = - await validator6.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot, forked) - - check: - sres1.isOk() - sres2.isOk() - sres3.isOk() - rres1.isOk() - rres2.isOk() - rres3.isOk() - sres1.get() == rres1.get() - sres2.get() == rres2.get() - sres3.get() == rres3.get() - asyncTest "Signing BeaconBlock (getBlockSignature(electra))": let forked = getBlock(ConsensusFork.Electra) @@ -881,22 +842,22 @@ block: sres1 = await validator1.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot, forked) + blockRoot, forked) sres2 = await validator2.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot, forked) + blockRoot, forked) sres3 = await validator3.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot, forked) + blockRoot, forked) rres1 = await validator4.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot, forked) + blockRoot, forked) rres2 = await validator5.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot, forked) + blockRoot, forked) rres3 = await validator6.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot, forked) + blockRoot, forked) check: sres1.isOk() @@ -1041,95 +1002,6 @@ block: await client.closeWait() - asyncTest "Signing BeaconBlock (getBlockSignature(deneb))": - let - fork = ConsensusFork.Deneb - forked1 = getBlock(fork) - blockRoot1 = withBlck(forked1): hash_tree_root(forkyBlck) - forked2 = getBlock(fork, SigningOtherFeeRecipient) - blockRoot2 = withBlck(forked2): hash_tree_root(forkyBlck) - request1 = Web3SignerRequest.init(SigningFork, GenesisValidatorsRoot, - Web3SignerForkedBeaconBlock.init(forked1)) - request2 = Web3SignerRequest.init(SigningFork, GenesisValidatorsRoot, - Web3SignerForkedBeaconBlock.init(forked1), @[]) - remoteUrl = "http://" & SigningNodeAddress & ":" & - $getNodePort(basePort, RemoteSignerType.VerifyingWeb3Signer) - prestoFlags = {RestClientFlag.CommaSeparatedArray} - rclient = RestClientRef.new(remoteUrl, prestoFlags, {}) - publicKey1 = ValidatorPubKey.fromHex(ValidatorPubKey1).get() - publicKey2 = ValidatorPubKey.fromHex(ValidatorPubKey2).get() - publicKey3 = ValidatorPubKey.fromHex(ValidatorPubKey3).get() - - check rclient.isOk() - - let - client = rclient.get() - sres1 = - await validator1.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot1, forked1) - sres2 = - await validator2.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot1, forked1) - sres3 = - await validator3.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot1, forked1) - rres1 = 
- await validator4.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot1, forked1) - rres2 = - await validator5.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot1, forked1) - rres3 = - await validator6.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot1, forked1) - bres1 = - await validator4.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot2, forked2) - bres2 = - await validator5.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot2, forked2) - bres3 = - await validator6.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot2, forked2) - - check: - # Local requests - sres1.isOk() - sres2.isOk() - sres3.isOk() - # Remote requests with proper Merkle proof of proper FeeRecipent field - rres1.isOk() - rres2.isOk() - rres3.isOk() - # Signature comparison - sres1.get() == rres1.get() - sres2.get() == rres2.get() - sres3.get() == rres3.get() - # Remote requests with changed FeeRecipient field - bres1.isErr() - bres2.isErr() - bres3.isErr() - - try: - let - # `proofs` array is not present. - response1 = await client.signDataPlain(publicKey1, request1) - response2 = await client.signDataPlain(publicKey2, request1) - response3 = await client.signDataPlain(publicKey3, request1) - # `proofs` array is empty. - response4 = await client.signDataPlain(publicKey1, request2) - response5 = await client.signDataPlain(publicKey2, request2) - response6 = await client.signDataPlain(publicKey3, request2) - check: - response1.status == 400 - response2.status == 400 - response3.status == 400 - response4.status == 400 - response5.status == 400 - response6.status == 400 - finally: - await client.closeWait() - asyncTest "Signing BeaconBlock (getBlockSignature(electra))": let fork = ConsensusFork.Electra @@ -1155,31 +1027,31 @@ block: client = rclient.get() sres1 = await validator1.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot1, forked1) + blockRoot1, forked1) sres2 = await validator2.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot1, forked1) + blockRoot1, forked1) sres3 = await validator3.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot1, forked1) + blockRoot1, forked1) rres1 = await validator4.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot1, forked1) + blockRoot1, forked1) rres2 = await validator5.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot1, forked1) + blockRoot1, forked1) rres3 = await validator6.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot1, forked1) + blockRoot1, forked1) bres1 = await validator4.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot2, forked2) + blockRoot2, forked2) bres2 = await validator5.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot2, forked2) + blockRoot2, forked2) bres3 = await validator6.getBlockSignature(SigningFork, GenesisValidatorsRoot, - Slot(1), blockRoot2, forked2) + blockRoot2, forked2) check: # Local requests diff --git a/tests/test_spec.nim b/tests/test_spec.nim index 179f46ec6a..3fb338f8bb 100644 --- a/tests/test_spec.nim +++ b/tests/test_spec.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at 
https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -21,11 +21,11 @@ from ../beacon_chain/spec/state_transition import process_slots suite "Beacon state" & preset(): setup: - let cfg = defaultRuntimeConfig + let cfg {.used.} = defaultRuntimeConfig test "Smoke test initialize_beacon_state_from_eth1" & preset(): let state = newClone(initialize_beacon_state_from_eth1( - cfg, ZERO_HASH, 0, makeInitialDeposits(SLOTS_PER_EPOCH, {}), + cfg, ConsensusFork.Bellatrix, ZERO_HASH, 0, makeInitialDeposits(SLOTS_PER_EPOCH, {}), default(bellatrix.ExecutionPayloadHeader), {})) check: state.validators.lenu64 == SLOTS_PER_EPOCH @@ -103,7 +103,6 @@ suite "Beacon state" & preset(): makeInitialDeposits(SLOTS_PER_EPOCH, {}), {skipBlsValidation})) genBlock = get_initial_beacon_block(state[]) cache: StateCache - info: ForkedEpochInfo check: state[].phase0Data.dependent_root(Epoch(0)) == genBlock.root @@ -126,14 +125,6 @@ suite "Beacon state" & preset(): state[].phase0Data.data.get_block_root_at_slot(Epoch(1).start_slot - 1) state[].phase0Data.dependent_root(Epoch(0)) == genBlock.root - test "merklizer state roundtrip": - let - dcs = DepositContractState() - merkleizer = DepositsMerkleizer.init(dcs) - - check: - dcs == merkleizer.toDepositContractState() - test "can_advance_slots": var state = (ref ForkedHashedBeaconState)( @@ -143,7 +134,6 @@ suite "Beacon state" & preset(): makeInitialDeposits(SLOTS_PER_EPOCH, {}), {skipBlsValidation})) genBlock = get_initial_beacon_block(state[]) cache: StateCache - info: ForkedEpochInfo check: state[].can_advance_slots(genBlock.root, Slot(0)) diff --git a/tests/test_statediff.nim b/tests/test_statediff.nim index 2bf046e5ca..c9a131a186 100644 --- a/tests/test_statediff.nim +++ b/tests/test_statediff.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -40,10 +40,11 @@ when isMainModule: suite "state diff tests" & preset(): setup: + let cfg = defaultRuntimeConfig var - db = makeTestDB(SLOTS_PER_EPOCH) - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) + db = cfg.makeTestDB(SLOTS_PER_EPOCH) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) + dag = init(ChainDAGRef, cfg, db, validatorMonitor, {}) test "random slot differences" & preset(): let testStates = getTestStates(dag.headState, ConsensusFork.Capella) diff --git a/tests/test_sync_manager.nim b/tests/test_sync_manager.nim index 2be73ecc51..23c7afc4e8 100644 --- a/tests/test_sync_manager.nim +++ b/tests/test_sync_manager.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import std/[strutils, sequtils] @@ -45,6 +45,9 @@ func getStaticSlotCb(slot: Slot): GetSlotCallback = slot getSlot +proc testforkAtEpoch(epoch: Epoch): ConsensusFork = + ConsensusFork.Phase0 + type BlockEntry = object blck*: ForkedSignedBeaconBlock @@ -61,6 +64,10 @@ func createChain(slots: Slice[Slot]): seq[ref ForkedSignedBeaconBlock] = proc createChain(srange: SyncRange): seq[ref ForkedSignedBeaconBlock] = createChain(srange.slot .. (srange.slot + srange.count - 1)) +func cmp(request: SyncRequest[SomeTPeer], srange: Slice[Slot]): bool = + (request.data.start_slot() == srange.a) and + (request.data.last_slot() == srange.b) + func createBlobs( blocks: var seq[ref ForkedSignedBeaconBlock], slots: openArray[Slot] @@ -68,7 +75,9 @@ func createBlobs( var res = newSeq[ref BlobSidecar](len(slots)) for blck in blocks: withBlck(blck[]): - when consensusFork >= ConsensusFork.Deneb: + when consensusFork >= ConsensusFork.Fulu: + doAssert false # create_blob_sidecars() might not work as such + elif consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: template kzgs: untyped = forkyBlck.message.body.blob_kzg_commitments for i, slot in slots: if slot == forkyBlck.message.slot: @@ -76,7 +85,7 @@ func createBlobs( if kzgs.len > 0: forkyBlck.root = hash_tree_root(forkyBlck.message) var - kzg_proofs: KzgProofs + kzg_proofs: deneb.KzgProofs blobs: Blobs for _ in kzgs: doAssert kzg_proofs.add default(KzgProof) @@ -167,14 +176,16 @@ suite "SyncManager test suite": 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(0)), - verifier.collector) + verifier.collector, + testforkAtEpoch) of SyncQueueKind.Backward: SyncQueue.init(SomeTPeer, kind, Slot(127), Slot(0), 32'u64, # 32 slots per request 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(127)), - verifier.collector) + verifier.collector, + testforkAtEpoch) peer = SomeTPeer.init("1") r1 = sq.pop(Slot(127), peer) r2 = sq.pop(Slot(127), peer) @@ -245,14 +256,16 @@ suite "SyncManager test suite": 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(0)), - verifier.collector) + verifier.collector, + testforkAtEpoch) of SyncQueueKind.Backward: SyncQueue.init(SomeTPeer, kind, Slot(127), Slot(0), 32'u64, # 32 slots per request 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(127)), - verifier.collector) + verifier.collector, + testforkAtEpoch) peer1 = SomeTPeer.init("1") peer2 = SomeTPeer.init("2") peer3 = SomeTPeer.init("3") @@ -366,14 +379,16 @@ suite "SyncManager test suite": 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(0)), - verifier.collector) + verifier.collector, + testforkAtEpoch) of SyncQueueKind.Backward: SyncQueue.init(SomeTPeer, kind, Slot(63), Slot(0), 32'u64, # 32 slots per request 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(63)), - verifier.collector) + verifier.collector, + testforkAtEpoch) peer1 = SomeTPeer.init("1") peer2 = SomeTPeer.init("2") peer3 = SomeTPeer.init("3") @@ -466,14 +481,16 @@ suite "SyncManager test suite": 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(0)), - verifier.collector) + verifier.collector, + testforkAtEpoch) of SyncQueueKind.Backward: SyncQueue.init(SomeTPeer, kind, Slot(63), Slot(0), 32'u64, # 32 slots per request 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(63)), - verifier.collector) + verifier.collector, + testforkAtEpoch) peer1 = SomeTPeer.init("1") peer2 = 
SomeTPeer.init("2") peer3 = SomeTPeer.init("3") @@ -614,14 +631,16 @@ suite "SyncManager test suite": 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(0)), - verifier.collector) + verifier.collector, + testforkAtEpoch) of SyncQueueKind.Backward: SyncQueue.init(SomeTPeer, kind, Slot(63), Slot(0), 32'u64, # 32 slots per request 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(63)), - verifier.collector) + verifier.collector, + testforkAtEpoch) peer1 = SomeTPeer.init("1") peer2 = SomeTPeer.init("2") peer3 = SomeTPeer.init("3") @@ -721,6 +740,226 @@ suite "SyncManager test suite": await noCancel wait(verifier.verifier, 2.seconds) + asyncTest "[SyncQueue# & " & $kind & "] Empty responses should not " & + "advance queue until other peers will not confirm [3 peers] " & + "test": + var emptyResponse: seq[ref ForkedSignedBeaconBlock] + + let + scenario = + case kind + of SyncQueueKind.Forward: + [ + (Slot(32) .. Slot(63), Opt.none(VerifierError)), + (Slot(64) .. Slot(95), Opt.none(VerifierError)), + ] + of SyncQueueKind.Backward: + [ + (Slot(32) .. Slot(63), Opt.none(VerifierError)), + (Slot(0) .. Slot(31), Opt.none(VerifierError)) + ] + verifier = setupVerifier(kind, scenario) + sq = + case kind + of SyncQueueKind.Forward: + SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(95), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(0)), + verifier.collector, + testforkAtEpoch) + of SyncQueueKind.Backward: + SyncQueue.init(SomeTPeer, kind, Slot(95), Slot(0), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(127)), + verifier.collector, + testforkAtEpoch) + peer1 = SomeTPeer.init("1") + peer2 = SomeTPeer.init("2") + peer3 = SomeTPeer.init("3") + startSlot = + case kind + of SyncQueueKind.Forward: + Slot(0) + of SyncQueueKind.Backward: + Slot(95) + finishSlot = + case kind + of SyncQueueKind.Forward: + Slot(96) + of SyncQueueKind.Backward: + Slot(0) + middleSlot1 = + case kind + of SyncQueueKind.Forward: + Slot(32) + of SyncQueueKind.Backward: + Slot(63) + middleSlot2 = + case kind + of SyncQueueKind.Forward: + Slot(64) + of SyncQueueKind.Backward: + Slot(31) + + check: + sq.inpSlot == startSlot + sq.outSlot == startSlot + + let + r11 = sq.pop(Slot(127), peer1) + await sq.push(r11, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + # No movement after 1st empty response + sq.inpSlot == startSlot + sq.outSlot == startSlot + + let + r12 = sq.pop(Slot(127), peer2) + await sq.push(r12, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + # No movement after 2nd empty response + sq.inpSlot == startSlot + sq.outSlot == startSlot + + let + r13 = sq.pop(Slot(127), peer3) + await sq.push(r13, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + # After 3rd empty response we moving forward + sq.inpSlot == middleSlot1 + sq.outSlot == middleSlot1 + + let + r21 = sq.pop(Slot(127), peer1) + await sq.push(r21, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + # No movement after 1st empty response + sq.inpSlot == middleSlot1 + sq.outSlot == middleSlot1 + + let + r22 = sq.pop(Slot(127), peer2) + await sq.push(r22, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + # No movement after 2nd empty response + sq.inpSlot == middleSlot1 + sq.outSlot == middleSlot1 + + let + r23 = sq.pop(Slot(127), peer3) + d23 = createChain(r23.data) + + await sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + check: + # We got non-empty response so we should advance 
+ sq.inpSlot == middleSlot2 + sq.outSlot == middleSlot2 + + let + r31 = sq.pop(Slot(127), peer1) + await sq.push(r31, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + # No movement after 1st empty response + sq.inpSlot == middleSlot2 + sq.outSlot == middleSlot2 + + let + r32 = sq.pop(Slot(127), peer2) + d32 = createChain(r32.data) + await sq.push(r32, d32, Opt.none(seq[BlobSidecars])) + check: + # We got non-empty response, so we should advance + sq.inpSlot == finishSlot + sq.outSlot == finishSlot + + asyncTest "[SyncQueue# & " & $kind & "] Empty responses should not " & + "be accounted [3 peers] test": + var emptyResponse: seq[ref ForkedSignedBeaconBlock] + let + scenario = + case kind + of SyncQueueKind.Forward: + [ + (Slot(0) .. Slot(31), Opt.none(VerifierError)), + (Slot(32) .. Slot(63), Opt.none(VerifierError)), + (Slot(64) .. Slot(95), Opt.none(VerifierError)), + (Slot(96) .. Slot(127), Opt.none(VerifierError)), + (Slot(128) .. Slot(159), Opt.none(VerifierError)) + ] + of SyncQueueKind.Backward: + [ + (Slot(128) .. Slot(159), Opt.none(VerifierError)), + (Slot(96) .. Slot(127), Opt.none(VerifierError)), + (Slot(64) .. Slot(95), Opt.none(VerifierError)), + (Slot(32) .. Slot(63), Opt.none(VerifierError)), + (Slot(0) .. Slot(31), Opt.none(VerifierError)) + ] + verifier = setupVerifier(kind, scenario) + sq = + case kind + of SyncQueueKind.Forward: + SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(159), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(0)), + verifier.collector, + testforkAtEpoch) + of SyncQueueKind.Backward: + SyncQueue.init(SomeTPeer, kind, Slot(159), Slot(0), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(159)), + verifier.collector, + testforkAtEpoch) + slots = + case kind + of SyncQueueKind.Forward: + @[Slot(0), Slot(32), Slot(64), Slot(96), Slot(128)] + of SyncQueueKind.Backward: + @[Slot(128), Slot(96), Slot(64), Slot(32), Slot(0)] + peer1 = SomeTPeer.init("1") + peer2 = SomeTPeer.init("2") + peer3 = SomeTPeer.init("3") + + let + r11 = sq.pop(Slot(159), peer1) + r21 = sq.pop(Slot(159), peer2) + await sq.push(r11, emptyResponse, Opt.none(seq[BlobSidecars])) + let + r12 = sq.pop(Slot(159), peer1) + r13 = sq.pop(Slot(159), peer1) + # This should not raise an assertion, as the previously sent empty + # response should not be taken into account. 
+ r14 = sq.pop(Slot(159), peer1) + + expect AssertionDefect: + let r1e {.used.} = sq.pop(Slot(159), peer1) + + check: + r11.data.slot == slots[0] + r12.data.slot == slots[1] + r13.data.slot == slots[2] + r14.data.slot == slots[3] + + # Scenario requires some finish steps + await sq.push(r21, createChain(r21.data), Opt.none(seq[BlobSidecars])) + let r22 = sq.pop(Slot(159), peer2) + await sq.push(r22, createChain(r22.data), Opt.none(seq[BlobSidecars])) + let r23 = sq.pop(Slot(159), peer2) + await sq.push(r23, createChain(r23.data), Opt.none(seq[BlobSidecars])) + let r24 = sq.pop(Slot(159), peer2) + await sq.push(r24, createChain(r24.data), Opt.none(seq[BlobSidecars])) + let r35 = sq.pop(Slot(159), peer3) + await sq.push(r35, createChain(r35.data), Opt.none(seq[BlobSidecars])) + + await noCancel wait(verifier.verifier, 2.seconds) + asyncTest "[SyncQueue# & " & $kind & "] Combination of missing parent " & "and good blocks [3 peers] test": let @@ -771,14 +1010,16 @@ suite "SyncManager test suite": 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(0)), - verifier.collector) + verifier.collector, + testforkAtEpoch) of SyncQueueKind.Backward: SyncQueue.init(SomeTPeer, kind, Slot(63), Slot(0), 32'u64, # 32 slots per request 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(63)), - verifier.collector) + verifier.collector, + testforkAtEpoch) peer1 = SomeTPeer.init("1") peer2 = SomeTPeer.init("2") peer3 = SomeTPeer.init("3") @@ -862,6 +1103,139 @@ suite "SyncManager test suite": await noCancel wait(verifier.verifier, 2.seconds) + test "[SyncQueue# & " & $kind & "] epochFilter() test": + let + aq = newAsyncQueue[BlockEntry]() + scenario = + case kind + of SyncQueueKind.Forward: + @[ + ( + Slot(0), 128, 13, + @[ConsensusFork.Phase0, ConsensusFork.Altair, + ConsensusFork.Bellatrix], + @[Slot(0)..Slot(12), Slot(13)..Slot(25), Slot(26)..Slot(31), + Slot(32)..Slot(44), Slot(45)..Slot(57), Slot(58)..Slot(63), + Slot(64)..Slot(76)] + ), + ( + Slot(0), 128, 31, + @[ConsensusFork.Phase0, ConsensusFork.Altair, + ConsensusFork.Bellatrix, ConsensusFork.Capella], + @[Slot(0)..Slot(30), Slot(31)..Slot(31), Slot(32)..Slot(62), + Slot(63)..Slot(63), Slot(64)..Slot(94), Slot(95)..Slot(95), + Slot(96)..Slot(126)] + ), + ( + Slot(0), 128, 32, # Size of chunk equal to SLOTS_PER_EPOCH + @[ConsensusFork.Phase0, ConsensusFork.Altair, + ConsensusFork.Bellatrix, ConsensusFork.Capella], + @[Slot(0)..Slot(31), Slot(32)..Slot(63), Slot(64)..Slot(95), + Slot(96)..Slot(127)] + ), + ( + Slot(0), 192, 33, # Size of chunk bigger than SLOTS_PER_EPOCH + @[ConsensusFork.Phase0, ConsensusFork.Altair, + ConsensusFork.Bellatrix, ConsensusFork.Capella], + @[Slot(0)..Slot(31), Slot(32)..Slot(63), Slot(64)..Slot(95), + Slot(96)..Slot(128), Slot(129)..Slot(161)] + ), + ( + Slot(0), 192, 192, # Size of chunk bigger than SLOTS_PER_EPOCH + @[ConsensusFork.Phase0, ConsensusFork.Altair, + ConsensusFork.Bellatrix, ConsensusFork.Capella, + ConsensusFork.Deneb, ConsensusFork.Electra], + @[Slot(0)..Slot(31), Slot(32)..Slot(63), Slot(64)..Slot(95), + Slot(96)..Slot(127)] + ) + ] + of SyncQueueKind.Backward: + @[ + ( + Slot(95), 96, 13, + @[ConsensusFork.Phase0, ConsensusFork.Altair, + ConsensusFork.Bellatrix], + @[Slot(83)..Slot(95), Slot(70)..Slot(82), Slot(64)..Slot(69), + Slot(51)..Slot(63), Slot(38)..Slot(50), Slot(32)..Slot(37), + Slot(19)..Slot(31), Slot(6)..Slot(18), Slot(0)..Slot(5)] + ), + ( + Slot(127), 128, 31, + @[ConsensusFork.Phase0, ConsensusFork.Altair, + ConsensusFork.Bellatrix, 
ConsensusFork.Capella], + @[Slot(97)..Slot(127), Slot(96)..Slot(96), Slot(65)..Slot(95), + Slot(64)..Slot(64), Slot(33)..Slot(63), Slot(32)..Slot(32), + Slot(1)..Slot(31), Slot(0)..Slot(0)] + ), + ( + Slot(127), 128, 32, # Size of chunk equal to SLOTS_PER_EPOCH + @[ConsensusFork.Phase0, ConsensusFork.Altair, + ConsensusFork.Bellatrix, ConsensusFork.Capella], + @[Slot(96)..Slot(127), Slot(64)..Slot(95), Slot(32)..Slot(63), + Slot(0)..Slot(31)] + ), + ( + Slot(127), 128, 33, # Size of chunk bigger than SLOTS_PER_EPOCH + @[ConsensusFork.Phase0, ConsensusFork.Altair, + ConsensusFork.Bellatrix, ConsensusFork.Capella], + @[Slot(96)..Slot(127), Slot(64)..Slot(95), Slot(32)..Slot(63), + Slot(0)..Slot(31)] + ), + ( + Slot(127), 128, 128, # Size of chunk bigger than SLOTS_PER_EPOCH + @[ConsensusFork.Phase0, ConsensusFork.Altair, + ConsensusFork.Bellatrix, ConsensusFork.Capella], + @[Slot(96)..Slot(127), Slot(64)..Slot(95), Slot(32)..Slot(63), + Slot(0)..Slot(31)] + ) + ] + + func epochManager(epochs: openArray[ConsensusFork]): ForkAtEpochCallback = + var epochsSeq = @epochs + proc forkAtEpoch(epoch: Epoch): ConsensusFork = + let index = int(epoch) + if index >= len(epochsSeq): + epochsSeq[^1] + elif index < 0: + epochsSeq[0] + else: + epochsSeq[index] + forkAtEpoch + + for vector in scenario: + case kind + of SyncQueueKind.Forward: + let + maxSlot = vector[0] + uint64(vector[1]) - 1'u64 + sq = + SyncQueue.init(SomeTPeer, kind, vector[0], maxSlot, + uint64(vector[2]), + 9, # 8 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(0)), + collector(aq), + epochManager(vector[3])) + peer = SomeTPeer.init("1") + for srange in vector[4]: + let request = sq.pop(maxSlot, peer) + check cmp(request, srange) + of SyncQueueKind.Backward: + let + minSlot = vector[0] + 1'u64 - uint64(vector[1]) + maxSlot = vector[0] + sq = + SyncQueue.init(SomeTPeer, kind, vector[0], minSlot, + uint64(vector[2]), + 9, # 8 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(0)), + collector(aq), + epochManager(vector[3])) + peer = SomeTPeer.init("1") + for srange in vector[4]: + let request = sq.pop(maxSlot, peer) + check cmp(request, srange) + asyncTest "[SyncQueue#Forward] Missing parent and exponential rewind " & "[3 peers] test": let @@ -891,7 +1265,8 @@ suite "SyncManager test suite": 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(0)), - verifier.collector) + verifier.collector, + testforkAtEpoch) peer1 = SomeTPeer.init("1") peer2 = SomeTPeer.init("2") peer3 = SomeTPeer.init("3") @@ -1053,7 +1428,8 @@ suite "SyncManager test suite": 3, # 3 concurrent requests 2, # 2 failures allowed getStaticSlotCb(Slot(159)), - verifier.collector) + verifier.collector, + testforkAtEpoch) peer1 = SomeTPeer.init("1") peer2 = SomeTPeer.init("2") peer3 = SomeTPeer.init("3") @@ -1221,7 +1597,7 @@ suite "SyncManager test suite": queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward, Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64), 1'u64, 3, 2, getStaticSlotCb(Slot(0)), - collector(aq)) + collector(aq), testforkAtEpoch) finalizedSlot = start_slot(Epoch(0'u64)) epochStartSlot = start_slot(Epoch(0'u64)) + 1'u64 finishSlot = start_slot(Epoch(2'u64)) @@ -1234,7 +1610,7 @@ suite "SyncManager test suite": queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward, Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64), 1'u64, 3, 2, getStaticSlotCb(Slot(0)), - collector(aq)) + collector(aq), testforkAtEpoch) finalizedSlot = start_slot(Epoch(1'u64)) epochStartSlot = start_slot(Epoch(1'u64)) + 1'u64 finishSlot = 
start_slot(Epoch(3'u64)) @@ -1247,7 +1623,7 @@ suite "SyncManager test suite": queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward, Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64), 1'u64, 3, 2, getStaticSlotCb(Slot(0)), - collector(aq)) + collector(aq), testforkAtEpoch) finalizedSlot = start_slot(Epoch(0'u64)) failSlot = Slot(0xFFFF_FFFF_FFFF_FFFFF'u64) failEpoch = epoch(failSlot) @@ -1266,7 +1642,7 @@ suite "SyncManager test suite": queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward, Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64), 1'u64, 3, 2, getStaticSlotCb(Slot(0)), - collector(aq)) + collector(aq), testforkAtEpoch) let finalizedSlot = start_slot(Epoch(1'u64)) failSlot = Slot(0xFFFF_FFFF_FFFF_FFFFF'u64) @@ -1289,7 +1665,8 @@ suite "SyncManager test suite": getSafeSlot = getStaticSlotCb(Slot(1024)) queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Backward, Slot(1024), Slot(0), - 1'u64, 3, 2, getSafeSlot, collector(aq)) + 1'u64, 3, 2, getSafeSlot, collector(aq), + testforkAtEpoch) safeSlot = getSafeSlot() for i in countdown(1023, 0): @@ -1364,7 +1741,7 @@ suite "SyncManager test suite": test "[SyncQueue] checkBlobsResponse() test": const maxBlobsPerBlockElectra = 9 - + proc checkBlobsResponse[T]( req: SyncRequest[T], data: openArray[Slot]): Result[void, cstring] = diff --git a/tests/test_toblindedblock.nim b/tests/test_toblindedblock.nim index b5e2a1bb80..d49eb85d43 100644 --- a/tests/test_toblindedblock.nim +++ b/tests/test_toblindedblock.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import @@ -145,7 +145,8 @@ template fulu_steps() = suite "Blinded block conversions": withAll(ConsensusFork): - when consensusFork >= ConsensusFork.Bellatrix: + debugGloasComment "needs toSignedBlindedBeaconBlock" + when consensusFork >= ConsensusFork.Bellatrix and consensusFork != ConsensusFork.Gloas: test $consensusFork & " toSignedBlindedBeaconBlock": var b = default(consensusFork.SignedBeaconBlock) do_check @@ -158,4 +159,5 @@ suite "Blinded block conversions": electra_steps when consensusFork >= ConsensusFork.Fulu: fulu_steps - static: doAssert high(ConsensusFork) == ConsensusFork.Fulu + debugGloasComment "" + static: doAssert high(ConsensusFork) == ConsensusFork.Gloas diff --git a/tests/test_validator_bucket_sort.nim b/tests/test_validator_bucket_sort.nim index e27bbe423e..f7491947ea 100644 --- a/tests/test_validator_bucket_sort.nim +++ b/tests/test_validator_bucket_sort.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -228,8 +228,8 @@ func findValidatorIndexBruteforce( if validators[validatorIndex.distinctBase].pubkey == h2: return Opt.some validatorIndex for validatorIndex in bsv.bucketSorted: - if validators[validatorIndex].pubkey == h2: - return Opt.some validatorIndex.ValidatorIndex + if validators[validatorIndex.distinctBase].pubkey == h2: + return Opt.some validatorIndex Opt.none ValidatorIndex suite "ValidatorPubKey bucket sort": diff --git a/tests/test_validator_change_pool.nim b/tests/test_validator_change_pool.nim index 51c273d305..337e687064 100644 --- a/tests/test_validator_change_pool.nim +++ b/tests/test_validator_change_pool.nim @@ -1,16 +1,15 @@ # beacon_chain -# Copyright (c) 2020-2024 Status Research & Development GmbH +# Copyright (c) 2020-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -{.push raises: [].} +{.push raises: [], gcsafe.} {.used.} import - ../beacon_chain/spec/[ - datatypes/base, forks, presets, signatures, state_transition], + ../beacon_chain/spec/[forks, presets, signatures, state_transition], ../beacon_chain/consensus_object_pools/[ block_quarantine, blockchain_dag, validator_change_pool], "."/[testutil, testblockutil, testdbutil] @@ -82,11 +81,10 @@ suite "Validator change pool testing suite": tmp.FULU_FORK_EPOCH = Epoch(tmp.SHARD_COMMITTEE_PERIOD) + 5 tmp - validatorMonitor = newClone(ValidatorMonitor.init()) - dag = init( - ChainDAGRef, cfg, makeTestDB(SLOTS_PER_EPOCH * 3), - validatorMonitor, {}) - fork = dag.forkAtEpoch(Epoch(0)) + validatorMonitor = newClone(ValidatorMonitor.init(cfg.time)) + dag = ChainDAGRef.init( + cfg, cfg.makeTestDB(SLOTS_PER_EPOCH * 3), validatorMonitor, {}) + fork {.used.} = dag.forkAtEpoch(Epoch(0)) genesis_validators_root = dag.genesis_validators_root pool = newClone(ValidatorChangePool.init(dag)) @@ -200,7 +198,6 @@ suite "Validator change pool testing suite": dag.cfg, dag.headState, Epoch(dag.cfg.SHARD_COMMITTEE_PERIOD).start_slot + 1 + SLOTS_PER_EPOCH * 1, cache, info, {}).expect("ok") - let fork = dag.forkAtEpoch(dag.headState.get_current_epoch()) for i in 0'u64 .. MAX_BLS_TO_EXECUTION_CHANGES + 5: for j in 0'u64 .. i: @@ -231,7 +228,6 @@ suite "Validator change pool testing suite": dag.cfg, dag.headState, Epoch(dag.cfg.SHARD_COMMITTEE_PERIOD).start_slot + 1 + SLOTS_PER_EPOCH * 2, cache, info, {}).expect("ok") - let fork = dag.forkAtEpoch(dag.headState.get_current_epoch()) for i in 0'u64 .. 
MAX_BLS_TO_EXECUTION_CHANGES + 5: var priorityMessages: seq[SignedBLSToExecutionChange] @@ -286,4 +282,4 @@ suite "Validator change pool testing suite": # Message signed with a (fork-2) domain can no longer be added as that # fork is not present in the BeaconState and thus fails transition pool[].getBeaconBlockValidatorChanges( - cfg, forkyState.data).voluntary_exits.lenu64 == 0 \ No newline at end of file + cfg, forkyState.data).voluntary_exits.lenu64 == 0 diff --git a/tests/test_validator_client.nim b/tests/test_validator_client.nim index 8da419d56a..2df4f4b724 100644 --- a/tests/test_validator_client.nim +++ b/tests/test_validator_client.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -9,7 +9,7 @@ {.used.} import std/strutils -import httputils +import httputils, stew/base10 import chronos/apps/http/httpserver import chronos/unittest2/asynctests import ../beacon_chain/spec/eth2_apis/eth2_rest_serialization, @@ -506,12 +506,7 @@ proc init(t: typedesc[ProduceSyncCommitteeContributionResponse], proc init(t: typedesc[GetBlockRootResponse], optimistic: Opt[bool], root: Eth2Digest): GetBlockRootResponse = - let optopt = - if optimistic.isNone(): - none[bool]() - else: - some(optimistic.get()) - GetBlockRootResponse(data: RestRoot(root: root), execution_optimistic: optopt) + GetBlockRootResponse(data: RestRoot(root: root), execution_optimistic: optimistic) proc createRootsSeen( root: tuple[root: string, slot: uint64]): Table[Eth2Digest, Slot] = @@ -536,9 +531,7 @@ suite "Validator Client test suite": if mediaType == ApplicationJsonMediaType: try: - ok RestJson.decode(value, T, - requireAllFields = true, - allowUnknownFields = true) + ok RestJson.decode(value, T) except SerializationError: err("Serialization error") else: @@ -863,7 +856,7 @@ suite "Validator Client test suite": response.isErr() gotCancellation == true - asyncTest "bestSuccess() API timeout test": + asyncTest "bestSuccess() API hard timeout test": let uri = parseUri("http://127.0.0.1/") beaconNodes = @[BeaconNodeServerRef.init(uri, 0).tryGet()] @@ -893,6 +886,7 @@ suite "Validator Client test suite": RestPlainResponse, uint64, float64, + 50.milliseconds, 100.milliseconds, AllBeaconNodeStatuses, {BeaconNodeRole.Duties}, @@ -908,6 +902,237 @@ suite "Validator Client test suite": response.isErr() gotCancellation == true + asyncTest "bestSuccess() API soft timeout test": + let + strategy = ApiStrategyKind.Best + beaconNodes = @[ + BeaconNodeServerRef.init(parseUri("http://127.0.0.1/"), 0).tryGet(), + BeaconNodeServerRef.init(parseUri("http://127.0.0.2/"), 1).tryGet(), + BeaconNodeServerRef.init(parseUri("http://127.0.0.3/"), 2).tryGet(), + BeaconNodeServerRef.init(parseUri("http://127.0.0.4/"), 3).tryGet() + ] + vconf = ValidatorClientConf.load( + cmdLine = mapIt([ + "--beacon-node=http://127.0.0.1", + "--beacon-node=http://127.0.0.2", + "--beacon-node=http://127.0.0.3", + "--beacon-node=http://127.0.0.4" + ], it)) + epoch = Epoch(1) + + let + vc = newClone(ValidatorClient(config: vconf, beaconNodes: beaconNodes)) + + vc.fallbackService = await FallbackServiceRef.init(vc) + + proc getIndex(hostname: string): int = + case hostname + of "127.0.0.1": 0 + of "127.0.0.2": 1 + of 
"127.0.0.3": 2 + of "127.0.0.4": 3 + else: -1 + + proc init(t: typedesc[RestPlainResponse], data: string): RestPlainResponse = + RestPlainResponse( + status: 200, + contentType: Opt.some(getContentType("text/plain").get()), + data: stringToBytes(data) + ) + + template generateTestProcedures( + tm1, tm2, tm3, tm4: untyped, + rsps1, rsps2, rsps3, rsps4: static string, + rspu1, rspu2, rspu3, rspu4: static uint64, + score1, score2, score3, score4: static float64 + ) = + proc getTestDuties( + client: RestClientRef, + epoch: Epoch + ): Future[RestPlainResponse] {.async: (raises: [CancelledError]).} = + let index = getIndex(client.address.hostname) + try: + case index + of 0: + await sleepAsync(tm1) + events[0].fire() + RestPlainResponse.init(rsps1) + of 1: + await sleepAsync(tm2) + events[1].fire() + RestPlainResponse.init(rsps2) + of 2: + await sleepAsync(tm3) + events[2].fire() + RestPlainResponse.init(rsps3) + of 3: + await sleepAsync(tm4) + events[3].fire() + RestPlainResponse.init(rsps4) + else: + raiseAssert "Should not be here" + except CancelledError as exc: + cancellations[index] = true + events[index].fire() + raise exc + + proc getTestScore(data: uint64): float64 = + case data + of rspu1: + score1 + of rspu2: + score2 + of rspu3: + score3 + of rspu4: + score4 + else: + raiseAssert "Should not be here" + + const + RequestName = "getTestDuties" + + block: + let events = @[ + newAsyncEvent(), newAsyncEvent(), newAsyncEvent(), newAsyncEvent() + ] + var cancellations = @[false, false, false, false] + + generateTestProcedures( + 1500.milliseconds, + 900.milliseconds, + 600.milliseconds, + 1200.milliseconds, + "0", "10", "100", "1000", + 0'u64, 10'u64, 100'u64, 1000'u64, + 0'f64, 10'f64, 100'f64, 1000'f64 + ) + + let + response = + vc.bestSuccess( + RestPlainResponse, + uint64, + float64, + 500.milliseconds, + 1000.milliseconds, + AllBeaconNodeStatuses, + {BeaconNodeRole.Duties}, + getTestDuties(it, epoch), + getTestScore(itresponse)): + if apiResponse.isErr(): + ApiResponse[uint64].err(apiResponse.error) + else: + let response = apiResponse.get() + case response.status + of 200: + ApiResponse[uint64].ok( + Base10.decode(uint64, response.data).get()) + else: + ApiResponse[uint64].ok(0'u64) + pendingFutures = events.mapIt(it.wait()) + + await allFutures(pendingFutures) + + check: + cancellations == @[true, false, false, true] + response.isOk() + response.get() == 100'u64 + + block: + let events = @[ + newAsyncEvent(), newAsyncEvent(), newAsyncEvent(), newAsyncEvent() + ] + var cancellations = @[false, false, false, false] + + generateTestProcedures( + 1500.milliseconds, + 100.milliseconds, + 1200.milliseconds, + 1100.milliseconds, + "0", "10", "100", "1000", + 0'u64, 10'u64, 100'u64, 1000'u64, + 0'f64, 10'f64, 100'f64, 1000'f64 + ) + + let + response = + vc.bestSuccess( + RestPlainResponse, + uint64, + float64, + 500.milliseconds, + 1000.milliseconds, + AllBeaconNodeStatuses, + {BeaconNodeRole.Duties}, + getTestDuties(it, epoch), + getTestScore(itresponse)): + if apiResponse.isErr(): + ApiResponse[uint64].err(apiResponse.error) + else: + let response = apiResponse.get() + case response.status + of 200: + ApiResponse[uint64].ok( + Base10.decode(uint64, response.data).get()) + else: + ApiResponse[uint64].ok(0'u64) + pendingFutures = events.mapIt(it.wait()) + + await allFutures(pendingFutures) + + check: + cancellations == @[true, false, true, true] + response.isOk() + response.get() == 10'u64 + + block: + let events = @[ + newAsyncEvent(), newAsyncEvent(), newAsyncEvent(), newAsyncEvent() + ] + 
var cancellations = @[false, false, false, false] + + generateTestProcedures( + 1500.milliseconds, + 100.milliseconds, + 300.milliseconds, + 1200.milliseconds, + "0", "10", "100", "1000", + 0'u64, 10'u64, 100'u64, 1000'u64, + 0'f64, 10'f64, 100'f64, 1000'f64 + ) + + let + response = + vc.bestSuccess( + RestPlainResponse, + uint64, + float64, + 500.milliseconds, + 1000.milliseconds, + AllBeaconNodeStatuses, + {BeaconNodeRole.Duties}, + getTestDuties(it, epoch), + getTestScore(itresponse)): + if apiResponse.isErr(): + ApiResponse[uint64].err(apiResponse.error) + else: + let response = apiResponse.get() + case response.status + of 200: + ApiResponse[uint64].ok( + Base10.decode(uint64, response.data).get()) + else: + ApiResponse[uint64].ok(0'u64) + pendingFutures = events.mapIt(it.wait()) + + await allFutures(pendingFutures) + + check: + cancellations == @[true, false, false, true] + response.isOk() + response.get() == 100'u64 + test "getLiveness() response deserialization test": proc generateLivenessResponse(T: typedesc[string], start, count, modv: int): string = diff --git a/tests/testbcutil.nim b/tests/testbcutil.nim index 148906935a..b21d90c7b9 100644 --- a/tests/testbcutil.nim +++ b/tests/testbcutil.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -11,17 +11,27 @@ import results from ../beacon_chain/consensus_object_pools/block_clearance import addHeadBlockWithParent -from ../beacon_chain/consensus_object_pools/block_dag import BlockRef +from ../beacon_chain/consensus_object_pools/block_dag import + BlockRef, OptimisticStatus from ../beacon_chain/consensus_object_pools/block_pools_types import - ChainDAGRef, OnForkyBlockAdded, VerifierError + ChainDAGRef, OnBlockAdded, VerifierError from ../beacon_chain/spec/forks import ForkySignedBeaconBlock from ../beacon_chain/spec/signatures_batch import BatchVerifier -proc addHeadBlock*( +proc addHeadBlockImpl( dag: ChainDAGRef, verifier: var BatchVerifier, signedBlock: ForkySignedBeaconBlock, - onBlockAdded: OnForkyBlockAdded + onBlockAdded: OnBlockAdded ): Result[BlockRef, VerifierError] = addHeadBlockWithParent( dag, verifier, signedBlock, ? dag.checkHeadBlock(signedBlock), - executionValid = true, onBlockAdded) + OptimisticStatus.valid, onBlockAdded) + +template addHeadBlock*( + dag: ChainDAGRef, verifier: var BatchVerifier, + signedBlock: ForkySignedBeaconBlock, + onBlockAddedParam: untyped + ): Result[BlockRef, VerifierError] = + let onBlockAdded: OnBlockAdded[typeof(signedBlock).kind] = onBlockAddedParam + + addHeadBlockImpl(dag, verifier, signedBlock, onBlockAdded) diff --git a/tests/testblockutil.nim b/tests/testblockutil.nim index fb0a2830ac..53660ad4e8 100644 --- a/tests/testblockutil.nim +++ b/tests/testblockutil.nim @@ -5,12 +5,13 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} import chronicles, stew/endians2, ../beacon_chain/consensus_object_pools/sync_committee_msg_pool, + ../beacon_chain/el/engine_api_conversions, ../beacon_chain/spec/datatypes/bellatrix, ../beacon_chain/spec/[ beaconstate, helpers, keystore, signatures, state_transition, validator] @@ -18,8 +19,6 @@ import # TODO remove this dependency from std/random import rand -from eth/common/eth_types_rlp import rlpHash - type MockPrivKeysT = object MockPubKeysT = object @@ -73,18 +72,18 @@ proc makeInitialDeposits*( result.add makeDeposit(i, flags, cfg = cfg) func signBlock( - fork: Fork, genesis_validators_root: Eth2Digest, forked: ForkedBeaconBlock, + fork: Fork, genesis_validators_root: Eth2Digest, blck: ForkyBeaconBlock, privKey: ValidatorPrivKey, flags: UpdateFlags = {}): ForkedSignedBeaconBlock = let - slot = withBlck(forked): forkyBlck.slot - root = hash_tree_root(forked) + slot = blck.slot + root = hash_tree_root(blck) signature = if skipBlsValidation notin flags: get_block_signature( fork, genesis_validators_root, slot, root, privKey).toValidatorSig() else: ValidatorSig() - ForkedSignedBeaconBlock.init(forked, root, signature) + ForkedSignedBeaconBlock.init(ForkedBeaconBlock.init(blck), root, signature) from eth/eip1559 import EIP1559_INITIAL_BASE_FEE, calcEip1599BaseFee from eth/common/eth_types import EMPTY_ROOT_HASH, GasInt @@ -104,7 +103,7 @@ func build_empty_merge_execution_payload(state: bellatrix.BeaconState): var payload = bellatrix.ExecutionPayload( parent_hash: latest.block_hash, state_root: latest.state_root, # no changes to the state - receipts_root: EMPTY_ROOT_HASH, + receipts_root: EMPTY_ROOT_HASH.asEth2Digest, block_number: latest.block_number + 1, prev_randao: randao_mix, gas_limit: 30000000, # retain same limit @@ -134,9 +133,9 @@ func build_empty_execution_payload( var payload = bellatrix.ExecutionPayloadForSigning( executionPayload: bellatrix.ExecutionPayload( parent_hash: latest.block_hash, - fee_recipient: bellatrix.ExecutionAddress(data: distinctBase(feeRecipient)), + fee_recipient: feeRecipient, state_root: latest.state_root, # no changes to the state - receipts_root: EMPTY_ROOT_HASH, + receipts_root: EMPTY_ROOT_HASH.asEth2Digest, block_number: latest.block_number + 1, prev_randao: randao_mix, gas_limit: latest.gas_limit, # retain same limit @@ -151,6 +150,10 @@ func build_empty_execution_payload( payload +func lastPremergeSlotInTestCfg*(cfg: RuntimeConfig): Slot = + # Merge shortly after Bellatrix + cfg.BELLATRIX_FORK_EPOCH.start_slot + 10 + proc addTestBlock*( state: var ForkedHashedBeaconState, cache: var StateCache, @@ -183,7 +186,7 @@ proc addTestBlock*( else: ValidatorSig() - let message = withState(state): + withState(state): let execution_payload = when consensusFork > ConsensusFork.Bellatrix: default(consensusFork.ExecutionPayloadForSigning) @@ -193,9 +196,7 @@ proc addTestBlock*( # test relies on merging. So, merge only if no Capella transition. 
default(bellatrix.ExecutionPayloadForSigning) else: - # Merge shortly after Bellatrix - if forkyState.data.slot > - cfg.BELLATRIX_FORK_EPOCH * SLOTS_PER_EPOCH + 10: + if forkyState.data.slot > cfg.lastPremergeSlotInTestCfg: if is_merge_transition_complete(forkyState.data): const feeRecipient = default(Eth1Address) build_empty_execution_payload(forkyState.data, feeRecipient) @@ -206,9 +207,11 @@ proc addTestBlock*( else: default(bellatrix.ExecutionPayloadForSigning) - makeBeaconBlock( + let message = makeBeaconBlock( cfg, - state, + consensusFork, + forkyState, + cache, proposer_index, randao_reveal, # Keep deposit counts internally consistent. @@ -217,7 +220,7 @@ proc addTestBlock*( deposit_count: forkyState.data.eth1_deposit_index + deposits.lenu64, block_hash: eth1_data.block_hash), graffiti, - when consensusFork == ConsensusFork.Electra: + when consensusFork >= ConsensusFork.Electra: electraAttestations elif consensusFork == ConsensusFork.Fulu: electraAttestations @@ -229,20 +232,10 @@ proc addTestBlock*( BeaconBlockValidatorChanges(), sync_aggregate, execution_payload, - noRollback, - cache, - verificationFlags = {skipBlsValidation}) - - if message.isErr: - raiseAssert "Failed to create a block: " & $message.error - - let - new_block = signBlock( - getStateField(state, fork), - getStateField(state, genesis_validators_root), message.get(), privKey, - flags) + verificationFlags = {skipBlsValidation}).expect("block") - new_block + signBlock( + forkyState.data.fork, forkyState.data.genesis_validators_root, message, privKey, flags) proc makeTestBlock*( state: ForkedHashedBeaconState, diff --git a/tests/testdbutil.nim b/tests/testdbutil.nim index 1dc590f980..af44828855 100644 --- a/tests/testdbutil.nim +++ b/tests/testdbutil.nim @@ -21,10 +21,10 @@ from ../beacon_chain/spec/beaconstate import export beacon_chain_db, testblockutil, kvstore, kvstore_sqlite3 proc makeTestDB*( + cfg: RuntimeConfig, validators: Natural, eth1Data = Opt.none(Eth1Data), - flags: UpdateFlags = {}, - cfg = defaultRuntimeConfig): BeaconChainDB = + flags: UpdateFlags = {}): BeaconChainDB = # Blob support requires DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH # Data column support requires FULU_FORK_EPOCH != FAR_FUTURE_EPOCH var cfg = cfg @@ -63,7 +63,7 @@ proc makeTestDB*( hash_tree_root(default(BeaconBlockBody(consensusFork))) forkyState.root = hash_tree_root(forkyState.data) - result = BeaconChainDB.new("", cfg = cfg, inMemory = true) + result = BeaconChainDB.new("", cfg, inMemory = true) ChainDAGRef.preInit(result, genState[]) proc getEarliestInvalidBlockRoot*( @@ -103,4 +103,4 @@ proc getEarliestInvalidBlockRoot*( break curBlck = curBlck.parent - curBlck.root \ No newline at end of file + curBlck.root diff --git a/tests/teststateutil.nim b/tests/teststateutil.nim index 907e6a0cd7..cc25f7be7d 100644 --- a/tests/teststateutil.nim +++ b/tests/teststateutil.nim @@ -5,7 +5,7 @@ # * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-{.push raises: [].} +{.push raises: [], gcsafe.} import chronicles, @@ -13,9 +13,6 @@ import ../beacon_chain/spec/[forks, state_transition] from ".."/beacon_chain/validator_bucket_sort import sortValidatorBuckets -from ".."/beacon_chain/spec/state_transition_epoch import - get_validator_balance_after_epoch, get_next_slot_expected_withdrawals, - process_epoch func round_multiple_down(x: Gwei, n: Gwei): Gwei = ## Round the input to the previous multiple of "n" @@ -79,7 +76,7 @@ proc getTestStates*( info = ForkedEpochInfo() cfg = defaultRuntimeConfig - static: doAssert high(ConsensusFork) == ConsensusFork.Fulu + static: doAssert high(ConsensusFork) == ConsensusFork.Gloas if consensusFork >= ConsensusFork.Altair: cfg.ALTAIR_FORK_EPOCH = 1.Epoch if consensusFork >= ConsensusFork.Bellatrix: @@ -92,6 +89,8 @@ proc getTestStates*( cfg.ELECTRA_FORK_EPOCH = 5.Epoch if consensusFork >= ConsensusFork.Fulu: cfg.FULU_FORK_EPOCH = 6.Epoch + if consensusFork >= ConsensusFork.Gloas: + cfg.GLOAS_FORK_EPOCH = 7.Epoch for i, epoch in stateEpochs: let slot = epoch.Epoch.start_slot @@ -106,21 +105,3 @@ proc getTestStates*( if tmpState[].kind == consensusFork: result.add assignClone(tmpState[]) - -from std/sequtils import allIt -from ".."/beacon_chain/spec/beaconstate import get_expected_withdrawals - -proc checkPerValidatorBalanceCalc*( - state: deneb.BeaconState | electra.BeaconState | - fulu.BeaconState): bool = - var - info: altair.EpochInfo - cache: StateCache - let tmpState = newClone(state) # slow, but tolerable for tests - discard process_epoch(defaultRuntimeConfig, tmpState[], {}, cache, info) - - allIt(0 ..< tmpState.balances.len, - tmpState.balances.item(it) == get_validator_balance_after_epoch( - defaultRuntimeConfig, state, cache, info, it.ValidatorIndex)) and - get_expected_withdrawals(tmpState[]) == get_next_slot_expected_withdrawals( - defaultRuntimeConfig, state, cache, info) \ No newline at end of file diff --git a/vendor/EIPs b/vendor/EIPs deleted file mode 160000 index 73fbb29019..0000000000 --- a/vendor/EIPs +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 73fbb29019c19887235c1da456cfbfd5b4835184 diff --git a/vendor/holesky b/vendor/holesky index 32a72e21c6..8aec65f11f 160000 --- a/vendor/holesky +++ b/vendor/holesky @@ -1 +1 @@ -Subproject commit 32a72e21c6e53c262f27d50dd540cb654517d03a +Subproject commit 8aec65f11f0c986d6b76b2eb902420635eb9b815 diff --git a/vendor/hoodi b/vendor/hoodi index 08dd242abd..cb39a1e931 160000 --- a/vendor/hoodi +++ b/vendor/hoodi @@ -1 +1 @@ -Subproject commit 08dd242abdb1f93026453bf8e63e6bba1c7b1bc1 +Subproject commit cb39a1e931b797f0b917b340db9cd889c5b9a0b1 diff --git a/vendor/mainnet b/vendor/mainnet index 978f1794ea..5a94a36cde 160000 --- a/vendor/mainnet +++ b/vendor/mainnet @@ -1 +1 @@ -Subproject commit 978f1794eada6f85bee76e4d2d5959a5fb8e0cc5 +Subproject commit 5a94a36cde0af52ff7c486331bff4f690a1438f2 diff --git a/vendor/nim-bearssl b/vendor/nim-bearssl index c7683c5a62..b16f70bd8e 160000 --- a/vendor/nim-bearssl +++ b/vendor/nim-bearssl @@ -1 +1 @@ -Subproject commit c7683c5a6221605bbab31b53d01feb3a3161bb8b +Subproject commit b16f70bd8e14abed1c9f03f93757547cf1e95c03 diff --git a/vendor/nim-blscurve b/vendor/nim-blscurve index 52ae4332c7..f4d0de2eec 160000 --- a/vendor/nim-blscurve +++ b/vendor/nim-blscurve @@ -1 +1 @@ -Subproject commit 52ae4332c749d89fa05226f5493decae568f682c +Subproject commit f4d0de2eece20380541fbf73d4b8bf57dc214b3b diff --git a/vendor/nim-chronicles b/vendor/nim-chronicles index a8fb38a10b..67da11265f 160000 --- 
a/vendor/nim-chronicles +++ b/vendor/nim-chronicles @@ -1 +1 @@ -Subproject commit a8fb38a10bcb548df78e9a70bd77b26bb50abd12 +Subproject commit 67da11265f8defa5b2c7d2f17ab5b1637c6a1d41 diff --git a/vendor/nim-chronos b/vendor/nim-chronos index 0646c444fc..bb0a8f07bc 160000 --- a/vendor/nim-chronos +++ b/vendor/nim-chronos @@ -1 +1 @@ -Subproject commit 0646c444fce7c7ed08ef6f2c9a7abfd172ffe655 +Subproject commit bb0a8f07bcff3fb79b20720e4bb286fccedae93a diff --git a/vendor/nim-confutils b/vendor/nim-confutils index e214b3992a..eec84f4b5e 160000 --- a/vendor/nim-confutils +++ b/vendor/nim-confutils @@ -1 +1 @@ -Subproject commit e214b3992a31acece6a9aada7d0a1ad37c928f3b +Subproject commit eec84f4b5e8ea322f42b9c4008e549825c310a8d diff --git a/vendor/nim-eth b/vendor/nim-eth index 57fa21263a..7f7aea1035 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit 57fa21263a50a325e80636280f976e8077f6627c +Subproject commit 7f7aea10355660b9770a56a69dce085f3a285af3 diff --git a/vendor/nim-eth2-scenarios b/vendor/nim-eth2-scenarios index 5d3b872650..9b9d64c23e 160000 --- a/vendor/nim-eth2-scenarios +++ b/vendor/nim-eth2-scenarios @@ -1 +1 @@ -Subproject commit 5d3b8726503b591e9132cabf5d6da3cb8f819d88 +Subproject commit 9b9d64c23e96e5039df286096d5a16226df230d3 diff --git a/vendor/nim-faststreams b/vendor/nim-faststreams index 2b08c774af..8a94d6f73f 160000 --- a/vendor/nim-faststreams +++ b/vendor/nim-faststreams @@ -1 +1 @@ -Subproject commit 2b08c774afaafd600cf4c6f994cf78b8aa090c0c +Subproject commit 8a94d6f73fc1b0d0cff10ee537268baa772fab40 diff --git a/vendor/nim-json-rpc b/vendor/nim-json-rpc index 274372132d..a4f8470b40 160000 --- a/vendor/nim-json-rpc +++ b/vendor/nim-json-rpc @@ -1 +1 @@ -Subproject commit 274372132de497e6b7b793c9d5d5474b71bf80a2 +Subproject commit a4f8470b40587ef155b351aebc02ef9dbae3578d diff --git a/vendor/nim-json-serialization b/vendor/nim-json-serialization index 6eadb6e939..0640259af2 160000 --- a/vendor/nim-json-serialization +++ b/vendor/nim-json-serialization @@ -1 +1 @@ -Subproject commit 6eadb6e939ffa7882ff5437033c11a9464d3385c +Subproject commit 0640259af2fad330ea28e77359c0d0cefac5a361 diff --git a/vendor/nim-kzg4844 b/vendor/nim-kzg4844 index 2163a77cb6..6c9d91c532 160000 --- a/vendor/nim-kzg4844 +++ b/vendor/nim-kzg4844 @@ -1 +1 @@ -Subproject commit 2163a77cb66b1b0faf032a735a751d0ea1e83499 +Subproject commit 6c9d91c53214d874a645dadaad39684472c15e74 diff --git a/vendor/nim-libbacktrace b/vendor/nim-libbacktrace index 65f9ed0a3e..267dee621e 160000 --- a/vendor/nim-libbacktrace +++ b/vendor/nim-libbacktrace @@ -1 +1 @@ -Subproject commit 65f9ed0a3e8aa4c860ccb659ae20d5795aed8207 +Subproject commit 267dee621ecc33883c9fad1b4af0d296cde7d822 diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index e67744bf2a..59e7069c15 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit e67744bf2a3cbc14f554e63eda6c26e82701c0cd +Subproject commit 59e7069c15e914618b7f7b2206c47d16c5d10a34 diff --git a/vendor/nim-metrics b/vendor/nim-metrics index 25ffd054fd..ecf64c6078 160000 --- a/vendor/nim-metrics +++ b/vendor/nim-metrics @@ -1 +1 @@ -Subproject commit 25ffd054fd774f8cf7935e75d6cad542306d7802 +Subproject commit ecf64c6078d1276d3b7d9b3d931fbdb70004db11 diff --git a/vendor/nim-minilru b/vendor/nim-minilru index 0c4b2bce95..aba86fcf59 160000 --- a/vendor/nim-minilru +++ b/vendor/nim-minilru @@ -1 +1 @@ -Subproject commit 0c4b2bce959591f0a862e9b541ba43c6d0cf3476 +Subproject commit aba86fcf59597f5d43875751d478732100456d1f diff --git 
a/vendor/nim-nat-traversal b/vendor/nim-nat-traversal index dae59ddfd5..860e18c376 160000 --- a/vendor/nim-nat-traversal +++ b/vendor/nim-nat-traversal @@ -1 +1 @@ -Subproject commit dae59ddfd514260bb8586b700ec20f58c4ea30ff +Subproject commit 860e18c37667b5dd005b94c63264560c35d88004 diff --git a/vendor/nim-normalize b/vendor/nim-normalize index 08caa2a4da..1c51c44a15 160000 --- a/vendor/nim-normalize +++ b/vendor/nim-normalize @@ -1 +1 @@ -Subproject commit 08caa2a4da40580ed7fe3ce723b3f7feabcca8e5 +Subproject commit 1c51c44a1541c2402af9fc21fef9ff4028e8e9ce diff --git a/vendor/nim-presto b/vendor/nim-presto index 92b1c7ff14..3ccb356220 160000 --- a/vendor/nim-presto +++ b/vendor/nim-presto @@ -1 +1 @@ -Subproject commit 92b1c7ff141e6920e1f8a98a14c35c1fa098e3be +Subproject commit 3ccb356220b70f7d9eb0fbd58b674c4080f78014 diff --git a/vendor/nim-results b/vendor/nim-results index 71d404b314..df8113dda4 160000 --- a/vendor/nim-results +++ b/vendor/nim-results @@ -1 +1 @@ -Subproject commit 71d404b314479a6205bfd050f4fe5fe49cdafc69 +Subproject commit df8113dda4c2d74d460a8fa98252b0b771bf1f27 diff --git a/vendor/nim-secp256k1 b/vendor/nim-secp256k1 index 62e16b4dff..9dd3df6212 160000 --- a/vendor/nim-secp256k1 +++ b/vendor/nim-secp256k1 @@ -1 +1 @@ -Subproject commit 62e16b4dff513f1eea7148a8cbba8a8c547b9546 +Subproject commit 9dd3df62124aae79d564da636bb22627c53c7676 diff --git a/vendor/nim-serialization b/vendor/nim-serialization index 2086c99608..f80cfd8657 160000 --- a/vendor/nim-serialization +++ b/vendor/nim-serialization @@ -1 +1 @@ -Subproject commit 2086c99608b4bf472e1ef5fe063710f280243396 +Subproject commit f80cfd8657f272a2abd063d070b77f2a74f704cd diff --git a/vendor/nim-snappy b/vendor/nim-snappy index 8291337351..00bfcef94f 160000 --- a/vendor/nim-snappy +++ b/vendor/nim-snappy @@ -1 +1 @@ -Subproject commit 829133735113951b219e3b108a6bd2146209300b +Subproject commit 00bfcef94f8ef6981df5d5b994897f6695badfb2 diff --git a/vendor/nim-sqlite3-abi b/vendor/nim-sqlite3-abi index 3108a5f48f..a322ceb619 160000 --- a/vendor/nim-sqlite3-abi +++ b/vendor/nim-sqlite3-abi @@ -1 +1 @@ -Subproject commit 3108a5f48f4a8b1f4e7ffbac6f1c8ad9d6680441 +Subproject commit a322ceb619f40a5c521ea711ee1fad4c6d11aa09 diff --git a/vendor/nim-ssz-serialization b/vendor/nim-ssz-serialization index 55ac17ca1f..2729a994f7 160000 --- a/vendor/nim-ssz-serialization +++ b/vendor/nim-ssz-serialization @@ -1 +1 @@ -Subproject commit 55ac17ca1f42afa35db9a06dd50c4d79a17c5d28 +Subproject commit 2729a994f7606815b194fa46584bbc82aa58be1a diff --git a/vendor/nim-stew b/vendor/nim-stew index 79e4fa5a9d..b66168735d 160000 --- a/vendor/nim-stew +++ b/vendor/nim-stew @@ -1 +1 @@ -Subproject commit 79e4fa5a9d3374db17ed63622714d3e1094c7f34 +Subproject commit b66168735d6f3841c5239c3169d3fe5fe98b1257 diff --git a/vendor/nim-stint b/vendor/nim-stint index 1a2c661e3f..470b789256 160000 --- a/vendor/nim-stint +++ b/vendor/nim-stint @@ -1 +1 @@ -Subproject commit 1a2c661e3f50ff696b0b6692fab0d7bb2abf10cc +Subproject commit 470b7892561b5179ab20bd389a69217d6213fe58 diff --git a/vendor/nim-taskpools b/vendor/nim-taskpools index 7b74a716a4..9e8ccc7546 160000 --- a/vendor/nim-taskpools +++ b/vendor/nim-taskpools @@ -1 +1 @@ -Subproject commit 7b74a716a40249720fd7da428113147942b9642d +Subproject commit 9e8ccc754631ac55ac2fd495e167e74e86293edb diff --git a/vendor/nim-testutils b/vendor/nim-testutils index 94d68e796c..e4d37dc165 160000 --- a/vendor/nim-testutils +++ b/vendor/nim-testutils @@ -1 +1 @@ -Subproject commit 
94d68e796c045d5b37cabc6be32d7bfa168f8857 +Subproject commit e4d37dc1652d5c63afb89907efb5a5e812261797 diff --git a/vendor/nim-toml-serialization b/vendor/nim-toml-serialization index fea85b27f0..bf994c11ea 160000 --- a/vendor/nim-toml-serialization +++ b/vendor/nim-toml-serialization @@ -1 +1 @@ -Subproject commit fea85b27f0badcf617033ca1bc05444b5fd8aa7a +Subproject commit bf994c11ea21c1506334949317554b92e34635f0 diff --git a/vendor/nim-unittest2 b/vendor/nim-unittest2 index 845b6af28b..8b51e99b4a 160000 --- a/vendor/nim-unittest2 +++ b/vendor/nim-unittest2 @@ -1 +1 @@ -Subproject commit 845b6af28b9f68f02d320e03ad18eccccea7ddb9 +Subproject commit 8b51e99b4a57fcfb31689230e75595f024543024 diff --git a/vendor/nim-web3 b/vendor/nim-web3 index d8a91d0409..141907cd95 160000 --- a/vendor/nim-web3 +++ b/vendor/nim-web3 @@ -1 +1 @@ -Subproject commit d8a91d040975cd3dd2a10c26456fab2d7523e8dd +Subproject commit 141907cd958d7ee3b554ec94bc9ac7ec692e546b diff --git a/vendor/nim-websock b/vendor/nim-websock index ebe308a79a..2c1dbcef10 160000 --- a/vendor/nim-websock +++ b/vendor/nim-websock @@ -1 +1 @@ -Subproject commit ebe308a79a7b440a11dfbe74f352be86a3883508 +Subproject commit 2c1dbcef10b0d99fb846535ae1a01d5c8263041b diff --git a/vendor/nim-zlib b/vendor/nim-zlib index 3f79980952..c71efff5fd 160000 --- a/vendor/nim-zlib +++ b/vendor/nim-zlib @@ -1 +1 @@ -Subproject commit 3f7998095264d262a8d99e2be89045e6d9301537 +Subproject commit c71efff5fd1721362b3363dc7d0e2a4c0dbc6453 diff --git a/vendor/nimbus-build-system b/vendor/nimbus-build-system index 4c6ff070c1..e6c2c9da39 160000 --- a/vendor/nimbus-build-system +++ b/vendor/nimbus-build-system @@ -1 +1 @@ -Subproject commit 4c6ff070c116450bb2c285691724ac9e6202cb28 +Subproject commit e6c2c9da39c2d368d9cf420ac22692e99715d22c diff --git a/vendor/nimcrypto b/vendor/nimcrypto index dc07e3058c..19c41d6be4 160000 --- a/vendor/nimcrypto +++ b/vendor/nimcrypto @@ -1 +1 @@ -Subproject commit dc07e3058c6904eef965394493b6ea99aa2adefc +Subproject commit 19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1 diff --git a/vendor/sepolia b/vendor/sepolia index 562d9938f0..906833e9dd 160000 --- a/vendor/sepolia +++ b/vendor/sepolia @@ -1 +1 @@ -Subproject commit 562d9938f08675e9ba490a1dfba21fb05843f39f +Subproject commit 906833e9dda07c340bdb6bf3de73dc07384f0e20
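
Note on the SyncQueue hunks earlier in this patch: most of them only append a new trailing argument to SyncQueue.init, a fork-at-epoch callback (passed as testforkAtEpoch in the pre-existing tests and built with the epochManager helper in the new chunking scenarios). The definition of testforkAtEpoch is not visible in this part of the patch; the sketch below shows one plausible shape for such a callback. The import path and the fork boundaries are assumptions for illustration only, modeled on what epochManager does with its seq of forks.

  # Sketch only -- not the testforkAtEpoch helper from the patch.
  # Assumes ConsensusFork and Epoch are available via beacon_chain/spec/forks,
  # as elsewhere in the test suite; the schedule below is invented.
  import ../beacon_chain/spec/forks

  proc staticForkAtEpoch(epoch: Epoch): ConsensusFork =
    # Saturating static schedule: Phase0 at epoch 0, then one upgrade per
    # epoch up to Capella, mirroring what epochManager builds from a seq.
    if epoch >= Epoch(3): ConsensusFork.Capella
    elif epoch >= Epoch(2): ConsensusFork.Bellatrix
    elif epoch >= Epoch(1): ConsensusFork.Altair
    else: ConsensusFork.Phase0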
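
The new "bestSuccess() API soft timeout test" exercises a call that now takes two durations (500.milliseconds and 1000.milliseconds in the blocks above, 50/100 in the renamed hard-timeout test). Judging from the expected cancellations and results, the first duration behaves as a soft deadline (stop waiting once it elapses, provided at least one response has already arrived) and the second as a hard deadline (cancel anything still outstanding). The self-contained sketch below reproduces that selection rule against the first scenario's arrival times and scores; it is an illustration of the test vectors, not the library implementation, and all names in it are made up.

  # Illustration of the rule implied by the soft-timeout test vectors.
  proc pickBest(arrivalMs: openArray[int], scores: openArray[float64],
                softMs, hardMs: int): (float64, seq[bool]) =
    # Wait until softMs if something arrived by then, otherwise until hardMs.
    var deadline = hardMs
    for t in arrivalMs:
      if t <= softMs:
        deadline = softMs
        break
    var
      best = -1.0'f64   # scores in these vectors are non-negative
      cancelled = newSeq[bool](arrivalMs.len)
    for i, t in arrivalMs:
      if t <= deadline:
        best = max(best, scores[i])   # in-time responses compete on score
      else:
        cancelled[i] = true           # late requests end up cancelled
    (best, cancelled)

  # First scenario: arrivals 1500/900/600/1200 ms, soft 500 ms, hard 1000 ms
  # -> best score 100, with the first and fourth requests cancelled.
  doAssert pickBest([1500, 900, 600, 1200], [0.0, 10.0, 100.0, 1000.0],
                    500, 1000) == (100.0, @[true, false, false, true])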
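
The testdbutil.nim hunk makes the RuntimeConfig a required first parameter of makeTestDB instead of a defaulted trailing one, which is why call sites elsewhere in the patch switch to method-call syntax on the config. A minimal usage sketch follows; the imports and the validator count are illustrative, mirroring the updated test_validator_change_pool.nim setup.

  # Sketch of a call site after this patch (imports assumed -- presets is
  # expected to provide defaultRuntimeConfig and SLOTS_PER_EPOCH).
  import ../beacon_chain/spec/presets
  import ./testdbutil

  let
    cfg = defaultRuntimeConfig
    db = cfg.makeTestDB(SLOTS_PER_EPOCH * 3)   # RuntimeConfig now comes first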