Commit 7927f72

Add godoc / notice about trusting beacon client
1 parent 33a25f8 commit 7927f72

6 files changed: +51 -1 lines changed

README.md (+5)

```diff
@@ -15,6 +15,11 @@ There are currently two supported storage options:
 You can control which storage backend is used by setting the `BLOB_API_DATA_STORE` and `BLOB_ARCHIVER_DATA_STORE` to
 either `disk` or `s3`.
 
+### Data Validity
+Currently, the archiver and api do not validate the beacon node's data. Therefore, it's important to either trust the
+Beacon node, or validate the data in the client. There is an open [issue](https://github.com/base-org/blob-archiver/issues/4)
+to add data validation to the archiver and api.
+
 ### Development
 The `Makefile` contains a number of commands for development:
```
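Until that issue is resolved, a client that does not want to trust the beacon node (or the archive) can verify returned sidecars itself, for example by checking each blob's KZG proof against its commitment. The sketch below is illustrative only: it assumes the go-kzg-4844 library (github.com/crate-crypto/go-kzg-4844), and exact signatures may differ between versions.

```go
package validation

import (
	"fmt"

	"github.com/attestantio/go-eth2-client/spec/deneb"
	gokzg "github.com/crate-crypto/go-kzg-4844"
)

// verifySidecars checks each sidecar's KZG proof against its commitment before the
// blob data is used. Hedged sketch: the go-kzg-4844 API shown here may differ
// slightly between library versions (e.g. pointer vs. value Blob arguments).
func verifySidecars(kzgCtx *gokzg.Context, sidecars []*deneb.BlobSidecar) error {
	for _, sc := range sidecars {
		blob := gokzg.Blob(sc.Blob)
		commitment := gokzg.KZGCommitment(sc.KZGCommitment)
		proof := gokzg.KZGProof(sc.KZGProof)
		if err := kzgCtx.VerifyBlobKZGProof(blob, commitment, proof); err != nil {
			return fmt.Errorf("sidecar %d failed KZG verification: %w", sc.Index, err)
		}
	}
	return nil
}
```

The KZG context is relatively expensive to build, so it would typically be created once (for example via the library's NewContext4096Secure constructor) and reused across requests.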

api/service/api.go (+2)

```diff
@@ -150,6 +150,8 @@ func (a *API) toBeaconBlockHash(id string) (common.Hash, *httpError) {
 	}
 }
 
+// blobSidecarHandler implements the /eth/v1/beacon/blob_sidecars/{id} endpoint, using the underlying DataStoreReader
+// to fetch blobs instead of the beacon node. This allows clients to fetch expired blobs.
 func (a *API) blobSidecarHandler(w http.ResponseWriter, r *http.Request) {
 	param := chi.URLParam(r, "id")
 	beaconBlockHash, err := a.toBeaconBlockHash(param)
```
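Because the route mirrors the standard beacon API, existing tooling can simply be pointed at the blob archiver API instead of a beacon node. A minimal sketch of querying the endpoint with Go's standard library (the base URL and block root are placeholders):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder base URL for a running blob-archiver API instance.
	const apiBase = "http://localhost:8000"
	// Placeholder: replace with a real beacon block root (or another identifier
	// accepted by toBeaconBlockHash).
	blockID := "0x0000000000000000000000000000000000000000000000000000000000000000"

	resp, err := http.Get(fmt.Sprintf("%s/eth/v1/beacon/blob_sidecars/%s", apiBase, blockID))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The response body follows the beacon API blob_sidecars schema.
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```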

archiver/service/api.go (+1)

```diff
@@ -21,6 +21,7 @@ type API struct {
 	metrics m.Metricer
 }
 
+// NewAPI creates a new Archiver API instance. This API exposes an admin interface to control the archiver.
 func NewAPI(metrics m.Metricer, logger log.Logger) *API {
 	result := &API{
 		router: chi.NewRouter(),
```

archiver/service/archiver.go (+18)

```diff
@@ -52,6 +52,10 @@ type ArchiverService struct {
 	api *API
 }
 
+// Start starts the archiver service. It begins polling the beacon node for the latest blocks and persisting blobs for
+// them. Concurrently it'll also begin a backfill process (see backfillBlobs) to store all blobs from the current head
+// to the previously stored blocks. This ensures that during restarts or outages of an archiver, any gaps will be
+// filled in.
 func (a *ArchiverService) Start(ctx context.Context) error {
 	if a.cfg.MetricsConfig.Enabled {
 		a.log.Info("starting metrics server", "addr", a.cfg.MetricsConfig.ListenAddr, "port", a.cfg.MetricsConfig.ListenPort)
@@ -82,6 +86,11 @@ func (a *ArchiverService) Start(ctx context.Context) error {
 	return a.trackLatestBlocks(ctx)
 }
 
+// persistBlobsForBlockToS3 fetches the blobs for a given block and persists them to S3. It returns the block header
+// and a boolean indicating whether the blobs already existed in S3 and any errors that occur.
+// If the blobs are already stored, it will not overwrite the data. Currently, the archiver does not
+// perform any validation of the blobs, it assumes a trusted beacon node. See:
+// https://github.com/base-org/blob-archiver/issues/4.
 func (a *ArchiverService) persistBlobsForBlockToS3(ctx context.Context, blockIdentifier string) (*v1.BeaconBlockHeader, bool, error) {
 	currentHeader, err := a.beaconClient.BeaconBlockHeader(ctx, &api.BeaconBlockHeaderOpts{
 		Block: blockIdentifier,
@@ -121,6 +130,7 @@ func (a *ArchiverService) persistBlobsForBlockToS3(ctx context.Context, blockIde
 		BlobSidecars: storage.BlobSidecars{Data: blobSidecars.Data},
 	}
 
+	// The blob that is being written has not been validated. It is assumed that the beacon node is trusted.
 	err = a.dataStoreClient.Write(ctx, blobData)
 
 	if err != nil {
@@ -133,6 +143,7 @@ func (a *ArchiverService) persistBlobsForBlockToS3(ctx context.Context, blockIde
 	return currentHeader.Data, false, nil
 }
 
+// Stops the archiver service.
 func (a *ArchiverService) Stop(ctx context.Context) error {
 	if a.stopped.Load() {
 		return ErrAlreadyStopped
@@ -155,6 +166,9 @@ func (a *ArchiverService) Stopped() bool {
 	return a.stopped.Load()
 }
 
+// backfillBlobs will persist all blobs from the provided beacon block header, to either the last block that was persisted
+// to the archivers storage or the origin block in the configuration. This is used to ensure that any gaps can be filled.
+// If an error is encountered persisting a block, it will retry after waiting for a period of time.
 func (a *ArchiverService) backfillBlobs(ctx context.Context, latest *v1.BeaconBlockHeader) {
 	current, alreadyExists, err := latest, false, error(nil)
 
@@ -182,6 +196,7 @@ func (a *ArchiverService) backfillBlobs(ctx context.Context, latest *v1.BeaconBl
 	a.log.Info("backfill complete", "endHash", current.Root.String(), "startHash", latest.Root.String())
 }
 
+// trackLatestBlocks will poll the beacon node for the latest blocks and persist blobs for them.
 func (a *ArchiverService) trackLatestBlocks(ctx context.Context) error {
 	t := time.NewTicker(a.cfg.PollInterval)
 	defer t.Stop()
@@ -198,6 +213,9 @@ func (a *ArchiverService) trackLatestBlocks(ctx context.Context) error {
 	}
 }
 
+// processBlocksUntilKnownBlock will fetch and persist blobs for blocks until it finds a block that has been stored before.
+// In the case of a reorg, it will fetch the new head and then walk back the chain, storing all blobs until it finds a
+// known block -- that already exists in the archivers' storage.
 func (a *ArchiverService) processBlocksUntilKnownBlock(ctx context.Context) {
 	a.log.Debug("refreshing live data")
```
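The godoc added for backfillBlobs and processBlocksUntilKnownBlock describes the same walk-back pattern: persist blobs for a block, then follow parent roots until a block that is already in storage (or the configured origin) is reached. A simplified, hypothetical sketch of that loop follows; persistBlock is a stand-in for persistBlobsForBlockToS3 plus the parent-root lookup, not the archiver's actual code.

```go
package sketch

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
)

// walkBack is a simplified illustration of the backfill / known-block walk-back
// pattern described in the godoc above. persistBlock is a hypothetical stand-in
// that persists blobs for one block and reports its parent root plus whether the
// block was already stored.
func walkBack(
	ctx context.Context,
	head common.Hash,
	persistBlock func(ctx context.Context, block common.Hash) (parent common.Hash, alreadyStored bool, err error),
) error {
	current := head
	for {
		parent, alreadyStored, err := persistBlock(ctx, current)
		if err != nil {
			// The real archiver logs and retries after a delay rather than returning.
			return err
		}
		if alreadyStored {
			// Reached a block that is already archived: any gap behind head is filled.
			// The real backfill also stops at the configured origin block.
			return nil
		}
		current = parent
	}
}
```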

common/beacon/client.go (+2)

```diff
@@ -8,11 +8,13 @@ import (
 	"github.com/base-org/blob-archiver/common/flags"
 )
 
+// Client is an interface that wraps the go-eth-2 interfaces that the blob archiver and api require.
 type Client interface {
 	client.BeaconBlockHeadersProvider
 	client.BlobSidecarsProvider
 }
 
+// NewBeaconClient returns a new HTTP beacon client.
 func NewBeaconClient(ctx context.Context, cfg flags.BeaconConfig) (Client, error) {
 	cctx, cancel := context.WithCancel(ctx)
 	defer cancel()
```
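For context, the two provider interfaces wrapped by Client come from go-eth2-client, whose HTTP service implements them. The following is a hedged sketch of constructing such a client; the option names follow go-eth2-client's http package and may vary between versions, the address is a placeholder, and this is not necessarily how NewBeaconClient is implemented.

```go
package sketch

import (
	"context"
	"fmt"
	"time"

	client "github.com/attestantio/go-eth2-client"
	"github.com/attestantio/go-eth2-client/http"
)

// beaconClient mirrors the Client interface from common/beacon/client.go.
type beaconClient interface {
	client.BeaconBlockHeadersProvider
	client.BlobSidecarsProvider
}

// newHTTPBeaconClient shows one way to obtain a client satisfying those provider
// interfaces. Hedged sketch only; not necessarily the body of NewBeaconClient.
func newHTTPBeaconClient(ctx context.Context, addr string) (beaconClient, error) {
	svc, err := http.New(ctx,
		http.WithAddress(addr), // e.g. "http://localhost:5052" (placeholder)
		http.WithTimeout(10*time.Second),
	)
	if err != nil {
		return nil, err
	}
	bc, ok := svc.(beaconClient)
	if !ok {
		return nil, fmt.Errorf("beacon node client does not implement the required providers")
	}
	return bc, nil
}
```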

common/storage/storage.go (+23 -1)

```diff
@@ -15,8 +15,11 @@ const (
 )
 
 var (
+	// ErrNotFound is returned when a blob is not found in the storage.
 	ErrNotFound = errors.New("blob not found")
-	ErrStorage  = errors.New("error accessing storage")
+	// ErrStorage is returned when there is an error accessing the storage.
+	ErrStorage = errors.New("error accessing storage")
+	// ErrEncoding is returned when there is an error in blob encoding or decoding.
 	ErrEncoding = errors.New("error encoding/decoding blob")
 )
 
@@ -28,6 +31,8 @@ type BlobSidecars struct {
 	Data []*deneb.BlobSidecar `json:"data"`
 }
 
+// MarshalSSZ marshals the blob sidecars into SSZ. As the blob sidecars are a single list of fixed size elements, we can
+// simply concatenate the marshaled sidecars together.
 func (b *BlobSidecars) MarshalSSZ() ([]byte, error) {
 	result := make([]byte, b.SizeSSZ())
 
@@ -55,15 +60,32 @@ type BlobData struct {
 	BlobSidecars BlobSidecars `json:"blob_sidecars"`
 }
 
+// DataStoreReader is the interface for reading from a data store.
 type DataStoreReader interface {
+	// Exists returns true if the given blob hash exists in the data store, false otherwise.
+	// It should return one of the following:
+	// - nil: the existence check was successful. In this case the boolean should also be set correctly.
+	// - ErrStorage: there was an error accessing the data store.
 	Exists(ctx context.Context, hash common.Hash) (bool, error)
+	// Read reads the blob data for the given beacon block hash from the data store.
+	// It should return one of the following:
+	// - nil: reading the blob was successful. The blob data is also returned.
+	// - ErrNotFound: the blob data was not found in the data store.
+	// - ErrStorage: there was an error accessing the data store.
+	// - ErrEncoding: there was an error decoding the blob data.
 	Read(ctx context.Context, hash common.Hash) (BlobData, error)
 }
 
+// DataStoreWriter is the interface for writing to a data store.
 type DataStoreWriter interface {
+	// Write writes the given blob data to the data store. It should return one of the following errors:
+	// - nil: writing the blob was successful.
+	// - ErrStorage: there was an error accessing the data store.
+	// - ErrEncoding: there was an error encoding the blob data.
 	Write(ctx context.Context, data BlobData) error
}
 
+// DataStore is the interface for a data store that can be both written to and read from.
 type DataStore interface {
 	DataStoreReader
 	DataStoreWriter
```
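The new interface comments define an error contract, so callers are expected to branch on the sentinel errors rather than matching error strings. A hypothetical caller illustrating that with errors.Is (readBlob is not code from this repository):

```go
package sketch

import (
	"context"
	"errors"
	"log"

	"github.com/base-org/blob-archiver/common/storage"
	"github.com/ethereum/go-ethereum/common"
)

// readBlob shows how a caller might branch on the sentinel errors documented on
// DataStoreReader.Read. Hypothetical usage sketch, not code from this repository.
func readBlob(ctx context.Context, reader storage.DataStoreReader, hash common.Hash) (storage.BlobData, error) {
	data, err := reader.Read(ctx, hash)
	switch {
	case err == nil:
		return data, nil
	case errors.Is(err, storage.ErrNotFound):
		log.Printf("no blobs archived for block %s", hash)
	case errors.Is(err, storage.ErrEncoding):
		log.Printf("blobs for block %s are stored but could not be decoded", hash)
	default: // storage.ErrStorage or an unexpected error
		log.Printf("storage backend error reading block %s: %v", hash, err)
	}
	return storage.BlobData{}, err
}
```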

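On the MarshalSSZ comment in common/storage/storage.go: because each deneb.BlobSidecar is a fixed-size SSZ container, serializing the list amounts to concatenating the encoded sidecars. An illustrative sketch of that idea (not the repository's implementation, which pre-sizes the buffer via SizeSSZ):

```go
package sketch

import (
	"github.com/attestantio/go-eth2-client/spec/deneb"
)

// concatSidecars illustrates the "concatenate fixed size elements" approach described
// in the MarshalSSZ godoc. Illustrative only; not the repository's implementation.
func concatSidecars(sidecars []*deneb.BlobSidecar) ([]byte, error) {
	var out []byte
	for _, sc := range sidecars {
		enc, err := sc.MarshalSSZ() // generated SSZ marshaling for a fixed-size container
		if err != nil {
			return nil, err
		}
		out = append(out, enc...)
	}
	return out, nil
}
```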