From da7b970d3e54cc87ade4946de02d214c92f940ef Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Thu, 23 Apr 2026 15:21:32 +0200 Subject: [PATCH 01/13] add key rotation --- block/internal/executing/executor.go | 26 ++- block/internal/executing/executor_test.go | 95 ++++++++ block/internal/submitting/da_submitter.go | 9 +- .../internal/submitting/da_submitter_test.go | 91 ++++++++ block/internal/syncing/assert.go | 11 +- block/internal/syncing/da_retriever.go | 8 +- block/internal/syncing/p2p_handler.go | 12 +- block/internal/syncing/p2p_handler_test.go | 38 ++++ block/internal/syncing/raft_retriever.go | 2 +- docs/.vitepress/config.ts | 4 + docs/adr/adr-023-proposer-key-rotation.md | 151 +++++++++++++ docs/guides/create-genesis.md | 4 + .../operations/proposer-key-rotation.md | 195 ++++++++++++++++ docs/guides/operations/upgrades.md | 6 + node/failover.go | 2 +- node/full.go | 2 +- pkg/genesis/genesis.go | 34 ++- pkg/genesis/io.go | 4 +- pkg/genesis/proposer_schedule.go | 208 ++++++++++++++++++ pkg/genesis/proposer_schedule_test.go | 93 ++++++++ 20 files changed, 956 insertions(+), 39 deletions(-) create mode 100644 docs/adr/adr-023-proposer-key-rotation.md create mode 100644 docs/guides/operations/proposer-key-rotation.md create mode 100644 pkg/genesis/proposer_schedule.go create mode 100644 pkg/genesis/proposer_schedule_test.go diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index f5be5e1b40..4cc7f4984c 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -126,7 +126,7 @@ func NewExecutor( return nil, fmt.Errorf("failed to get address: %w", err) } - if !bytes.Equal(addr, genesis.ProposerAddress) { + if !genesis.HasScheduledProposer(addr) { return nil, common.ErrNotProposer } } @@ -696,6 +696,10 @@ func (e *Executor) RetrieveBatch(ctx context.Context) (*BatchData, error) { func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *BatchData) (*types.SignedHeader, 
*types.Data, error) { currentState := e.getLastState() headerTime := uint64(e.genesis.StartTime.UnixNano()) + proposer, err := e.genesis.ProposerAtHeight(height) + if err != nil { + return nil, nil, fmt.Errorf("resolve proposer for height %d: %w", height, err) + } var lastHeaderHash types.Hash var lastDataHash types.Hash @@ -728,22 +732,30 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba // Get signer info and validator hash var pubKey crypto.PubKey + var signerAddress []byte var validatorHash types.Hash if e.signer != nil { - var err error pubKey, err = e.signer.GetPublic() if err != nil { return nil, nil, fmt.Errorf("failed to get public key: %w", err) } - validatorHash, err = e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, pubKey) + signerAddress, err = e.signer.GetAddress() + if err != nil { + return nil, nil, fmt.Errorf("failed to get signer address: %w", err) + } + + if err := e.genesis.ValidateProposer(height, signerAddress, pubKey); err != nil { + return nil, nil, fmt.Errorf("signer does not match proposer schedule: %w", err) + } + + validatorHash, err = e.options.ValidatorHasherProvider(proposer.Address, pubKey) if err != nil { return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } } else { - var err error - validatorHash, err = e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, nil) + validatorHash, err = e.options.ValidatorHasherProvider(proposer.Address, nil) if err != nil { return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } @@ -763,13 +775,13 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba }, LastHeaderHash: lastHeaderHash, AppHash: currentState.AppHash, - ProposerAddress: e.genesis.ProposerAddress, + ProposerAddress: proposer.Address, ValidatorHash: validatorHash, }, Signature: lastSignature, Signer: types.Signer{ PubKey: pubKey, - Address: e.genesis.ProposerAddress, + Address: proposer.Address, }, } diff --git 
a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index 1099cdb87d..cec6a3fecc 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -1,6 +1,7 @@ package executing import ( + "context" "testing" "time" @@ -12,6 +13,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + coreseq "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" @@ -121,3 +123,96 @@ func TestExecutor_NilBroadcasters(t *testing.T) { assert.Equal(t, cacheManager, executor.cache) assert.Equal(t, gen, executor.genesis) } + +func TestExecutor_CreateBlock_UsesScheduledProposerForHeight(t *testing.T) { + ds := sync.MutexWrap(datastore.NewMapDatastore()) + memStore := store.New(ds) + + cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) + require.NoError(t, err) + + metrics := common.NopMetrics() + oldAddr, oldSignerInfo, _ := buildTestSigner(t) + newAddr, newSignerInfo, newSigner := buildTestSigner(t) + + entry1, err := genesis.NewProposerScheduleEntry(1, oldSignerInfo.PubKey) + require.NoError(t, err) + entry2, err := genesis.NewProposerScheduleEntry(2, newSignerInfo.PubKey) + require.NoError(t, err) + + gen := genesis.Genesis{ + ChainID: "test-chain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: entry1.Address, + ProposerSchedule: []genesis.ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + executor, err := NewExecutor( + memStore, + nil, + nil, + newSigner, + cacheManager, + metrics, + config.DefaultConfig(), + gen, + nil, + nil, + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + nil, + ) + require.NoError(t, err) + + prevHeader := &types.SignedHeader{ + Header: types.Header{ + Version: types.InitStateVersion, + BaseHeader: 
types.BaseHeader{ + ChainID: gen.ChainID, + Height: 1, + Time: uint64(gen.StartTime.UnixNano()), + }, + AppHash: []byte("state-root-0"), + ProposerAddress: oldAddr, + DataHash: common.DataHashForEmptyTxs, + }, + Signature: types.Signature([]byte("sig-1")), + Signer: oldSignerInfo, + } + prevData := &types.Data{ + Metadata: &types.Metadata{ + ChainID: gen.ChainID, + Height: 1, + Time: prevHeader.BaseHeader.Time, + }, + Txs: nil, + } + + batch, err := memStore.NewBatch(context.Background()) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(prevHeader, prevData, &prevHeader.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + executor.setLastState(types.State{ + Version: types.InitStateVersion, + ChainID: gen.ChainID, + InitialHeight: gen.InitialHeight, + LastBlockHeight: 1, + LastBlockTime: prevHeader.Time(), + LastHeaderHash: prevHeader.Hash(), + AppHash: []byte("state-root-1"), + }) + + header, data, err := executor.CreateBlock(context.Background(), 2, &BatchData{ + Batch: &coreseq.Batch{}, + Time: time.Now(), + }) + require.NoError(t, err) + require.Equal(t, newAddr, header.ProposerAddress) + require.Equal(t, newAddr, header.Signer.Address) + require.Equal(t, uint64(2), data.Height()) +} diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index 83f56d9cb5..e53e351832 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -1,7 +1,6 @@ package submitting import ( - "bytes" "context" "encoding/json" "fmt" @@ -476,10 +475,6 @@ func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.Si return nil, nil, fmt.Errorf("failed to get address: %w", err) } - if len(genesis.ProposerAddress) > 0 && !bytes.Equal(addr, genesis.ProposerAddress) { - return nil, nil, fmt.Errorf("signer address mismatch with genesis proposer") - } - signerInfo := types.Signer{ PubKey: pubKey, Address: addr, @@ -494,6 +489,10 
@@ func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.Si continue } + if err := genesis.ValidateProposer(unsignedData.Height(), addr, pubKey); err != nil { + return nil, nil, fmt.Errorf("signer does not match proposer schedule for data at height %d: %w", unsignedData.Height(), err) + } + signature, err := signer.Sign(ctx, unsignedDataListBz[i]) if err != nil { return nil, nil, fmt.Errorf("failed to sign data: %w", err) diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index d25786018b..9c55b9bd6c 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -343,6 +343,97 @@ func TestDASubmitter_SubmitData_Success(t *testing.T) { assert.True(t, ok) } +func TestDASubmitter_SubmitData_UsesScheduledProposerForHeight(t *testing.T) { + submitter, st, cm, mockDA, gen := setupDASubmitterTest(t) + ctx := context.Background() + dataNamespace := datypes.NamespaceFromString(testDataNamespace).Bytes() + + mockDA.On( + "Submit", + mock.Anything, + mock.AnythingOfType("[][]uint8"), + mock.AnythingOfType("float64"), + dataNamespace, + mock.Anything, + ).Return(func(_ context.Context, blobs [][]byte, _ float64, _ []byte, _ []byte) datypes.ResultSubmit { + return datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: uint64(len(blobs)), Height: 2}} + }).Once() + + oldAddr, oldPub, _ := createTestSigner(t) + nextAddr, nextPub, nextSigner := createTestSigner(t) + + entry1, err := genesis.NewProposerScheduleEntry(gen.InitialHeight, oldPub) + require.NoError(t, err) + entry2, err := genesis.NewProposerScheduleEntry(2, nextPub) + require.NoError(t, err) + + gen.ProposerAddress = entry1.Address + gen.ProposerSchedule = []genesis.ProposerScheduleEntry{entry1, entry2} + submitter.genesis = gen + + data1 := &types.Data{ + Metadata: &types.Metadata{ + ChainID: gen.ChainID, + Height: 1, + Time: 
uint64(time.Now().UnixNano()), + }, + Txs: types.Txs{}, + } + + header1 := &types.SignedHeader{ + Header: types.Header{ + BaseHeader: types.BaseHeader{ + ChainID: gen.ChainID, + Height: 1, + Time: uint64(time.Now().UnixNano()), + }, + ProposerAddress: oldAddr, + DataHash: common.DataHashForEmptyTxs, + }, + Signer: types.Signer{PubKey: oldPub, Address: oldAddr}, + } + + data := &types.Data{ + Metadata: &types.Metadata{ + ChainID: gen.ChainID, + Height: 2, + Time: uint64(time.Now().UnixNano()), + }, + Txs: types.Txs{types.Tx("rotated-key-tx")}, + } + + header := &types.SignedHeader{ + Header: types.Header{ + BaseHeader: types.BaseHeader{ + ChainID: gen.ChainID, + Height: 2, + Time: uint64(time.Now().UnixNano()), + }, + ProposerAddress: nextAddr, + DataHash: data.DACommitment(), + }, + Signer: types.Signer{PubKey: nextPub, Address: nextAddr}, + } + + sig1 := types.Signature([]byte("sig-1")) + sig2 := types.Signature([]byte("sig-2")) + batch, err := st.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(header1, data1, &sig1)) + require.NoError(t, batch.SaveBlockData(header, data, &sig2)) + require.NoError(t, batch.SetHeight(2)) + require.NoError(t, batch.Commit()) + + signedDataList, marshalledData, err := cm.GetPendingData(ctx) + require.NoError(t, err) + err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, nextSigner, gen) + require.NoError(t, err) + + _, ok := cm.GetDataDAIncludedByHeight(2) + assert.True(t, ok) + assert.NotEqual(t, oldAddr, nextAddr) +} + func TestDASubmitter_SubmitData_SkipsEmptyData(t *testing.T) { submitter, st, cm, mockDA, gen := setupDASubmitterTest(t) ctx := context.Background() diff --git a/block/internal/syncing/assert.go b/block/internal/syncing/assert.go index 7c77400571..56000e744e 100644 --- a/block/internal/syncing/assert.go +++ b/block/internal/syncing/assert.go @@ -1,7 +1,6 @@ package syncing import ( - "bytes" "errors" "fmt" @@ -9,11 +8,11 @@ import ( "github.com/evstack/ev-node/types" ) 
-func assertExpectedProposer(genesis genesis.Genesis, proposerAddr []byte) error { - if !bytes.Equal(proposerAddr, genesis.ProposerAddress) { - return fmt.Errorf("unexpected proposer: got %x, expected %x", - proposerAddr, genesis.ProposerAddress) +func assertExpectedProposer(genesis genesis.Genesis, height uint64, proposerAddr []byte, signer types.Signer) error { + if err := genesis.ValidateProposer(height, proposerAddr, signer.PubKey); err != nil { + return fmt.Errorf("unexpected proposer at height %d: %w", height, err) } + return nil } @@ -22,7 +21,7 @@ func assertValidSignedData(signedData *types.SignedData, genesis genesis.Genesis return errors.New("empty signed data") } - if err := assertExpectedProposer(genesis, signedData.Signer.Address); err != nil { + if err := assertExpectedProposer(genesis, signedData.Height(), signedData.Signer.Address, signedData.Signer); err != nil { return err } diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index d4fa93ce04..1b3393f181 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -299,7 +299,7 @@ func (r *daRetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedH return nil } - if err := r.assertExpectedProposer(header.ProposerAddress); err != nil { + if err := r.assertExpectedProposer(header); err != nil { r.logger.Debug().Err(err).Msg("unexpected proposer") return nil } @@ -355,9 +355,9 @@ func (r *daRetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { return &signedData.Data } -// assertExpectedProposer validates the proposer address -func (r *daRetriever) assertExpectedProposer(proposerAddr []byte) error { - return assertExpectedProposer(r.genesis, proposerAddr) +// assertExpectedProposer validates the proposer schedule entry for the header height. 
+func (r *daRetriever) assertExpectedProposer(header *types.SignedHeader) error { + return assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer) } // assertValidSignedData validates signed data using the configured signature provider diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index a3778757a1..67e5a1b278 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -81,7 +81,7 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC } return err } - if err := h.assertExpectedProposer(p2pHeader.ProposerAddress); err != nil { + if err := h.assertExpectedProposer(p2pHeader.SignedHeader); err != nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid header from P2P") return err } @@ -125,11 +125,11 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC return nil } -// assertExpectedProposer validates the proposer address. -func (h *P2PHandler) assertExpectedProposer(proposerAddr []byte) error { - if !bytes.Equal(h.genesis.ProposerAddress, proposerAddr) { - return fmt.Errorf("proposer address mismatch: got %x, expected %x", - proposerAddr, h.genesis.ProposerAddress) +// assertExpectedProposer validates the proposer schedule entry for the header height. 
+func (h *P2PHandler) assertExpectedProposer(header *types.SignedHeader) error { + if err := assertExpectedProposer(h.genesis, header.Height(), header.ProposerAddress, header.Signer); err != nil { + return err } + return nil } diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index 8bffc31ede..dc370a9482 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -215,6 +215,44 @@ func TestP2PHandler_ProcessHeight_SkipsOnProposerMismatch(t *testing.T) { p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(11)) } +func TestP2PHandler_ProcessHeight_AllowsScheduledProposerRotation(t *testing.T) { + p := setupP2P(t) + ctx := context.Background() + + nextAddr, nextPub, nextSigner := buildTestSigner(t) + + entry1, err := genesis.NewProposerScheduleEntry(p.Genesis.InitialHeight, p.ProposerPub) + require.NoError(t, err) + entry2, err := genesis.NewProposerScheduleEntry(11, nextPub) + require.NoError(t, err) + + p.Genesis.ProposerAddress = entry1.Address + p.Genesis.ProposerSchedule = []genesis.ProposerScheduleEntry{entry1, entry2} + p.Genesis.DAEpochForcedInclusion = 1 + require.NoError(t, p.Genesis.Validate()) + p.Handler.genesis = p.Genesis + + header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 11, nextAddr, nextPub, nextSigner) + data := &types.P2PData{Data: makeData(p.Genesis.ChainID, 11, 1)} + header.DataHash = data.DACommitment() + bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) + require.NoError(t, err) + sig, err := nextSigner.Sign(t.Context(), bz) + require.NoError(t, err) + header.Signature = sig + + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(header, nil).Once() + p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(data, nil).Once() + + ch := make(chan common.DAHeightEvent, 1) + err = p.Handler.ProcessHeight(ctx, 11, ch) + require.NoError(t, err) + + events := collectEvents(t, ch, 
50*time.Millisecond) + require.Len(t, events, 1) + require.Equal(t, nextAddr, events[0].Header.ProposerAddress) +} + func TestP2PHandler_ProcessedHeightSkipsPreviouslyHandledBlocks(t *testing.T) { p := setupP2P(t) ctx := t.Context() diff --git a/block/internal/syncing/raft_retriever.go b/block/internal/syncing/raft_retriever.go index aaebb7a458..b67fe86e09 100644 --- a/block/internal/syncing/raft_retriever.go +++ b/block/internal/syncing/raft_retriever.go @@ -125,7 +125,7 @@ func (r *raftRetriever) consumeRaftBlock(ctx context.Context, state *raft.RaftBl r.logger.Debug().Err(err).Msg("invalid header structure") return nil } - if err := assertExpectedProposer(r.genesis, header.ProposerAddress); err != nil { + if err := assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer); err != nil { r.logger.Debug().Err(err).Msg("unexpected proposer") return nil } diff --git a/docs/.vitepress/config.ts b/docs/.vitepress/config.ts index 0cfdf5c7ae..01bda4a8d9 100644 --- a/docs/.vitepress/config.ts +++ b/docs/.vitepress/config.ts @@ -297,6 +297,10 @@ function sidebarHome() { text: "Create genesis for your chain", link: "/guides/create-genesis", }, + { + text: "Rotate proposer key", + link: "/guides/operations/proposer-key-rotation", + }, { text: "Metrics", link: "/guides/metrics", diff --git a/docs/adr/adr-023-proposer-key-rotation.md b/docs/adr/adr-023-proposer-key-rotation.md new file mode 100644 index 0000000000..7d4114709c --- /dev/null +++ b/docs/adr/adr-023-proposer-key-rotation.md @@ -0,0 +1,151 @@ +# ADR 023: Proposer Key Rotation via Height-Based Schedule + +## Changelog + +- 2026-04-23: Implemented proposer key rotation through a height-indexed proposer schedule + +## Context + +ev-node historically treated the proposer as a single static identity embedded in genesis via `proposer_address`. +That assumption leaked into block production, DA submission, and sync validation. 
As a result, rotating a compromised +or operationally obsolete proposer key required out-of-band coordination and effectively behaved like a manual +re-genesis from the point of view of node operators. + +This was suboptimal for three reasons: + +1. It made proposer rotation operationally risky and easy to get wrong. +2. Fresh nodes syncing from genesis had no protocol-visible record of when the proposer changed. +3. Validation only pinned the proposer address, not the scheduled public key that should be producing blocks. + +## Alternative Approaches + +### 1. Manual key swap only + +Operators can stop the sequencer, swap the local signer, redistribute config, and restart nodes. +This is insufficient because the chain itself does not encode when the proposer changed, so historical sync +and validation become ambiguous. + +### 2. Re-issue a new genesis on each rotation + +This treats every proposer rotation like a chain restart. It is operationally heavy, conflates upgrades with +rotations, and breaks continuity for nodes syncing historical data. + +### 3. Height-indexed proposer schedule in genesis (Chosen) + +Record proposer changes as an ordered schedule indexed by activation height. This preserves chain continuity while +making rotation rules explicit and replayable from genesis. + +## Decision + +ev-node now supports proposer rotation through a `proposer_schedule` field in genesis. + +Each entry declares: + +- `start_height` +- `address` +- `pub_key` + +The active proposer for block height `h` is the last entry whose `start_height <= h`. + +The legacy `proposer_address` field remains for backward compatibility. When no explicit schedule is present, +ev-node derives an implicit single-entry schedule beginning at `initial_height`. 
+ +When an explicit schedule is present: + +- the first entry must start at `initial_height` +- entries must be strictly increasing by `start_height` +- each entry's `address` must match the configured `pub_key` +- `proposer_address`, when present, must match the first schedule entry + +## Detailed Design + +### Data model + +Genesis gains: + +```json +"proposer_schedule": [ + { + "start_height": 1, + "address": "...", + "pub_key": "..." + }, + { + "start_height": 1250000, + "address": "...", + "pub_key": "..." + } +] +``` + +The existing `proposer_address` field is retained as a compatibility field and is normalized to the first +scheduled proposer when a schedule is present. + +### Validation rules + +The proposer schedule is now consulted in all proposer-sensitive paths: + +1. executor startup accepts any signer that appears somewhere in the schedule +2. block creation resolves the proposer for the exact height being produced +3. DA submission validates the configured signer against the scheduled proposer for each signed data height +4. sync validation validates incoming headers and signed data against the scheduled proposer for their heights + +This makes proposer rotation protocol-visible for both live nodes and nodes syncing historical data. + +### Operational procedure + +For a planned rotation: + +1. Choose activation height `H` +2. Add a new `proposer_schedule` entry with `start_height = H` +3. Distribute the updated genesis/config to node operators +4. Upgrade follower/full nodes before activation +5. Stop the old sequencer before `H` +6. Start the new sequencer with the replacement key at or after `H` + +The old proposer remains valid for heights `< H`, and the new proposer becomes valid at heights `>= H`. + +### Security considerations + +This design improves safety over address-only pinning by allowing validation against the scheduled public key. 
+It does not solve emergency rotation authorization by itself; a future design can add a separate upgrade authority +or rotation certificate flow if the network needs signer replacement without prior static scheduling. + +### Testing + +Coverage includes: + +- genesis schedule validation and height resolution +- sync acceptance of scheduled proposer rotation +- DA submission using a rotated proposer key at the configured height +- executor block creation using the proposer scheduled for the produced height + +## Status + +Implemented + +## Consequences + +### Positive + +- proposer rotation is now part of the chain configuration rather than an operator convention +- fresh nodes can validate historical proposer changes from genesis +- sync and DA validation can pin scheduled public keys, not just addresses +- routine key rotation no longer requires a chain restart + +### Negative + +- proposer schedule changes are consensus-visible and require coordinated rollout +- operators must distribute updated genesis/config before activation height +- emergency rotation still requires preplanned scheduling or a later authority-based mechanism + +### Neutral + +- legacy single-proposer deployments continue to work without defining `proposer_schedule` + +## References + +- [pkg/genesis/genesis.go](../../pkg/genesis/genesis.go) +- [pkg/genesis/proposer_schedule.go](../../pkg/genesis/proposer_schedule.go) +- [block/internal/executing/executor.go](../../block/internal/executing/executor.go) +- [block/internal/syncing/assert.go](../../block/internal/syncing/assert.go) diff --git a/docs/guides/create-genesis.md b/docs/guides/create-genesis.md index 5886325dab..365b491b82 100644 --- a/docs/guides/create-genesis.md +++ b/docs/guides/create-genesis.md @@ -125,6 +125,10 @@ Before doing so, add a `da_start_height` field to the genesis file, that corresp jq '.da_start_height = 1' ~/.$CHAIN_ID/config/genesis.json > temp.json && mv temp.json ~/.$CHAIN_ID/config/genesis.json ``` +:::tip +If 
you want to plan a future proposer key migration without restarting the chain, define a `proposer_schedule` in your genesis and roll it out as a coordinated upgrade. See [Rotate proposer key](./operations/proposer-key-rotation.md). +::: + ## Summary By following these steps, you will set up the genesis for your chain, initialize the validator, add a genesis account, and start the chain. This guide provides a basic framework for configuring and starting your chain using the gm-world binary. Make sure you initialized your chain correctly, and use the `gmd` command for all operations. diff --git a/docs/guides/operations/proposer-key-rotation.md b/docs/guides/operations/proposer-key-rotation.md new file mode 100644 index 0000000000..b25c43fc7a --- /dev/null +++ b/docs/guides/operations/proposer-key-rotation.md @@ -0,0 +1,195 @@ +# Rotate proposer key + +Use this guide to rotate a sequencer proposer key without restarting the chain. The active proposer is selected from `proposer_schedule` in `genesis.json` based on block height. + +## Before you start + +- This is a coordinated network upgrade. Every node must run a binary that supports `proposer_schedule`. +- Every node must use the same updated `genesis.json` before the activation height. +- `ev-node` loads `genesis.json` when the node starts. Updating the file on disk is not enough; you must restart nodes after replacing it. +- The old proposer key remains valid until the block before the activation height. If the old key cannot safely produce until then, stop the sequencer and coordinate operator recovery first. + +## How proposer rotation is stored in genesis + +`proposer_address`, `proposer_schedule[].address`, and `proposer_schedule[].pub_key` are base64-encoded strings in JSON. 
+ +```json +{ + "initial_height": 1, + "proposer_address": "0FQmA4Hn9dn8m4ZpM4+fV4e8KhkWjI4V2Vt1j9Qm5pA=", + "proposer_schedule": [ + { + "start_height": 1, + "address": "0FQmA4Hn9dn8m4ZpM4+fV4e8KhkWjI4V2Vt1j9Qm5pA=", + "pub_key": "5l6vM0b0GqQYQw4x0cI6q7N2vD1cE+oV6rN5eQ7v6dM=" + }, + { + "start_height": 125000, + "address": "Y7z5v9mQm4Nw6mD0a2yR9kD2B0qv5iJj1Q1R7gD4B7Q=", + "pub_key": "9r5mM4XjKx6h6sJv2Jf6dB5nQ0eU9l8cM1qT2wV3yZQ=" + } + ] +} +``` + +Rules enforced by `ev-node`: + +- `proposer_schedule[0].start_height` must equal `initial_height` +- schedule entries must be strictly increasing by `start_height` +- every `address` must match its `pub_key` +- if `proposer_address` is set, it must match the first schedule entry + +Keep all earlier schedule entries. Fresh full nodes need them to validate historical blocks. + +## 1. Pick an activation height + +Choose an activation height `H` far enough in the future that you can distribute the updated genesis and restart every non-producing node before the cutover. + +```bash +ACTIVATION_HEIGHT=125000 +GENESIS="$HOME/.evnode/config/genesis.json" +INITIAL_HEIGHT="$(jq -r '.initial_height' "$GENESIS")" +``` + +## 2. Get the current and replacement proposer public keys + +For a file-based signer, the signer public key is stored in `signer.json` as base64: + +```bash +OLD_SIGNER_DIR="$HOME/.evnode/config" +NEW_SIGNER_DIR="/secure/path/new-signer" + +OLD_PROPOSER_PUBKEY="$(jq -r '.pub_key' "$OLD_SIGNER_DIR/signer.json")" +NEW_PROPOSER_PUBKEY="$(jq -r '.pub_key' "$NEW_SIGNER_DIR/signer.json")" +``` + +If you use a KMS-backed signer, export the replacement Ed25519 public key from your signer flow and base64-encode the raw public key bytes in the same format. The runtime configuration stays the same as in the [AWS KMS signer guide](./aws-kms-signer.md). + +## 3. Derive proposer addresses from the public keys + +`ev-node` derives the proposer address as `sha256(raw_pubkey_bytes)`. 
The helper below prints the address in the base64 format used by `genesis.json`. + +```bash +proposer_address() { + python3 - "$1" <<'PY' +import base64 +import hashlib +import sys + +pub_key = base64.b64decode(sys.argv[1]) +address = hashlib.sha256(pub_key).digest() +print(base64.b64encode(address).decode()) +PY +} + +OLD_PROPOSER_ADDRESS="$(proposer_address "$OLD_PROPOSER_PUBKEY")" +NEW_PROPOSER_ADDRESS="$(proposer_address "$NEW_PROPOSER_PUBKEY")" +``` + +## 4. Update `genesis.json` + +### If your chain only has `proposer_address` today + +Create an explicit schedule with the current proposer at `initial_height` and the new proposer at `ACTIVATION_HEIGHT`. + +```bash +jq \ + --arg old_addr "$OLD_PROPOSER_ADDRESS" \ + --arg old_pub "$OLD_PROPOSER_PUBKEY" \ + --arg new_addr "$NEW_PROPOSER_ADDRESS" \ + --arg new_pub "$NEW_PROPOSER_PUBKEY" \ + --argjson initial_height "$INITIAL_HEIGHT" \ + --argjson activation_height "$ACTIVATION_HEIGHT" \ + ' + .proposer_address = $old_addr + | .proposer_schedule = [ + { + start_height: $initial_height, + address: $old_addr, + pub_key: $old_pub + }, + { + start_height: $activation_height, + address: $new_addr, + pub_key: $new_pub + } + ] + ' "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS" +``` + +### If your chain already has `proposer_schedule` + +Append the new entry. Do not replace older entries, and make sure `ACTIVATION_HEIGHT` is greater than the last scheduled `start_height`. + +```bash +jq \ + --arg new_addr "$NEW_PROPOSER_ADDRESS" \ + --arg new_pub "$NEW_PROPOSER_PUBKEY" \ + --argjson activation_height "$ACTIVATION_HEIGHT" \ + ' + .proposer_schedule += [ + { + start_height: $activation_height, + address: $new_addr, + pub_key: $new_pub + } + ] + ' "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS" +``` + +Verify the result before you distribute it: + +```bash +jq '{initial_height, proposer_address, proposer_schedule}' "$GENESIS" +``` + +## 5. 
Distribute the updated genesis and restart followers + +Copy the same `genesis.json` to every full node, replica, and failover node. Restart them after copying the file so they load the updated schedule. + +Do this before the chain reaches `ACTIVATION_HEIGHT`. + +## 6. Cut over the sequencer + +Wait until the chain reaches `ACTIVATION_HEIGHT - 1`, then stop the old sequencer and start it with the replacement signer. + +Example with a file-based signer: + +```bash +evnode start \ + --home "$HOME/.evnode" \ + --evnode.node.aggregator \ + --evnode.signer.signer_type file \ + --evnode.signer.signer_path "$NEW_SIGNER_DIR" \ + --evnode.signer.passphrase "$SIGNER_PASSPHRASE" +``` + +If you run a custom chain binary such as `gmd` or `appd`, use the same start command you already use for the sequencer and only change the signer configuration. + +## 7. Verify the first post-upgrade block + +Fetch the header at `ACTIVATION_HEIGHT` or the next produced block and confirm that it carries the new proposer address. + +```bash +curl -s http://127.0.0.1:26657/header \ + -H 'Content-Type: application/json' \ + -d "{\"jsonrpc\":\"2.0\",\"method\":\"header\",\"params\":{\"height\":\"${ACTIVATION_HEIGHT}\"},\"id\":1}" \ + | jq . +``` + +Some RPC clients render binary fields as hex instead of base64. If needed, convert the base64 genesis address before comparing: + +```bash +python3 - "$NEW_PROPOSER_ADDRESS" <<'PY' +import base64 +import sys + +print("0x" + base64.b64decode(sys.argv[1]).hex()) +PY +``` + +If the node at `ACTIVATION_HEIGHT` is still signed by the old key, stop block production and check three things first: + +1. every node was restarted after receiving the updated genesis +2. `proposer_schedule` contains the new entry at the intended height +3. 
the sequencer is actually running with the replacement signer diff --git a/docs/guides/operations/upgrades.md b/docs/guides/operations/upgrades.md index 0027f13c36..ac5f6dcbf1 100644 --- a/docs/guides/operations/upgrades.md +++ b/docs/guides/operations/upgrades.md @@ -38,6 +38,12 @@ May require state migration or coordinated network upgrade. 5. Run any migration scripts 6. Restart +### Proposer Key Rotation + +Rotating the proposer key is a coordinated upgrade even when the chain does not restart. All nodes must receive the same updated `genesis.json`, restart to load it, and be ready before the scheduled activation height. + +Use [Rotate proposer key](./proposer-key-rotation.md) for the exact `proposer_schedule` format, genesis update steps, and cutover procedure. + ## ev-node Upgrades ### Check Current Version diff --git a/node/failover.go b/node/failover.go index 42dac4e8bc..752b6aaba3 100644 --- a/node/failover.go +++ b/node/failover.go @@ -139,7 +139,7 @@ func setupFailoverState( headerSyncService.Store(), dataSyncService.Store(), p2pClient, - genesis.ProposerAddress, + genesis.InitialProposerAddress(), logger, nodeConfig, bestKnownHeightProvider, diff --git a/node/full.go b/node/full.go index bd44f9ef42..5d13beebbd 100644 --- a/node/full.go +++ b/node/full.go @@ -78,7 +78,7 @@ func newFullNode( logger zerolog.Logger, nodeOpts NodeOptions, ) (fn *FullNode, err error) { - logger.Debug().Hex("address", genesis.ProposerAddress).Msg("Proposer address") + logger.Debug().Hex("address", genesis.InitialProposerAddress()).Msg("Initial proposer address") blockMetrics, _ := metricsProvider(genesis.ChainID) diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index e1a401d9fc..a5079c72e6 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -1,6 +1,7 @@ package genesis import ( + "bytes" "fmt" "time" ) @@ -11,10 +12,11 @@ const ChainIDFlag = "chain_id" // This genesis struct only contains the fields required by evolve. 
// The app state or other fields are not included here. type Genesis struct { - ChainID string `json:"chain_id"` - StartTime time.Time `json:"start_time"` - InitialHeight uint64 `json:"initial_height"` - ProposerAddress []byte `json:"proposer_address"` + ChainID string `json:"chain_id"` + StartTime time.Time `json:"start_time"` + InitialHeight uint64 `json:"initial_height"` + ProposerAddress []byte `json:"proposer_address"` + ProposerSchedule []ProposerScheduleEntry `json:"proposer_schedule,omitempty"` // DAStartHeight corresponds to the height at which the first DA header/data has been published. // This value is meant to be updated after genesis and shared to all syncing nodes for speeding up syncing via DA. DAStartHeight uint64 `json:"da_start_height"` @@ -56,8 +58,28 @@ func (g Genesis) Validate() error { return fmt.Errorf("start_time cannot be zero time") } - if g.ProposerAddress == nil { - return fmt.Errorf("proposer_address cannot be nil") + if len(g.ProposerSchedule) == 0 { + if len(g.ProposerAddress) == 0 { + return fmt.Errorf("proposer_address cannot be empty when proposer_schedule is unset") + } + } else { + if err := g.ProposerSchedule[0].validate(g.InitialHeight, true); err != nil { + return fmt.Errorf("invalid proposer_schedule[0]: %w", err) + } + if g.ProposerSchedule[0].StartHeight != g.InitialHeight { + return fmt.Errorf("proposer_schedule[0].start_height must equal initial_height (%d), got %d", g.InitialHeight, g.ProposerSchedule[0].StartHeight) + } + for i := 1; i < len(g.ProposerSchedule); i++ { + if err := g.ProposerSchedule[i].validate(g.InitialHeight, true); err != nil { + return fmt.Errorf("invalid proposer_schedule[%d]: %w", i, err) + } + if g.ProposerSchedule[i].StartHeight <= g.ProposerSchedule[i-1].StartHeight { + return fmt.Errorf("proposer_schedule must be strictly increasing: entry %d start_height %d is not greater than previous %d", i, g.ProposerSchedule[i].StartHeight, g.ProposerSchedule[i-1].StartHeight) + } + } + if 
len(g.ProposerAddress) > 0 && !bytes.Equal(g.ProposerAddress, g.ProposerSchedule[0].Address) { + return fmt.Errorf("proposer_address must match proposer_schedule[0].address") + } } if g.DAEpochForcedInclusion < 1 { diff --git a/pkg/genesis/io.go b/pkg/genesis/io.go index 8c9d88e955..dcf9048aa6 100644 --- a/pkg/genesis/io.go +++ b/pkg/genesis/io.go @@ -72,12 +72,12 @@ func LoadGenesis(genesisPath string) (Genesis, error) { return Genesis{}, err } - return genesis, nil + return genesis.normalized(), nil } // Save saves the genesis state to the specified file path. func (g Genesis) Save(genesisPath string) error { - genesisJSON, err := json.MarshalIndent(g, "", " ") + genesisJSON, err := json.MarshalIndent(g.normalized(), "", " ") if err != nil { return fmt.Errorf("failed to marshal genesis state: %w", err) } diff --git a/pkg/genesis/proposer_schedule.go b/pkg/genesis/proposer_schedule.go new file mode 100644 index 0000000000..28d9abbea8 --- /dev/null +++ b/pkg/genesis/proposer_schedule.go @@ -0,0 +1,208 @@ +package genesis + +import ( + "bytes" + "crypto/sha256" + "fmt" + + "github.com/libp2p/go-libp2p/core/crypto" +) + +// ProposerScheduleEntry declares the proposer key that becomes active at start_height. +type ProposerScheduleEntry struct { + StartHeight uint64 `json:"start_height"` + Address []byte `json:"address"` + PubKey []byte `json:"pub_key,omitempty"` +} + +// NewProposerScheduleEntry creates a proposer schedule entry from a libp2p public key. 
+func NewProposerScheduleEntry(startHeight uint64, pubKey crypto.PubKey) (ProposerScheduleEntry, error) { + if pubKey == nil { + return ProposerScheduleEntry{}, fmt.Errorf("proposer pub_key cannot be nil") + } + + marshalledPubKey, err := crypto.MarshalPublicKey(pubKey) + if err != nil { + return ProposerScheduleEntry{}, fmt.Errorf("marshal proposer pub_key: %w", err) + } + + return ProposerScheduleEntry{ + StartHeight: startHeight, + Address: proposerKeyAddress(pubKey), + PubKey: marshalledPubKey, + }, nil +} + +// PublicKey unmarshals the configured proposer public key. Legacy single-proposer +// configs may omit the pubkey and will return nil, nil here. +func (e ProposerScheduleEntry) PublicKey() (crypto.PubKey, error) { + if len(e.PubKey) == 0 { + return nil, nil + } + + pubKey, err := crypto.UnmarshalPublicKey(e.PubKey) + if err != nil { + return nil, fmt.Errorf("unmarshal proposer pub_key: %w", err) + } + + return pubKey, nil +} + +func (e ProposerScheduleEntry) validate(initialHeight uint64, requirePubKey bool) error { + if e.StartHeight < initialHeight { + return fmt.Errorf("proposer schedule start_height must be >= initial_height (%d), got %d", initialHeight, e.StartHeight) + } + + if len(e.Address) == 0 { + return fmt.Errorf("proposer schedule address cannot be empty") + } + + if len(e.PubKey) == 0 { + if requirePubKey { + return fmt.Errorf("proposer schedule pub_key cannot be empty") + } + return nil + } + + pubKey, err := e.PublicKey() + if err != nil { + return err + } + + expectedAddress := proposerKeyAddress(pubKey) + if !bytes.Equal(expectedAddress, e.Address) { + return fmt.Errorf("proposer schedule address does not match pub_key: got %x, expected %x", e.Address, expectedAddress) + } + + return nil +} + +// EffectiveProposerSchedule returns the explicit proposer schedule when present, +// or derives a legacy single-entry schedule from proposer_address. 
+func (g Genesis) EffectiveProposerSchedule() []ProposerScheduleEntry { + if len(g.ProposerSchedule) > 0 { + out := make([]ProposerScheduleEntry, len(g.ProposerSchedule)) + copy(out, g.ProposerSchedule) + return out + } + + if len(g.ProposerAddress) == 0 { + return nil + } + + return []ProposerScheduleEntry{{ + StartHeight: g.InitialHeight, + Address: cloneBytes(g.ProposerAddress), + }} +} + +// InitialProposerAddress returns the first proposer address for compatibility +// with code paths that still surface a single address externally. +func (g Genesis) InitialProposerAddress() []byte { + entry, err := g.ProposerAtHeight(g.InitialHeight) + if err != nil { + return nil + } + + return cloneBytes(entry.Address) +} + +func (g Genesis) normalized() Genesis { + normalized := g + if len(normalized.ProposerAddress) == 0 { + normalized.ProposerAddress = normalized.InitialProposerAddress() + } + return normalized +} + +// HasScheduledProposer reports whether the address appears in the effective proposer schedule. +func (g Genesis) HasScheduledProposer(address []byte) bool { + for _, entry := range g.EffectiveProposerSchedule() { + if bytes.Equal(entry.Address, address) { + return true + } + } + return false +} + +// ProposerAtHeight resolves the proposer that is active for the given block height. 
+func (g Genesis) ProposerAtHeight(height uint64) (ProposerScheduleEntry, error) { + schedule := g.EffectiveProposerSchedule() + if len(schedule) == 0 { + return ProposerScheduleEntry{}, fmt.Errorf("no proposer configured") + } + + if height < schedule[0].StartHeight { + return ProposerScheduleEntry{}, fmt.Errorf("no proposer configured for height %d before start_height %d", height, schedule[0].StartHeight) + } + + entry := schedule[0] + for i := 1; i < len(schedule); i++ { + if height < schedule[i].StartHeight { + break + } + entry = schedule[i] + } + + return ProposerScheduleEntry{ + StartHeight: entry.StartHeight, + Address: cloneBytes(entry.Address), + PubKey: cloneBytes(entry.PubKey), + }, nil +} + +// ValidateProposer checks that the provided proposer address and public key match +// the proposer schedule entry active at the given height. +func (g Genesis) ValidateProposer(height uint64, address []byte, pubKey crypto.PubKey) error { + entry, err := g.ProposerAtHeight(height) + if err != nil { + return err + } + + if !bytes.Equal(entry.Address, address) { + return fmt.Errorf("unexpected proposer at height %d: got %x, expected %x", height, address, entry.Address) + } + + if len(entry.PubKey) == 0 { + return nil + } + + if pubKey == nil { + return fmt.Errorf("missing proposer pub_key at height %d", height) + } + + marshalledPubKey, err := crypto.MarshalPublicKey(pubKey) + if err != nil { + return fmt.Errorf("marshal proposer pub_key: %w", err) + } + + if !bytes.Equal(entry.PubKey, marshalledPubKey) { + return fmt.Errorf("unexpected proposer pub_key at height %d", height) + } + + return nil +} + +func cloneBytes(src []byte) []byte { + if src == nil { + return nil + } + + out := make([]byte, len(src)) + copy(out, src) + return out +} + +func proposerKeyAddress(pubKey crypto.PubKey) []byte { + if pubKey == nil { + return nil + } + + raw, err := pubKey.Raw() + if err != nil { + return nil + } + + sum := sha256.Sum256(raw) + return sum[:] +} diff --git 
a/pkg/genesis/proposer_schedule_test.go b/pkg/genesis/proposer_schedule_test.go new file mode 100644 index 0000000000..8da1457bc1 --- /dev/null +++ b/pkg/genesis/proposer_schedule_test.go @@ -0,0 +1,93 @@ +package genesis + +import ( + "crypto/rand" + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/stretchr/testify/require" +) + +func makeProposerScheduleEntry(t *testing.T, startHeight uint64) (ProposerScheduleEntry, crypto.PubKey) { + t.Helper() + + _, pubKey, err := crypto.GenerateEd25519Key(rand.Reader) + require.NoError(t, err) + + entry, err := NewProposerScheduleEntry(startHeight, pubKey) + require.NoError(t, err) + + return entry, pubKey +} + +func TestGenesisProposerAtHeight(t *testing.T) { + entry1, _ := makeProposerScheduleEntry(t, 3) + entry2, _ := makeProposerScheduleEntry(t, 10) + + genesis := Genesis{ + ChainID: "test-chain", + StartTime: time.Now().UTC(), + InitialHeight: 3, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + require.NoError(t, genesis.Validate()) + + proposer, err := genesis.ProposerAtHeight(3) + require.NoError(t, err) + require.Equal(t, entry1.Address, proposer.Address) + + proposer, err = genesis.ProposerAtHeight(9) + require.NoError(t, err) + require.Equal(t, entry1.Address, proposer.Address) + + proposer, err = genesis.ProposerAtHeight(10) + require.NoError(t, err) + require.Equal(t, entry2.Address, proposer.Address) +} + +func TestGenesisValidateProposerSchedule(t *testing.T) { + entry1, pubKey1 := makeProposerScheduleEntry(t, 1) + entry2, pubKey2 := makeProposerScheduleEntry(t, 20) + + genesis := Genesis{ + ChainID: "test-chain", + StartTime: time.Now().UTC(), + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + require.NoError(t, genesis.Validate()) + require.NoError(t, genesis.ValidateProposer(1, entry1.Address, pubKey1)) + require.NoError(t, 
genesis.ValidateProposer(21, entry2.Address, pubKey2)) + require.Error(t, genesis.ValidateProposer(21, entry2.Address, pubKey1)) +} + +func TestLoadGenesisNormalizesLegacyProposerAddressFromSchedule(t *testing.T) { + entry1, _ := makeProposerScheduleEntry(t, 1) + entry2, _ := makeProposerScheduleEntry(t, 50) + + rawGenesis := Genesis{ + ChainID: "test-chain", + StartTime: time.Now().UTC(), + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + genesisPath := filepath.Join(t.TempDir(), "genesis.json") + genesisJSON, err := json.Marshal(rawGenesis) + require.NoError(t, err) + require.NoError(t, os.WriteFile(genesisPath, genesisJSON, 0o600)) + + loaded, err := LoadGenesis(genesisPath) + require.NoError(t, err) + require.Equal(t, entry1.Address, loaded.ProposerAddress) + require.Equal(t, rawGenesis.ProposerSchedule, loaded.ProposerSchedule) +} From 6a0f3679f5cb8d6933d1792b1fdd67670f9a46dd Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Thu, 23 Apr 2026 15:38:56 +0200 Subject: [PATCH 02/13] remove need for pubkey and add some tests --- block/internal/syncing/assert.go | 8 +++-- block/internal/syncing/da_retriever.go | 2 +- block/internal/syncing/p2p_handler.go | 6 +--- block/internal/syncing/raft_retriever.go | 2 +- .../operations/proposer-key-rotation.md | 23 +++++--------- pkg/genesis/genesis.go | 4 +-- pkg/genesis/proposer_schedule.go | 30 ++++++------------- pkg/genesis/proposer_schedule_test.go | 21 ++++++++++++- 8 files changed, 46 insertions(+), 50 deletions(-) diff --git a/block/internal/syncing/assert.go b/block/internal/syncing/assert.go index 56000e744e..1bed6db8b9 100644 --- a/block/internal/syncing/assert.go +++ b/block/internal/syncing/assert.go @@ -4,12 +4,14 @@ import ( "errors" "fmt" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/types" ) -func assertExpectedProposer(genesis genesis.Genesis, height uint64, proposerAddr 
[]byte, signer types.Signer) error { - if err := genesis.ValidateProposer(height, proposerAddr, signer.PubKey); err != nil { +func assertExpectedProposer(genesis genesis.Genesis, height uint64, proposerAddr []byte, pubKey crypto.PubKey) error { + if err := genesis.ValidateProposer(height, proposerAddr, pubKey); err != nil { return fmt.Errorf("unexpected proposer at height %d: %w", height, err) } @@ -21,7 +23,7 @@ func assertValidSignedData(signedData *types.SignedData, genesis genesis.Genesis return errors.New("empty signed data") } - if err := assertExpectedProposer(genesis, signedData.Height(), signedData.Signer.Address, signedData.Signer); err != nil { + if err := assertExpectedProposer(genesis, signedData.Height(), signedData.Signer.Address, signedData.Signer.PubKey); err != nil { return err } diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 1b3393f181..75bc631e8f 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -357,7 +357,7 @@ func (r *daRetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { // assertExpectedProposer validates the proposer schedule entry for the header height. func (r *daRetriever) assertExpectedProposer(header *types.SignedHeader) error { - return assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer) + return assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer.PubKey) } // assertValidSignedData validates signed data using the configured signature provider diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 67e5a1b278..0e8a08cea3 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -127,9 +127,5 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC // assertExpectedProposer validates the proposer schedule entry for the header height. 
func (h *P2PHandler) assertExpectedProposer(header *types.SignedHeader) error { - if err := assertExpectedProposer(h.genesis, header.Height(), header.ProposerAddress, header.Signer); err != nil { - return err - } - - return nil + return assertExpectedProposer(h.genesis, header.Height(), header.ProposerAddress, header.Signer.PubKey) } diff --git a/block/internal/syncing/raft_retriever.go b/block/internal/syncing/raft_retriever.go index b67fe86e09..4cb15aec07 100644 --- a/block/internal/syncing/raft_retriever.go +++ b/block/internal/syncing/raft_retriever.go @@ -125,7 +125,7 @@ func (r *raftRetriever) consumeRaftBlock(ctx context.Context, state *raft.RaftBl r.logger.Debug().Err(err).Msg("invalid header structure") return nil } - if err := assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer); err != nil { + if err := assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer.PubKey); err != nil { r.logger.Debug().Err(err).Msg("unexpected proposer") return nil } diff --git a/docs/guides/operations/proposer-key-rotation.md b/docs/guides/operations/proposer-key-rotation.md index b25c43fc7a..3c5667d50c 100644 --- a/docs/guides/operations/proposer-key-rotation.md +++ b/docs/guides/operations/proposer-key-rotation.md @@ -11,7 +11,7 @@ Use this guide to rotate a sequencer proposer key without restarting the chain. ## How proposer rotation is stored in genesis -`proposer_address`, `proposer_schedule[].address`, and `proposer_schedule[].pub_key` are base64-encoded strings in JSON. +`proposer_address` and `proposer_schedule[].address` are base64-encoded strings in JSON. ```json { @@ -20,13 +20,11 @@ Use this guide to rotate a sequencer proposer key without restarting the chain. 
"proposer_schedule": [ { "start_height": 1, - "address": "0FQmA4Hn9dn8m4ZpM4+fV4e8KhkWjI4V2Vt1j9Qm5pA=", - "pub_key": "5l6vM0b0GqQYQw4x0cI6q7N2vD1cE+oV6rN5eQ7v6dM=" + "address": "0FQmA4Hn9dn8m4ZpM4+fV4e8KhkWjI4V2Vt1j9Qm5pA=" }, { "start_height": 125000, - "address": "Y7z5v9mQm4Nw6mD0a2yR9kD2B0qv5iJj1Q1R7gD4B7Q=", - "pub_key": "9r5mM4XjKx6h6sJv2Jf6dB5nQ0eU9l8cM1qT2wV3yZQ=" + "address": "Y7z5v9mQm4Nw6mD0a2yR9kD2B0qv5iJj1Q1R7gD4B7Q=" } ] } @@ -36,7 +34,6 @@ Rules enforced by `ev-node`: - `proposer_schedule[0].start_height` must equal `initial_height` - schedule entries must be strictly increasing by `start_height` -- every `address` must match its `pub_key` - if `proposer_address` is set, it must match the first schedule entry Keep all earlier schedule entries. Fresh full nodes need them to validate historical blocks. @@ -53,7 +50,7 @@ INITIAL_HEIGHT="$(jq -r '.initial_height' "$GENESIS")" ## 2. Get the current and replacement proposer public keys -For a file-based signer, the signer public key is stored in `signer.json` as base64: +For a file-based signer, the signer public key is stored in `signer.json` as base64. You only put the derived address into genesis, but you still need the public key once to compute that address. 
```bash OLD_SIGNER_DIR="$HOME/.evnode/config" @@ -95,9 +92,7 @@ Create an explicit schedule with the current proposer at `initial_height` and th ```bash jq \ --arg old_addr "$OLD_PROPOSER_ADDRESS" \ - --arg old_pub "$OLD_PROPOSER_PUBKEY" \ --arg new_addr "$NEW_PROPOSER_ADDRESS" \ - --arg new_pub "$NEW_PROPOSER_PUBKEY" \ --argjson initial_height "$INITIAL_HEIGHT" \ --argjson activation_height "$ACTIVATION_HEIGHT" \ ' @@ -105,13 +100,11 @@ jq \ | .proposer_schedule = [ { start_height: $initial_height, - address: $old_addr, - pub_key: $old_pub + address: $old_addr }, { start_height: $activation_height, - address: $new_addr, - pub_key: $new_pub + address: $new_addr } ] ' "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS" @@ -124,14 +117,12 @@ Append the new entry. Do not replace older entries, and make sure `ACTIVATION_HE ```bash jq \ --arg new_addr "$NEW_PROPOSER_ADDRESS" \ - --arg new_pub "$NEW_PROPOSER_PUBKEY" \ --argjson activation_height "$ACTIVATION_HEIGHT" \ ' .proposer_schedule += [ { start_height: $activation_height, - address: $new_addr, - pub_key: $new_pub + address: $new_addr } ] ' "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS" diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index a5079c72e6..1cbe506e1c 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -63,14 +63,14 @@ func (g Genesis) Validate() error { return fmt.Errorf("proposer_address cannot be empty when proposer_schedule is unset") } } else { - if err := g.ProposerSchedule[0].validate(g.InitialHeight, true); err != nil { + if err := g.ProposerSchedule[0].validate(g.InitialHeight); err != nil { return fmt.Errorf("invalid proposer_schedule[0]: %w", err) } if g.ProposerSchedule[0].StartHeight != g.InitialHeight { return fmt.Errorf("proposer_schedule[0].start_height must equal initial_height (%d), got %d", g.InitialHeight, g.ProposerSchedule[0].StartHeight) } for i := 1; i < len(g.ProposerSchedule); i++ { - if err := 
g.ProposerSchedule[i].validate(g.InitialHeight, true); err != nil { + if err := g.ProposerSchedule[i].validate(g.InitialHeight); err != nil { return fmt.Errorf("invalid proposer_schedule[%d]: %w", i, err) } if g.ProposerSchedule[i].StartHeight <= g.ProposerSchedule[i-1].StartHeight { diff --git a/pkg/genesis/proposer_schedule.go b/pkg/genesis/proposer_schedule.go index 28d9abbea8..3ac6e4f831 100644 --- a/pkg/genesis/proposer_schedule.go +++ b/pkg/genesis/proposer_schedule.go @@ -8,7 +8,8 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" ) -// ProposerScheduleEntry declares the proposer key that becomes active at start_height. +// ProposerScheduleEntry declares the proposer address that becomes active at start_height. +// PubKey is optional and can be used to pin the exact key material for a schedule entry. type ProposerScheduleEntry struct { StartHeight uint64 `json:"start_height"` Address []byte `json:"address"` @@ -33,8 +34,8 @@ func NewProposerScheduleEntry(startHeight uint64, pubKey crypto.PubKey) (Propose }, nil } -// PublicKey unmarshals the configured proposer public key. Legacy single-proposer -// configs may omit the pubkey and will return nil, nil here. +// PublicKey unmarshals the configured proposer public key. Address-only schedule +// entries may omit the pubkey and will return nil, nil here. 
func (e ProposerScheduleEntry) PublicKey() (crypto.PubKey, error) { if len(e.PubKey) == 0 { return nil, nil @@ -48,7 +49,7 @@ func (e ProposerScheduleEntry) PublicKey() (crypto.PubKey, error) { return pubKey, nil } -func (e ProposerScheduleEntry) validate(initialHeight uint64, requirePubKey bool) error { +func (e ProposerScheduleEntry) validate(initialHeight uint64) error { if e.StartHeight < initialHeight { return fmt.Errorf("proposer schedule start_height must be >= initial_height (%d), got %d", initialHeight, e.StartHeight) } @@ -58,9 +59,6 @@ func (e ProposerScheduleEntry) validate(initialHeight uint64, requirePubKey bool } if len(e.PubKey) == 0 { - if requirePubKey { - return fmt.Errorf("proposer schedule pub_key cannot be empty") - } return nil } @@ -92,7 +90,7 @@ func (g Genesis) EffectiveProposerSchedule() []ProposerScheduleEntry { return []ProposerScheduleEntry{{ StartHeight: g.InitialHeight, - Address: cloneBytes(g.ProposerAddress), + Address: bytes.Clone(g.ProposerAddress), }} } @@ -104,7 +102,7 @@ func (g Genesis) InitialProposerAddress() []byte { return nil } - return cloneBytes(entry.Address) + return bytes.Clone(entry.Address) } func (g Genesis) normalized() Genesis { @@ -146,8 +144,8 @@ func (g Genesis) ProposerAtHeight(height uint64) (ProposerScheduleEntry, error) return ProposerScheduleEntry{ StartHeight: entry.StartHeight, - Address: cloneBytes(entry.Address), - PubKey: cloneBytes(entry.PubKey), + Address: bytes.Clone(entry.Address), + PubKey: bytes.Clone(entry.PubKey), }, nil } @@ -183,16 +181,6 @@ func (g Genesis) ValidateProposer(height uint64, address []byte, pubKey crypto.P return nil } -func cloneBytes(src []byte) []byte { - if src == nil { - return nil - } - - out := make([]byte, len(src)) - copy(out, src) - return out -} - func proposerKeyAddress(pubKey crypto.PubKey) []byte { if pubKey == nil { return nil diff --git a/pkg/genesis/proposer_schedule_test.go b/pkg/genesis/proposer_schedule_test.go index 8da1457bc1..48835deb89 100644 --- 
a/pkg/genesis/proposer_schedule_test.go +++ b/pkg/genesis/proposer_schedule_test.go @@ -51,7 +51,7 @@ func TestGenesisProposerAtHeight(t *testing.T) { require.Equal(t, entry2.Address, proposer.Address) } -func TestGenesisValidateProposerSchedule(t *testing.T) { +func TestGenesisValidateProposerScheduleWithPinnedPubKey(t *testing.T) { entry1, pubKey1 := makeProposerScheduleEntry(t, 1) entry2, pubKey2 := makeProposerScheduleEntry(t, 20) @@ -69,6 +69,25 @@ func TestGenesisValidateProposerSchedule(t *testing.T) { require.Error(t, genesis.ValidateProposer(21, entry2.Address, pubKey1)) } +func TestGenesisValidateAddressOnlyProposerSchedule(t *testing.T) { + entry1, pubKey1 := makeProposerScheduleEntry(t, 1) + entry2, pubKey2 := makeProposerScheduleEntry(t, 20) + entry1.PubKey = nil + entry2.PubKey = nil + + genesis := Genesis{ + ChainID: "test-chain", + StartTime: time.Now().UTC(), + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + require.NoError(t, genesis.Validate()) + require.NoError(t, genesis.ValidateProposer(1, entry1.Address, pubKey1)) + require.NoError(t, genesis.ValidateProposer(21, entry2.Address, pubKey2)) +} + func TestLoadGenesisNormalizesLegacyProposerAddressFromSchedule(t *testing.T) { entry1, _ := makeProposerScheduleEntry(t, 1) entry2, _ := makeProposerScheduleEntry(t, 50) From 6ad13f299dc52a8467a98d9aff8260b882cb30d5 Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Thu, 23 Apr 2026 17:06:36 +0200 Subject: [PATCH 03/13] comments and amendments --- block/internal/executing/executor_test.go | 126 +++++++++++ block/internal/syncing/p2p_handler_test.go | 38 ++++ docs/adr/adr-023-proposer-key-rotation.md | 24 ++- pkg/genesis/genesis_test.go | 175 +++++++++++++++ pkg/genesis/proposer_schedule.go | 19 +- pkg/genesis/proposer_schedule_test.go | 236 ++++++++++++++++++++- 6 files changed, 608 insertions(+), 10 deletions(-) diff --git a/block/internal/executing/executor_test.go 
b/block/internal/executing/executor_test.go index cec6a3fecc..5f2d4db7d8 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -216,3 +216,129 @@ func TestExecutor_CreateBlock_UsesScheduledProposerForHeight(t *testing.T) { require.Equal(t, newAddr, header.Signer.Address) require.Equal(t, uint64(2), data.Height()) } + +// TestNewExecutor_RejectsSignerOutsideSchedule verifies that a signer whose +// address does not appear anywhere in the proposer schedule cannot start the +// executor. This prevents a misconfigured replacement key from coming up as +// an aggregator on a chain it was never scheduled on. +func TestNewExecutor_RejectsSignerOutsideSchedule(t *testing.T) { + ds := sync.MutexWrap(datastore.NewMapDatastore()) + memStore := store.New(ds) + + cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) + require.NoError(t, err) + + _, scheduledSigner, _ := buildTestSigner(t) + _, _, strayerSigner := buildTestSigner(t) + + entry, err := genesis.NewProposerScheduleEntry(1, scheduledSigner.PubKey) + require.NoError(t, err) + + gen := genesis.Genesis{ + ChainID: "test-chain", + InitialHeight: 1, + StartTime: time.Now(), + ProposerAddress: entry.Address, + ProposerSchedule: []genesis.ProposerScheduleEntry{entry}, + DAEpochForcedInclusion: 1, + } + + _, err = NewExecutor( + memStore, nil, nil, strayerSigner, cacheManager, + common.NopMetrics(), config.DefaultConfig(), gen, + nil, nil, zerolog.Nop(), common.DefaultBlockOptions(), + make(chan error, 1), nil, + ) + require.ErrorIs(t, err, common.ErrNotProposer) +} + +// TestExecutor_CreateBlock_RejectsSignerAtWrongHeight verifies that a signer +// which is scheduled (so startup succeeds) but not active at the current +// height cannot produce a block. This guards the per-height proposer check +// inside CreateBlock — without it, a rotation could be jumped ahead or +// rolled back by whichever signer the operator happens to start. 
+func TestExecutor_CreateBlock_RejectsSignerAtWrongHeight(t *testing.T) { + ds := sync.MutexWrap(datastore.NewMapDatastore()) + memStore := store.New(ds) + + cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) + require.NoError(t, err) + + oldAddr, oldSignerInfo, oldSigner := buildTestSigner(t) + _, newSignerInfo, _ := buildTestSigner(t) + + entry1, err := genesis.NewProposerScheduleEntry(1, oldSignerInfo.PubKey) + require.NoError(t, err) + // Second entry activates at height 5. The old signer is scheduled at + // height 1 and is NOT the proposer for height 5+. + entry2, err := genesis.NewProposerScheduleEntry(5, newSignerInfo.PubKey) + require.NoError(t, err) + + gen := genesis.Genesis{ + ChainID: "test-chain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: entry1.Address, + ProposerSchedule: []genesis.ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + // Start the executor as the old signer — it IS in the schedule at + // height 1, so NewExecutor must accept it. + executor, err := NewExecutor( + memStore, nil, nil, oldSigner, cacheManager, + common.NopMetrics(), config.DefaultConfig(), gen, + nil, nil, zerolog.Nop(), common.DefaultBlockOptions(), + make(chan error, 1), nil, + ) + require.NoError(t, err) + + // Seed a height-4 block so CreateBlock(5) has a parent to reference. 
+ prevHeader := &types.SignedHeader{ + Header: types.Header{ + Version: types.InitStateVersion, + BaseHeader: types.BaseHeader{ + ChainID: gen.ChainID, + Height: 4, + Time: uint64(gen.StartTime.UnixNano()), + }, + AppHash: []byte("state-root-4"), + ProposerAddress: oldAddr, + DataHash: common.DataHashForEmptyTxs, + }, + Signature: types.Signature([]byte("sig-4")), + Signer: oldSignerInfo, + } + prevData := &types.Data{ + Metadata: &types.Metadata{ + ChainID: gen.ChainID, + Height: 4, + Time: prevHeader.BaseHeader.Time, + }, + } + + batch, err := memStore.NewBatch(context.Background()) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(prevHeader, prevData, &prevHeader.Signature)) + require.NoError(t, batch.SetHeight(4)) + require.NoError(t, batch.Commit()) + + executor.setLastState(types.State{ + Version: types.InitStateVersion, + ChainID: gen.ChainID, + InitialHeight: gen.InitialHeight, + LastBlockHeight: 4, + LastBlockTime: prevHeader.Time(), + LastHeaderHash: prevHeader.Hash(), + AppHash: []byte("state-root-4"), + }) + + // Height 5 belongs to the NEW signer per the schedule — the old + // signer must be rejected even though it's a known schedule member. 
+ _, _, err = executor.CreateBlock(context.Background(), 5, &BatchData{ + Batch: &coreseq.Batch{}, + Time: time.Now(), + }) + require.Error(t, err) + require.Contains(t, err.Error(), "proposer") +} diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index dc370a9482..1ba3b86e27 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -253,6 +253,44 @@ func TestP2PHandler_ProcessHeight_AllowsScheduledProposerRotation(t *testing.T) require.Equal(t, nextAddr, events[0].Header.ProposerAddress) } +// TestP2PHandler_ProcessHeight_RejectsScheduledProposerBeforeActivation verifies +// the counterpart to the rotation-allows test: a signer that IS in the schedule +// but only active at a later height must not be accepted for blocks before the +// activation height. Without the per-height check, any scheduled signer could +// forge blocks outside their active window. +func TestP2PHandler_ProcessHeight_RejectsScheduledProposerBeforeActivation(t *testing.T) { + p := setupP2P(t) + ctx := context.Background() + + nextAddr, nextPub, nextSigner := buildTestSigner(t) + + entry1, err := genesis.NewProposerScheduleEntry(p.Genesis.InitialHeight, p.ProposerPub) + require.NoError(t, err) + entry2, err := genesis.NewProposerScheduleEntry(11, nextPub) + require.NoError(t, err) + + p.Genesis.ProposerAddress = entry1.Address + p.Genesis.ProposerSchedule = []genesis.ProposerScheduleEntry{entry1, entry2} + p.Genesis.DAEpochForcedInclusion = 1 + require.NoError(t, p.Genesis.Validate()) + p.Handler.genesis = p.Genesis + + // entry2 is scheduled but only active at height 11. Height 10 still + // belongs to entry1, so a header from the next signer at height 10 + // must be rejected. 
+ header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 10, nextAddr, nextPub, nextSigner) + header.DataHash = common.DataHashForEmptyTxs + + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(10)).Return(header, nil).Once() + + ch := make(chan common.DAHeightEvent, 1) + err = p.Handler.ProcessHeight(ctx, 10, ch) + require.Error(t, err) + + require.Empty(t, collectEvents(t, ch, 50*time.Millisecond)) + p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(10)) +} + func TestP2PHandler_ProcessedHeightSkipsPreviouslyHandledBlocks(t *testing.T) { p := setupP2P(t) ctx := t.Context() diff --git a/docs/adr/adr-023-proposer-key-rotation.md b/docs/adr/adr-023-proposer-key-rotation.md index 7d4114709c..2c2934bf28 100644 --- a/docs/adr/adr-023-proposer-key-rotation.md +++ b/docs/adr/adr-023-proposer-key-rotation.md @@ -27,18 +27,32 @@ and validation become ambiguous. ### 2. Re-issue a new genesis on each rotation -This treats every proposer rotation like a chain restart. It is operationally heavy, conflates upgrades with -rotations, and breaks continuity for nodes syncing historical data. +This treats every proposer rotation like a chain restart: a new `chain_id`, state reset back to `initial_height`, +and existing block history discarded. It is operationally heavy, conflates upgrades with rotations, and breaks +continuity for nodes syncing historical data. ### 3. Height-indexed proposer schedule in genesis (Chosen) -Record proposer changes as an ordered schedule indexed by activation height. This preserves chain continuity while -making rotation rules explicit and replayable from genesis. +Record proposer changes as an ordered schedule indexed by activation height. The `genesis.json` file is updated +with a new schedule entry and redistributed, but the chain keeps its `chain_id`, continues from the current +height, preserves all block history, and fresh nodes can still validate the entire chain end-to-end across +rotation boundaries. 
The rollout is still coordinated — every node must receive the updated `genesis.json` and +restart before the activation height — but none of the chain's state or provenance is reset. ## Decision ev-node now supports proposer rotation through a `proposer_schedule` field in genesis. +### What this is not + +This is **not** a re-genesis. Re-genesis — in the sense we mean it above — would involve issuing a new `chain_id`, +resetting height to `initial_height`, and discarding existing block history. Proposer key rotation does none of +that: the `chain_id` is unchanged, block height keeps progressing, all previous blocks remain valid, and fresh +nodes can sync the chain from genesis across any number of rotation boundaries. + +The `genesis.json` file itself is updated (a new `proposer_schedule` entry is appended) and operators must +restart every node to reload it. The file changes; the chain's state does not. + Each entry declares: - `start_height` @@ -137,7 +151,7 @@ Implemented - proposer schedule changes are consensus-visible and require coordinated rollout - operators must distribute updated genesis/config before activation height -- emergency rotation still requires preplanned scheduling or a later authority-based mechanism +- emergency rotation still requires prior scheduling or a later authority-based mechanism ### Neutral diff --git a/pkg/genesis/genesis_test.go b/pkg/genesis/genesis_test.go index da3cc14b1f..a5aca88586 100644 --- a/pkg/genesis/genesis_test.go +++ b/pkg/genesis/genesis_test.go @@ -1,10 +1,13 @@ package genesis import ( + "crypto/rand" "testing" "time" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewGenesis(t *testing.T) { @@ -135,3 +138,175 @@ func TestGenesis_Validate(t *testing.T) { }) } } + +func TestGenesis_ValidateProposerSchedule(t *testing.T) { + validTime := time.Now().UTC() + + newEntry := func(startHeight uint64) (ProposerScheduleEntry, 
crypto.PubKey) { + _, pub, err := crypto.GenerateEd25519Key(rand.Reader) + require.NoError(t, err) + entry, err := NewProposerScheduleEntry(startHeight, pub) + require.NoError(t, err) + return entry, pub + } + + entry1, _ := newEntry(1) + entry10, _ := newEntry(10) + entry20, _ := newEntry(20) + + tests := []struct { + name string + mutate func() Genesis + wantErr string + }{ + { + name: "valid - schedule without proposer_address", + mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, + DAEpochForcedInclusion: 1, + } + }, + }, + { + name: "valid - schedule with matching proposer_address", + mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: entry1.Address, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, + DAEpochForcedInclusion: 1, + } + }, + }, + { + name: "invalid - first entry start_height != initial_height", + mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 5, + ProposerSchedule: []ProposerScheduleEntry{entry10, entry20}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "start_height must equal initial_height", + }, + { + name: "invalid - first entry start_height below initial_height", + mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 5, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "start_height must be >= initial_height", + }, + { + name: "invalid - non-increasing (equal start_heights)", + mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry1}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "strictly increasing", + }, + { + name: "invalid - non-increasing (decreasing start_heights)", + 
mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry20, entry10}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "start_height must equal initial_height", + }, + { + name: "invalid - entry address does not match pub_key", + mutate: func() Genesis { + tampered := entry10 + tampered.Address = append([]byte(nil), entry10.Address...) + tampered.Address[0] ^= 0xFF + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, tampered}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "address does not match pub_key", + }, + { + name: "invalid - proposer_address mismatches schedule[0].address", + mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: entry10.Address, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "proposer_address must match proposer_schedule[0].address", + }, + { + name: "invalid - empty address in entry", + mutate: func() Genesis { + empty := entry10 + empty.Address = nil + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, empty}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "address cannot be empty", + }, + { + name: "invalid - malformed pub_key bytes", + mutate: func() Genesis { + bad := entry10 + bad.PubKey = []byte{0x00, 0x01, 0x02} + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, bad}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "unmarshal proposer pub_key", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.mutate().Validate() + if tt.wantErr == "" { + require.NoError(t, err) + return + } + require.Error(t, err) + require.Contains(t, err.Error(), 
tt.wantErr) + }) + } +} diff --git a/pkg/genesis/proposer_schedule.go b/pkg/genesis/proposer_schedule.go index 3ac6e4f831..c53684535a 100644 --- a/pkg/genesis/proposer_schedule.go +++ b/pkg/genesis/proposer_schedule.go @@ -80,7 +80,13 @@ func (e ProposerScheduleEntry) validate(initialHeight uint64) error { func (g Genesis) EffectiveProposerSchedule() []ProposerScheduleEntry { if len(g.ProposerSchedule) > 0 { out := make([]ProposerScheduleEntry, len(g.ProposerSchedule)) - copy(out, g.ProposerSchedule) + for i, entry := range g.ProposerSchedule { + out[i] = ProposerScheduleEntry{ + StartHeight: entry.StartHeight, + Address: bytes.Clone(entry.Address), + PubKey: bytes.Clone(entry.PubKey), + } + } return out } @@ -162,6 +168,17 @@ func (g Genesis) ValidateProposer(height uint64, address []byte, pubKey crypto.P } if len(entry.PubKey) == 0 { + // Address-only schedule entry. Without a pinned pubkey we still + // have to bind the caller-provided pubkey to the scheduled + // address, otherwise a forger can pair the scheduled address + // with an arbitrary key and later satisfy signature checks that + // trust Signer.PubKey. + if pubKey != nil { + derived := proposerKeyAddress(pubKey) + if !bytes.Equal(entry.Address, derived) { + return fmt.Errorf("proposer pub_key does not match scheduled address at height %d", height) + } + } return nil } diff --git a/pkg/genesis/proposer_schedule_test.go b/pkg/genesis/proposer_schedule_test.go index 48835deb89..950481526f 100644 --- a/pkg/genesis/proposer_schedule_test.go +++ b/pkg/genesis/proposer_schedule_test.go @@ -1,6 +1,7 @@ package genesis import ( + "bytes" "crypto/rand" "encoding/json" "os" @@ -10,8 +11,14 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/pkg/signer/noop" ) +// testGenesisStartTime is a fixed timestamp for genesis fixtures so tests do +// not depend on wall-clock time. 
+var testGenesisStartTime = time.Unix(1_700_000_000, 0).UTC() + func makeProposerScheduleEntry(t *testing.T, startHeight uint64) (ProposerScheduleEntry, crypto.PubKey) { t.Helper() @@ -30,7 +37,7 @@ func TestGenesisProposerAtHeight(t *testing.T) { genesis := Genesis{ ChainID: "test-chain", - StartTime: time.Now().UTC(), + StartTime: testGenesisStartTime, InitialHeight: 3, ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, DAEpochForcedInclusion: 1, @@ -57,7 +64,7 @@ func TestGenesisValidateProposerScheduleWithPinnedPubKey(t *testing.T) { genesis := Genesis{ ChainID: "test-chain", - StartTime: time.Now().UTC(), + StartTime: testGenesisStartTime, InitialHeight: 1, ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, DAEpochForcedInclusion: 1, @@ -77,7 +84,7 @@ func TestGenesisValidateAddressOnlyProposerSchedule(t *testing.T) { genesis := Genesis{ ChainID: "test-chain", - StartTime: time.Now().UTC(), + StartTime: testGenesisStartTime, InitialHeight: 1, ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, DAEpochForcedInclusion: 1, @@ -88,13 +95,234 @@ func TestGenesisValidateAddressOnlyProposerSchedule(t *testing.T) { require.NoError(t, genesis.ValidateProposer(21, entry2.Address, pubKey2)) } +func TestNewProposerScheduleEntry_NilPubKey(t *testing.T) { + _, err := NewProposerScheduleEntry(1, nil) + require.Error(t, err) +} + +func TestProposerAtHeight_BeforeFirstStartHeight(t *testing.T) { + entry, _ := makeProposerScheduleEntry(t, 5) + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 5, + ProposerSchedule: []ProposerScheduleEntry{entry}, + DAEpochForcedInclusion: 1, + } + + _, err := genesis.ProposerAtHeight(4) + require.Error(t, err) + require.Contains(t, err.Error(), "before start_height") +} + +func TestProposerAtHeight_NoProposerConfigured(t *testing.T) { + genesis := Genesis{ChainID: "c", InitialHeight: 1} + _, err := genesis.ProposerAtHeight(1) + require.Error(t, err) + require.Contains(t, err.Error(), 
"no proposer configured") +} + +func TestProposerAtHeight_ReturnedEntryIsCopy(t *testing.T) { + entry, _ := makeProposerScheduleEntry(t, 1) + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry}, + DAEpochForcedInclusion: 1, + } + + got, err := genesis.ProposerAtHeight(1) + require.NoError(t, err) + got.Address[0] ^= 0xFF + got.PubKey[0] ^= 0xFF + + same, err := genesis.ProposerAtHeight(1) + require.NoError(t, err) + require.Equal(t, entry.Address, same.Address) + require.Equal(t, entry.PubKey, same.PubKey) +} + +func TestValidateProposer_WrongAddress(t *testing.T) { + entry, pubKey := makeProposerScheduleEntry(t, 1) + other, _ := makeProposerScheduleEntry(t, 1) + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry}, + DAEpochForcedInclusion: 1, + } + + err := genesis.ValidateProposer(1, other.Address, pubKey) + require.Error(t, err) + require.Contains(t, err.Error(), "unexpected proposer at height 1") +} + +func TestValidateProposer_MissingPubKey(t *testing.T) { + entry, _ := makeProposerScheduleEntry(t, 1) + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry}, + DAEpochForcedInclusion: 1, + } + + err := genesis.ValidateProposer(1, entry.Address, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "missing proposer pub_key") +} + +// TestValidateProposer_AddressOnly_RejectsForgedPubKey ensures that an address-only +// schedule entry still binds the caller-provided pubkey to the scheduled address. +// Without this check, a forger could claim Signer.Address = scheduled_addr with an +// arbitrary Signer.PubKey and later pass signature validation that trusts that pubkey. 
+func TestValidateProposer_AddressOnly_RejectsForgedPubKey(t *testing.T) { + scheduled, _ := makeProposerScheduleEntry(t, 1) + _, attackerPub := makeProposerScheduleEntry(t, 1) + + scheduled.PubKey = nil // address-only entry + + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{scheduled}, + DAEpochForcedInclusion: 1, + } + + // Scheduled address paired with a different pubkey must be rejected. + err := genesis.ValidateProposer(1, scheduled.Address, attackerPub) + require.Error(t, err) + require.Contains(t, err.Error(), "does not match scheduled address") +} + +func TestValidateProposer_UsesActiveEntryAtHeight(t *testing.T) { + entry1, pub1 := makeProposerScheduleEntry(t, 1) + entry2, pub2 := makeProposerScheduleEntry(t, 10) + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + // entry2 signer trying to sign height within entry1's active range must fail. + require.Error(t, genesis.ValidateProposer(9, entry2.Address, pub2)) + // entry1 signer trying to sign height within entry2's active range must fail. 
+ require.Error(t, genesis.ValidateProposer(10, entry1.Address, pub1)) +} + +func TestHasScheduledProposer(t *testing.T) { + entry1, _ := makeProposerScheduleEntry(t, 1) + entry2, _ := makeProposerScheduleEntry(t, 10) + unknown, _ := makeProposerScheduleEntry(t, 99) + + explicit := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + require.True(t, explicit.HasScheduledProposer(entry1.Address)) + require.True(t, explicit.HasScheduledProposer(entry2.Address)) + require.False(t, explicit.HasScheduledProposer(unknown.Address)) + + legacy := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerAddress: entry1.Address, + DAEpochForcedInclusion: 1, + } + require.True(t, legacy.HasScheduledProposer(entry1.Address)) + require.False(t, legacy.HasScheduledProposer(entry2.Address)) + + empty := Genesis{ChainID: "c", InitialHeight: 1} + require.False(t, empty.HasScheduledProposer(entry1.Address)) +} + +func TestEffectiveProposerSchedule_ExplicitScheduleIsDeepCopy(t *testing.T) { + entry1, _ := makeProposerScheduleEntry(t, 1) + entry2, _ := makeProposerScheduleEntry(t, 10) + origAddr := bytes.Clone(entry1.Address) + origPub := bytes.Clone(entry1.PubKey) + + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + // Mutating returned byte slices must not corrupt the genesis-backed data. 
+ got := genesis.EffectiveProposerSchedule() + got[0].Address[0] ^= 0xFF + got[0].PubKey[0] ^= 0xFF + + require.Equal(t, origAddr, genesis.ProposerSchedule[0].Address) + require.Equal(t, origPub, genesis.ProposerSchedule[0].PubKey) +} + +func TestEffectiveProposerSchedule_LegacyFallback(t *testing.T) { + addr := []byte("some-address-bytes") + legacy := Genesis{ + ChainID: "c", + InitialHeight: 7, + ProposerAddress: addr, + } + schedule := legacy.EffectiveProposerSchedule() + require.Len(t, schedule, 1) + require.Equal(t, uint64(7), schedule[0].StartHeight) + require.Equal(t, addr, schedule[0].Address) + require.Empty(t, schedule[0].PubKey) + + // mutating the derived slice must not affect the genesis backing data. + schedule[0].Address[0] ^= 0xFF + require.Equal(t, addr, legacy.ProposerAddress) +} + +func TestEffectiveProposerSchedule_Empty(t *testing.T) { + require.Nil(t, Genesis{}.EffectiveProposerSchedule()) +} + +func TestInitialProposerAddress_EmptyGenesisReturnsNil(t *testing.T) { + require.Nil(t, Genesis{InitialHeight: 1}.InitialProposerAddress()) +} + +// TestProposerKeyAddressMatchesSignerGetAddress pins the invariant that the +// genesis-side address derivation matches the signer implementations. If a +// signer ever changes its address formula this test will fail and flag the +// break instead of silently producing rejected blocks after a key rotation. 
+func TestProposerKeyAddressMatchesSignerGetAddress(t *testing.T) { + priv, pub, err := crypto.GenerateEd25519Key(rand.Reader) + require.NoError(t, err) + + s, err := noop.NewNoopSigner(priv) + require.NoError(t, err) + + signerAddr, err := s.GetAddress() + require.NoError(t, err) + + genesisAddr := proposerKeyAddress(pub) + require.Equal(t, signerAddr, genesisAddr) + + entry, err := NewProposerScheduleEntry(1, pub) + require.NoError(t, err) + require.Equal(t, signerAddr, entry.Address) +} + func TestLoadGenesisNormalizesLegacyProposerAddressFromSchedule(t *testing.T) { entry1, _ := makeProposerScheduleEntry(t, 1) entry2, _ := makeProposerScheduleEntry(t, 50) rawGenesis := Genesis{ ChainID: "test-chain", - StartTime: time.Now().UTC(), + StartTime: testGenesisStartTime, InitialHeight: 1, ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, DAEpochForcedInclusion: 1, From 45a7d75cb67b6d3ed6b3c02b9979945fd8a2b85c Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Fri, 24 Apr 2026 13:02:32 +0200 Subject: [PATCH 04/13] comment changes --- block/internal/executing/executor.go | 7 +- block/internal/executing/executor_test.go | 94 ++++++++++++++++++++-- block/internal/syncing/p2p_handler.go | 9 +++ block/internal/syncing/p2p_handler_test.go | 17 ++++ docs/adr/adr-023-proposer-key-rotation.md | 12 +-- pkg/genesis/genesis_test.go | 6 +- pkg/genesis/proposer_schedule_test.go | 3 +- 7 files changed, 131 insertions(+), 17 deletions(-) diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 4cc7f4984c..3fe5ba5d10 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -755,7 +755,12 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } } else { - validatorHash, err = e.options.ValidatorHasherProvider(proposer.Address, nil) + pubKey, err = proposer.PublicKey() + if err != nil { + return nil, nil,
fmt.Errorf("failed to get scheduled proposer public key: %w", err) + } + + validatorHash, err = e.options.ValidatorHasherProvider(proposer.Address, pubKey) if err != nil { return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index 5f2d4db7d8..c6f6ccc1a5 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -7,6 +7,7 @@ import ( "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/sync" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -20,6 +21,8 @@ import ( "github.com/evstack/ev-node/types" ) +var fixedExecutorTestStartTime = time.Unix(1_700_000_000, 0).UTC() + func TestExecutor_BroadcasterIntegration(t *testing.T) { // Create in-memory store ds := sync.MutexWrap(datastore.NewMapDatastore()) @@ -143,7 +146,7 @@ func TestExecutor_CreateBlock_UsesScheduledProposerForHeight(t *testing.T) { gen := genesis.Genesis{ ChainID: "test-chain", InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), + StartTime: fixedExecutorTestStartTime, ProposerAddress: entry1.Address, ProposerSchedule: []genesis.ProposerScheduleEntry{entry1, entry2}, DAEpochForcedInclusion: 1, @@ -209,7 +212,7 @@ func TestExecutor_CreateBlock_UsesScheduledProposerForHeight(t *testing.T) { header, data, err := executor.CreateBlock(context.Background(), 2, &BatchData{ Batch: &coreseq.Batch{}, - Time: time.Now(), + Time: fixedExecutorTestStartTime.Add(time.Second), }) require.NoError(t, err) require.Equal(t, newAddr, header.ProposerAddress) @@ -217,6 +220,85 @@ func TestExecutor_CreateBlock_UsesScheduledProposerForHeight(t *testing.T) { require.Equal(t, uint64(2), data.Height()) } +func TestExecutor_CreateBlock_BasedSequencerUsesScheduledPubKey(t *testing.T) { + ds := sync.MutexWrap(datastore.NewMapDatastore()) + memStore := 
store.New(ds) + + cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) + require.NoError(t, err) + + _, signerInfo, _ := buildTestSigner(t) + entry, err := genesis.NewProposerScheduleEntry(1, signerInfo.PubKey) + require.NoError(t, err) + + gen := genesis.Genesis{ + ChainID: "test-chain", + InitialHeight: 1, + StartTime: fixedExecutorTestStartTime, + ProposerAddress: entry.Address, + ProposerSchedule: []genesis.ProposerScheduleEntry{entry}, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.Node.BasedSequencer = true + + wantValidatorHash := types.Hash{0x01} + hasherCalled := false + options := common.DefaultBlockOptions() + options.ValidatorHasherProvider = func(address []byte, pubKey crypto.PubKey) (types.Hash, error) { + hasherCalled = true + require.Equal(t, entry.Address, address) + require.NotNil(t, pubKey) + + marshalledPubKey, err := crypto.MarshalPublicKey(pubKey) + require.NoError(t, err) + require.Equal(t, entry.PubKey, marshalledPubKey) + + return wantValidatorHash, nil + } + + executor, err := NewExecutor( + memStore, + nil, + nil, + nil, + cacheManager, + common.NopMetrics(), + cfg, + gen, + nil, + nil, + zerolog.Nop(), + options, + make(chan error, 1), + nil, + ) + require.NoError(t, err) + + executor.setLastState(types.State{ + Version: types.InitStateVersion, + ChainID: gen.ChainID, + InitialHeight: gen.InitialHeight, + AppHash: []byte("state-root-1"), + }) + + header, data, err := executor.CreateBlock(context.Background(), 1, &BatchData{ + Batch: &coreseq.Batch{}, + Time: fixedExecutorTestStartTime, + }) + require.NoError(t, err) + require.True(t, hasherCalled) + require.Equal(t, wantValidatorHash, header.ValidatorHash) + require.Equal(t, entry.Address, header.Signer.Address) + require.NotNil(t, header.Signer.PubKey) + + marshalledPubKey, err := crypto.MarshalPublicKey(header.Signer.PubKey) + require.NoError(t, err) + require.Equal(t, entry.PubKey, marshalledPubKey) + require.Equal(t, 
uint64(1), data.Height()) +} + // TestNewExecutor_RejectsSignerOutsideSchedule verifies that a signer whose // address does not appear anywhere in the proposer schedule cannot start the // executor. This prevents a misconfigured replacement key from coming up as @@ -237,7 +319,7 @@ func TestNewExecutor_RejectsSignerOutsideSchedule(t *testing.T) { gen := genesis.Genesis{ ChainID: "test-chain", InitialHeight: 1, - StartTime: time.Now(), + StartTime: fixedExecutorTestStartTime, ProposerAddress: entry.Address, ProposerSchedule: []genesis.ProposerScheduleEntry{entry}, DAEpochForcedInclusion: 1, @@ -277,7 +359,7 @@ func TestExecutor_CreateBlock_RejectsSignerAtWrongHeight(t *testing.T) { gen := genesis.Genesis{ ChainID: "test-chain", InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), + StartTime: fixedExecutorTestStartTime, ProposerAddress: entry1.Address, ProposerSchedule: []genesis.ProposerScheduleEntry{entry1, entry2}, DAEpochForcedInclusion: 1, @@ -300,7 +382,7 @@ func TestExecutor_CreateBlock_RejectsSignerAtWrongHeight(t *testing.T) { BaseHeader: types.BaseHeader{ ChainID: gen.ChainID, Height: 4, - Time: uint64(gen.StartTime.UnixNano()), + Time: uint64(fixedExecutorTestStartTime.Add(4 * time.Second).UnixNano()), }, AppHash: []byte("state-root-4"), ProposerAddress: oldAddr, @@ -337,7 +419,7 @@ func TestExecutor_CreateBlock_RejectsSignerAtWrongHeight(t *testing.T) { // signer must be rejected even though it's a known schedule member. 
_, _, err = executor.CreateBlock(context.Background(), 5, &BatchData{ Batch: &coreseq.Batch{}, - Time: time.Now(), + Time: fixedExecutorTestStartTime.Add(5 * time.Second), }) require.Error(t, err) require.Contains(t, err.Error(), "proposer") diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 0e8a08cea3..2150bd2933 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -81,6 +81,15 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC } return err } + if headerHeight := p2pHeader.Height(); headerHeight != height { + err := fmt.Errorf("header height mismatch: requested %d, got %d", height, headerHeight) + h.logger.Warn(). + Uint64("requested_height", height). + Uint64("header_height", headerHeight). + Err(err). + Msg("discarding mismatched header from P2P") + return err + } if err := h.assertExpectedProposer(p2pHeader.SignedHeader); err != nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid header from P2P") return err diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index 1ba3b86e27..84a875ffbf 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -215,6 +215,23 @@ func TestP2PHandler_ProcessHeight_SkipsOnProposerMismatch(t *testing.T) { p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(11)) } +func TestP2PHandler_ProcessHeight_RejectsHeaderHeightMismatch(t *testing.T) { + p := setupP2P(t) + ctx := context.Background() + + header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 12, p.ProposerAddr, p.ProposerPub, p.Signer) + + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(10)).Return(header, nil).Once() + + ch := make(chan common.DAHeightEvent, 1) + err := p.Handler.ProcessHeight(ctx, 10, ch) + require.Error(t, err) + require.Contains(t, err.Error(), "header height mismatch") + + require.Empty(t, 
collectEvents(t, ch, 50*time.Millisecond)) + p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(10)) +} + func TestP2PHandler_ProcessHeight_AllowsScheduledProposerRotation(t *testing.T) { p := setupP2P(t) ctx := context.Background() diff --git a/docs/adr/adr-023-proposer-key-rotation.md b/docs/adr/adr-023-proposer-key-rotation.md index 2c2934bf28..1c1b067982 100644 --- a/docs/adr/adr-023-proposer-key-rotation.md +++ b/docs/adr/adr-023-proposer-key-rotation.md @@ -57,7 +57,7 @@ Each entry declares: - `start_height` - `address` -- `pub_key` +- `pub_key` (optional; when present, it must match `address`) The active proposer for block height `h` is the last entry whose `start_height <= h`. @@ -68,8 +68,9 @@ When an explicit schedule is present: - the first entry must start at `initial_height` - entries must be strictly increasing by `start_height` -- each entry's `address` must match the configured `pub_key` -- `proposer_address`, when present, must match the first schedule entry +- if `pub_key` is present, the entry's `address` must match it +- entries without `pub_key` are interpreted by `address` only +- `proposer_address`, when present, must match the first schedule entry's `address` ## Detailed Design @@ -86,8 +87,7 @@ Genesis gains: }, { "start_height": 1250000, - "address": "...", - "pub_key": "..." + "address": "..." } ] ``` @@ -121,7 +121,7 @@ The old proposer remains valid for heights `< H`, and the new proposer becomes v ### Security considerations -This design improves safety over address-only pinning by allowing validation against the scheduled public key. +This design improves safety by allowing validation against the scheduled public key when one is pinned. It does not solve emergency rotation authorization by itself; a future design can add a separate upgrade authority or rotation certificate flow if the network needs signer replacement without prior static scheduling. 
diff --git a/pkg/genesis/genesis_test.go b/pkg/genesis/genesis_test.go index a5aca88586..9c850f3963 100644 --- a/pkg/genesis/genesis_test.go +++ b/pkg/genesis/genesis_test.go @@ -140,7 +140,7 @@ func TestGenesis_Validate(t *testing.T) { } func TestGenesis_ValidateProposerSchedule(t *testing.T) { - validTime := time.Now().UTC() + validTime := time.Unix(1_700_000_000, 0).UTC() newEntry := func(startHeight uint64) (ProposerScheduleEntry, crypto.PubKey) { _, pub, err := crypto.GenerateEd25519Key(rand.Reader) @@ -230,11 +230,11 @@ func TestGenesis_ValidateProposerSchedule(t *testing.T) { ChainID: "c", StartTime: validTime, InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry20, entry10}, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry20, entry10}, DAEpochForcedInclusion: 1, } }, - wantErr: "start_height must equal initial_height", + wantErr: "strictly increasing", }, { name: "invalid - entry address does not match pub_key", diff --git a/pkg/genesis/proposer_schedule_test.go b/pkg/genesis/proposer_schedule_test.go index 950481526f..88f87590ea 100644 --- a/pkg/genesis/proposer_schedule_test.go +++ b/pkg/genesis/proposer_schedule_test.go @@ -270,6 +270,7 @@ func TestEffectiveProposerSchedule_ExplicitScheduleIsDeepCopy(t *testing.T) { func TestEffectiveProposerSchedule_LegacyFallback(t *testing.T) { addr := []byte("some-address-bytes") + origAddr := bytes.Clone(addr) legacy := Genesis{ ChainID: "c", InitialHeight: 7, @@ -283,7 +284,7 @@ func TestEffectiveProposerSchedule_LegacyFallback(t *testing.T) { // mutating the derived slice must not affect the genesis backing data. 
schedule[0].Address[0] ^= 0xFF - require.Equal(t, addr, legacy.ProposerAddress) + require.Equal(t, origAddr, legacy.ProposerAddress) } func TestEffectiveProposerSchedule_Empty(t *testing.T) { From 8db31f9c919cab358f0e7acde149bd34eac642bb Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Fri, 24 Apr 2026 16:31:21 +0200 Subject: [PATCH 05/13] redo the design --- apps/evm/go.mod | 1 + apps/grpc/go.mod | 1 + apps/testapp/go.mod | 5 +- apps/testapp/kv/kvexecutor.go | 14 +- apps/testapp/kv/kvexecutor_test.go | 12 +- block/internal/common/replay.go | 44 ++- block/internal/executing/executor.go | 116 ++++-- .../executing/executor_benchmark_test.go | 4 +- .../internal/executing/executor_logic_test.go | 40 +- block/internal/executing/executor_test.go | 303 ---------------- block/internal/reaping/bench_test.go | 4 +- block/internal/submitting/da_submitter.go | 15 +- .../internal/submitting/da_submitter_test.go | 91 ----- block/internal/syncing/assert.go | 15 +- block/internal/syncing/da_retriever.go | 10 - block/internal/syncing/da_retriever_test.go | 15 +- block/internal/syncing/p2p_handler.go | 18 - block/internal/syncing/p2p_handler_test.go | 91 +---- block/internal/syncing/raft_retriever.go | 4 - block/internal/syncing/syncer.go | 54 ++- block/internal/syncing/syncer_test.go | 58 ++- .../types/src/proto/evnode.v1.messages.rs | 147 +++++++- .../types/src/proto/evnode.v1.services.rs | 306 +++++++++++++++- core/README.md | 11 +- core/execution/dummy.go | 4 +- core/execution/dummy_test.go | 14 +- core/execution/execution.go | 19 +- docs/.vitepress/config.ts | 4 - ...r-023-execution-owned-proposer-rotation.md | 83 +++++ docs/adr/adr-023-proposer-key-rotation.md | 165 --------- .../custom/implement-executor.md | 33 +- docs/guides/create-genesis.md | 4 - .../operations/proposer-key-rotation.md | 186 ---------- docs/guides/operations/upgrades.md | 6 - docs/reference/interfaces/executor.md | 20 +- execution/evm/execution.go | 22 +- execution/evm/go.mod | 5 + 
execution/evm/test/go.mod | 1 + execution/grpc/client.go | 12 +- execution/grpc/client_test.go | 16 +- execution/grpc/go.mod | 5 + execution/grpc/server.go | 8 +- execution/grpc/server_test.go | 11 +- go.mod | 2 + node/execution_test.go | 2 +- node/failover.go | 2 +- node/full.go | 2 +- pkg/genesis/genesis.go | 34 +- pkg/genesis/genesis_test.go | 175 --------- pkg/genesis/io.go | 4 +- pkg/genesis/proposer_schedule.go | 213 ----------- pkg/genesis/proposer_schedule_test.go | 341 ------------------ pkg/rpc/server/server.go | 11 +- pkg/telemetry/executor_tracing.go | 9 +- pkg/telemetry/executor_tracing_test.go | 4 +- proto/evnode/v1/evnode.proto | 2 + proto/evnode/v1/execution.proto | 8 + proto/evnode/v1/state.proto | 1 + test/e2e/go.mod | 1 + test/mocks/execution.go | 38 +- test/mocks/height_aware_executor.go | 27 +- types/header.go | 14 +- types/pb/evnode/v1/evnode.pb.go | 20 +- types/pb/evnode/v1/execution.pb.go | 42 ++- types/pb/evnode/v1/state.pb.go | 36 +- types/serialization.go | 50 ++- types/state.go | 33 +- 67 files changed, 1180 insertions(+), 1888 deletions(-) create mode 100644 docs/adr/adr-023-execution-owned-proposer-rotation.md delete mode 100644 docs/adr/adr-023-proposer-key-rotation.md delete mode 100644 docs/guides/operations/proposer-key-rotation.md delete mode 100644 pkg/genesis/proposer_schedule.go delete mode 100644 pkg/genesis/proposer_schedule_test.go diff --git a/apps/evm/go.mod b/apps/evm/go.mod index 2dcdda8469..a215b11b07 100644 --- a/apps/evm/go.mod +++ b/apps/evm/go.mod @@ -4,6 +4,7 @@ go 1.25.7 replace ( github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/core => ../../core github.com/evstack/ev-node/execution/evm => ../../execution/evm ) diff --git a/apps/grpc/go.mod b/apps/grpc/go.mod index 66caa09cb6..64a32c143d 100644 --- a/apps/grpc/go.mod +++ b/apps/grpc/go.mod @@ -4,6 +4,7 @@ go 1.25.7 replace ( github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/core => ../../core 
github.com/evstack/ev-node/execution/grpc => ../../execution/grpc ) diff --git a/apps/testapp/go.mod b/apps/testapp/go.mod index 652a4615c0..7285464eb2 100644 --- a/apps/testapp/go.mod +++ b/apps/testapp/go.mod @@ -2,7 +2,10 @@ module github.com/evstack/ev-node/apps/testapp go 1.25.7 -replace github.com/evstack/ev-node => ../../. +replace ( + github.com/evstack/ev-node => ../../. + github.com/evstack/ev-node/core => ../../core +) require ( github.com/evstack/ev-node v1.1.1 diff --git a/apps/testapp/kv/kvexecutor.go b/apps/testapp/kv/kvexecutor.go index aef3aedf3a..1a3ec4b776 100644 --- a/apps/testapp/kv/kvexecutor.go +++ b/apps/testapp/kv/kvexecutor.go @@ -239,16 +239,16 @@ func (k *KVExecutor) GetTxs(ctx context.Context) ([][]byte, error) { // ExecuteTxs processes each transaction assumed to be in the format "key=value". // It updates the database accordingly using a batch and removes the executed transactions from the mempool. // Invalid transactions are filtered out and logged, but execution continues. -func (k *KVExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) { +func (k *KVExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { select { case <-ctx.Done(): - return nil, ctx.Err() + return execution.ExecuteResult{}, ctx.Err() default: } batch, err := k.db.Batch(ctx) if err != nil { - return nil, fmt.Errorf("failed to create database batch: %w", err) + return execution.ExecuteResult{}, fmt.Errorf("failed to create database batch: %w", err) } validTxCount := 0 @@ -291,7 +291,7 @@ func (k *KVExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight u err = batch.Put(ctx, dsKey, []byte(value)) if err != nil { // This error is unlikely for Put unless the context is cancelled. 
- return nil, fmt.Errorf("failed to stage put operation in batch for key '%s': %w", key, err) + return execution.ExecuteResult{}, fmt.Errorf("failed to stage put operation in batch for key '%s': %w", key, err) } validTxCount++ } @@ -304,7 +304,7 @@ func (k *KVExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight u // Commit the batch to apply all changes atomically err = batch.Commit(ctx) if err != nil { - return nil, fmt.Errorf("failed to commit transaction batch: %w", err) + return execution.ExecuteResult{}, fmt.Errorf("failed to commit transaction batch: %w", err) } k.blocksProduced.Add(1) @@ -315,10 +315,10 @@ func (k *KVExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight u if err != nil { // This is problematic, state was changed but root calculation failed. // May need more robust error handling or recovery logic. - return nil, fmt.Errorf("failed to compute state root after executing transactions: %w", err) + return execution.ExecuteResult{}, fmt.Errorf("failed to compute state root after executing transactions: %w", err) } - return stateRoot, nil + return execution.ExecuteResult{UpdatedStateRoot: stateRoot}, nil } // SetFinal marks a block as finalized at the specified height. 
diff --git a/apps/testapp/kv/kvexecutor_test.go b/apps/testapp/kv/kvexecutor_test.go index 97280aee10..486fa576f8 100644 --- a/apps/testapp/kv/kvexecutor_test.go +++ b/apps/testapp/kv/kvexecutor_test.go @@ -105,13 +105,13 @@ func TestExecuteTxs_Valid(t *testing.T) { []byte("key2=value2"), } - stateRoot, err := exec.ExecuteTxs(ctx, txs, 1, time.Now(), []byte("")) + result, err := exec.ExecuteTxs(ctx, txs, 1, time.Now(), []byte("")) if err != nil { t.Fatalf("ExecuteTxs failed: %v", err) } // Check that stateRoot contains the updated key-value pairs - rootStr := string(stateRoot) + rootStr := string(result.UpdatedStateRoot) if !strings.Contains(rootStr, "key1:value1;") || !strings.Contains(rootStr, "key2:value2;") { t.Errorf("State root does not contain expected key-values: %s", rootStr) } @@ -134,13 +134,13 @@ func TestExecuteTxs_Invalid(t *testing.T) { []byte(""), } - stateRoot, err := exec.ExecuteTxs(ctx, txs, 1, time.Now(), []byte("")) + result, err := exec.ExecuteTxs(ctx, txs, 1, time.Now(), []byte("")) if err != nil { t.Fatalf("ExecuteTxs should handle gibberish gracefully, got error: %v", err) } // State root should still be computed (empty block is valid) - if stateRoot == nil { + if result.UpdatedStateRoot == nil { t.Error("Expected non-nil state root even with all invalid transactions") } @@ -152,13 +152,13 @@ func TestExecuteTxs_Invalid(t *testing.T) { []byte(""), } - stateRoot2, err := exec.ExecuteTxs(ctx, mixedTxs, 2, time.Now(), stateRoot) + result2, err := exec.ExecuteTxs(ctx, mixedTxs, 2, time.Now(), result.UpdatedStateRoot) if err != nil { t.Fatalf("ExecuteTxs should filter invalid transactions and process valid ones, got error: %v", err) } // State root should contain only the valid transactions - rootStr := string(stateRoot2) + rootStr := string(result2.UpdatedStateRoot) if !strings.Contains(rootStr, "valid_key:valid_value") || !strings.Contains(rootStr, "another_valid:value2") { t.Errorf("State root should contain valid transactions: %s", rootStr) 
} diff --git a/block/internal/common/replay.go b/block/internal/common/replay.go index ba13a5a4b7..a120450d22 100644 --- a/block/internal/common/replay.go +++ b/block/internal/common/replay.go @@ -152,11 +152,12 @@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { if height == s.genesis.InitialHeight { // For the first block, use genesis state. prevState = types.State{ - ChainID: s.genesis.ChainID, - InitialHeight: s.genesis.InitialHeight, - LastBlockHeight: s.genesis.InitialHeight - 1, - LastBlockTime: s.genesis.StartTime, - AppHash: header.AppHash, // Genesis app hash (input to first block execution) + ChainID: s.genesis.ChainID, + InitialHeight: s.genesis.InitialHeight, + LastBlockHeight: s.genesis.InitialHeight - 1, + LastBlockTime: s.genesis.StartTime, + AppHash: header.AppHash, // Genesis app hash (input to first block execution) + NextProposerAddress: append([]byte(nil), s.genesis.ProposerAddress...), } } else { // GetStateAtHeight(height-1) returns the state AFTER block height-1 was executed, @@ -179,10 +180,25 @@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { Int("tx_count", len(rawTxs)). 
Msg("executing transactions on execution layer") - newAppHash, err := s.exec.ExecuteTxs(ctx, rawTxs, height, header.Time(), prevState.AppHash) + result, err := s.exec.ExecuteTxs(ctx, rawTxs, height, header.Time(), prevState.AppHash) if err != nil { return fmt.Errorf("failed to execute transactions: %w", err) } + newAppHash := result.UpdatedStateRoot + if len(result.NextProposerAddress) > 0 { + if len(header.NextProposerAddress) == 0 { + return fmt.Errorf("next proposer mismatch at height %d: header empty, execution %x", height, result.NextProposerAddress) + } + if !bytes.Equal(header.NextProposerAddress, result.NextProposerAddress) { + return fmt.Errorf("next proposer mismatch at height %d: header %x, execution %x", + height, + header.NextProposerAddress, + result.NextProposerAddress, + ) + } + } else if len(header.NextProposerAddress) > 0 && !bytes.Equal(header.NextProposerAddress, header.ProposerAddress) { + return fmt.Errorf("next proposer mismatch at height %d: header %x, execution unchanged", height, header.NextProposerAddress) + } // The result of ExecuteTxs (newAppHash) should match the stored state at this height. // Note: header.AppHash is the PREVIOUS state's app hash (input), not the expected output. @@ -207,6 +223,22 @@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { Msg("app hash mismatch during replay") return err } + if len(expectedState.NextProposerAddress) > 0 { + expectedNextProposer := header.NextProposerAddress + if len(expectedNextProposer) == 0 { + expectedNextProposer = result.NextProposerAddress + } + if len(expectedNextProposer) == 0 { + expectedNextProposer = header.ProposerAddress + } + if !bytes.Equal(expectedNextProposer, expectedState.NextProposerAddress) { + return fmt.Errorf("next proposer mismatch at height %d: expected %x got %x", + height, + expectedState.NextProposerAddress, + expectedNextProposer, + ) + } + } s.logger.Debug(). Uint64("height", height). Str("app_hash", hex.EncodeToString(newAppHash)). 
diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 3fe5ba5d10..de825db24b 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -121,14 +121,6 @@ func NewExecutor( return nil, errors.New("signer cannot be nil") } - addr, err := signer.GetAddress() - if err != nil { - return nil, fmt.Errorf("failed to get address: %w", err) - } - - if !genesis.HasScheduledProposer(addr) { - return nil, common.ErrNotProposer - } } if raftNode != nil && reflect.ValueOf(raftNode).IsNil() { raftNode = nil @@ -242,15 +234,22 @@ func (e *Executor) initializeState() error { } state = types.State{ - ChainID: e.genesis.ChainID, - InitialHeight: e.genesis.InitialHeight, - LastBlockHeight: e.genesis.InitialHeight - 1, - LastBlockTime: e.genesis.StartTime, - AppHash: stateRoot, + ChainID: e.genesis.ChainID, + InitialHeight: e.genesis.InitialHeight, + LastBlockHeight: e.genesis.InitialHeight - 1, + LastBlockTime: e.genesis.StartTime, + AppHash: stateRoot, + NextProposerAddress: e.initialProposerAddress(e.ctx), // DA start height is usually 0 at InitChain unless it is a re-genesis or a based sequencer. DAHeight: e.genesis.DAStartHeight, } } + if len(state.NextProposerAddress) == 0 { + state.NextProposerAddress = e.initialProposerAddress(e.ctx) + } + if err := e.assertConfiguredSigner(state.NextProposerAddress); err != nil { + return err + } if e.raftNode != nil { // Ensure node is fully synced before producing any blocks @@ -379,6 +378,32 @@ func (e *Executor) initializeState() error { return nil } +func (e *Executor) initialProposerAddress(ctx context.Context) []byte { + if e.exec != nil { + info, err := e.exec.GetExecutionInfo(ctx) + if err != nil { + e.logger.Warn().Err(err).Msg("failed to get execution info for proposer, falling back to genesis proposer") + } else if len(info.NextProposerAddress) > 0 { + return append([]byte(nil), info.NextProposerAddress...) 
+ } + } + return append([]byte(nil), e.genesis.ProposerAddress...) +} + +func (e *Executor) assertConfiguredSigner(expectedProposer []byte) error { + if e.config.Node.BasedSequencer { + return nil + } + addr, err := e.signer.GetAddress() + if err != nil { + return fmt.Errorf("failed to get address: %w", err) + } + if !bytes.Equal(addr, expectedProposer) { + return common.ErrNotProposer + } + return nil +} + // executionLoop handles block production and aggregation func (e *Executor) executionLoop() { e.logger.Info().Msg("starting execution loop") @@ -547,6 +572,13 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to apply block: %w", err) } + if !bytes.Equal(newState.NextProposerAddress, header.ProposerAddress) { + header.NextProposerAddress = append([]byte(nil), newState.NextProposerAddress...) + header.InvalidateHash() + } else if len(header.NextProposerAddress) > 0 { + header.NextProposerAddress = nil + header.InvalidateHash() + } // set the DA height in the sequencer newState.DAHeight = e.sequencer.GetDAHeight() @@ -696,9 +728,9 @@ func (e *Executor) RetrieveBatch(ctx context.Context) (*BatchData, error) { func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *BatchData) (*types.SignedHeader, *types.Data, error) { currentState := e.getLastState() headerTime := uint64(e.genesis.StartTime.UnixNano()) - proposer, err := e.genesis.ProposerAtHeight(height) - if err != nil { - return nil, nil, fmt.Errorf("resolve proposer for height %d: %w", height, err) + proposerAddress := currentState.NextProposerAddress + if len(proposerAddress) == 0 { + proposerAddress = e.genesis.ProposerAddress } var lastHeaderHash types.Hash @@ -732,35 +764,29 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba // Get signer info and validator hash var pubKey crypto.PubKey - var signerAddress []byte var validatorHash types.Hash if e.signer != nil { + var err error pubKey, err = 
e.signer.GetPublic() if err != nil { return nil, nil, fmt.Errorf("failed to get public key: %w", err) } - - signerAddress, err = e.signer.GetAddress() + addr, err := e.signer.GetAddress() if err != nil { - return nil, nil, fmt.Errorf("failed to get signer address: %w", err) + return nil, nil, fmt.Errorf("failed to get address: %w", err) } - - if err := e.genesis.ValidateProposer(height, signerAddress, pubKey); err != nil { - return nil, nil, fmt.Errorf("signer does not match proposer schedule: %w", err) + if !bytes.Equal(addr, proposerAddress) { + return nil, nil, common.ErrNotProposer } - validatorHash, err = e.options.ValidatorHasherProvider(proposer.Address, pubKey) + validatorHash, err = e.options.ValidatorHasherProvider(proposerAddress, pubKey) if err != nil { return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } } else { - pubKey, err = proposer.PublicKey() - if err != nil { - return nil, nil, fmt.Errorf("failed to get scheduled proposer public key: %w", err) - } - - validatorHash, err = e.options.ValidatorHasherProvider(proposer.Address, pubKey) + var err error + validatorHash, err = e.options.ValidatorHasherProvider(proposerAddress, nil) if err != nil { return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } @@ -780,13 +806,13 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba }, LastHeaderHash: lastHeaderHash, AppHash: currentState.AppHash, - ProposerAddress: proposer.Address, + ProposerAddress: proposerAddress, ValidatorHash: validatorHash, }, Signature: lastSignature, Signer: types.Signer{ PubKey: pubKey, - Address: proposer.Address, + Address: proposerAddress, }, } @@ -830,14 +856,24 @@ func (e *Executor) ApplyBlock(ctx context.Context, header types.Header, data *ty // Execute transactions execCtx := context.WithValue(ctx, types.HeaderContextKey, header) - newAppHash, err := e.executeTxsWithRetry(execCtx, rawTxs, header, currentState) + result, err := e.executeTxsWithRetry(execCtx, rawTxs, 
header, currentState) if err != nil { e.sendCriticalError(fmt.Errorf("failed to execute transactions: %w", err)) return types.State{}, fmt.Errorf("failed to execute transactions: %w", err) } + if len(result.NextProposerAddress) > 0 { + if len(header.NextProposerAddress) == 0 { + header.NextProposerAddress = append([]byte(nil), result.NextProposerAddress...) + } else if !bytes.Equal(header.NextProposerAddress, result.NextProposerAddress) { + return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution %x", header.NextProposerAddress, result.NextProposerAddress) + } + header.InvalidateHash() + } else if len(header.NextProposerAddress) > 0 && !bytes.Equal(header.NextProposerAddress, header.ProposerAddress) { + return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution unchanged", header.NextProposerAddress) + } // Create new state - newState, err := currentState.NextState(header, newAppHash) + newState, err := currentState.NextState(header, result.UpdatedStateRoot) if err != nil { return types.State{}, fmt.Errorf("failed to create next state: %w", err) } @@ -868,12 +904,12 @@ func (e *Executor) signHeader(ctx context.Context, header *types.Header) (types. // executeTxsWithRetry executes transactions with retry logic. // NOTE: the function retries the execution client call regardless of the error. Some execution clients errors are irrecoverable, and will eventually halt the node, as expected. 
-func (e *Executor) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, header types.Header, currentState types.State) ([]byte, error) { +func (e *Executor) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, header types.Header, currentState types.State) (coreexecutor.ExecuteResult, error) { for attempt := 1; attempt <= common.MaxRetriesBeforeHalt; attempt++ { - newAppHash, err := e.exec.ExecuteTxs(ctx, rawTxs, header.Height(), header.Time(), currentState.AppHash) + result, err := e.exec.ExecuteTxs(ctx, rawTxs, header.Height(), header.Time(), currentState.AppHash) if err != nil { if attempt == common.MaxRetriesBeforeHalt { - return nil, fmt.Errorf("failed to execute transactions: %w", err) + return coreexecutor.ExecuteResult{}, fmt.Errorf("failed to execute transactions: %w", err) } e.logger.Error().Err(err). @@ -886,14 +922,14 @@ func (e *Executor) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, hea case <-time.After(common.MaxRetriesTimeout): continue case <-e.ctx.Done(): - return nil, fmt.Errorf("context cancelled during retry: %w", e.ctx.Err()) + return coreexecutor.ExecuteResult{}, fmt.Errorf("context cancelled during retry: %w", e.ctx.Err()) } } - return newAppHash, nil + return result, nil } - return nil, nil + return coreexecutor.ExecuteResult{}, nil } // sendCriticalError sends a critical error to the error channel without blocking diff --git a/block/internal/executing/executor_benchmark_test.go b/block/internal/executing/executor_benchmark_test.go index be71d8fe26..da13a5f760 100644 --- a/block/internal/executing/executor_benchmark_test.go +++ b/block/internal/executing/executor_benchmark_test.go @@ -149,8 +149,8 @@ func (s *stubExecClient) InitChain(context.Context, time.Time, uint64, string) ( return s.stateRoot, nil } func (s *stubExecClient) GetTxs(context.Context) ([][]byte, error) { return nil, nil } -func (s *stubExecClient) ExecuteTxs(_ context.Context, _ [][]byte, _ uint64, _ time.Time, _ []byte) ([]byte, error) { - return 
s.stateRoot, nil +func (s *stubExecClient) ExecuteTxs(_ context.Context, _ [][]byte, _ uint64, _ time.Time, _ []byte) (coreexec.ExecuteResult, error) { + return coreexec.ExecuteResult{UpdatedStateRoot: s.stateRoot}, nil } func (s *stubExecClient) SetFinal(context.Context, uint64) error { return nil } func (s *stubExecClient) GetExecutionInfo(context.Context) (coreexec.ExecutionInfo, error) { diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 1498bf5f79..0b1f86769a 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -19,6 +19,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + coreexec "github.com/evstack/ev-node/core/execution" coreseq "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -68,6 +69,43 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { require.NoError(t, err) assert.Equal(t, 0, len(data.Txs)) assert.EqualValues(t, common.DataHashForEmptyTxs, sh.DataHash) + assert.Empty(t, sh.NextProposerAddress) + + state, err := fx.MemStore.GetState(context.Background()) + require.NoError(t, err) + assert.Equal(t, fx.Exec.genesis.ProposerAddress, state.NextProposerAddress) +} + +func TestProduceBlock_CommitsExecutionNextProposer(t *testing.T) { + fx := setupTestExecutor(t, 1000) + defer fx.Cancel() + + nextAddr, _, _ := buildTestSigner(t) + + fx.MockSeq.EXPECT().GetNextBatch(mock.Anything, mock.AnythingOfType("sequencer.GetNextBatchRequest")). 
+ RunAndReturn(func(ctx context.Context, req coreseq.GetNextBatchRequest) (*coreseq.GetNextBatchResponse, error) { + return &coreseq.GetNextBatchResponse{Batch: &coreseq.Batch{Transactions: nil}, Timestamp: time.Now()}, nil + }).Once() + + fx.MockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), fx.InitStateRoot). + Return(coreexec.ExecuteResult{ + UpdatedStateRoot: []byte("new_root"), + NextProposerAddress: nextAddr, + }, nil).Once() + + fx.MockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + + require.NoError(t, fx.Exec.ProduceBlock(fx.Exec.ctx)) + + header, data, err := fx.MemStore.GetBlockData(context.Background(), 1) + require.NoError(t, err) + require.NoError(t, header.ValidateBasicWithData(data)) + assert.Equal(t, nextAddr, header.NextProposerAddress) + + state, err := fx.MemStore.GetState(context.Background()) + require.NoError(t, err) + assert.Equal(t, nextAddr, state.NextProposerAddress) + assert.Equal(t, header.Hash(), state.LastHeaderHash) } func TestProduceBlock_OutputPassesValidation(t *testing.T) { @@ -220,7 +258,7 @@ func TestExecutor_executeTxsWithRetry(t *testing.T) { if tt.expectSuccess { require.NoError(t, err) - assert.Equal(t, tt.expectHash, result) + assert.Equal(t, tt.expectHash, result.UpdatedStateRoot) } else { require.Error(t, err) if tt.expectError != "" { diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index c6f6ccc1a5..1099cdb87d 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -1,28 +1,23 @@ package executing import ( - "context" "testing" "time" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/sync" - "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - coreseq 
"github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" "github.com/evstack/ev-node/types" ) -var fixedExecutorTestStartTime = time.Unix(1_700_000_000, 0).UTC() - func TestExecutor_BroadcasterIntegration(t *testing.T) { // Create in-memory store ds := sync.MutexWrap(datastore.NewMapDatastore()) @@ -126,301 +121,3 @@ func TestExecutor_NilBroadcasters(t *testing.T) { assert.Equal(t, cacheManager, executor.cache) assert.Equal(t, gen, executor.genesis) } - -func TestExecutor_CreateBlock_UsesScheduledProposerForHeight(t *testing.T) { - ds := sync.MutexWrap(datastore.NewMapDatastore()) - memStore := store.New(ds) - - cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) - require.NoError(t, err) - - metrics := common.NopMetrics() - oldAddr, oldSignerInfo, _ := buildTestSigner(t) - newAddr, newSignerInfo, newSigner := buildTestSigner(t) - - entry1, err := genesis.NewProposerScheduleEntry(1, oldSignerInfo.PubKey) - require.NoError(t, err) - entry2, err := genesis.NewProposerScheduleEntry(2, newSignerInfo.PubKey) - require.NoError(t, err) - - gen := genesis.Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: fixedExecutorTestStartTime, - ProposerAddress: entry1.Address, - ProposerSchedule: []genesis.ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - executor, err := NewExecutor( - memStore, - nil, - nil, - newSigner, - cacheManager, - metrics, - config.DefaultConfig(), - gen, - nil, - nil, - zerolog.Nop(), - common.DefaultBlockOptions(), - make(chan error, 1), - nil, - ) - require.NoError(t, err) - - prevHeader := &types.SignedHeader{ - Header: types.Header{ - Version: types.InitStateVersion, - BaseHeader: types.BaseHeader{ - ChainID: gen.ChainID, - Height: 1, - Time: uint64(gen.StartTime.UnixNano()), - }, - AppHash: []byte("state-root-0"), - ProposerAddress: oldAddr, - DataHash: 
common.DataHashForEmptyTxs, - }, - Signature: types.Signature([]byte("sig-1")), - Signer: oldSignerInfo, - } - prevData := &types.Data{ - Metadata: &types.Metadata{ - ChainID: gen.ChainID, - Height: 1, - Time: prevHeader.BaseHeader.Time, - }, - Txs: nil, - } - - batch, err := memStore.NewBatch(context.Background()) - require.NoError(t, err) - require.NoError(t, batch.SaveBlockData(prevHeader, prevData, &prevHeader.Signature)) - require.NoError(t, batch.SetHeight(1)) - require.NoError(t, batch.Commit()) - - executor.setLastState(types.State{ - Version: types.InitStateVersion, - ChainID: gen.ChainID, - InitialHeight: gen.InitialHeight, - LastBlockHeight: 1, - LastBlockTime: prevHeader.Time(), - LastHeaderHash: prevHeader.Hash(), - AppHash: []byte("state-root-1"), - }) - - header, data, err := executor.CreateBlock(context.Background(), 2, &BatchData{ - Batch: &coreseq.Batch{}, - Time: fixedExecutorTestStartTime.Add(time.Second), - }) - require.NoError(t, err) - require.Equal(t, newAddr, header.ProposerAddress) - require.Equal(t, newAddr, header.Signer.Address) - require.Equal(t, uint64(2), data.Height()) -} - -func TestExecutor_CreateBlock_BasedSequencerUsesScheduledPubKey(t *testing.T) { - ds := sync.MutexWrap(datastore.NewMapDatastore()) - memStore := store.New(ds) - - cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) - require.NoError(t, err) - - _, signerInfo, _ := buildTestSigner(t) - entry, err := genesis.NewProposerScheduleEntry(1, signerInfo.PubKey) - require.NoError(t, err) - - gen := genesis.Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: fixedExecutorTestStartTime, - ProposerAddress: entry.Address, - ProposerSchedule: []genesis.ProposerScheduleEntry{entry}, - DAEpochForcedInclusion: 1, - } - - cfg := config.DefaultConfig() - cfg.Node.BasedSequencer = true - - wantValidatorHash := types.Hash{0x01} - hasherCalled := false - options := common.DefaultBlockOptions() - options.ValidatorHasherProvider = 
func(address []byte, pubKey crypto.PubKey) (types.Hash, error) { - hasherCalled = true - require.Equal(t, entry.Address, address) - require.NotNil(t, pubKey) - - marshalledPubKey, err := crypto.MarshalPublicKey(pubKey) - require.NoError(t, err) - require.Equal(t, entry.PubKey, marshalledPubKey) - - return wantValidatorHash, nil - } - - executor, err := NewExecutor( - memStore, - nil, - nil, - nil, - cacheManager, - common.NopMetrics(), - cfg, - gen, - nil, - nil, - zerolog.Nop(), - options, - make(chan error, 1), - nil, - ) - require.NoError(t, err) - - executor.setLastState(types.State{ - Version: types.InitStateVersion, - ChainID: gen.ChainID, - InitialHeight: gen.InitialHeight, - AppHash: []byte("state-root-1"), - }) - - header, data, err := executor.CreateBlock(context.Background(), 1, &BatchData{ - Batch: &coreseq.Batch{}, - Time: fixedExecutorTestStartTime, - }) - require.NoError(t, err) - require.True(t, hasherCalled) - require.Equal(t, wantValidatorHash, header.ValidatorHash) - require.Equal(t, entry.Address, header.Signer.Address) - require.NotNil(t, header.Signer.PubKey) - - marshalledPubKey, err := crypto.MarshalPublicKey(header.Signer.PubKey) - require.NoError(t, err) - require.Equal(t, entry.PubKey, marshalledPubKey) - require.Equal(t, uint64(1), data.Height()) -} - -// TestNewExecutor_RejectsSignerOutsideSchedule verifies that a signer whose -// address does not appear anywhere in the proposer schedule cannot start the -// executor. This prevents a misconfigured replacement key from coming up as -// an aggregator on a chain it was never scheduled on. 
-func TestNewExecutor_RejectsSignerOutsideSchedule(t *testing.T) { - ds := sync.MutexWrap(datastore.NewMapDatastore()) - memStore := store.New(ds) - - cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) - require.NoError(t, err) - - _, scheduledSigner, _ := buildTestSigner(t) - _, _, strayerSigner := buildTestSigner(t) - - entry, err := genesis.NewProposerScheduleEntry(1, scheduledSigner.PubKey) - require.NoError(t, err) - - gen := genesis.Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: fixedExecutorTestStartTime, - ProposerAddress: entry.Address, - ProposerSchedule: []genesis.ProposerScheduleEntry{entry}, - DAEpochForcedInclusion: 1, - } - - _, err = NewExecutor( - memStore, nil, nil, strayerSigner, cacheManager, - common.NopMetrics(), config.DefaultConfig(), gen, - nil, nil, zerolog.Nop(), common.DefaultBlockOptions(), - make(chan error, 1), nil, - ) - require.ErrorIs(t, err, common.ErrNotProposer) -} - -// TestExecutor_CreateBlock_RejectsSignerAtWrongHeight verifies that a signer -// which is scheduled (so startup succeeds) but not active at the current -// height cannot produce a block. This guards the per-height proposer check -// inside CreateBlock — without it, a rotation could be jumped ahead or -// rolled back by whichever signer the operator happens to start. -func TestExecutor_CreateBlock_RejectsSignerAtWrongHeight(t *testing.T) { - ds := sync.MutexWrap(datastore.NewMapDatastore()) - memStore := store.New(ds) - - cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) - require.NoError(t, err) - - oldAddr, oldSignerInfo, oldSigner := buildTestSigner(t) - _, newSignerInfo, _ := buildTestSigner(t) - - entry1, err := genesis.NewProposerScheduleEntry(1, oldSignerInfo.PubKey) - require.NoError(t, err) - // Second entry activates at height 5. The old signer is scheduled at - // height 1 and is NOT the proposer for height 5+. 
- entry2, err := genesis.NewProposerScheduleEntry(5, newSignerInfo.PubKey) - require.NoError(t, err) - - gen := genesis.Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: fixedExecutorTestStartTime, - ProposerAddress: entry1.Address, - ProposerSchedule: []genesis.ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - // Start the executor as the old signer — it IS in the schedule at - // height 1, so NewExecutor must accept it. - executor, err := NewExecutor( - memStore, nil, nil, oldSigner, cacheManager, - common.NopMetrics(), config.DefaultConfig(), gen, - nil, nil, zerolog.Nop(), common.DefaultBlockOptions(), - make(chan error, 1), nil, - ) - require.NoError(t, err) - - // Seed a height-4 block so CreateBlock(5) has a parent to reference. - prevHeader := &types.SignedHeader{ - Header: types.Header{ - Version: types.InitStateVersion, - BaseHeader: types.BaseHeader{ - ChainID: gen.ChainID, - Height: 4, - Time: uint64(fixedExecutorTestStartTime.Add(4 * time.Second).UnixNano()), - }, - AppHash: []byte("state-root-4"), - ProposerAddress: oldAddr, - DataHash: common.DataHashForEmptyTxs, - }, - Signature: types.Signature([]byte("sig-4")), - Signer: oldSignerInfo, - } - prevData := &types.Data{ - Metadata: &types.Metadata{ - ChainID: gen.ChainID, - Height: 4, - Time: prevHeader.BaseHeader.Time, - }, - } - - batch, err := memStore.NewBatch(context.Background()) - require.NoError(t, err) - require.NoError(t, batch.SaveBlockData(prevHeader, prevData, &prevHeader.Signature)) - require.NoError(t, batch.SetHeight(4)) - require.NoError(t, batch.Commit()) - - executor.setLastState(types.State{ - Version: types.InitStateVersion, - ChainID: gen.ChainID, - InitialHeight: gen.InitialHeight, - LastBlockHeight: 4, - LastBlockTime: prevHeader.Time(), - LastHeaderHash: prevHeader.Hash(), - AppHash: []byte("state-root-4"), - }) - - // Height 5 belongs to the NEW signer per the schedule — the old - // signer must be rejected even though it's a known 
schedule member. - _, _, err = executor.CreateBlock(context.Background(), 5, &BatchData{ - Batch: &coreseq.Batch{}, - Time: fixedExecutorTestStartTime.Add(5 * time.Second), - }) - require.Error(t, err) - require.Contains(t, err.Error(), "proposer") -} diff --git a/block/internal/reaping/bench_test.go b/block/internal/reaping/bench_test.go index 5ec0aaa69d..3c879148a1 100644 --- a/block/internal/reaping/bench_test.go +++ b/block/internal/reaping/bench_test.go @@ -60,8 +60,8 @@ func (e *infiniteExecutor) GetTxs(_ context.Context) ([][]byte, error) { return txs, nil } -func (e *infiniteExecutor) ExecuteTxs(_ context.Context, _ [][]byte, _ uint64, _ time.Time, _ []byte) ([]byte, error) { - return nil, nil +func (e *infiniteExecutor) ExecuteTxs(_ context.Context, _ [][]byte, _ uint64, _ time.Time, _ []byte) (coreexecutor.ExecuteResult, error) { + return coreexecutor.ExecuteResult{}, nil } func (e *infiniteExecutor) FilterTxs(_ context.Context, txs [][]byte, _ uint64, _ uint64, _ bool) ([]coreexecutor.FilterStatus, error) { diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index e53e351832..f5f4a829bf 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -1,6 +1,7 @@ package submitting import ( + "bytes" "context" "encoding/json" "fmt" @@ -362,6 +363,14 @@ func (s *DASubmitter) signEnvelopesParallel( // signAndCacheEnvelope signs a single header and caches the result. 
func (s *DASubmitter) signAndCacheEnvelope(ctx context.Context, header *types.SignedHeader, marshalledHeader []byte, signer signer.Signer) ([]byte, error) { + addr, err := signer.GetAddress() + if err != nil { + return nil, fmt.Errorf("failed to get signer address: %w", err) + } + if len(header.Signer.Address) > 0 && !bytes.Equal(addr, header.Signer.Address) { + return nil, fmt.Errorf("envelope signer address mismatch: got %x, expected %x", addr, header.Signer.Address) + } + // Sign the pre-marshalled header content envelopeSignature, err := signer.Sign(ctx, marshalledHeader) if err != nil { @@ -460,7 +469,7 @@ func (s *DASubmitter) SubmitData(ctx context.Context, unsignedDataList []*types. } // signData signs unsigned SignedData structs returned from cache -func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.SignedData, unsignedDataListBz [][]byte, signer signer.Signer, genesis genesis.Genesis) ([]*types.SignedData, [][]byte, error) { +func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.SignedData, unsignedDataListBz [][]byte, signer signer.Signer, _ genesis.Genesis) ([]*types.SignedData, [][]byte, error) { if signer == nil { return nil, nil, fmt.Errorf("signer is nil") } @@ -489,10 +498,6 @@ func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.Si continue } - if err := genesis.ValidateProposer(unsignedData.Height(), addr, pubKey); err != nil { - return nil, nil, fmt.Errorf("signer does not match proposer schedule for data at height %d: %w", unsignedData.Height(), err) - } - signature, err := signer.Sign(ctx, unsignedDataListBz[i]) if err != nil { return nil, nil, fmt.Errorf("failed to sign data: %w", err) diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index 9c55b9bd6c..d25786018b 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -343,97 +343,6 @@ func 
TestDASubmitter_SubmitData_Success(t *testing.T) { assert.True(t, ok) } -func TestDASubmitter_SubmitData_UsesScheduledProposerForHeight(t *testing.T) { - submitter, st, cm, mockDA, gen := setupDASubmitterTest(t) - ctx := context.Background() - dataNamespace := datypes.NamespaceFromString(testDataNamespace).Bytes() - - mockDA.On( - "Submit", - mock.Anything, - mock.AnythingOfType("[][]uint8"), - mock.AnythingOfType("float64"), - dataNamespace, - mock.Anything, - ).Return(func(_ context.Context, blobs [][]byte, _ float64, _ []byte, _ []byte) datypes.ResultSubmit { - return datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: uint64(len(blobs)), Height: 2}} - }).Once() - - oldAddr, oldPub, _ := createTestSigner(t) - nextAddr, nextPub, nextSigner := createTestSigner(t) - - entry1, err := genesis.NewProposerScheduleEntry(gen.InitialHeight, oldPub) - require.NoError(t, err) - entry2, err := genesis.NewProposerScheduleEntry(2, nextPub) - require.NoError(t, err) - - gen.ProposerAddress = entry1.Address - gen.ProposerSchedule = []genesis.ProposerScheduleEntry{entry1, entry2} - submitter.genesis = gen - - data1 := &types.Data{ - Metadata: &types.Metadata{ - ChainID: gen.ChainID, - Height: 1, - Time: uint64(time.Now().UnixNano()), - }, - Txs: types.Txs{}, - } - - header1 := &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - ChainID: gen.ChainID, - Height: 1, - Time: uint64(time.Now().UnixNano()), - }, - ProposerAddress: oldAddr, - DataHash: common.DataHashForEmptyTxs, - }, - Signer: types.Signer{PubKey: oldPub, Address: oldAddr}, - } - - data := &types.Data{ - Metadata: &types.Metadata{ - ChainID: gen.ChainID, - Height: 2, - Time: uint64(time.Now().UnixNano()), - }, - Txs: types.Txs{types.Tx("rotated-key-tx")}, - } - - header := &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - ChainID: gen.ChainID, - Height: 2, - Time: uint64(time.Now().UnixNano()), - }, - ProposerAddress: 
nextAddr, - DataHash: data.DACommitment(), - }, - Signer: types.Signer{PubKey: nextPub, Address: nextAddr}, - } - - sig1 := types.Signature([]byte("sig-1")) - sig2 := types.Signature([]byte("sig-2")) - batch, err := st.NewBatch(ctx) - require.NoError(t, err) - require.NoError(t, batch.SaveBlockData(header1, data1, &sig1)) - require.NoError(t, batch.SaveBlockData(header, data, &sig2)) - require.NoError(t, batch.SetHeight(2)) - require.NoError(t, batch.Commit()) - - signedDataList, marshalledData, err := cm.GetPendingData(ctx) - require.NoError(t, err) - err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, nextSigner, gen) - require.NoError(t, err) - - _, ok := cm.GetDataDAIncludedByHeight(2) - assert.True(t, ok) - assert.NotEqual(t, oldAddr, nextAddr) -} - func TestDASubmitter_SubmitData_SkipsEmptyData(t *testing.T) { submitter, st, cm, mockDA, gen := setupDASubmitterTest(t) ctx := context.Background() diff --git a/block/internal/syncing/assert.go b/block/internal/syncing/assert.go index 1bed6db8b9..3a23a06876 100644 --- a/block/internal/syncing/assert.go +++ b/block/internal/syncing/assert.go @@ -4,27 +4,16 @@ import ( "errors" "fmt" - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/types" ) -func assertExpectedProposer(genesis genesis.Genesis, height uint64, proposerAddr []byte, pubKey crypto.PubKey) error { - if err := genesis.ValidateProposer(height, proposerAddr, pubKey); err != nil { - return fmt.Errorf("unexpected proposer at height %d: %w", height, err) - } - - return nil -} - func assertValidSignedData(signedData *types.SignedData, genesis genesis.Genesis) error { if signedData == nil || signedData.Txs == nil { return errors.New("empty signed data") } - - if err := assertExpectedProposer(genesis, signedData.Height(), signedData.Signer.Address, signedData.Signer.PubKey); err != nil { - return err + if signedData.Signer.PubKey == nil { + return errors.New("missing signer public 
key in signed data") } dataBytes, err := signedData.Data.MarshalBinary() diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 75bc631e8f..62405cf61d 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -299,11 +299,6 @@ func (r *daRetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedH return nil } - if err := r.assertExpectedProposer(header); err != nil { - r.logger.Debug().Err(err).Msg("unexpected proposer") - return nil - } - if isValidEnvelope && !r.strictMode { r.logger.Info().Uint64("height", header.Height()).Msg("valid DA envelope detected, switching to STRICT MODE") r.strictMode = true @@ -355,11 +350,6 @@ func (r *daRetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { return &signedData.Data } -// assertExpectedProposer validates the proposer schedule entry for the header height. -func (r *daRetriever) assertExpectedProposer(header *types.SignedHeader) error { - return assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer.PubKey) -} - // assertValidSignedData validates signed data using the configured signature provider func (r *daRetriever) assertValidSignedData(signedData *types.SignedData) error { return assertValidSignedData(signedData, r.genesis) diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index 3b587def1f..c2786c36b0 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -215,15 +215,18 @@ func TestDARetriever_TryDecodeHeaderAndData_Basic(t *testing.T) { assert.Nil(t, r.tryDecodeData([]byte("junk"), 1)) } -func TestDARetriever_tryDecodeData_InvalidSignatureOrProposer(t *testing.T) { +func TestDARetriever_tryDecodeData_InvalidSignature(t *testing.T) { - goodAddr, pub, signer := buildSyncTestSigner(t) - badAddr := []byte("not-the-proposer") - gen := genesis.Genesis{ChainID: "tchain", 
InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: badAddr} + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) - // Signed data is made by goodAddr; retriever expects badAddr -> should be rejected - db, _ := makeSignedDataBytes(t, gen.ChainID, 7, goodAddr, pub, signer, 1) + _, signedData := makeSignedDataBytes(t, gen.ChainID, 7, addr, pub, signer, 1) + require.NotEmpty(t, signedData.Signature) + signedData.Signature[0] ^= 0x01 + db, err := signedData.MarshalBinary() + require.NoError(t, err) + assert.Nil(t, r.tryDecodeData(db, 55)) } diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 2150bd2933..87a8b6a093 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -81,19 +81,6 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC } return err } - if headerHeight := p2pHeader.Height(); headerHeight != height { - err := fmt.Errorf("header height mismatch: requested %d, got %d", height, headerHeight) - h.logger.Warn(). - Uint64("requested_height", height). - Uint64("header_height", headerHeight). - Err(err). - Msg("discarding mismatched header from P2P") - return err - } - if err := h.assertExpectedProposer(p2pHeader.SignedHeader); err != nil { - h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid header from P2P") - return err - } p2pData, err := h.dataStore.GetByHeight(ctx, height) if err != nil { @@ -133,8 +120,3 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC h.logger.Debug().Uint64("height", height).Msg("processed event from P2P") return nil } - -// assertExpectedProposer validates the proposer schedule entry for the header height. 
-func (h *P2PHandler) assertExpectedProposer(header *types.SignedHeader) error { - return assertExpectedProposer(h.genesis, header.Height(), header.ProposerAddress, header.Signer.PubKey) -} diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index 84a875ffbf..e92a996550 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -194,7 +194,7 @@ func TestP2PHandler_ProcessHeight_SkipsWhenHeaderMissing(t *testing.T) { p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(9)) } -func TestP2PHandler_ProcessHeight_SkipsOnProposerMismatch(t *testing.T) { +func TestP2PHandler_ProcessHeight_AcceptsNonGenesisProposer(t *testing.T) { p := setupP2P(t) ctx := context.Background() var err error @@ -203,58 +203,11 @@ func TestP2PHandler_ProcessHeight_SkipsOnProposerMismatch(t *testing.T) { require.NotEqual(t, string(p.Genesis.ProposerAddress), string(badAddr)) header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 11, badAddr, pub, signer) - header.DataHash = common.DataHashForEmptyTxs - - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(header, nil).Once() - - ch := make(chan common.DAHeightEvent, 1) - err = p.Handler.ProcessHeight(ctx, 11, ch) - require.Error(t, err) - - require.Empty(t, collectEvents(t, ch, 50*time.Millisecond)) - p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(11)) -} - -func TestP2PHandler_ProcessHeight_RejectsHeaderHeightMismatch(t *testing.T) { - p := setupP2P(t) - ctx := context.Background() - - header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 12, p.ProposerAddr, p.ProposerPub, p.Signer) - - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(10)).Return(header, nil).Once() - - ch := make(chan common.DAHeightEvent, 1) - err := p.Handler.ProcessHeight(ctx, 10, ch) - require.Error(t, err) - require.Contains(t, err.Error(), "header height mismatch") - - require.Empty(t, collectEvents(t, ch, 
50*time.Millisecond)) - p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(10)) -} - -func TestP2PHandler_ProcessHeight_AllowsScheduledProposerRotation(t *testing.T) { - p := setupP2P(t) - ctx := context.Background() - - nextAddr, nextPub, nextSigner := buildTestSigner(t) - - entry1, err := genesis.NewProposerScheduleEntry(p.Genesis.InitialHeight, p.ProposerPub) - require.NoError(t, err) - entry2, err := genesis.NewProposerScheduleEntry(11, nextPub) - require.NoError(t, err) - - p.Genesis.ProposerAddress = entry1.Address - p.Genesis.ProposerSchedule = []genesis.ProposerScheduleEntry{entry1, entry2} - p.Genesis.DAEpochForcedInclusion = 1 - require.NoError(t, p.Genesis.Validate()) - p.Handler.genesis = p.Genesis - - header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 11, nextAddr, nextPub, nextSigner) data := &types.P2PData{Data: makeData(p.Genesis.ChainID, 11, 1)} header.DataHash = data.DACommitment() bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) require.NoError(t, err) - sig, err := nextSigner.Sign(t.Context(), bz) + sig, err := signer.Sign(t.Context(), bz) require.NoError(t, err) header.Signature = sig @@ -267,45 +220,7 @@ func TestP2PHandler_ProcessHeight_AllowsScheduledProposerRotation(t *testing.T) events := collectEvents(t, ch, 50*time.Millisecond) require.Len(t, events, 1) - require.Equal(t, nextAddr, events[0].Header.ProposerAddress) -} - -// TestP2PHandler_ProcessHeight_RejectsScheduledProposerBeforeActivation verifies -// the counterpart to the rotation-allows test: a signer that IS in the schedule -// but only active at a later height must not be accepted for blocks before the -// activation height. Without the per-height check, any scheduled signer could -// forge blocks outside their active window. 
-func TestP2PHandler_ProcessHeight_RejectsScheduledProposerBeforeActivation(t *testing.T) { - p := setupP2P(t) - ctx := context.Background() - - nextAddr, nextPub, nextSigner := buildTestSigner(t) - - entry1, err := genesis.NewProposerScheduleEntry(p.Genesis.InitialHeight, p.ProposerPub) - require.NoError(t, err) - entry2, err := genesis.NewProposerScheduleEntry(11, nextPub) - require.NoError(t, err) - - p.Genesis.ProposerAddress = entry1.Address - p.Genesis.ProposerSchedule = []genesis.ProposerScheduleEntry{entry1, entry2} - p.Genesis.DAEpochForcedInclusion = 1 - require.NoError(t, p.Genesis.Validate()) - p.Handler.genesis = p.Genesis - - // entry2 is scheduled but only active at height 11. Height 10 still - // belongs to entry1, so a header from the next signer at height 10 - // must be rejected. - header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 10, nextAddr, nextPub, nextSigner) - header.DataHash = common.DataHashForEmptyTxs - - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(10)).Return(header, nil).Once() - - ch := make(chan common.DAHeightEvent, 1) - err = p.Handler.ProcessHeight(ctx, 10, ch) - require.Error(t, err) - - require.Empty(t, collectEvents(t, ch, 50*time.Millisecond)) - p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(10)) + require.Equal(t, badAddr, events[0].Header.ProposerAddress) } func TestP2PHandler_ProcessedHeightSkipsPreviouslyHandledBlocks(t *testing.T) { diff --git a/block/internal/syncing/raft_retriever.go b/block/internal/syncing/raft_retriever.go index 4cb15aec07..a0a527f208 100644 --- a/block/internal/syncing/raft_retriever.go +++ b/block/internal/syncing/raft_retriever.go @@ -125,10 +125,6 @@ func (r *raftRetriever) consumeRaftBlock(ctx context.Context, state *raft.RaftBl r.logger.Debug().Err(err).Msg("invalid header structure") return nil } - if err := assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer.PubKey); err != nil { - 
r.logger.Debug().Err(err).Msg("unexpected proposer") - return nil - } var data types.Data if err := data.UnmarshalBinary(state.Data); err != nil { diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 40e3c9523f..9bcbd0f3ee 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -320,14 +320,18 @@ func (s *Syncer) initializeState() error { } state = types.State{ - ChainID: s.genesis.ChainID, - InitialHeight: s.genesis.InitialHeight, - LastBlockHeight: s.genesis.InitialHeight - 1, - LastBlockTime: s.genesis.StartTime, - DAHeight: s.genesis.DAStartHeight, - AppHash: stateRoot, + ChainID: s.genesis.ChainID, + InitialHeight: s.genesis.InitialHeight, + LastBlockHeight: s.genesis.InitialHeight - 1, + LastBlockTime: s.genesis.StartTime, + DAHeight: s.genesis.DAStartHeight, + AppHash: stateRoot, + NextProposerAddress: s.initialProposerAddress(s.ctx), } } + if len(state.NextProposerAddress) == 0 { + state.NextProposerAddress = s.initialProposerAddress(s.ctx) + } if state.DAHeight != 0 && state.DAHeight < s.genesis.DAStartHeight { return fmt.Errorf("DA height (%d) is lower than DA start height (%d)", state.DAHeight, s.genesis.DAStartHeight) } @@ -398,6 +402,18 @@ func (s *Syncer) initializeState() error { return nil } +func (s *Syncer) initialProposerAddress(ctx context.Context) []byte { + if s.exec != nil { + info, err := s.exec.GetExecutionInfo(ctx) + if err != nil { + s.logger.Warn().Err(err).Msg("failed to get execution info for proposer, falling back to genesis proposer") + } else if len(info.NextProposerAddress) > 0 { + return append([]byte(nil), info.NextProposerAddress...) + } + } + return append([]byte(nil), s.genesis.ProposerAddress...) 
+} + // processLoop is the main coordination loop for processing events func (s *Syncer) processLoop(ctx context.Context) { s.logger.Info().Msg("starting process loop") @@ -816,14 +832,24 @@ func (s *Syncer) ApplyBlock(ctx context.Context, header types.Header, data *type // Execute transactions ctx = context.WithValue(ctx, types.HeaderContextKey, header) - newAppHash, err := s.executeTxsWithRetry(ctx, rawTxs, header, currentState) + result, err := s.executeTxsWithRetry(ctx, rawTxs, header, currentState) if err != nil { s.sendCriticalError(fmt.Errorf("failed to execute transactions: %w", err)) return types.State{}, fmt.Errorf("failed to execute transactions: %w", err) } + if len(result.NextProposerAddress) > 0 { + if len(header.NextProposerAddress) == 0 { + return types.State{}, fmt.Errorf("next proposer mismatch: header empty, execution %x", result.NextProposerAddress) + } + if !bytes.Equal(header.NextProposerAddress, result.NextProposerAddress) { + return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution %x", header.NextProposerAddress, result.NextProposerAddress) + } + } else if len(header.NextProposerAddress) > 0 && !bytes.Equal(header.NextProposerAddress, header.ProposerAddress) { + return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution unchanged", header.NextProposerAddress) + } // Create new state - newState, err := currentState.NextState(header, newAppHash) + newState, err := currentState.NextState(header, result.UpdatedStateRoot) if err != nil { return types.State{}, fmt.Errorf("failed to create next state: %w", err) } @@ -833,12 +859,12 @@ func (s *Syncer) ApplyBlock(ctx context.Context, header types.Header, data *type // executeTxsWithRetry executes transactions with retry logic. // NOTE: the function retries the execution client call regardless of the error. Some execution clients errors are irrecoverable, and will eventually halt the node, as expected. 
-func (s *Syncer) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, header types.Header, currentState types.State) ([]byte, error) { +func (s *Syncer) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, header types.Header, currentState types.State) (coreexecutor.ExecuteResult, error) { for attempt := 1; attempt <= common.MaxRetriesBeforeHalt; attempt++ { - newAppHash, err := s.exec.ExecuteTxs(ctx, rawTxs, header.Height(), header.Time(), currentState.AppHash) + result, err := s.exec.ExecuteTxs(ctx, rawTxs, header.Height(), header.Time(), currentState.AppHash) if err != nil { if attempt == common.MaxRetriesBeforeHalt { - return nil, fmt.Errorf("failed to execute transactions: %w", err) + return coreexecutor.ExecuteResult{}, fmt.Errorf("failed to execute transactions: %w", err) } s.logger.Error().Err(err). @@ -851,14 +877,14 @@ func (s *Syncer) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, heade case <-time.After(common.MaxRetriesTimeout): continue case <-ctx.Done(): - return nil, fmt.Errorf("context cancelled during retry: %w", ctx.Err()) + return coreexecutor.ExecuteResult{}, fmt.Errorf("context cancelled during retry: %w", ctx.Err()) } } - return newAppHash, nil + return result, nil } - return nil, nil + return coreexecutor.ExecuteResult{}, nil } // ValidateBlock validates a synced block diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 67c87e06ed..696d2c939f 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -146,7 +146,7 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { addr, pub, signer := buildSyncTestSigner(t) cfg := config.DefaultConfig() - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second)} mockExec := testmocks.NewMockExecutor(t) 
mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain").Return([]byte("app0"), nil).Once() @@ -191,6 +191,60 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { require.Error(t, err) } +func TestSyncer_ValidateBlock_UsesStateNextProposer(t *testing.T) { + addr, _, _ := buildSyncTestSigner(t) + badAddr, badPub, badSigner := buildSyncTestSigner(t) + + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second)} + data := makeData(gen.ChainID, 1, 1) + _, header := makeSignedHeaderBytes(t, gen.ChainID, 1, badAddr, badPub, badSigner, []byte("app0"), data, nil) + + s := &Syncer{logger: zerolog.Nop()} + state := types.State{ + ChainID: gen.ChainID, + InitialHeight: gen.InitialHeight, + LastBlockHeight: gen.InitialHeight - 1, + LastBlockTime: gen.StartTime, + AppHash: []byte("app0"), + NextProposerAddress: addr, + } + + err := s.ValidateBlock(t.Context(), state, data, header) + require.Error(t, err) + require.Contains(t, err.Error(), "unexpected proposer") +} + +func TestSyncer_ApplyBlockRejectsExecutionNextProposerMismatch(t *testing.T) { + addr, _, _ := buildSyncTestSigner(t) + headerNext := []byte("header-next-proposer") + execNext := []byte("execution-next-proposer") + + mockExec := testmocks.NewMockExecutor(t) + data := makeData("tchain", 1, 1) + header := types.Header{ + BaseHeader: types.BaseHeader{ChainID: "tchain", Height: 1, Time: uint64(time.Now().UnixNano())}, + ProposerAddress: addr, + NextProposerAddress: headerNext, + } + currentState := types.State{AppHash: []byte("app0"), NextProposerAddress: addr} + + mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.Anything, currentState.AppHash). 
+ Return(execution.ExecuteResult{ + UpdatedStateRoot: []byte("app1"), + NextProposerAddress: execNext, + }, nil).Once() + + s := &Syncer{ + exec: mockExec, + ctx: t.Context(), + logger: zerolog.Nop(), + } + + _, err := s.ApplyBlock(t.Context(), header, data, currentState) + require.Error(t, err) + require.Contains(t, err.Error(), "next proposer mismatch") +} + func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) @@ -936,7 +990,7 @@ func TestSyncer_executeTxsWithRetry(t *testing.T) { if tt.expectSuccess { require.NoError(t, err) - assert.Equal(t, tt.expectHash, result) + assert.Equal(t, tt.expectHash, result.UpdatedStateRoot) } else { require.Error(t, err) if tt.expectError != "" { diff --git a/client/crates/types/src/proto/evnode.v1.messages.rs b/client/crates/types/src/proto/evnode.v1.messages.rs index 019046d0b7..495aac85d6 100644 --- a/client/crates/types/src/proto/evnode.v1.messages.rs +++ b/client/crates/types/src/proto/evnode.v1.messages.rs @@ -65,6 +65,9 @@ pub struct Header { /// Chain ID the block belongs to #[prost(string, tag = "12")] pub chain_id: ::prost::alloc::string::String, + /// Proposer address selected by this block's execution result for the next block. + #[prost(bytes = "vec", tag = "13")] + pub next_proposer_address: ::prost::alloc::vec::Vec, } /// SignedHeader is a header with a signature and a signer. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] @@ -76,6 +79,19 @@ pub struct SignedHeader { #[prost(message, optional, tag = "3")] pub signer: ::core::option::Option, } +/// DAHeaderEnvelope is a wrapper around SignedHeader for DA submission. +/// It is binary compatible with SignedHeader (fields 1-3) but adds an envelope signature. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct DaHeaderEnvelope { + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option
, + #[prost(bytes = "vec", tag = "2")] + pub signature: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub signer: ::core::option::Option, + #[prost(bytes = "vec", tag = "4")] + pub envelope_signature: ::prost::alloc::vec::Vec, +} /// Signer is a signer of a block in the blockchain. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Signer { @@ -139,6 +155,28 @@ pub struct Vote { #[prost(bytes = "vec", tag = "5")] pub validator_address: ::prost::alloc::vec::Vec, } +/// P2PSignedHeader +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct P2pSignedHeader { + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option
, + #[prost(bytes = "vec", tag = "2")] + pub signature: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub signer: ::core::option::Option, + #[prost(uint64, optional, tag = "4")] + pub da_height_hint: ::core::option::Option, +} +/// P2PData +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct P2pData { + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option, + #[prost(bytes = "vec", repeated, tag = "2")] + pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + #[prost(uint64, optional, tag = "3")] + pub da_height_hint: ::core::option::Option, +} /// State is the state of the blockchain. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct State { @@ -158,6 +196,26 @@ pub struct State { pub app_hash: ::prost::alloc::vec::Vec, #[prost(bytes = "vec", tag = "9")] pub last_header_hash: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "10")] + pub next_proposer_address: ::prost::alloc::vec::Vec, +} +/// RaftBlockState represents a replicated block state +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct RaftBlockState { + #[prost(uint64, tag = "1")] + pub height: u64, + #[prost(uint64, tag = "2")] + pub last_submitted_da_header_height: u64, + #[prost(uint64, tag = "3")] + pub last_submitted_da_data_height: u64, + #[prost(bytes = "vec", tag = "4")] + pub hash: ::prost::alloc::vec::Vec, + #[prost(uint64, tag = "5")] + pub timestamp: u64, + #[prost(bytes = "vec", tag = "6")] + pub header: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "7")] + pub data: ::prost::alloc::vec::Vec, } /// SequencerDACheckpoint tracks the position in the DA where transactions were last processed #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] @@ -212,6 +270,17 @@ pub struct Batch { #[prost(bytes = "vec", repeated, tag = "1")] pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } +/// BlockData contains data retrieved from a single DA height. 
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockData { + #[prost(uint64, tag = "1")] + pub height: u64, + /// Unix timestamp in nanoseconds + #[prost(int64, tag = "2")] + pub timestamp: i64, + #[prost(bytes = "vec", repeated, tag = "3")] + pub blobs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} /// InitChainRequest contains the genesis parameters for chain initialization #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct InitChainRequest { @@ -231,9 +300,6 @@ pub struct InitChainResponse { /// Hash representing initial state #[prost(bytes = "vec", tag = "1")] pub state_root: ::prost::alloc::vec::Vec, - /// Maximum allowed bytes for transactions in a block - #[prost(uint64, tag = "2")] - pub max_bytes: u64, } /// GetTxsRequest is the request for fetching transactions /// @@ -272,6 +338,10 @@ pub struct ExecuteTxsResponse { /// Maximum allowed transaction size (may change with protocol updates) #[prost(uint64, tag = "2")] pub max_bytes: u64, + /// Proposer address that should sign the next block. + /// Empty means the current proposer remains active. 
+ #[prost(bytes = "vec", tag = "3")] + pub next_proposer_address: ::prost::alloc::vec::Vec, } /// SetFinalRequest marks a block as finalized #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] @@ -285,6 +355,77 @@ pub struct SetFinalRequest { /// Empty response, errors are returned via gRPC status #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct SetFinalResponse {} +/// GetExecutionInfoRequest requests execution layer parameters +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct GetExecutionInfoRequest {} +/// GetExecutionInfoResponse contains execution layer parameters +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct GetExecutionInfoResponse { + /// Maximum gas allowed for transactions in a block + /// For non-gas-based execution layers, this should be 0 + #[prost(uint64, tag = "1")] + pub max_gas: u64, + /// Proposer address that should sign the next block from the execution + /// layer's current view. Empty means unchanged or unavailable. 
+ #[prost(bytes = "vec", tag = "2")] + pub next_proposer_address: ::prost::alloc::vec::Vec, +} +/// FilterTxsRequest contains transactions to validate and filter +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct FilterTxsRequest { + /// All transactions (force-included + mempool) + #[prost(bytes = "vec", repeated, tag = "1")] + pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// Maximum cumulative size allowed (0 means no size limit) + #[prost(uint64, tag = "2")] + pub max_bytes: u64, + /// Maximum cumulative gas allowed (0 means no gas limit) + #[prost(uint64, tag = "3")] + pub max_gas: u64, + /// Whether force-included transactions are present + #[prost(bool, tag = "4")] + pub has_force_included_transaction: bool, +} +/// FilterTxsResponse contains the filter status for each transaction +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct FilterTxsResponse { + /// Filter status for each transaction (same length as txs in request) + #[prost(enumeration = "FilterStatus", repeated, tag = "1")] + pub statuses: ::prost::alloc::vec::Vec, +} +/// FilterStatus represents the result of filtering a transaction +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum FilterStatus { + /// Transaction will make it to the next batch + FilterOk = 0, + /// Transaction will be filtered out because invalid (too big, malformed, etc.) + FilterRemove = 1, + /// Transaction is valid but postponed for later processing due to size/gas constraint + FilterPostpone = 2, +} +impl FilterStatus { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::FilterOk => "FILTER_OK", + Self::FilterRemove => "FILTER_REMOVE", + Self::FilterPostpone => "FILTER_POSTPONE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "FILTER_OK" => Some(Self::FilterOk), + "FILTER_REMOVE" => Some(Self::FilterRemove), + "FILTER_POSTPONE" => Some(Self::FilterPostpone), + _ => None, + } + } +} /// Block contains all the components of a complete block #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Block { diff --git a/client/crates/types/src/proto/evnode.v1.services.rs b/client/crates/types/src/proto/evnode.v1.services.rs index b34ae918b7..ef7fed4048 100644 --- a/client/crates/types/src/proto/evnode.v1.services.rs +++ b/client/crates/types/src/proto/evnode.v1.services.rs @@ -439,6 +439,9 @@ pub struct Header { /// Chain ID the block belongs to #[prost(string, tag = "12")] pub chain_id: ::prost::alloc::string::String, + /// Proposer address selected by this block's execution result for the next block. + #[prost(bytes = "vec", tag = "13")] + pub next_proposer_address: ::prost::alloc::vec::Vec, } /// SignedHeader is a header with a signature and a signer. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] @@ -450,6 +453,19 @@ pub struct SignedHeader { #[prost(message, optional, tag = "3")] pub signer: ::core::option::Option, } +/// DAHeaderEnvelope is a wrapper around SignedHeader for DA submission. +/// It is binary compatible with SignedHeader (fields 1-3) but adds an envelope signature. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct DaHeaderEnvelope { + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option
, + #[prost(bytes = "vec", tag = "2")] + pub signature: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub signer: ::core::option::Option, + #[prost(bytes = "vec", tag = "4")] + pub envelope_signature: ::prost::alloc::vec::Vec, +} /// Signer is a signer of a block in the blockchain. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Signer { @@ -513,6 +529,28 @@ pub struct Vote { #[prost(bytes = "vec", tag = "5")] pub validator_address: ::prost::alloc::vec::Vec, } +/// P2PSignedHeader +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct P2pSignedHeader { + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option
, + #[prost(bytes = "vec", tag = "2")] + pub signature: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub signer: ::core::option::Option, + #[prost(uint64, optional, tag = "4")] + pub da_height_hint: ::core::option::Option, +} +/// P2PData +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct P2pData { + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option, + #[prost(bytes = "vec", repeated, tag = "2")] + pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + #[prost(uint64, optional, tag = "3")] + pub da_height_hint: ::core::option::Option, +} /// State is the state of the blockchain. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct State { @@ -532,6 +570,26 @@ pub struct State { pub app_hash: ::prost::alloc::vec::Vec, #[prost(bytes = "vec", tag = "9")] pub last_header_hash: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "10")] + pub next_proposer_address: ::prost::alloc::vec::Vec, +} +/// RaftBlockState represents a replicated block state +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct RaftBlockState { + #[prost(uint64, tag = "1")] + pub height: u64, + #[prost(uint64, tag = "2")] + pub last_submitted_da_header_height: u64, + #[prost(uint64, tag = "3")] + pub last_submitted_da_data_height: u64, + #[prost(bytes = "vec", tag = "4")] + pub hash: ::prost::alloc::vec::Vec, + #[prost(uint64, tag = "5")] + pub timestamp: u64, + #[prost(bytes = "vec", tag = "6")] + pub header: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "7")] + pub data: ::prost::alloc::vec::Vec, } /// SequencerDACheckpoint tracks the position in the DA where transactions were last processed #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] @@ -957,6 +1015,17 @@ pub struct Batch { #[prost(bytes = "vec", repeated, tag = "1")] pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } +/// BlockData contains data retrieved from a single DA height. 
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockData { + #[prost(uint64, tag = "1")] + pub height: u64, + /// Unix timestamp in nanoseconds + #[prost(int64, tag = "2")] + pub timestamp: i64, + #[prost(bytes = "vec", repeated, tag = "3")] + pub blobs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} /// InitChainRequest contains the genesis parameters for chain initialization #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct InitChainRequest { @@ -976,9 +1045,6 @@ pub struct InitChainResponse { /// Hash representing initial state #[prost(bytes = "vec", tag = "1")] pub state_root: ::prost::alloc::vec::Vec, - /// Maximum allowed bytes for transactions in a block - #[prost(uint64, tag = "2")] - pub max_bytes: u64, } /// GetTxsRequest is the request for fetching transactions /// @@ -1017,6 +1083,10 @@ pub struct ExecuteTxsResponse { /// Maximum allowed transaction size (may change with protocol updates) #[prost(uint64, tag = "2")] pub max_bytes: u64, + /// Proposer address that should sign the next block. + /// Empty means the current proposer remains active. 
+ #[prost(bytes = "vec", tag = "3")] + pub next_proposer_address: ::prost::alloc::vec::Vec, } /// SetFinalRequest marks a block as finalized #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] @@ -1030,6 +1100,77 @@ pub struct SetFinalRequest { /// Empty response, errors are returned via gRPC status #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct SetFinalResponse {} +/// GetExecutionInfoRequest requests execution layer parameters +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct GetExecutionInfoRequest {} +/// GetExecutionInfoResponse contains execution layer parameters +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct GetExecutionInfoResponse { + /// Maximum gas allowed for transactions in a block + /// For non-gas-based execution layers, this should be 0 + #[prost(uint64, tag = "1")] + pub max_gas: u64, + /// Proposer address that should sign the next block from the execution + /// layer's current view. Empty means unchanged or unavailable. 
+ #[prost(bytes = "vec", tag = "2")] + pub next_proposer_address: ::prost::alloc::vec::Vec, +} +/// FilterTxsRequest contains transactions to validate and filter +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct FilterTxsRequest { + /// All transactions (force-included + mempool) + #[prost(bytes = "vec", repeated, tag = "1")] + pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// Maximum cumulative size allowed (0 means no size limit) + #[prost(uint64, tag = "2")] + pub max_bytes: u64, + /// Maximum cumulative gas allowed (0 means no gas limit) + #[prost(uint64, tag = "3")] + pub max_gas: u64, + /// Whether force-included transactions are present + #[prost(bool, tag = "4")] + pub has_force_included_transaction: bool, +} +/// FilterTxsResponse contains the filter status for each transaction +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct FilterTxsResponse { + /// Filter status for each transaction (same length as txs in request) + #[prost(enumeration = "FilterStatus", repeated, tag = "1")] + pub statuses: ::prost::alloc::vec::Vec, +} +/// FilterStatus represents the result of filtering a transaction +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum FilterStatus { + /// Transaction will make it to the next batch + FilterOk = 0, + /// Transaction will be filtered out because invalid (too big, malformed, etc.) + FilterRemove = 1, + /// Transaction is valid but postponed for later processing due to size/gas constraint + FilterPostpone = 2, +} +impl FilterStatus { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::FilterOk => "FILTER_OK", + Self::FilterRemove => "FILTER_REMOVE", + Self::FilterPostpone => "FILTER_POSTPONE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "FILTER_OK" => Some(Self::FilterOk), + "FILTER_REMOVE" => Some(Self::FilterRemove), + "FILTER_POSTPONE" => Some(Self::FilterPostpone), + _ => None, + } + } +} /// Generated client implementations. pub mod executor_service_client { #![allow( @@ -1219,6 +1360,58 @@ pub mod executor_service_client { .insert(GrpcMethod::new("evnode.v1.ExecutorService", "SetFinal")); self.inner.unary(req, path, codec).await } + /// GetExecutionInfo returns current execution layer parameters + pub async fn get_execution_info( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/evnode.v1.ExecutorService/GetExecutionInfo", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("evnode.v1.ExecutorService", "GetExecutionInfo"), + ); + self.inner.unary(req, path, codec).await + } + /// FilterTxs validates force-included transactions and calculates gas for all transactions + pub async fn filter_txs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/evnode.v1.ExecutorService/FilterTxs", + ); + let mut req = 
request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("evnode.v1.ExecutorService", "FilterTxs")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -1263,6 +1456,22 @@ pub mod executor_service_server { tonic::Response, tonic::Status, >; + /// GetExecutionInfo returns current execution layer parameters + async fn get_execution_info( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// FilterTxs validates force-included transactions and calculates gas for all transactions + async fn filter_txs( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } /// ExecutorService defines the execution layer interface for EVNode #[derive(Debug)] @@ -1521,6 +1730,97 @@ pub mod executor_service_server { }; Box::pin(fut) } + "/evnode.v1.ExecutorService/GetExecutionInfo" => { + #[allow(non_camel_case_types)] + struct GetExecutionInfoSvc(pub Arc); + impl< + T: ExecutorService, + > tonic::server::UnaryService + for GetExecutionInfoSvc { + type Response = super::GetExecutionInfoResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_execution_info(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetExecutionInfoSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) 
+ .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/evnode.v1.ExecutorService/FilterTxs" => { + #[allow(non_camel_case_types)] + struct FilterTxsSvc(pub Arc); + impl< + T: ExecutorService, + > tonic::server::UnaryService + for FilterTxsSvc { + type Response = super::FilterTxsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::filter_txs(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = FilterTxsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { let mut response = http::Response::new( diff --git a/core/README.md b/core/README.md index 8f30a3a20f..0138cc004b 100644 --- a/core/README.md +++ b/core/README.md @@ -20,13 +20,20 @@ The `Executor` interface defines how the execution layer processes transactions // Executor defines the interface for the execution layer. type Executor interface { // InitChain initializes the chain based on the genesis information. 
- InitChain(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) (stateRoot []byte, maxBytes uint64, err error) + InitChain(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) (stateRoot []byte, err error) // GetTxs retrieves transactions from the mempool. GetTxs(ctx context.Context) ([][]byte, error) // ExecuteTxs executes a block of transactions against the current state. - ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, maxBytes uint64, err error) + ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (result ExecuteResult, err error) // SetFinal marks a block height as final. SetFinal(ctx context.Context, blockHeight uint64) error + // GetExecutionInfo returns execution parameters used by ev-node. + GetExecutionInfo(ctx context.Context) (ExecutionInfo, error) +} + +type ExecuteResult struct { + UpdatedStateRoot []byte + NextProposerAddress []byte } ``` diff --git a/core/execution/dummy.go b/core/execution/dummy.go index d6fb38959e..8953ded2a7 100644 --- a/core/execution/dummy.go +++ b/core/execution/dummy.go @@ -61,7 +61,7 @@ func (e *DummyExecutor) InjectTx(tx []byte) { } // ExecuteTxs simulate execution of transactions. 
-func (e *DummyExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) { +func (e *DummyExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (ExecuteResult, error) { e.mu.Lock() defer e.mu.Unlock() @@ -73,7 +73,7 @@ func (e *DummyExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeigh pending := hash.Sum(nil) e.pendingRoots[blockHeight] = pending e.removeExecutedTxs(txs) - return pending, nil + return ExecuteResult{UpdatedStateRoot: pending}, nil } // SetFinal marks block at given height as finalized. diff --git a/core/execution/dummy_test.go b/core/execution/dummy_test.go index f6be3d400b..e77f1a39c6 100644 --- a/core/execution/dummy_test.go +++ b/core/execution/dummy_test.go @@ -131,13 +131,13 @@ func TestExecuteTxs(t *testing.T) { prevStateRoot := executor.GetStateRoot() txsToExecute := [][]byte{tx1, tx3} - newStateRoot, err := executor.ExecuteTxs(ctx, txsToExecute, blockHeight, timestamp, prevStateRoot) + result, err := executor.ExecuteTxs(ctx, txsToExecute, blockHeight, timestamp, prevStateRoot) if err != nil { t.Fatalf("ExecuteTxs returned error: %v", err) } - if bytes.Equal(newStateRoot, prevStateRoot) { + if bytes.Equal(result.UpdatedStateRoot, prevStateRoot) { t.Error("stateRoot should have changed after ExecuteTxs") } @@ -167,7 +167,7 @@ func TestSetFinal(t *testing.T) { prevStateRoot := executor.GetStateRoot() txs := [][]byte{[]byte("tx1"), []byte("tx2")} - pendingRoot, _ := executor.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) + pendingResult, _ := executor.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) // Set the block as final err := executor.SetFinal(ctx, blockHeight) @@ -177,8 +177,8 @@ func TestSetFinal(t *testing.T) { // Verify that the state root was updated newStateRoot := executor.GetStateRoot() - if !bytes.Equal(newStateRoot, pendingRoot) { - t.Errorf("Expected 
state root to be updated to pending root %v, got %v", pendingRoot, newStateRoot) + if !bytes.Equal(newStateRoot, pendingResult.UpdatedStateRoot) { + t.Errorf("Expected state root to be updated to pending root %v, got %v", pendingResult.UpdatedStateRoot, newStateRoot) } // Verify that the pending root was removed @@ -398,7 +398,7 @@ func TestExecuteTxsWithInvalidPrevStateRoot(t *testing.T) { timestamp := time.Now() txs := [][]byte{[]byte("tx1"), []byte("tx2")} - newStateRoot, err := executor.ExecuteTxs(ctx, txs, blockHeight, timestamp, invalidPrevStateRoot) + result, err := executor.ExecuteTxs(ctx, txs, blockHeight, timestamp, invalidPrevStateRoot) // The dummy executor doesn't validate the previous state root, so it should still work // This is a characteristic of the dummy implementation @@ -406,7 +406,7 @@ func TestExecuteTxsWithInvalidPrevStateRoot(t *testing.T) { t.Fatalf("ExecuteTxs with invalid prevStateRoot returned error: %v", err) } - if len(newStateRoot) == 0 { + if len(result.UpdatedStateRoot) == 0 { t.Error("Expected non-empty state root even with invalid prevStateRoot") } diff --git a/core/execution/execution.go b/core/execution/execution.go index f3ebe1da91..477407e1da 100644 --- a/core/execution/execution.go +++ b/core/execution/execution.go @@ -63,9 +63,9 @@ type Executor interface { // - prevStateRoot: Previous block's state root hash // // Returns: - // - updatedStateRoot: New state root after executing transactions + // - result: New execution result after executing transactions // - err: Any execution errors - ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) + ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (result ExecuteResult, err error) // SetFinal marks a block as finalized at the specified height. 
// Requirements: @@ -132,6 +132,21 @@ type ExecutionInfo struct { // MaxGas is the maximum gas allowed for transactions in a block. // For non-gas-based execution layers, this should be 0. MaxGas uint64 + + // NextProposerAddress is the proposer address that should sign the next + // block from the execution layer's current view. Empty means unchanged or + // unavailable, and callers should fall back to their current proposer. + NextProposerAddress []byte +} + +// ExecuteResult contains execution output that consensus must persist. +type ExecuteResult struct { + // UpdatedStateRoot is the new state root after executing transactions. + UpdatedStateRoot []byte + + // NextProposerAddress is the proposer address selected by execution for the + // next block. Empty means the current proposer remains active. + NextProposerAddress []byte } // HeightProvider is an optional interface that execution clients can implement diff --git a/docs/.vitepress/config.ts b/docs/.vitepress/config.ts index 01bda4a8d9..0cfdf5c7ae 100644 --- a/docs/.vitepress/config.ts +++ b/docs/.vitepress/config.ts @@ -297,10 +297,6 @@ function sidebarHome() { text: "Create genesis for your chain", link: "/guides/create-genesis", }, - { - text: "Rotate proposer key", - link: "/guides/operations/proposer-key-rotation", - }, { text: "Metrics", link: "/guides/metrics", diff --git a/docs/adr/adr-023-execution-owned-proposer-rotation.md b/docs/adr/adr-023-execution-owned-proposer-rotation.md new file mode 100644 index 0000000000..c89a78412b --- /dev/null +++ b/docs/adr/adr-023-execution-owned-proposer-rotation.md @@ -0,0 +1,83 @@ +# ADR 023: Execution-Owned Proposer Rotation + +## Changelog + +- 2026-04-24: Initial ADR. + +## Status + +Proposed + +## Context + +ev-node originally selected the block proposer from genesis. That made proposer changes a consensus configuration concern and pushed key rotation into a static schedule. 
This is too rigid for EVM rollups and other execution environments where proposer selection should be governed by execution state. + +The replacement design moves proposer selection into the execution environment. ev-node remains responsible for signing, propagating, validating, and persisting blocks, but it consumes proposer updates returned by execution. + +## Decision + +`Executor.ExecuteTxs` returns an execution result containing: + +- `UpdatedStateRoot`: the state root after executing the block. +- `NextProposerAddress`: the address expected to sign the next block. + +`GetExecutionInfo` also exposes `NextProposerAddress` for startup. If execution returns an empty proposer at startup, ev-node falls back to `genesis.proposer_address`. + +An empty `NextProposerAddress` from `ExecuteTxs` means the proposer is unchanged. ev-node must not write a redundant header field in that case, preserving compatibility with existing headers and hash chains. + +When execution returns a non-empty next proposer: + +- The producing node commits it to `Header.NextProposerAddress` before signing the header. +- Syncing nodes require the signed header value to match the execution result. +- `State.NextProposerAddress` is updated and used as the expected signer for `LastBlockHeight + 1`. + +`Header.NextProposerAddress` lets header-only paths and DA envelope validation see proposer transitions without replaying execution first. The execution result remains the authority; mismatches between the signed header and execution are invalid. + +## EVM System Contract Model + +For ev-reth, proposer selection should be implemented as execution state, likely through a system contract. The contract stores the active next proposer address and exposes controlled update methods. + +The controlling address can be a multisig or security council. This keeps operational key rotation in execution state instead of requiring a new genesis file or node-side schedule. 
A future ev-reth implementation should read the contract during block execution and return the selected proposer through `ExecuteTxsResponse.next_proposer_address`. + +This ADR does not define the system contract ABI. The contract should be specified with ev-reth because access control, call routing, and predeploy/system-contract conventions are execution-environment details. + +## Security Considerations + +The security council or multisig becomes the authority for proposer updates. It must use a threshold and operational process appropriate for production signer rotation. + +The system contract must restrict writes to the configured authority. Unauthorized proposer updates are consensus-critical because they determine who can sign the next block. + +ev-node validates the execution output against the signed header. A malicious proposer cannot advertise one next proposer in the header while execution derives another. + +If the execution interface returns an empty proposer, ev-node treats the proposer as unchanged. At startup, empty execution info falls back to genesis so existing execution implementations remain usable. + +Compromise of the security council can still rotate the proposer to an attacker. This ADR reduces node configuration risk; it does not eliminate governance-key risk. + +## Consequences + +Positive: + +- Proposer rotation becomes deterministic execution state. +- EVM chains can use a system contract and multisig-controlled rotation. +- Existing chains keep working when execution returns an empty proposer. +- Header verification can follow rotations once the rotating block is known. + +Negative: + +- The execution API changes and all execution adapters must return `ExecuteResult`. +- Proposer updates become consensus-critical execution outputs. +- ev-reth needs a separate system-contract design and implementation. + +## Alternatives Considered + +Genesis proposer schedule: + +- Rejected. 
It makes rotation a static node/genesis concern and is not a good fit for security-council or multisig-controlled EVM deployments. + +Node-local proposer configuration: + +- Rejected. Nodes could disagree about the active proposer unless every operator updates configuration at the same time. + +Execution-only proposer without header commitment: + +- Rejected. Syncing nodes can replay execution, but header and DA envelope paths benefit from having the selected next proposer committed in the signed header when it changes. diff --git a/docs/adr/adr-023-proposer-key-rotation.md b/docs/adr/adr-023-proposer-key-rotation.md deleted file mode 100644 index 1c1b067982..0000000000 --- a/docs/adr/adr-023-proposer-key-rotation.md +++ /dev/null @@ -1,165 +0,0 @@ -# ADR 023: Proposer Key Rotation via Height-Based Schedule - -## Changelog - -- 2026-04-23: Implemented proposer key rotation through a height-indexed proposer schedule - -## Context - -ev-node historically treated the proposer as a single static identity embedded in genesis via `proposer_address`. -That assumption leaked into block production, DA submission, and sync validation. As a result, rotating a compromised -or operationally obsolete proposer key required out-of-band coordination and effectively behaved like a manual -re-genesis from the point of view of node operators. - -This was suboptimal for three reasons: - -1. It made proposer rotation operationally risky and easy to get wrong. -2. Fresh nodes syncing from genesis had no protocol-visible record of when the proposer changed. -3. Validation only pinned the proposer address, not the scheduled public key that should be producing blocks. - -## Alternative Approaches - -### 1. Manual key swap only - -Operators can stop the sequencer, swap the local signer, redistribute config, and restart nodes. -This is insufficient because the chain itself does not encode when the proposer changed, so historical sync -and validation become ambiguous. - -### 2. 
Re-issue a new genesis on each rotation - -This treats every proposer rotation like a chain restart: a new `chain_id`, state reset back to `initial_height`, -and existing block history discarded. It is operationally heavy, conflates upgrades with rotations, and breaks -continuity for nodes syncing historical data. - -### 3. Height-indexed proposer schedule in genesis (Chosen) - -Record proposer changes as an ordered schedule indexed by activation height. The `genesis.json` file is updated -with a new schedule entry and redistributed, but the chain keeps its `chain_id`, continues from the current -height, preserves all block history, and fresh nodes can still validate the entire chain end-to-end across -rotation boundaries. The rollout is still coordinated — every node must receive the updated `genesis.json` and -restart before the activation height — but none of the chain's state or provenance is reset. - -## Decision - -ev-node now supports proposer rotation through a `proposer_schedule` field in genesis. - -### What this is not - -This is **not** a re-genesis. Re-genesis — in the sense we mean it above — would involve issuing a new `chain_id`, -resetting height to `initial_height`, and discarding existing block history. Proposer key rotation does none of -that: the `chain_id` is unchanged, block height keeps progressing, all previous blocks remain valid, and fresh -nodes can sync the chain from genesis across any number of rotation boundaries. - -The `genesis.json` file itself is updated (a new `proposer_schedule` entry is appended) and operators must -restart every node to reload it. The file changes; the chain's state does not. - -Each entry declares: - -- `start_height` -- `address` -- `pub_key` (optional; when present, it must match `address`) - -The active proposer for block height `h` is the last entry whose `start_height <= h`. - -The legacy `proposer_address` field remains for backward compatibility. 
When no explicit schedule is present, -ev-node derives an implicit single-entry schedule beginning at `initial_height`. - -When an explicit schedule is present: - -- the first entry must start at `initial_height` -- entries must be strictly increasing by `start_height` -- if `pub_key` is present, the entry's `address` must match it -- entries without `pub_key` are interpreted by `address` only -- `proposer_address`, when present, must match the first schedule entry's `address` - -## Detailed Design - -### Data model - -Genesis gains: - -```json -"proposer_schedule": [ - { - "start_height": 1, - "address": "...", - "pub_key": "..." - }, - { - "start_height": 1250000, - "address": "..." - } -] -``` - -The existing `proposer_address` field is retained as a compatibility field and is normalized to the first -scheduled proposer when a schedule is present. - -### Validation rules - -The proposer schedule is now consulted in all proposer-sensitive paths: - -1. executor startup accepts any signer that appears somewhere in the schedule -2. block creation resolves the proposer for the exact height being produced -3. DA submission validates the configured signer against the scheduled proposer for each signed data height -4. sync validation validates incoming headers and signed data against the scheduled proposer for their heights - -This makes proposer rotation protocol-visible for both live nodes and nodes syncing historical data. - -### Operational procedure - -For a planned rotation: - -1. Choose activation height `H` -2. Add a new `proposer_schedule` entry with `start_height = H` -3. Distribute the updated genesis/config to node operators -4. Upgrade follower/full nodes before activation -5. Stop the old sequencer before `H` -6. Start the new sequencer with the replacement key at or after `H` - -The old proposer remains valid for heights `< H`, and the new proposer becomes valid at heights `>= H`. 
- -### Security considerations - -This design improves safety by allowing validation against the scheduled public key when one is pinned. -It does not solve emergency rotation authorization by itself; a future design can add a separate upgrade authority -or rotation certificate flow if the network needs signer replacement without prior static scheduling. - -### Testing - -Coverage includes: - -- genesis schedule validation and height resolution -- sync acceptance of scheduled proposer rotation -- DA submission using a rotated proposer key at the configured height -- executor block creation using the proposer scheduled for the produced height - -## Status - -Implemented - -## Consequences - -### Positive - -- proposer rotation is now part of the chain configuration rather than an operator convention -- fresh nodes can validate historical proposer changes from genesis -- sync and DA validation can pin scheduled public keys, not just addresses -- routine key rotation no longer requires a chain restart - -### Negative - -- proposer schedule changes are consensus-visible and require coordinated rollout -- operators must distribute updated genesis/config before activation height -- emergency rotation still requires prior scheduling or a later authority-based mechanism - -### Neutral - -- legacy single-proposer deployments continue to work without defining `proposer_schedule` - -## References - -- [pkg/genesis/genesis.go](../../pkg/genesis/genesis.go) -- [pkg/genesis/proposer_schedule.go](../../pkg/genesis/proposer_schedule.go) -- [block/internal/executing/executor.go](../../block/internal/executing/executor.go) -- [block/internal/syncing/assert.go](../../block/internal/syncing/assert.go) diff --git a/docs/getting-started/custom/implement-executor.md b/docs/getting-started/custom/implement-executor.md index 7a1d51886f..6e6bd11c14 100644 --- a/docs/getting-started/custom/implement-executor.md +++ b/docs/getting-started/custom/implement-executor.md @@ -6,10 +6,11 @@ The 
Executor interface is the boundary between ev-node and your execution layer. ```go type Executor interface { - InitChain(ctx context.Context, genesis Genesis) ([]byte, error) + InitChain(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) ([]byte, error) GetTxs(ctx context.Context) ([][]byte, error) - ExecuteTxs(ctx context.Context, txs [][]byte, height uint64, timestamp time.Time) (*ExecutionResult, error) + ExecuteTxs(ctx context.Context, txs [][]byte, height uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) SetFinal(ctx context.Context, height uint64) error + GetExecutionInfo(ctx context.Context) (execution.ExecutionInfo, error) } ``` @@ -95,7 +96,8 @@ func (e *MyExecutor) ExecuteTxs( txs [][]byte, height uint64, timestamp time.Time, -) (*ExecutionResult, error) + prevStateRoot []byte, +) (execution.ExecuteResult, error) ``` **Parameters:** @@ -103,17 +105,17 @@ func (e *MyExecutor) ExecuteTxs( - `txs` — Ordered transactions to execute - `height` — Block height - `timestamp` — Block timestamp +- `prevStateRoot` — Previous block's state root **Returns:** -- `ExecutionResult` containing new state root and gas used +- `execution.ExecuteResult` containing the new state root and optional next proposer address - Error only for system failures (not tx failures) **Responsibilities:** - Execute each transaction in order - Update state -- Track gas usage - Handle transaction failures gracefully - Return new state root @@ -125,30 +127,27 @@ func (e *MyExecutor) ExecuteTxs( txs [][]byte, height uint64, timestamp time.Time, -) (*ExecutionResult, error) { - var totalGas uint64 - + prevStateRoot []byte, +) (execution.ExecuteResult, error) { for _, txBytes := range txs { tx, err := DecodeTx(txBytes) if err != nil { continue // Skip invalid tx } - gas, err := e.executeTx(tx) - if err != nil { + if err := e.executeTx(tx); err != nil { // Log but continue - tx failure != block failure continue } - - totalGas += gas } 
// Commit state changes stateRoot := e.db.Commit() - return &ExecutionResult{ - StateRoot: stateRoot, - GasUsed: totalGas, + return execution.ExecuteResult{ + UpdatedStateRoot: stateRoot, + // Empty keeps the current proposer. + NextProposerAddress: nil, }, nil } ``` @@ -210,9 +209,9 @@ func TestExecuteTxs(t *testing.T) { require.NoError(t, err) // Execute - result, err := exec.ExecuteTxs(ctx, txs, 1, time.Now()) + result, err := exec.ExecuteTxs(ctx, txs, 1, time.Now(), initialStateRoot) require.NoError(t, err) - require.NotEmpty(t, result.StateRoot) + require.NotEmpty(t, result.UpdatedStateRoot) } ``` diff --git a/docs/guides/create-genesis.md b/docs/guides/create-genesis.md index 365b491b82..5886325dab 100644 --- a/docs/guides/create-genesis.md +++ b/docs/guides/create-genesis.md @@ -125,10 +125,6 @@ Before doing so, add a `da_start_height` field to the genesis file, that corresp jq '.da_start_height = 1' ~/.$CHAIN_ID/config/genesis.json > temp.json && mv temp.json ~/.$CHAIN_ID/config/genesis.json ``` -:::tip -If you want to plan a future proposer key migration without restarting the chain, define a `proposer_schedule` in your genesis and roll it out as a coordinated upgrade. See [Rotate proposer key](./operations/proposer-key-rotation.md). -::: - ## Summary By following these steps, you will set up the genesis for your chain, initialize the validator, add a genesis account, and start the chain. This guide provides a basic framework for configuring and starting your chain using the gm-world binary. Make sure you initialized your chain correctly, and use the `gmd` command for all operations. diff --git a/docs/guides/operations/proposer-key-rotation.md b/docs/guides/operations/proposer-key-rotation.md deleted file mode 100644 index 3c5667d50c..0000000000 --- a/docs/guides/operations/proposer-key-rotation.md +++ /dev/null @@ -1,186 +0,0 @@ -# Rotate proposer key - -Use this guide to rotate a sequencer proposer key without restarting the chain. 
The active proposer is selected from `proposer_schedule` in `genesis.json` based on block height. - -## Before you start - -- This is a coordinated network upgrade. Every node must run a binary that supports `proposer_schedule`. -- Every node must use the same updated `genesis.json` before the activation height. -- `ev-node` loads `genesis.json` when the node starts. Updating the file on disk is not enough; you must restart nodes after replacing it. -- The old proposer key remains valid until the block before the activation height. If the old key cannot safely produce until then, stop the sequencer and coordinate operator recovery first. - -## How proposer rotation is stored in genesis - -`proposer_address` and `proposer_schedule[].address` are base64-encoded strings in JSON. - -```json -{ - "initial_height": 1, - "proposer_address": "0FQmA4Hn9dn8m4ZpM4+fV4e8KhkWjI4V2Vt1j9Qm5pA=", - "proposer_schedule": [ - { - "start_height": 1, - "address": "0FQmA4Hn9dn8m4ZpM4+fV4e8KhkWjI4V2Vt1j9Qm5pA=" - }, - { - "start_height": 125000, - "address": "Y7z5v9mQm4Nw6mD0a2yR9kD2B0qv5iJj1Q1R7gD4B7Q=" - } - ] -} -``` - -Rules enforced by `ev-node`: - -- `proposer_schedule[0].start_height` must equal `initial_height` -- schedule entries must be strictly increasing by `start_height` -- if `proposer_address` is set, it must match the first schedule entry - -Keep all earlier schedule entries. Fresh full nodes need them to validate historical blocks. - -## 1. Pick an activation height - -Choose an activation height `H` far enough in the future that you can distribute the updated genesis and restart every non-producing node before the cutover. - -```bash -ACTIVATION_HEIGHT=125000 -GENESIS="$HOME/.evnode/config/genesis.json" -INITIAL_HEIGHT="$(jq -r '.initial_height' "$GENESIS")" -``` - -## 2. Get the current and replacement proposer public keys - -For a file-based signer, the signer public key is stored in `signer.json` as base64. 
You only put the derived address into genesis, but you still need the public key once to compute that address. - -```bash -OLD_SIGNER_DIR="$HOME/.evnode/config" -NEW_SIGNER_DIR="/secure/path/new-signer" - -OLD_PROPOSER_PUBKEY="$(jq -r '.pub_key' "$OLD_SIGNER_DIR/signer.json")" -NEW_PROPOSER_PUBKEY="$(jq -r '.pub_key' "$NEW_SIGNER_DIR/signer.json")" -``` - -If you use a KMS-backed signer, export the replacement Ed25519 public key from your signer flow and base64-encode the raw public key bytes in the same format. The runtime configuration stays the same as in the [AWS KMS signer guide](./aws-kms-signer.md). - -## 3. Derive proposer addresses from the public keys - -`ev-node` derives the proposer address as `sha256(raw_pubkey_bytes)`. The helper below prints the address in the base64 format used by `genesis.json`. - -```bash -proposer_address() { - python3 - "$1" <<'PY' -import base64 -import hashlib -import sys - -pub_key = base64.b64decode(sys.argv[1]) -address = hashlib.sha256(pub_key).digest() -print(base64.b64encode(address).decode()) -PY -} - -OLD_PROPOSER_ADDRESS="$(proposer_address "$OLD_PROPOSER_PUBKEY")" -NEW_PROPOSER_ADDRESS="$(proposer_address "$NEW_PROPOSER_PUBKEY")" -``` - -## 4. Update `genesis.json` - -### If your chain only has `proposer_address` today - -Create an explicit schedule with the current proposer at `initial_height` and the new proposer at `ACTIVATION_HEIGHT`. - -```bash -jq \ - --arg old_addr "$OLD_PROPOSER_ADDRESS" \ - --arg new_addr "$NEW_PROPOSER_ADDRESS" \ - --argjson initial_height "$INITIAL_HEIGHT" \ - --argjson activation_height "$ACTIVATION_HEIGHT" \ - ' - .proposer_address = $old_addr - | .proposer_schedule = [ - { - start_height: $initial_height, - address: $old_addr - }, - { - start_height: $activation_height, - address: $new_addr - } - ] - ' "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS" -``` - -### If your chain already has `proposer_schedule` - -Append the new entry. 
Do not replace older entries, and make sure `ACTIVATION_HEIGHT` is greater than the last scheduled `start_height`. - -```bash -jq \ - --arg new_addr "$NEW_PROPOSER_ADDRESS" \ - --argjson activation_height "$ACTIVATION_HEIGHT" \ - ' - .proposer_schedule += [ - { - start_height: $activation_height, - address: $new_addr - } - ] - ' "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS" -``` - -Verify the result before you distribute it: - -```bash -jq '{initial_height, proposer_address, proposer_schedule}' "$GENESIS" -``` - -## 5. Distribute the updated genesis and restart followers - -Copy the same `genesis.json` to every full node, replica, and failover node. Restart them after copying the file so they load the updated schedule. - -Do this before the chain reaches `ACTIVATION_HEIGHT`. - -## 6. Cut over the sequencer - -Wait until the chain reaches `ACTIVATION_HEIGHT - 1`, then stop the old sequencer and start it with the replacement signer. - -Example with a file-based signer: - -```bash -evnode start \ - --home "$HOME/.evnode" \ - --evnode.node.aggregator \ - --evnode.signer.signer_type file \ - --evnode.signer.signer_path "$NEW_SIGNER_DIR" \ - --evnode.signer.passphrase "$SIGNER_PASSPHRASE" -``` - -If you run a custom chain binary such as `gmd` or `appd`, use the same start command you already use for the sequencer and only change the signer configuration. - -## 7. Verify the first post-upgrade block - -Fetch the header at `ACTIVATION_HEIGHT` or the next produced block and confirm that it carries the new proposer address. - -```bash -curl -s http://127.0.0.1:26657/header \ - -H 'Content-Type: application/json' \ - -d "{\"jsonrpc\":\"2.0\",\"method\":\"header\",\"params\":{\"height\":\"${ACTIVATION_HEIGHT}\"},\"id\":1}" \ - | jq . -``` - -Some RPC clients render binary fields as hex instead of base64. 
If needed, convert the base64 genesis address before comparing: - -```bash -python3 - "$NEW_PROPOSER_ADDRESS" <<'PY' -import base64 -import sys - -print("0x" + base64.b64decode(sys.argv[1]).hex()) -PY -``` - -If the node at `ACTIVATION_HEIGHT` is still signed by the old key, stop block production and check three things first: - -1. every node was restarted after receiving the updated genesis -2. `proposer_schedule` contains the new entry at the intended height -3. the sequencer is actually running with the replacement signer diff --git a/docs/guides/operations/upgrades.md b/docs/guides/operations/upgrades.md index ac5f6dcbf1..0027f13c36 100644 --- a/docs/guides/operations/upgrades.md +++ b/docs/guides/operations/upgrades.md @@ -38,12 +38,6 @@ May require state migration or coordinated network upgrade. 5. Run any migration scripts 6. Restart -### Proposer Key Rotation - -Rotating the proposer key is a coordinated upgrade even when the chain does not restart. All nodes must receive the same updated `genesis.json`, restart to load it, and be ready before the scheduled activation height. - -Use [Rotate proposer key](./proposer-key-rotation.md) for the exact `proposer_schedule` format, genesis update steps, and cutover procedure. - ## ev-node Upgrades ### Check Current Version diff --git a/docs/reference/interfaces/executor.md b/docs/reference/interfaces/executor.md index 5cb0e9f8d8..31b425474d 100644 --- a/docs/reference/interfaces/executor.md +++ b/docs/reference/interfaces/executor.md @@ -8,7 +8,7 @@ The Executor interface defines how ev-node communicates with execution layers. 
I type Executor interface { InitChain(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) (stateRoot []byte, err error) GetTxs(ctx context.Context) ([][]byte, error) - ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) + ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (result ExecuteResult, err error) SetFinal(ctx context.Context, blockHeight uint64) error GetExecutionInfo(ctx context.Context) (ExecutionInfo, error) FilterTxs(ctx context.Context, txs [][]byte, maxBytes, maxGas uint64, hasForceIncludedTransaction bool) ([]FilterStatus, error) @@ -64,7 +64,7 @@ GetTxs(ctx context.Context) ([][]byte, error) Processes transactions to produce a new block state. ```go -ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) +ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (result ExecuteResult, err error) ``` **Parameters:** @@ -76,7 +76,15 @@ ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time **Returns:** -- `updatedStateRoot` - New state root after execution +- `result.UpdatedStateRoot` - New state root after execution +- `result.NextProposerAddress` - Address expected to sign the next block. Empty means the proposer is unchanged. + +```go +type ExecuteResult struct { + UpdatedStateRoot []byte + NextProposerAddress []byte +} +``` **Requirements:** @@ -115,10 +123,14 @@ GetExecutionInfo(ctx context.Context) (ExecutionInfo, error) ```go type ExecutionInfo struct { - MaxGas uint64 // Maximum gas per block (0 = no gas-based limiting) + MaxGas uint64 + NextProposerAddress []byte } ``` +- `MaxGas` - Maximum gas per block (0 = no gas-based limiting) +- `NextProposerAddress` - Execution layer's current next proposer. 
Empty at startup means ev-node falls back to `genesis.proposer_address`. + ### FilterTxs Validates and filters transactions for block inclusion. diff --git a/execution/evm/execution.go b/execution/evm/execution.go index 3067893360..ecca8bd642 100644 --- a/execution/evm/execution.go +++ b/execution/evm/execution.go @@ -344,7 +344,7 @@ func (c *EngineClient) GetTxs(ctx context.Context) ([][]byte, error) { // - Checks for already-promoted blocks to enable idempotent execution // - Saves ExecMeta with payloadID after forkchoiceUpdatedV3 for crash recovery // - Updates ExecMeta to "promoted" after successful execution -func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) { +func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { // 1. Check for idempotent execution stateRoot, payloadID, found, idempotencyErr := c.reconcileExecutionAtHeight(ctx, blockHeight, timestamp, txs) @@ -353,22 +353,26 @@ func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight // Continue execution on error, as it might be transient } else if found { if stateRoot != nil { - return stateRoot, nil + return execution.ExecuteResult{UpdatedStateRoot: stateRoot}, nil } if payloadID != nil { // Found in-progress execution, attempt to resume - return c.processPayload(ctx, *payloadID, txs) + stateRoot, err := c.processPayload(ctx, *payloadID, txs) + if err != nil { + return execution.ExecuteResult{}, err + } + return execution.ExecuteResult{UpdatedStateRoot: stateRoot}, nil } } prevBlockHash, prevHeaderStateRoot, prevGasLimit, _, err := c.getBlockInfo(ctx, blockHeight-1) if err != nil { - return nil, fmt.Errorf("failed to get block info: %w", err) + return execution.ExecuteResult{}, fmt.Errorf("failed to get block info: %w", err) } // It's possible that the 
prev state root passed in is nil if this is the first block. // If so, we can't do a comparison. Otherwise, we compare the roots. if len(prevStateRoot) > 0 && !bytes.Equal(prevStateRoot, prevHeaderStateRoot.Bytes()) { - return nil, fmt.Errorf("prevStateRoot mismatch at height %d: consensus=%x execution=%x", blockHeight-1, prevStateRoot, prevHeaderStateRoot.Bytes()) + return execution.ExecuteResult{}, fmt.Errorf("prevStateRoot mismatch at height %d: consensus=%x execution=%x", blockHeight-1, prevStateRoot, prevHeaderStateRoot.Bytes()) } // 2. Prepare payload attributes @@ -445,7 +449,7 @@ func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight return nil }, MaxPayloadStatusRetries, InitialRetryBackoff, "ExecuteTxs forkchoice") if err != nil { - return nil, err + return execution.ExecuteResult{}, err } // Save ExecMeta with payloadID for crash recovery (Stage="started") @@ -453,7 +457,11 @@ func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight c.saveExecMeta(ctx, blockHeight, timestamp.Unix(), newPayloadID[:], nil, nil, txs, ExecStageStarted) // 4. Process the payload (get, submit, finalize) - return c.processPayload(ctx, *newPayloadID, txs) + stateRoot, err = c.processPayload(ctx, *newPayloadID, txs) + if err != nil { + return execution.ExecuteResult{}, err + } + return execution.ExecuteResult{UpdatedStateRoot: stateRoot}, nil } // setHead updates the head block hash without changing safe or finalized. 
diff --git a/execution/evm/go.mod b/execution/evm/go.mod index 5a014af738..17b6831d67 100644 --- a/execution/evm/go.mod +++ b/execution/evm/go.mod @@ -2,6 +2,11 @@ module github.com/evstack/ev-node/execution/evm go 1.25.7 +replace ( + github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/core => ../../core +) + require ( github.com/ethereum/go-ethereum v1.17.2 github.com/evstack/ev-node v1.1.0 diff --git a/execution/evm/test/go.mod b/execution/evm/test/go.mod index 78b3552949..23aadab045 100644 --- a/execution/evm/test/go.mod +++ b/execution/evm/test/go.mod @@ -199,5 +199,6 @@ require ( replace ( github.com/evstack/ev-node => ../../../ + github.com/evstack/ev-node/core => ../../../core github.com/evstack/ev-node/execution/evm => ../ ) diff --git a/execution/grpc/client.go b/execution/grpc/client.go index efb9d2f840..fcc805a1c2 100644 --- a/execution/grpc/client.go +++ b/execution/grpc/client.go @@ -99,7 +99,7 @@ func (c *Client) GetTxs(ctx context.Context) ([][]byte, error) { // This method sends transactions to the execution service for processing and // returns the updated state root after execution. The execution service ensures // deterministic execution and validates the state transition. 
-func (c *Client) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) { +func (c *Client) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { req := connect.NewRequest(&pb.ExecuteTxsRequest{ Txs: txs, BlockHeight: blockHeight, @@ -109,10 +109,13 @@ func (c *Client) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint6 resp, err := c.client.ExecuteTxs(ctx, req) if err != nil { - return nil, fmt.Errorf("grpc client: failed to execute txs: %w", err) + return execution.ExecuteResult{}, fmt.Errorf("grpc client: failed to execute txs: %w", err) } - return resp.Msg.UpdatedStateRoot, nil + return execution.ExecuteResult{ + UpdatedStateRoot: resp.Msg.UpdatedStateRoot, + NextProposerAddress: resp.Msg.NextProposerAddress, + }, nil } // SetFinal marks a block as finalized at the specified height. @@ -145,7 +148,8 @@ func (c *Client) GetExecutionInfo(ctx context.Context) (execution.ExecutionInfo, } return execution.ExecutionInfo{ - MaxGas: resp.Msg.MaxGas, + MaxGas: resp.Msg.MaxGas, + NextProposerAddress: resp.Msg.NextProposerAddress, }, nil } diff --git a/execution/grpc/client_test.go b/execution/grpc/client_test.go index 59ec6416ff..3a9477823d 100644 --- a/execution/grpc/client_test.go +++ b/execution/grpc/client_test.go @@ -13,7 +13,7 @@ import ( type mockExecutor struct { initChainFunc func(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) ([]byte, error) getTxsFunc func(ctx context.Context) ([][]byte, error) - executeTxsFunc func(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) + executeTxsFunc func(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) setFinalFunc func(ctx context.Context, blockHeight uint64) 
error getExecutionInfoFunc func(ctx context.Context) (execution.ExecutionInfo, error) filterTxsFunc func(ctx context.Context, txs [][]byte, maxBytes, maxGas uint64, hasForceIncludedTransaction bool) ([]execution.FilterStatus, error) @@ -33,11 +33,11 @@ func (m *mockExecutor) GetTxs(ctx context.Context) ([][]byte, error) { return [][]byte{[]byte("tx1"), []byte("tx2")}, nil } -func (m *mockExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) { +func (m *mockExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { if m.executeTxsFunc != nil { return m.executeTxsFunc(ctx, txs, blockHeight, timestamp, prevStateRoot) } - return []byte("updated_state_root"), nil + return execution.ExecuteResult{UpdatedStateRoot: []byte("updated_state_root")}, nil } func (m *mockExecutor) SetFinal(ctx context.Context, blockHeight uint64) error { @@ -151,7 +151,7 @@ func TestClient_ExecuteTxs(t *testing.T) { expectedStateRoot := []byte("new_state_root") mockExec := &mockExecutor{ - executeTxsFunc: func(ctx context.Context, txsIn [][]byte, bh uint64, ts time.Time, psr []byte) ([]byte, error) { + executeTxsFunc: func(ctx context.Context, txsIn [][]byte, bh uint64, ts time.Time, psr []byte) (execution.ExecuteResult, error) { if len(txsIn) != len(txs) { t.Errorf("expected %d txs, got %d", len(txs), len(txsIn)) } @@ -164,7 +164,7 @@ func TestClient_ExecuteTxs(t *testing.T) { if string(psr) != string(prevStateRoot) { t.Errorf("expected prev state root %s, got %s", prevStateRoot, psr) } - return expectedStateRoot, nil + return execution.ExecuteResult{UpdatedStateRoot: expectedStateRoot}, nil }, } @@ -177,13 +177,13 @@ func TestClient_ExecuteTxs(t *testing.T) { client := NewClient(server.URL) // Test ExecuteTxs - stateRoot, err := client.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) + result, err := 
client.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) if err != nil { t.Fatalf("unexpected error: %v", err) } - if string(stateRoot) != string(expectedStateRoot) { - t.Errorf("expected state root %s, got %s", expectedStateRoot, stateRoot) + if string(result.UpdatedStateRoot) != string(expectedStateRoot) { + t.Errorf("expected state root %s, got %s", expectedStateRoot, result.UpdatedStateRoot) } } diff --git a/execution/grpc/go.mod b/execution/grpc/go.mod index 2312dd9d25..7817bb8f91 100644 --- a/execution/grpc/go.mod +++ b/execution/grpc/go.mod @@ -2,6 +2,11 @@ module github.com/evstack/ev-node/execution/grpc go 1.25.7 +replace ( + github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/core => ../../core +) + require ( connectrpc.com/connect v1.19.2 connectrpc.com/grpcreflect v1.3.0 diff --git a/execution/grpc/server.go b/execution/grpc/server.go index e0488b7655..1123d60fe7 100644 --- a/execution/grpc/server.go +++ b/execution/grpc/server.go @@ -102,7 +102,7 @@ func (s *Server) ExecuteTxs( return nil, connect.NewError(connect.CodeInvalidArgument, errors.New("prev_state_root is required")) } - updatedStateRoot, err := s.executor.ExecuteTxs( + result, err := s.executor.ExecuteTxs( ctx, req.Msg.Txs, req.Msg.BlockHeight, @@ -114,7 +114,8 @@ func (s *Server) ExecuteTxs( } return connect.NewResponse(&pb.ExecuteTxsResponse{ - UpdatedStateRoot: updatedStateRoot, + UpdatedStateRoot: result.UpdatedStateRoot, + NextProposerAddress: result.NextProposerAddress, }), nil } @@ -150,7 +151,8 @@ func (s *Server) GetExecutionInfo( } return connect.NewResponse(&pb.GetExecutionInfoResponse{ - MaxGas: info.MaxGas, + MaxGas: info.MaxGas, + NextProposerAddress: info.NextProposerAddress, }), nil } diff --git a/execution/grpc/server_test.go b/execution/grpc/server_test.go index e2a01b4bc4..559d8457b2 100644 --- a/execution/grpc/server_test.go +++ b/execution/grpc/server_test.go @@ -9,6 +9,7 @@ import ( "connectrpc.com/connect" 
"google.golang.org/protobuf/types/known/timestamppb" + "github.com/evstack/ev-node/core/execution" pb "github.com/evstack/ev-node/types/pb/evnode/v1" ) @@ -190,7 +191,7 @@ func TestServer_ExecuteTxs(t *testing.T) { tests := []struct { name string req *pb.ExecuteTxsRequest - mockFunc func(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) + mockFunc func(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) wantErr bool wantCode connect.Code }{ @@ -202,8 +203,8 @@ func TestServer_ExecuteTxs(t *testing.T) { Timestamp: timestamppb.New(timestamp), PrevStateRoot: prevStateRoot, }, - mockFunc: func(ctx context.Context, t [][]byte, bh uint64, ts time.Time, psr []byte) ([]byte, error) { - return expectedStateRoot, nil + mockFunc: func(ctx context.Context, t [][]byte, bh uint64, ts time.Time, psr []byte) (execution.ExecuteResult, error) { + return execution.ExecuteResult{UpdatedStateRoot: expectedStateRoot}, nil }, wantErr: false, }, @@ -245,8 +246,8 @@ func TestServer_ExecuteTxs(t *testing.T) { Timestamp: timestamppb.New(timestamp), PrevStateRoot: prevStateRoot, }, - mockFunc: func(ctx context.Context, t [][]byte, bh uint64, ts time.Time, psr []byte) ([]byte, error) { - return nil, errors.New("execute txs failed") + mockFunc: func(ctx context.Context, t [][]byte, bh uint64, ts time.Time, psr []byte) (execution.ExecuteResult, error) { + return execution.ExecuteResult{}, errors.New("execute txs failed") }, wantErr: true, wantCode: connect.CodeInternal, diff --git a/go.mod b/go.mod index 2b517a48af..37b153773f 100644 --- a/go.mod +++ b/go.mod @@ -51,6 +51,8 @@ require ( gotest.tools/v3 v3.5.2 ) +replace github.com/evstack/ev-node/core => ./core + require ( cloud.google.com/go v0.123.0 // indirect cloud.google.com/go/auth v0.18.2 // indirect diff --git a/node/execution_test.go b/node/execution_test.go index 91133a218d..8f75cf87b6 100644 
--- a/node/execution_test.go +++ b/node/execution_test.go @@ -98,7 +98,7 @@ func executeTransactions(t *testing.T, executor coreexecutor.Executor, ctx conte timestamp := time.Now() newStateRoot, err := executor.ExecuteTxs(ctx, txs, blockHeight, timestamp, stateRoot) require.NoError(t, err) - return newStateRoot + return newStateRoot.UpdatedStateRoot } func finalizeExecution(t *testing.T, executor coreexecutor.Executor, ctx context.Context) { diff --git a/node/failover.go b/node/failover.go index 752b6aaba3..42dac4e8bc 100644 --- a/node/failover.go +++ b/node/failover.go @@ -139,7 +139,7 @@ func setupFailoverState( headerSyncService.Store(), dataSyncService.Store(), p2pClient, - genesis.InitialProposerAddress(), + genesis.ProposerAddress, logger, nodeConfig, bestKnownHeightProvider, diff --git a/node/full.go b/node/full.go index 5d13beebbd..bd44f9ef42 100644 --- a/node/full.go +++ b/node/full.go @@ -78,7 +78,7 @@ func newFullNode( logger zerolog.Logger, nodeOpts NodeOptions, ) (fn *FullNode, err error) { - logger.Debug().Hex("address", genesis.InitialProposerAddress()).Msg("Initial proposer address") + logger.Debug().Hex("address", genesis.ProposerAddress).Msg("Proposer address") blockMetrics, _ := metricsProvider(genesis.ChainID) diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index 1cbe506e1c..e1a401d9fc 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -1,7 +1,6 @@ package genesis import ( - "bytes" "fmt" "time" ) @@ -12,11 +11,10 @@ const ChainIDFlag = "chain_id" // This genesis struct only contains the fields required by evolve. // The app state or other fields are not included here. 
type Genesis struct { - ChainID string `json:"chain_id"` - StartTime time.Time `json:"start_time"` - InitialHeight uint64 `json:"initial_height"` - ProposerAddress []byte `json:"proposer_address"` - ProposerSchedule []ProposerScheduleEntry `json:"proposer_schedule,omitempty"` + ChainID string `json:"chain_id"` + StartTime time.Time `json:"start_time"` + InitialHeight uint64 `json:"initial_height"` + ProposerAddress []byte `json:"proposer_address"` // DAStartHeight corresponds to the height at which the first DA header/data has been published. // This value is meant to be updated after genesis and shared to all syncing nodes for speeding up syncing via DA. DAStartHeight uint64 `json:"da_start_height"` @@ -58,28 +56,8 @@ func (g Genesis) Validate() error { return fmt.Errorf("start_time cannot be zero time") } - if len(g.ProposerSchedule) == 0 { - if len(g.ProposerAddress) == 0 { - return fmt.Errorf("proposer_address cannot be empty when proposer_schedule is unset") - } - } else { - if err := g.ProposerSchedule[0].validate(g.InitialHeight); err != nil { - return fmt.Errorf("invalid proposer_schedule[0]: %w", err) - } - if g.ProposerSchedule[0].StartHeight != g.InitialHeight { - return fmt.Errorf("proposer_schedule[0].start_height must equal initial_height (%d), got %d", g.InitialHeight, g.ProposerSchedule[0].StartHeight) - } - for i := 1; i < len(g.ProposerSchedule); i++ { - if err := g.ProposerSchedule[i].validate(g.InitialHeight); err != nil { - return fmt.Errorf("invalid proposer_schedule[%d]: %w", i, err) - } - if g.ProposerSchedule[i].StartHeight <= g.ProposerSchedule[i-1].StartHeight { - return fmt.Errorf("proposer_schedule must be strictly increasing: entry %d start_height %d is not greater than previous %d", i, g.ProposerSchedule[i].StartHeight, g.ProposerSchedule[i-1].StartHeight) - } - } - if len(g.ProposerAddress) > 0 && !bytes.Equal(g.ProposerAddress, g.ProposerSchedule[0].Address) { - return fmt.Errorf("proposer_address must match 
proposer_schedule[0].address") - } + if g.ProposerAddress == nil { + return fmt.Errorf("proposer_address cannot be nil") } if g.DAEpochForcedInclusion < 1 { diff --git a/pkg/genesis/genesis_test.go b/pkg/genesis/genesis_test.go index 9c850f3963..da3cc14b1f 100644 --- a/pkg/genesis/genesis_test.go +++ b/pkg/genesis/genesis_test.go @@ -1,13 +1,10 @@ package genesis import ( - "crypto/rand" "testing" "time" - "github.com/libp2p/go-libp2p/core/crypto" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestNewGenesis(t *testing.T) { @@ -138,175 +135,3 @@ func TestGenesis_Validate(t *testing.T) { }) } } - -func TestGenesis_ValidateProposerSchedule(t *testing.T) { - validTime := time.Unix(1_700_000_000, 0).UTC() - - newEntry := func(startHeight uint64) (ProposerScheduleEntry, crypto.PubKey) { - _, pub, err := crypto.GenerateEd25519Key(rand.Reader) - require.NoError(t, err) - entry, err := NewProposerScheduleEntry(startHeight, pub) - require.NoError(t, err) - return entry, pub - } - - entry1, _ := newEntry(1) - entry10, _ := newEntry(10) - entry20, _ := newEntry(20) - - tests := []struct { - name string - mutate func() Genesis - wantErr string - }{ - { - name: "valid - schedule without proposer_address", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, - DAEpochForcedInclusion: 1, - } - }, - }, - { - name: "valid - schedule with matching proposer_address", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: entry1.Address, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, - DAEpochForcedInclusion: 1, - } - }, - }, - { - name: "invalid - first entry start_height != initial_height", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 5, - ProposerSchedule: []ProposerScheduleEntry{entry10, 
entry20}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "start_height must equal initial_height", - }, - { - name: "invalid - first entry start_height below initial_height", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 5, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "start_height must be >= initial_height", - }, - { - name: "invalid - non-increasing (equal start_heights)", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry1}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "strictly increasing", - }, - { - name: "invalid - non-increasing (decreasing start_heights)", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry20, entry10}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "strictly increasing", - }, - { - name: "invalid - entry address does not match pub_key", - mutate: func() Genesis { - tampered := entry10 - tampered.Address = append([]byte(nil), entry10.Address...) 
- tampered.Address[0] ^= 0xFF - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, tampered}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "address does not match pub_key", - }, - { - name: "invalid - proposer_address mismatches schedule[0].address", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: entry10.Address, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "proposer_address must match proposer_schedule[0].address", - }, - { - name: "invalid - empty address in entry", - mutate: func() Genesis { - empty := entry10 - empty.Address = nil - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, empty}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "address cannot be empty", - }, - { - name: "invalid - malformed pub_key bytes", - mutate: func() Genesis { - bad := entry10 - bad.PubKey = []byte{0x00, 0x01, 0x02} - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, bad}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "unmarshal proposer pub_key", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.mutate().Validate() - if tt.wantErr == "" { - require.NoError(t, err) - return - } - require.Error(t, err) - require.Contains(t, err.Error(), tt.wantErr) - }) - } -} diff --git a/pkg/genesis/io.go b/pkg/genesis/io.go index dcf9048aa6..8c9d88e955 100644 --- a/pkg/genesis/io.go +++ b/pkg/genesis/io.go @@ -72,12 +72,12 @@ func LoadGenesis(genesisPath string) (Genesis, error) { return Genesis{}, err } - return genesis.normalized(), nil + return genesis, nil } // Save saves the genesis state to the specified file path. 
func (g Genesis) Save(genesisPath string) error { - genesisJSON, err := json.MarshalIndent(g.normalized(), "", " ") + genesisJSON, err := json.MarshalIndent(g, "", " ") if err != nil { return fmt.Errorf("failed to marshal genesis state: %w", err) } diff --git a/pkg/genesis/proposer_schedule.go b/pkg/genesis/proposer_schedule.go deleted file mode 100644 index c53684535a..0000000000 --- a/pkg/genesis/proposer_schedule.go +++ /dev/null @@ -1,213 +0,0 @@ -package genesis - -import ( - "bytes" - "crypto/sha256" - "fmt" - - "github.com/libp2p/go-libp2p/core/crypto" -) - -// ProposerScheduleEntry declares the proposer address that becomes active at start_height. -// PubKey is optional and can be used to pin the exact key material for a schedule entry. -type ProposerScheduleEntry struct { - StartHeight uint64 `json:"start_height"` - Address []byte `json:"address"` - PubKey []byte `json:"pub_key,omitempty"` -} - -// NewProposerScheduleEntry creates a proposer schedule entry from a libp2p public key. -func NewProposerScheduleEntry(startHeight uint64, pubKey crypto.PubKey) (ProposerScheduleEntry, error) { - if pubKey == nil { - return ProposerScheduleEntry{}, fmt.Errorf("proposer pub_key cannot be nil") - } - - marshalledPubKey, err := crypto.MarshalPublicKey(pubKey) - if err != nil { - return ProposerScheduleEntry{}, fmt.Errorf("marshal proposer pub_key: %w", err) - } - - return ProposerScheduleEntry{ - StartHeight: startHeight, - Address: proposerKeyAddress(pubKey), - PubKey: marshalledPubKey, - }, nil -} - -// PublicKey unmarshals the configured proposer public key. Address-only schedule -// entries may omit the pubkey and will return nil, nil here. 
-func (e ProposerScheduleEntry) PublicKey() (crypto.PubKey, error) { - if len(e.PubKey) == 0 { - return nil, nil - } - - pubKey, err := crypto.UnmarshalPublicKey(e.PubKey) - if err != nil { - return nil, fmt.Errorf("unmarshal proposer pub_key: %w", err) - } - - return pubKey, nil -} - -func (e ProposerScheduleEntry) validate(initialHeight uint64) error { - if e.StartHeight < initialHeight { - return fmt.Errorf("proposer schedule start_height must be >= initial_height (%d), got %d", initialHeight, e.StartHeight) - } - - if len(e.Address) == 0 { - return fmt.Errorf("proposer schedule address cannot be empty") - } - - if len(e.PubKey) == 0 { - return nil - } - - pubKey, err := e.PublicKey() - if err != nil { - return err - } - - expectedAddress := proposerKeyAddress(pubKey) - if !bytes.Equal(expectedAddress, e.Address) { - return fmt.Errorf("proposer schedule address does not match pub_key: got %x, expected %x", e.Address, expectedAddress) - } - - return nil -} - -// EffectiveProposerSchedule returns the explicit proposer schedule when present, -// or derives a legacy single-entry schedule from proposer_address. -func (g Genesis) EffectiveProposerSchedule() []ProposerScheduleEntry { - if len(g.ProposerSchedule) > 0 { - out := make([]ProposerScheduleEntry, len(g.ProposerSchedule)) - for i, entry := range g.ProposerSchedule { - out[i] = ProposerScheduleEntry{ - StartHeight: entry.StartHeight, - Address: bytes.Clone(entry.Address), - PubKey: bytes.Clone(entry.PubKey), - } - } - return out - } - - if len(g.ProposerAddress) == 0 { - return nil - } - - return []ProposerScheduleEntry{{ - StartHeight: g.InitialHeight, - Address: bytes.Clone(g.ProposerAddress), - }} -} - -// InitialProposerAddress returns the first proposer address for compatibility -// with code paths that still surface a single address externally. 
-func (g Genesis) InitialProposerAddress() []byte { - entry, err := g.ProposerAtHeight(g.InitialHeight) - if err != nil { - return nil - } - - return bytes.Clone(entry.Address) -} - -func (g Genesis) normalized() Genesis { - normalized := g - if len(normalized.ProposerAddress) == 0 { - normalized.ProposerAddress = normalized.InitialProposerAddress() - } - return normalized -} - -// HasScheduledProposer reports whether the address appears in the effective proposer schedule. -func (g Genesis) HasScheduledProposer(address []byte) bool { - for _, entry := range g.EffectiveProposerSchedule() { - if bytes.Equal(entry.Address, address) { - return true - } - } - return false -} - -// ProposerAtHeight resolves the proposer that is active for the given block height. -func (g Genesis) ProposerAtHeight(height uint64) (ProposerScheduleEntry, error) { - schedule := g.EffectiveProposerSchedule() - if len(schedule) == 0 { - return ProposerScheduleEntry{}, fmt.Errorf("no proposer configured") - } - - if height < schedule[0].StartHeight { - return ProposerScheduleEntry{}, fmt.Errorf("no proposer configured for height %d before start_height %d", height, schedule[0].StartHeight) - } - - entry := schedule[0] - for i := 1; i < len(schedule); i++ { - if height < schedule[i].StartHeight { - break - } - entry = schedule[i] - } - - return ProposerScheduleEntry{ - StartHeight: entry.StartHeight, - Address: bytes.Clone(entry.Address), - PubKey: bytes.Clone(entry.PubKey), - }, nil -} - -// ValidateProposer checks that the provided proposer address and public key match -// the proposer schedule entry active at the given height. 
-func (g Genesis) ValidateProposer(height uint64, address []byte, pubKey crypto.PubKey) error { - entry, err := g.ProposerAtHeight(height) - if err != nil { - return err - } - - if !bytes.Equal(entry.Address, address) { - return fmt.Errorf("unexpected proposer at height %d: got %x, expected %x", height, address, entry.Address) - } - - if len(entry.PubKey) == 0 { - // Address-only schedule entry. Without a pinned pubkey we still - // have to bind the caller-provided pubkey to the scheduled - // address, otherwise a forger can pair the scheduled address - // with an arbitrary key and later satisfy signature checks that - // trust Signer.PubKey. - if pubKey != nil { - derived := proposerKeyAddress(pubKey) - if !bytes.Equal(entry.Address, derived) { - return fmt.Errorf("proposer pub_key does not match scheduled address at height %d", height) - } - } - return nil - } - - if pubKey == nil { - return fmt.Errorf("missing proposer pub_key at height %d", height) - } - - marshalledPubKey, err := crypto.MarshalPublicKey(pubKey) - if err != nil { - return fmt.Errorf("marshal proposer pub_key: %w", err) - } - - if !bytes.Equal(entry.PubKey, marshalledPubKey) { - return fmt.Errorf("unexpected proposer pub_key at height %d", height) - } - - return nil -} - -func proposerKeyAddress(pubKey crypto.PubKey) []byte { - if pubKey == nil { - return nil - } - - raw, err := pubKey.Raw() - if err != nil { - return nil - } - - sum := sha256.Sum256(raw) - return sum[:] -} diff --git a/pkg/genesis/proposer_schedule_test.go b/pkg/genesis/proposer_schedule_test.go deleted file mode 100644 index 88f87590ea..0000000000 --- a/pkg/genesis/proposer_schedule_test.go +++ /dev/null @@ -1,341 +0,0 @@ -package genesis - -import ( - "bytes" - "crypto/rand" - "encoding/json" - "os" - "path/filepath" - "testing" - "time" - - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/stretchr/testify/require" - - "github.com/evstack/ev-node/pkg/signer/noop" -) - -// testGenesisStartTime is a fixed timestamp for 
genesis fixtures so tests do -// not depend on wall-clock time. -var testGenesisStartTime = time.Unix(1_700_000_000, 0).UTC() - -func makeProposerScheduleEntry(t *testing.T, startHeight uint64) (ProposerScheduleEntry, crypto.PubKey) { - t.Helper() - - _, pubKey, err := crypto.GenerateEd25519Key(rand.Reader) - require.NoError(t, err) - - entry, err := NewProposerScheduleEntry(startHeight, pubKey) - require.NoError(t, err) - - return entry, pubKey -} - -func TestGenesisProposerAtHeight(t *testing.T) { - entry1, _ := makeProposerScheduleEntry(t, 3) - entry2, _ := makeProposerScheduleEntry(t, 10) - - genesis := Genesis{ - ChainID: "test-chain", - StartTime: testGenesisStartTime, - InitialHeight: 3, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - require.NoError(t, genesis.Validate()) - - proposer, err := genesis.ProposerAtHeight(3) - require.NoError(t, err) - require.Equal(t, entry1.Address, proposer.Address) - - proposer, err = genesis.ProposerAtHeight(9) - require.NoError(t, err) - require.Equal(t, entry1.Address, proposer.Address) - - proposer, err = genesis.ProposerAtHeight(10) - require.NoError(t, err) - require.Equal(t, entry2.Address, proposer.Address) -} - -func TestGenesisValidateProposerScheduleWithPinnedPubKey(t *testing.T) { - entry1, pubKey1 := makeProposerScheduleEntry(t, 1) - entry2, pubKey2 := makeProposerScheduleEntry(t, 20) - - genesis := Genesis{ - ChainID: "test-chain", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - require.NoError(t, genesis.Validate()) - require.NoError(t, genesis.ValidateProposer(1, entry1.Address, pubKey1)) - require.NoError(t, genesis.ValidateProposer(21, entry2.Address, pubKey2)) - require.Error(t, genesis.ValidateProposer(21, entry2.Address, pubKey1)) -} - -func TestGenesisValidateAddressOnlyProposerSchedule(t *testing.T) { - entry1, pubKey1 := makeProposerScheduleEntry(t, 
1) - entry2, pubKey2 := makeProposerScheduleEntry(t, 20) - entry1.PubKey = nil - entry2.PubKey = nil - - genesis := Genesis{ - ChainID: "test-chain", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - require.NoError(t, genesis.Validate()) - require.NoError(t, genesis.ValidateProposer(1, entry1.Address, pubKey1)) - require.NoError(t, genesis.ValidateProposer(21, entry2.Address, pubKey2)) -} - -func TestNewProposerScheduleEntry_NilPubKey(t *testing.T) { - _, err := NewProposerScheduleEntry(1, nil) - require.Error(t, err) -} - -func TestProposerAtHeight_BeforeFirstStartHeight(t *testing.T) { - entry, _ := makeProposerScheduleEntry(t, 5) - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 5, - ProposerSchedule: []ProposerScheduleEntry{entry}, - DAEpochForcedInclusion: 1, - } - - _, err := genesis.ProposerAtHeight(4) - require.Error(t, err) - require.Contains(t, err.Error(), "before start_height") -} - -func TestProposerAtHeight_NoProposerConfigured(t *testing.T) { - genesis := Genesis{ChainID: "c", InitialHeight: 1} - _, err := genesis.ProposerAtHeight(1) - require.Error(t, err) - require.Contains(t, err.Error(), "no proposer configured") -} - -func TestProposerAtHeight_ReturnedEntryIsCopy(t *testing.T) { - entry, _ := makeProposerScheduleEntry(t, 1) - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry}, - DAEpochForcedInclusion: 1, - } - - got, err := genesis.ProposerAtHeight(1) - require.NoError(t, err) - got.Address[0] ^= 0xFF - got.PubKey[0] ^= 0xFF - - same, err := genesis.ProposerAtHeight(1) - require.NoError(t, err) - require.Equal(t, entry.Address, same.Address) - require.Equal(t, entry.PubKey, same.PubKey) -} - -func TestValidateProposer_WrongAddress(t *testing.T) { - entry, pubKey := makeProposerScheduleEntry(t, 1) - other, _ := 
makeProposerScheduleEntry(t, 1) - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry}, - DAEpochForcedInclusion: 1, - } - - err := genesis.ValidateProposer(1, other.Address, pubKey) - require.Error(t, err) - require.Contains(t, err.Error(), "unexpected proposer at height 1") -} - -func TestValidateProposer_MissingPubKey(t *testing.T) { - entry, _ := makeProposerScheduleEntry(t, 1) - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry}, - DAEpochForcedInclusion: 1, - } - - err := genesis.ValidateProposer(1, entry.Address, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "missing proposer pub_key") -} - -// TestValidateProposer_AddressOnly_RejectsForgedPubKey ensures that an address-only -// schedule entry still binds the caller-provided pubkey to the scheduled address. -// Without this check, a forger could claim Signer.Address = scheduled_addr with an -// arbitrary Signer.PubKey and later pass signature validation that trusts that pubkey. -func TestValidateProposer_AddressOnly_RejectsForgedPubKey(t *testing.T) { - scheduled, _ := makeProposerScheduleEntry(t, 1) - _, attackerPub := makeProposerScheduleEntry(t, 1) - - scheduled.PubKey = nil // address-only entry - - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{scheduled}, - DAEpochForcedInclusion: 1, - } - - // Scheduled address paired with a different pubkey must be rejected. 
- err := genesis.ValidateProposer(1, scheduled.Address, attackerPub) - require.Error(t, err) - require.Contains(t, err.Error(), "does not match scheduled address") -} - -func TestValidateProposer_UsesActiveEntryAtHeight(t *testing.T) { - entry1, pub1 := makeProposerScheduleEntry(t, 1) - entry2, pub2 := makeProposerScheduleEntry(t, 10) - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - // entry2 signer trying to sign height within entry1's active range must fail. - require.Error(t, genesis.ValidateProposer(9, entry2.Address, pub2)) - // entry1 signer trying to sign height within entry2's active range must fail. - require.Error(t, genesis.ValidateProposer(10, entry1.Address, pub1)) -} - -func TestHasScheduledProposer(t *testing.T) { - entry1, _ := makeProposerScheduleEntry(t, 1) - entry2, _ := makeProposerScheduleEntry(t, 10) - unknown, _ := makeProposerScheduleEntry(t, 99) - - explicit := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - require.True(t, explicit.HasScheduledProposer(entry1.Address)) - require.True(t, explicit.HasScheduledProposer(entry2.Address)) - require.False(t, explicit.HasScheduledProposer(unknown.Address)) - - legacy := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerAddress: entry1.Address, - DAEpochForcedInclusion: 1, - } - require.True(t, legacy.HasScheduledProposer(entry1.Address)) - require.False(t, legacy.HasScheduledProposer(entry2.Address)) - - empty := Genesis{ChainID: "c", InitialHeight: 1} - require.False(t, empty.HasScheduledProposer(entry1.Address)) -} - -func TestEffectiveProposerSchedule_ExplicitScheduleIsDeepCopy(t *testing.T) { - entry1, _ := makeProposerScheduleEntry(t, 1) - entry2, _ := makeProposerScheduleEntry(t, 10) - 
origAddr := bytes.Clone(entry1.Address) - origPub := bytes.Clone(entry1.PubKey) - - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - // Mutating returned byte slices must not corrupt the genesis-backed data. - got := genesis.EffectiveProposerSchedule() - got[0].Address[0] ^= 0xFF - got[0].PubKey[0] ^= 0xFF - - require.Equal(t, origAddr, genesis.ProposerSchedule[0].Address) - require.Equal(t, origPub, genesis.ProposerSchedule[0].PubKey) -} - -func TestEffectiveProposerSchedule_LegacyFallback(t *testing.T) { - addr := []byte("some-address-bytes") - origAddr := bytes.Clone(addr) - legacy := Genesis{ - ChainID: "c", - InitialHeight: 7, - ProposerAddress: addr, - } - schedule := legacy.EffectiveProposerSchedule() - require.Len(t, schedule, 1) - require.Equal(t, uint64(7), schedule[0].StartHeight) - require.Equal(t, addr, schedule[0].Address) - require.Empty(t, schedule[0].PubKey) - - // mutating the derived slice must not affect the genesis backing data. - schedule[0].Address[0] ^= 0xFF - require.Equal(t, origAddr, legacy.ProposerAddress) -} - -func TestEffectiveProposerSchedule_Empty(t *testing.T) { - require.Nil(t, Genesis{}.EffectiveProposerSchedule()) -} - -func TestInitialProposerAddress_EmptyGenesisReturnsNil(t *testing.T) { - require.Nil(t, Genesis{InitialHeight: 1}.InitialProposerAddress()) -} - -// TestProposerKeyAddressMatchesSignerGetAddress pins the invariant that the -// genesis-side address derivation matches the signer implementations. If a -// signer ever changes its address formula this test will fail and flag the -// break instead of silently producing rejected blocks after a key rotation. 
-func TestProposerKeyAddressMatchesSignerGetAddress(t *testing.T) { - priv, pub, err := crypto.GenerateEd25519Key(rand.Reader) - require.NoError(t, err) - - s, err := noop.NewNoopSigner(priv) - require.NoError(t, err) - - signerAddr, err := s.GetAddress() - require.NoError(t, err) - - genesisAddr := proposerKeyAddress(pub) - require.Equal(t, signerAddr, genesisAddr) - - entry, err := NewProposerScheduleEntry(1, pub) - require.NoError(t, err) - require.Equal(t, signerAddr, entry.Address) -} - -func TestLoadGenesisNormalizesLegacyProposerAddressFromSchedule(t *testing.T) { - entry1, _ := makeProposerScheduleEntry(t, 1) - entry2, _ := makeProposerScheduleEntry(t, 50) - - rawGenesis := Genesis{ - ChainID: "test-chain", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - genesisPath := filepath.Join(t.TempDir(), "genesis.json") - genesisJSON, err := json.Marshal(rawGenesis) - require.NoError(t, err) - require.NoError(t, os.WriteFile(genesisPath, genesisJSON, 0o600)) - - loaded, err := LoadGenesis(genesisPath) - require.NoError(t, err) - require.Equal(t, entry1.Address, loaded.ProposerAddress) - require.Equal(t, rawGenesis.ProposerSchedule, loaded.ProposerSchedule) -} diff --git a/pkg/rpc/server/server.go b/pkg/rpc/server/server.go index 419f8b6631..24eb133124 100644 --- a/pkg/rpc/server/server.go +++ b/pkg/rpc/server/server.go @@ -142,11 +142,12 @@ func (s *StoreServer) GetState( // Convert state to protobuf type pbState := &pb.State{ - AppHash: state.AppHash, - LastBlockHeight: state.LastBlockHeight, - LastBlockTime: timestamppb.New(state.LastBlockTime), - DaHeight: state.DAHeight, - ChainId: state.ChainID, + AppHash: state.AppHash, + LastBlockHeight: state.LastBlockHeight, + LastBlockTime: timestamppb.New(state.LastBlockTime), + DaHeight: state.DAHeight, + ChainId: state.ChainID, + NextProposerAddress: state.NextProposerAddress, Version: &pb.Version{ Block: 
state.Version.Block, App: state.Version.App, diff --git a/pkg/telemetry/executor_tracing.go b/pkg/telemetry/executor_tracing.go index 0f5507e04f..365ae7dd14 100644 --- a/pkg/telemetry/executor_tracing.go +++ b/pkg/telemetry/executor_tracing.go @@ -2,6 +2,7 @@ package telemetry import ( "context" + "encoding/hex" "time" "go.opentelemetry.io/otel" @@ -58,7 +59,7 @@ func (t *tracedExecutor) GetTxs(ctx context.Context) ([][]byte, error) { return txs, err } -func (t *tracedExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) { +func (t *tracedExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { ctx, span := t.tracer.Start(ctx, "Executor.ExecuteTxs", trace.WithAttributes( attribute.Int("tx.count", len(txs)), @@ -68,12 +69,14 @@ func (t *tracedExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeig ) defer span.End() - stateRoot, err := t.inner.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) + result, err := t.inner.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) if err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) + } else if len(result.NextProposerAddress) > 0 { + span.SetAttributes(attribute.String("next_proposer_address", hex.EncodeToString(result.NextProposerAddress))) } - return stateRoot, err + return result, err } func (t *tracedExecutor) SetFinal(ctx context.Context, blockHeight uint64) error { diff --git a/pkg/telemetry/executor_tracing_test.go b/pkg/telemetry/executor_tracing_test.go index e53c8919af..a1715c928f 100644 --- a/pkg/telemetry/executor_tracing_test.go +++ b/pkg/telemetry/executor_tracing_test.go @@ -183,10 +183,10 @@ func TestWithTracingExecutor_ExecuteTxs_Success(t *testing.T) { ExecuteTxs(mock.Anything, txs, blockHeight, timestamp, prevStateRoot). 
Return(expectedStateRoot, nil) - stateRoot, err := traced.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) + result, err := traced.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) require.NoError(t, err) - require.Equal(t, expectedStateRoot, stateRoot) + require.Equal(t, expectedStateRoot, result.UpdatedStateRoot) // verify span spans := sr.Ended() diff --git a/proto/evnode/v1/evnode.proto b/proto/evnode/v1/evnode.proto index e60bd56e0d..0002f818f6 100644 --- a/proto/evnode/v1/evnode.proto +++ b/proto/evnode/v1/evnode.proto @@ -38,6 +38,8 @@ message Header { bytes validator_hash = 11; // Chain ID the block belongs to string chain_id = 12; + // Proposer address selected by this block's execution result for the next block. + bytes next_proposer_address = 13; reserved 5, 7, 9; } diff --git a/proto/evnode/v1/execution.proto b/proto/evnode/v1/execution.proto index a3abbea36a..13d19db336 100644 --- a/proto/evnode/v1/execution.proto +++ b/proto/evnode/v1/execution.proto @@ -77,6 +77,10 @@ message ExecuteTxsResponse { // Maximum allowed transaction size (may change with protocol updates) uint64 max_bytes = 2; + + // Proposer address that should sign the next block. + // Empty means the current proposer remains active. + bytes next_proposer_address = 3; } // SetFinalRequest marks a block as finalized @@ -98,6 +102,10 @@ message GetExecutionInfoResponse { // Maximum gas allowed for transactions in a block // For non-gas-based execution layers, this should be 0 uint64 max_gas = 1; + + // Proposer address that should sign the next block from the execution + // layer's current view. Empty means unchanged or unavailable. 
+ bytes next_proposer_address = 2; } // FilterStatus represents the result of filtering a transaction diff --git a/proto/evnode/v1/state.proto b/proto/evnode/v1/state.proto index 1e8f35422d..7788c0123e 100644 --- a/proto/evnode/v1/state.proto +++ b/proto/evnode/v1/state.proto @@ -16,6 +16,7 @@ message State { uint64 da_height = 6; bytes app_hash = 8; bytes last_header_hash = 9; + bytes next_proposer_address = 10; reserved 7; } diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 9ffb941fe7..6d58d40d17 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -23,6 +23,7 @@ require ( replace ( github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/core => ../../core github.com/evstack/ev-node/execution/evm => ../../execution/evm github.com/evstack/ev-node/execution/evm/test => ../../execution/evm/test ) diff --git a/test/mocks/execution.go b/test/mocks/execution.go index 706e556291..8c973524e7 100644 --- a/test/mocks/execution.go +++ b/test/mocks/execution.go @@ -40,23 +40,29 @@ func (_m *MockExecutor) EXPECT() *MockExecutor_Expecter { } // ExecuteTxs provides a mock function for the type MockExecutor -func (_mock *MockExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) { +func (_mock *MockExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { ret := _mock.Called(ctx, txs, blockHeight, timestamp, prevStateRoot) if len(ret) == 0 { panic("no return value specified for ExecuteTxs") } - var r0 []byte + var r0 execution.ExecuteResult var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, [][]byte, uint64, time.Time, []byte) ([]byte, error)); ok { + if returnFunc, ok := ret.Get(0).(func(context.Context, [][]byte, uint64, time.Time, []byte) (execution.ExecuteResult, error)); ok { return returnFunc(ctx, txs, blockHeight, timestamp, prevStateRoot) } - if returnFunc, ok := 
ret.Get(0).(func(context.Context, [][]byte, uint64, time.Time, []byte) []byte); ok { + if returnFunc, ok := ret.Get(0).(func(context.Context, [][]byte, uint64, time.Time, []byte) execution.ExecuteResult); ok { r0 = returnFunc(ctx, txs, blockHeight, timestamp, prevStateRoot) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) + switch result := ret.Get(0).(type) { + case nil: + case execution.ExecuteResult: + r0 = result + case []byte: + r0 = execution.ExecuteResult{UpdatedStateRoot: result} + default: + r0 = ret.Get(0).(execution.ExecuteResult) } } if returnFunc, ok := ret.Get(1).(func(context.Context, [][]byte, uint64, time.Time, []byte) error); ok { @@ -115,12 +121,12 @@ func (_c *MockExecutor_ExecuteTxs_Call) Run(run func(ctx context.Context, txs [] return _c } -func (_c *MockExecutor_ExecuteTxs_Call) Return(updatedStateRoot []byte, err error) *MockExecutor_ExecuteTxs_Call { - _c.Call.Return(updatedStateRoot, err) +func (_c *MockExecutor_ExecuteTxs_Call) Return(result interface{}, err error) *MockExecutor_ExecuteTxs_Call { + _c.Call.Return(result, err) return _c } -func (_c *MockExecutor_ExecuteTxs_Call) RunAndReturn(run func(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error)) *MockExecutor_ExecuteTxs_Call { +func (_c *MockExecutor_ExecuteTxs_Call) RunAndReturn(run func(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error)) *MockExecutor_ExecuteTxs_Call { _c.Call.Return(run) return _c } @@ -213,6 +219,20 @@ func (_c *MockExecutor_FilterTxs_Call) RunAndReturn(run func(ctx context.Context // GetExecutionInfo provides a mock function for the type MockExecutor func (_mock *MockExecutor) GetExecutionInfo(ctx context.Context) (execution.ExecutionInfo, error) { + if len(_mock.ExpectedCalls) == 0 { + return execution.ExecutionInfo{}, nil + } + hasExpectation := false + for _, call := range _mock.ExpectedCalls { + if 
call.Method == "GetExecutionInfo" { + hasExpectation = true + break + } + } + if !hasExpectation { + return execution.ExecutionInfo{}, nil + } + ret := _mock.Called(ctx) if len(ret) == 0 { diff --git a/test/mocks/height_aware_executor.go b/test/mocks/height_aware_executor.go index 354534c484..9e512d291b 100644 --- a/test/mocks/height_aware_executor.go +++ b/test/mocks/height_aware_executor.go @@ -44,9 +44,18 @@ func (m *MockHeightAwareExecutor) GetTxs(ctx context.Context) ([][]byte, error) } // ExecuteTxs implements the Executor interface. -func (m *MockHeightAwareExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) { +func (m *MockHeightAwareExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { args := m.Called(ctx, txs, blockHeight, timestamp, prevStateRoot) - return args.Get(0).([]byte), args.Error(1) + switch result := args.Get(0).(type) { + case nil: + return execution.ExecuteResult{}, args.Error(1) + case execution.ExecuteResult: + return result, args.Error(1) + case []byte: + return execution.ExecuteResult{UpdatedStateRoot: result}, args.Error(1) + default: + return args.Get(0).(execution.ExecuteResult), args.Error(1) + } } // SetFinal implements the Executor interface. @@ -63,6 +72,20 @@ func (m *MockHeightAwareExecutor) GetLatestHeight(ctx context.Context) (uint64, // GetExecutionInfo implements the Executor interface. 
func (m *MockHeightAwareExecutor) GetExecutionInfo(ctx context.Context) (execution.ExecutionInfo, error) { + if len(m.ExpectedCalls) == 0 { + return execution.ExecutionInfo{}, nil + } + hasExpectation := false + for _, call := range m.ExpectedCalls { + if call.Method == "GetExecutionInfo" { + hasExpectation = true + break + } + } + if !hasExpectation { + return execution.ExecutionInfo{}, nil + } + args := m.Called(ctx) return args.Get(0).(execution.ExecutionInfo), args.Error(1) } diff --git a/types/header.go b/types/header.go index 2b5e2881b9..7beb9a9728 100644 --- a/types/header.go +++ b/types/header.go @@ -82,6 +82,11 @@ type Header struct { // pubkey can't be recovered by the signature (e.g. ed25519). ProposerAddress []byte // original proposer of the block + // NextProposerAddress is selected by executing this block and becomes the + // proposer expected for the next block. Empty means the current proposer + // remains active. + NextProposerAddress []byte + // Legacy holds fields that were removed from the canonical header JSON/Go // representation but may still be required for backwards compatible binary // serialization (e.g. legacy signing payloads). @@ -124,11 +129,15 @@ func (h *Header) Time() time.Time { // Verify verifies the header. 
func (h *Header) Verify(untrstH *Header) error { - if !bytes.Equal(untrstH.ProposerAddress, h.ProposerAddress) { + expectedProposer := h.ProposerAddress + if len(h.NextProposerAddress) > 0 { + expectedProposer = h.NextProposerAddress + } + if !bytes.Equal(untrstH.ProposerAddress, expectedProposer) { return &header.VerifyError{ Reason: fmt.Errorf("%w: expected proposer (%X) got (%X)", ErrProposerVerificationFailed, - h.ProposerAddress, + expectedProposer, untrstH.ProposerAddress, ), } @@ -270,6 +279,7 @@ func (h Header) Clone() Header { clone.AppHash = cloneBytes(h.AppHash) clone.ValidatorHash = cloneBytes(h.ValidatorHash) clone.ProposerAddress = cloneBytes(h.ProposerAddress) + clone.NextProposerAddress = cloneBytes(h.NextProposerAddress) clone.Legacy = h.Legacy.Clone() clone.cachedHash = nil diff --git a/types/pb/evnode/v1/evnode.pb.go b/types/pb/evnode/v1/evnode.pb.go index b0a866e76e..55ab34b0ff 100644 --- a/types/pb/evnode/v1/evnode.pb.go +++ b/types/pb/evnode/v1/evnode.pb.go @@ -102,9 +102,11 @@ type Header struct { // validatorhash for compatibility with tendermint light client. ValidatorHash []byte `protobuf:"bytes,11,opt,name=validator_hash,json=validatorHash,proto3" json:"validator_hash,omitempty"` // Chain ID the block belongs to - ChainId string `protobuf:"bytes,12,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + ChainId string `protobuf:"bytes,12,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Proposer address selected by this block's execution result for the next block. 
+ NextProposerAddress []byte `protobuf:"bytes,13,opt,name=next_proposer_address,json=nextProposerAddress,proto3" json:"next_proposer_address,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Header) Reset() { @@ -200,6 +202,13 @@ func (x *Header) GetChainId() string { return "" } +func (x *Header) GetNextProposerAddress() []byte { + if x != nil { + return x.NextProposerAddress + } + return nil +} + // SignedHeader is a header with a signature and a signer. type SignedHeader struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -792,7 +801,7 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\x16evnode/v1/evnode.proto\x12\tevnode.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"1\n" + "\aVersion\x12\x14\n" + "\x05block\x18\x01 \x01(\x04R\x05block\x12\x10\n" + - "\x03app\x18\x02 \x01(\x04R\x03app\"\xc3\x02\n" + + "\x03app\x18\x02 \x01(\x04R\x03app\"\xf7\x02\n" + "\x06Header\x12,\n" + "\aversion\x18\x01 \x01(\v2\x12.evnode.v1.VersionR\aversion\x12\x16\n" + "\x06height\x18\x02 \x01(\x04R\x06height\x12\x12\n" + @@ -803,7 +812,8 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\x10proposer_address\x18\n" + " \x01(\fR\x0fproposerAddress\x12%\n" + "\x0evalidator_hash\x18\v \x01(\fR\rvalidatorHash\x12\x19\n" + - "\bchain_id\x18\f \x01(\tR\achainIdJ\x04\b\x05\x10\x06J\x04\b\a\x10\bJ\x04\b\t\x10\n" + + "\bchain_id\x18\f \x01(\tR\achainId\x122\n" + + "\x15next_proposer_address\x18\r \x01(\fR\x13nextProposerAddressJ\x04\b\x05\x10\x06J\x04\b\a\x10\bJ\x04\b\t\x10\n" + "\"\x88\x01\n" + "\fSignedHeader\x12)\n" + "\x06header\x18\x01 \x01(\v2\x11.evnode.v1.HeaderR\x06header\x12\x1c\n" + diff --git a/types/pb/evnode/v1/execution.pb.go b/types/pb/evnode/v1/execution.pb.go index 2b33c910d2..86d2ae8031 100644 --- a/types/pb/evnode/v1/execution.pb.go +++ b/types/pb/evnode/v1/execution.pb.go @@ -347,9 +347,12 @@ type ExecuteTxsResponse struct { // New state root after executing transactions UpdatedStateRoot []byte 
`protobuf:"bytes,1,opt,name=updated_state_root,json=updatedStateRoot,proto3" json:"updated_state_root,omitempty"` // Maximum allowed transaction size (may change with protocol updates) - MaxBytes uint64 `protobuf:"varint,2,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + MaxBytes uint64 `protobuf:"varint,2,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` + // Proposer address that should sign the next block. + // Empty means the current proposer remains active. + NextProposerAddress []byte `protobuf:"bytes,3,opt,name=next_proposer_address,json=nextProposerAddress,proto3" json:"next_proposer_address,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExecuteTxsResponse) Reset() { @@ -396,6 +399,13 @@ func (x *ExecuteTxsResponse) GetMaxBytes() uint64 { return 0 } +func (x *ExecuteTxsResponse) GetNextProposerAddress() []byte { + if x != nil { + return x.NextProposerAddress + } + return nil +} + // SetFinalRequest marks a block as finalized type SetFinalRequest struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -521,9 +531,12 @@ type GetExecutionInfoResponse struct { state protoimpl.MessageState `protogen:"open.v1"` // Maximum gas allowed for transactions in a block // For non-gas-based execution layers, this should be 0 - MaxGas uint64 `protobuf:"varint,1,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + MaxGas uint64 `protobuf:"varint,1,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` + // Proposer address that should sign the next block from the execution + // layer's current view. Empty means unchanged or unavailable. 
+ NextProposerAddress []byte `protobuf:"bytes,2,opt,name=next_proposer_address,json=nextProposerAddress,proto3" json:"next_proposer_address,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetExecutionInfoResponse) Reset() { @@ -563,6 +576,13 @@ func (x *GetExecutionInfoResponse) GetMaxGas() uint64 { return 0 } +func (x *GetExecutionInfoResponse) GetNextProposerAddress() []byte { + if x != nil { + return x.NextProposerAddress + } + return nil +} + // FilterTxsRequest contains transactions to validate and filter type FilterTxsRequest struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -701,16 +721,18 @@ const file_evnode_v1_execution_proto_rawDesc = "" + "\x03txs\x18\x01 \x03(\fR\x03txs\x12!\n" + "\fblock_height\x18\x02 \x01(\x04R\vblockHeight\x128\n" + "\ttimestamp\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12&\n" + - "\x0fprev_state_root\x18\x04 \x01(\fR\rprevStateRoot\"_\n" + + "\x0fprev_state_root\x18\x04 \x01(\fR\rprevStateRoot\"\x93\x01\n" + "\x12ExecuteTxsResponse\x12,\n" + "\x12updated_state_root\x18\x01 \x01(\fR\x10updatedStateRoot\x12\x1b\n" + - "\tmax_bytes\x18\x02 \x01(\x04R\bmaxBytes\"4\n" + + "\tmax_bytes\x18\x02 \x01(\x04R\bmaxBytes\x122\n" + + "\x15next_proposer_address\x18\x03 \x01(\fR\x13nextProposerAddress\"4\n" + "\x0fSetFinalRequest\x12!\n" + "\fblock_height\x18\x01 \x01(\x04R\vblockHeight\"\x12\n" + "\x10SetFinalResponse\"\x19\n" + - "\x17GetExecutionInfoRequest\"3\n" + + "\x17GetExecutionInfoRequest\"g\n" + "\x18GetExecutionInfoResponse\x12\x17\n" + - "\amax_gas\x18\x01 \x01(\x04R\x06maxGas\"\x9f\x01\n" + + "\amax_gas\x18\x01 \x01(\x04R\x06maxGas\x122\n" + + "\x15next_proposer_address\x18\x02 \x01(\fR\x13nextProposerAddress\"\x9f\x01\n" + "\x10FilterTxsRequest\x12\x10\n" + "\x03txs\x18\x01 \x03(\fR\x03txs\x12\x1b\n" + "\tmax_bytes\x18\x02 \x01(\x04R\bmaxBytes\x12\x17\n" + diff --git a/types/pb/evnode/v1/state.pb.go b/types/pb/evnode/v1/state.pb.go index 
a76c7efb28..868164e407 100644 --- a/types/pb/evnode/v1/state.pb.go +++ b/types/pb/evnode/v1/state.pb.go @@ -24,17 +24,18 @@ const ( // State is the state of the blockchain. type State struct { - state protoimpl.MessageState `protogen:"open.v1"` - Version *Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - InitialHeight uint64 `protobuf:"varint,3,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` - LastBlockHeight uint64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` - LastBlockTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3" json:"last_block_time,omitempty"` - DaHeight uint64 `protobuf:"varint,6,opt,name=da_height,json=daHeight,proto3" json:"da_height,omitempty"` - AppHash []byte `protobuf:"bytes,8,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` - LastHeaderHash []byte `protobuf:"bytes,9,opt,name=last_header_hash,json=lastHeaderHash,proto3" json:"last_header_hash,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Version *Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + InitialHeight uint64 `protobuf:"varint,3,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` + LastBlockHeight uint64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3" json:"last_block_time,omitempty"` + DaHeight uint64 `protobuf:"varint,6,opt,name=da_height,json=daHeight,proto3" 
json:"da_height,omitempty"` + AppHash []byte `protobuf:"bytes,8,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + LastHeaderHash []byte `protobuf:"bytes,9,opt,name=last_header_hash,json=lastHeaderHash,proto3" json:"last_header_hash,omitempty"` + NextProposerAddress []byte `protobuf:"bytes,10,opt,name=next_proposer_address,json=nextProposerAddress,proto3" json:"next_proposer_address,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *State) Reset() { @@ -123,6 +124,13 @@ func (x *State) GetLastHeaderHash() []byte { return nil } +func (x *State) GetNextProposerAddress() []byte { + if x != nil { + return x.NextProposerAddress + } + return nil +} + // RaftBlockState represents a replicated block state type RaftBlockState struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -275,7 +283,7 @@ var File_evnode_v1_state_proto protoreflect.FileDescriptor const file_evnode_v1_state_proto_rawDesc = "" + "\n" + - "\x15evnode/v1/state.proto\x12\tevnode.v1\x1a\x16evnode/v1/evnode.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xcf\x02\n" + + "\x15evnode/v1/state.proto\x12\tevnode.v1\x1a\x16evnode/v1/evnode.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x83\x03\n" + "\x05State\x12,\n" + "\aversion\x18\x01 \x01(\v2\x12.evnode.v1.VersionR\aversion\x12\x19\n" + "\bchain_id\x18\x02 \x01(\tR\achainId\x12%\n" + @@ -284,7 +292,9 @@ const file_evnode_v1_state_proto_rawDesc = "" + "\x0flast_block_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\rlastBlockTime\x12\x1b\n" + "\tda_height\x18\x06 \x01(\x04R\bdaHeight\x12\x19\n" + "\bapp_hash\x18\b \x01(\fR\aappHash\x12(\n" + - "\x10last_header_hash\x18\t \x01(\fR\x0elastHeaderHashJ\x04\b\a\x10\b\"\x8e\x02\n" + + "\x10last_header_hash\x18\t \x01(\fR\x0elastHeaderHash\x122\n" + + "\x15next_proposer_address\x18\n" + + " \x01(\fR\x13nextProposerAddressJ\x04\b\a\x10\b\"\x8e\x02\n" + "\x0eRaftBlockState\x12\x16\n" + "\x06height\x18\x01 \x01(\x04R\x06height\x12D\n" + 
"\x1flast_submitted_da_header_height\x18\x02 \x01(\x04R\x1blastSubmittedDaHeaderHeight\x12@\n" + diff --git a/types/serialization.go b/types/serialization.go index dd131dd3bd..114de41194 100644 --- a/types/serialization.go +++ b/types/serialization.go @@ -89,6 +89,7 @@ func (h *Header) MarshalBinary() ([]byte, error) { ph.AppHash = h.AppHash ph.ProposerAddress = h.ProposerAddress ph.ValidatorHash = h.ValidatorHash + ph.NextProposerAddress = h.NextProposerAddress if unknown := encodeLegacyUnknownFields(h.Legacy); len(unknown) > 0 { ph.ProtoReflect().SetUnknown(unknown) } @@ -238,6 +239,7 @@ func (sh *SignedHeader) MarshalBinary() ([]byte, error) { ph.DataHash = sh.DataHash ph.AppHash = sh.AppHash ph.ProposerAddress = sh.ProposerAddress + ph.NextProposerAddress = sh.NextProposerAddress ph.ValidatorHash = sh.ValidatorHash if unknown := encodeLegacyUnknownFields(sh.Legacy); len(unknown) > 0 { ph.ProtoReflect().SetUnknown(unknown) @@ -378,14 +380,15 @@ func (h *Header) ToProto() *pb.Header { Block: h.Version.Block, App: h.Version.App, }, - Height: h.BaseHeader.Height, - Time: h.BaseHeader.Time, - LastHeaderHash: h.LastHeaderHash[:], - DataHash: h.DataHash[:], - AppHash: h.AppHash[:], - ProposerAddress: h.ProposerAddress[:], - ChainId: h.BaseHeader.ChainID, - ValidatorHash: h.ValidatorHash, + Height: h.BaseHeader.Height, + Time: h.BaseHeader.Time, + LastHeaderHash: h.LastHeaderHash[:], + DataHash: h.DataHash[:], + AppHash: h.AppHash[:], + ProposerAddress: h.ProposerAddress[:], + ChainId: h.BaseHeader.ChainID, + ValidatorHash: h.ValidatorHash, + NextProposerAddress: h.NextProposerAddress, } if unknown := encodeLegacyUnknownFields(h.Legacy); len(unknown) > 0 { pHeader.ProtoReflect().SetUnknown(unknown) @@ -436,6 +439,11 @@ func (h *Header) FromProto(other *pb.Header) error { } else { h.ValidatorHash = nil } + if other.NextProposerAddress != nil { + h.NextProposerAddress = append([]byte(nil), other.NextProposerAddress...) 
+ } else { + h.NextProposerAddress = nil + } legacy, err := decodeLegacyHeaderFields(other) if err != nil { @@ -533,6 +541,7 @@ func (s *State) MarshalBinary() ([]byte, error) { ps.DaHeight = s.DAHeight ps.AppHash = s.AppHash ps.LastHeaderHash = s.LastHeaderHash + ps.NextProposerAddress = s.NextProposerAddress bz, err := proto.Marshal(ps) @@ -554,13 +563,14 @@ func (s *State) ToProto() (*pb.State, error) { Block: s.Version.Block, App: s.Version.App, }, - ChainId: s.ChainID, - InitialHeight: s.InitialHeight, - LastBlockHeight: s.LastBlockHeight, - LastBlockTime: ×tamppb.Timestamp{Seconds: secs, Nanos: nanos}, - DaHeight: s.DAHeight, - AppHash: s.AppHash[:], - LastHeaderHash: s.LastHeaderHash[:], + ChainId: s.ChainID, + InitialHeight: s.InitialHeight, + LastBlockHeight: s.LastBlockHeight, + LastBlockTime: ×tamppb.Timestamp{Seconds: secs, Nanos: nanos}, + DaHeight: s.DAHeight, + AppHash: s.AppHash[:], + LastHeaderHash: s.LastHeaderHash[:], + NextProposerAddress: s.NextProposerAddress, }, nil } @@ -596,6 +606,11 @@ func (s *State) FromProto(other *pb.State) error { s.LastHeaderHash = nil } s.DAHeight = other.GetDaHeight() + if other.NextProposerAddress != nil { + s.NextProposerAddress = append([]byte(nil), other.NextProposerAddress...) + } else { + s.NextProposerAddress = nil + } return nil } @@ -888,5 +903,10 @@ func marshalLegacyHeader(h *Header) ([]byte, error) { payload = append(payload, clone.BaseHeader.ChainID...) } + // next proposer address + if len(clone.NextProposerAddress) > 0 { + payload = appendBytesField(payload, 13, clone.NextProposerAddress) + } + return payload, nil } diff --git a/types/state.go b/types/state.go index ccc383d79b..2d51e761af 100644 --- a/types/state.go +++ b/types/state.go @@ -37,20 +37,32 @@ type State struct { // the latest AppHash we've received from calling abci.Commit() AppHash []byte + + // NextProposerAddress is the proposer expected to sign LastBlockHeight+1. 
+ // It is initialized from genesis and then updated from execution results. + NextProposerAddress []byte } -func (s *State) NextState(header Header, stateRoot []byte) (State, error) { +func (s *State) NextState(header Header, stateRoot []byte, nextProposerAddress ...[]byte) (State, error) { height := header.Height() + nextProposer := header.NextProposerAddress + if len(nextProposerAddress) > 0 && len(nextProposerAddress[0]) > 0 { + nextProposer = nextProposerAddress[0] + } + if len(nextProposer) == 0 { + nextProposer = header.ProposerAddress + } return State{ - Version: s.Version, - ChainID: s.ChainID, - InitialHeight: s.InitialHeight, - LastBlockHeight: height, - LastBlockTime: header.Time(), - AppHash: stateRoot, - LastHeaderHash: header.Hash(), - DAHeight: s.DAHeight, + Version: s.Version, + ChainID: s.ChainID, + InitialHeight: s.InitialHeight, + LastBlockHeight: height, + LastBlockTime: header.Time(), + AppHash: stateRoot, + LastHeaderHash: header.Hash(), + DAHeight: s.DAHeight, + NextProposerAddress: cloneBytes(nextProposer), }, nil } @@ -64,6 +76,9 @@ func (s State) AssertValidForNextState(header *SignedHeader, data *Data) error { if err := Validate(header, data); err != nil { return fmt.Errorf("header-data validation failed: %w", err) } + if len(s.NextProposerAddress) > 0 && !bytes.Equal(header.ProposerAddress, s.NextProposerAddress) { + return fmt.Errorf("unexpected proposer - got: %x, want: %x", header.ProposerAddress, s.NextProposerAddress) + } return nil } From 6b2db4b9347cf9f043444f4d52cd7bec06942ff0 Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Fri, 24 Apr 2026 18:05:05 +0200 Subject: [PATCH 06/13] revert header change --- block/internal/common/replay.go | 34 ++++--------------- block/internal/executing/executor.go | 19 +---------- .../internal/executing/executor_logic_test.go | 4 +-- block/internal/syncing/syncer.go | 12 +------ block/internal/syncing/syncer_test.go | 14 ++++---- .../types/src/proto/evnode.v1.messages.rs | 3 -- 
.../types/src/proto/evnode.v1.services.rs | 3 -- ...r-023-execution-owned-proposer-rotation.md | 15 ++++---- proto/evnode/v1/evnode.proto | 5 +-- types/header.go | 26 +++----------- types/pb/evnode/v1/evnode.pb.go | 22 ++++-------- types/serialization.go | 30 +++++----------- types/signed_header_test.go | 24 +++---------- types/state.go | 2 +- 14 files changed, 48 insertions(+), 165 deletions(-) diff --git a/block/internal/common/replay.go b/block/internal/common/replay.go index a120450d22..426961422d 100644 --- a/block/internal/common/replay.go +++ b/block/internal/common/replay.go @@ -185,19 +185,10 @@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { return fmt.Errorf("failed to execute transactions: %w", err) } newAppHash := result.UpdatedStateRoot - if len(result.NextProposerAddress) > 0 { - if len(header.NextProposerAddress) == 0 { - return fmt.Errorf("next proposer mismatch at height %d: header empty, execution %x", height, result.NextProposerAddress) - } - if !bytes.Equal(header.NextProposerAddress, result.NextProposerAddress) { - return fmt.Errorf("next proposer mismatch at height %d: header %x, execution %x", - height, - header.NextProposerAddress, - result.NextProposerAddress, - ) - } - } else if len(header.NextProposerAddress) > 0 && !bytes.Equal(header.NextProposerAddress, header.ProposerAddress) { - return fmt.Errorf("next proposer mismatch at height %d: header %x, execution unchanged", height, header.NextProposerAddress) + + newState, err := prevState.NextState(header.Header, newAppHash, result.NextProposerAddress) + if err != nil { + return fmt.Errorf("calculate next state: %w", err) } // The result of ExecuteTxs (newAppHash) should match the stored state at this height. 
@@ -224,18 +215,11 @@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { return err } if len(expectedState.NextProposerAddress) > 0 { - expectedNextProposer := header.NextProposerAddress - if len(expectedNextProposer) == 0 { - expectedNextProposer = result.NextProposerAddress - } - if len(expectedNextProposer) == 0 { - expectedNextProposer = header.ProposerAddress - } - if !bytes.Equal(expectedNextProposer, expectedState.NextProposerAddress) { + if !bytes.Equal(newState.NextProposerAddress, expectedState.NextProposerAddress) { return fmt.Errorf("next proposer mismatch at height %d: expected %x got %x", height, expectedState.NextProposerAddress, - expectedNextProposer, + newState.NextProposerAddress, ) } } @@ -251,12 +235,6 @@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { Msg("replayBlock: ExecuteTxs completed (no stored state to verify against)") } - // Calculate new state - newState, err := prevState.NextState(header.Header, newAppHash) - if err != nil { - return fmt.Errorf("calculate next state: %w", err) - } - // Persist the new state batch, err := s.store.NewBatch(ctx) if err != nil { diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index de825db24b..4d62f8600d 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -572,13 +572,6 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to apply block: %w", err) } - if !bytes.Equal(newState.NextProposerAddress, header.ProposerAddress) { - header.NextProposerAddress = append([]byte(nil), newState.NextProposerAddress...) 
- header.InvalidateHash() - } else if len(header.NextProposerAddress) > 0 { - header.NextProposerAddress = nil - header.InvalidateHash() - } // set the DA height in the sequencer newState.DAHeight = e.sequencer.GetDAHeight() @@ -861,19 +854,9 @@ func (e *Executor) ApplyBlock(ctx context.Context, header types.Header, data *ty e.sendCriticalError(fmt.Errorf("failed to execute transactions: %w", err)) return types.State{}, fmt.Errorf("failed to execute transactions: %w", err) } - if len(result.NextProposerAddress) > 0 { - if len(header.NextProposerAddress) == 0 { - header.NextProposerAddress = append([]byte(nil), result.NextProposerAddress...) - } else if !bytes.Equal(header.NextProposerAddress, result.NextProposerAddress) { - return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution %x", header.NextProposerAddress, result.NextProposerAddress) - } - header.InvalidateHash() - } else if len(header.NextProposerAddress) > 0 && !bytes.Equal(header.NextProposerAddress, header.ProposerAddress) { - return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution unchanged", header.NextProposerAddress) - } // Create new state - newState, err := currentState.NextState(header, result.UpdatedStateRoot) + newState, err := currentState.NextState(header, result.UpdatedStateRoot, result.NextProposerAddress) if err != nil { return types.State{}, fmt.Errorf("failed to create next state: %w", err) } diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 0b1f86769a..f010dd80ef 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -69,14 +69,13 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { require.NoError(t, err) assert.Equal(t, 0, len(data.Txs)) assert.EqualValues(t, common.DataHashForEmptyTxs, sh.DataHash) - assert.Empty(t, sh.NextProposerAddress) state, err := fx.MemStore.GetState(context.Background()) 
require.NoError(t, err) assert.Equal(t, fx.Exec.genesis.ProposerAddress, state.NextProposerAddress) } -func TestProduceBlock_CommitsExecutionNextProposer(t *testing.T) { +func TestProduceBlock_PersistsExecutionNextProposer(t *testing.T) { fx := setupTestExecutor(t, 1000) defer fx.Cancel() @@ -100,7 +99,6 @@ func TestProduceBlock_CommitsExecutionNextProposer(t *testing.T) { header, data, err := fx.MemStore.GetBlockData(context.Background(), 1) require.NoError(t, err) require.NoError(t, header.ValidateBasicWithData(data)) - assert.Equal(t, nextAddr, header.NextProposerAddress) state, err := fx.MemStore.GetState(context.Background()) require.NoError(t, err) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 9bcbd0f3ee..c615c1f38d 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -837,19 +837,9 @@ func (s *Syncer) ApplyBlock(ctx context.Context, header types.Header, data *type s.sendCriticalError(fmt.Errorf("failed to execute transactions: %w", err)) return types.State{}, fmt.Errorf("failed to execute transactions: %w", err) } - if len(result.NextProposerAddress) > 0 { - if len(header.NextProposerAddress) == 0 { - return types.State{}, fmt.Errorf("next proposer mismatch: header empty, execution %x", result.NextProposerAddress) - } - if !bytes.Equal(header.NextProposerAddress, result.NextProposerAddress) { - return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution %x", header.NextProposerAddress, result.NextProposerAddress) - } - } else if len(header.NextProposerAddress) > 0 && !bytes.Equal(header.NextProposerAddress, header.ProposerAddress) { - return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution unchanged", header.NextProposerAddress) - } // Create new state - newState, err := currentState.NextState(header, result.UpdatedStateRoot) + newState, err := currentState.NextState(header, result.UpdatedStateRoot, result.NextProposerAddress) if err != nil { 
return types.State{}, fmt.Errorf("failed to create next state: %w", err) } diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 696d2c939f..2b4bce1aa6 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -214,17 +214,15 @@ func TestSyncer_ValidateBlock_UsesStateNextProposer(t *testing.T) { require.Contains(t, err.Error(), "unexpected proposer") } -func TestSyncer_ApplyBlockRejectsExecutionNextProposerMismatch(t *testing.T) { +func TestSyncer_ApplyBlockPersistsExecutionNextProposer(t *testing.T) { addr, _, _ := buildSyncTestSigner(t) - headerNext := []byte("header-next-proposer") execNext := []byte("execution-next-proposer") mockExec := testmocks.NewMockExecutor(t) data := makeData("tchain", 1, 1) header := types.Header{ - BaseHeader: types.BaseHeader{ChainID: "tchain", Height: 1, Time: uint64(time.Now().UnixNano())}, - ProposerAddress: addr, - NextProposerAddress: headerNext, + BaseHeader: types.BaseHeader{ChainID: "tchain", Height: 1, Time: uint64(time.Now().UnixNano())}, + ProposerAddress: addr, } currentState := types.State{AppHash: []byte("app0"), NextProposerAddress: addr} @@ -240,9 +238,9 @@ func TestSyncer_ApplyBlockRejectsExecutionNextProposerMismatch(t *testing.T) { logger: zerolog.Nop(), } - _, err := s.ApplyBlock(t.Context(), header, data, currentState) - require.Error(t, err) - require.Contains(t, err.Error(), "next proposer mismatch") + newState, err := s.ApplyBlock(t.Context(), header, data, currentState) + require.NoError(t, err) + require.Equal(t, execNext, newState.NextProposerAddress) } func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { diff --git a/client/crates/types/src/proto/evnode.v1.messages.rs b/client/crates/types/src/proto/evnode.v1.messages.rs index 495aac85d6..e6038f54ce 100644 --- a/client/crates/types/src/proto/evnode.v1.messages.rs +++ b/client/crates/types/src/proto/evnode.v1.messages.rs @@ -65,9 +65,6 @@ pub struct Header { /// 
Chain ID the block belongs to #[prost(string, tag = "12")] pub chain_id: ::prost::alloc::string::String, - /// Proposer address selected by this block's execution result for the next block. - #[prost(bytes = "vec", tag = "13")] - pub next_proposer_address: ::prost::alloc::vec::Vec, } /// SignedHeader is a header with a signature and a signer. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] diff --git a/client/crates/types/src/proto/evnode.v1.services.rs b/client/crates/types/src/proto/evnode.v1.services.rs index ef7fed4048..013e96db37 100644 --- a/client/crates/types/src/proto/evnode.v1.services.rs +++ b/client/crates/types/src/proto/evnode.v1.services.rs @@ -439,9 +439,6 @@ pub struct Header { /// Chain ID the block belongs to #[prost(string, tag = "12")] pub chain_id: ::prost::alloc::string::String, - /// Proposer address selected by this block's execution result for the next block. - #[prost(bytes = "vec", tag = "13")] - pub next_proposer_address: ::prost::alloc::vec::Vec, } /// SignedHeader is a header with a signature and a signer. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] diff --git a/docs/adr/adr-023-execution-owned-proposer-rotation.md b/docs/adr/adr-023-execution-owned-proposer-rotation.md index c89a78412b..9f78ef27e8 100644 --- a/docs/adr/adr-023-execution-owned-proposer-rotation.md +++ b/docs/adr/adr-023-execution-owned-proposer-rotation.md @@ -27,11 +27,11 @@ An empty `NextProposerAddress` from `ExecuteTxs` means the proposer is unchanged When execution returns a non-empty next proposer: -- The producing node commits it to `Header.NextProposerAddress` before signing the header. -- Syncing nodes require the signed header value to match the execution result. - `State.NextProposerAddress` is updated and used as the expected signer for `LastBlockHeight + 1`. +- Full nodes validate the next block signer against the previous state's `NextProposerAddress`. +- Header encoding remains unchanged. 
`Header.ProposerAddress` continues to identify the signer of the current block only. -`Header.NextProposerAddress` lets header-only paths and DA envelope validation see proposer transitions without replaying execution first. The execution result remains the authority; mismatches between the signed header and execution are invalid. +The execution result is the authority for proposer rotation. Header-only paths cannot derive proposer transitions without either replaying execution or using a future proof/certificate mechanism. This preserves header compatibility while keeping the rotation rule deterministic for full nodes. ## EVM System Contract Model @@ -47,7 +47,7 @@ The security council or multisig becomes the authority for proposer updates. It The system contract must restrict writes to the configured authority. Unauthorized proposer updates are consensus-critical because they determine who can sign the next block. -ev-node validates the execution output against the signed header. A malicious proposer cannot advertise one next proposer in the header while execution derives another. +ev-node validates each block's signer against the proposer address stored in the previous state. A malicious proposer cannot rotate the next signer through node-local configuration; the rotation must be derived from execution. If the execution interface returns an empty proposer, ev-node treats the proposer as unchanged. At startup, empty execution info falls back to genesis so existing execution implementations remain usable. @@ -60,13 +60,14 @@ Positive: - Proposer rotation becomes deterministic execution state. - EVM chains can use a system contract and multisig-controlled rotation. - Existing chains keep working when execution returns an empty proposer. -- Header verification can follow rotations once the rotating block is known. +- Existing header encoding remains compatible because no new header field is required. 
Negative: - The execution API changes and all execution adapters must return `ExecuteResult`. - Proposer updates become consensus-critical execution outputs. - ev-reth needs a separate system-contract design and implementation. +- Header-only/light-client paths cannot follow proposer rotation without execution replay or a later proof design. ## Alternatives Considered @@ -78,6 +79,6 @@ Node-local proposer configuration: - Rejected. Nodes could disagree about the active proposer unless every operator updates configuration at the same time. -Execution-only proposer without header commitment: +Header commitment for next proposer: -- Rejected. Syncing nodes can replay execution, but header and DA envelope paths benefit from having the selected next proposer committed in the signed header when it changes. +- Rejected for the first version. It would expose rotations to header-only paths, but it changes the signed header and hash encoding. Keeping rotation in execution/state avoids a header compatibility break. diff --git a/proto/evnode/v1/evnode.proto b/proto/evnode/v1/evnode.proto index 0002f818f6..a86f234998 100644 --- a/proto/evnode/v1/evnode.proto +++ b/proto/evnode/v1/evnode.proto @@ -38,10 +38,7 @@ message Header { bytes validator_hash = 11; // Chain ID the block belongs to string chain_id = 12; - // Proposer address selected by this block's execution result for the next block. - bytes next_proposer_address = 13; - - reserved 5, 7, 9; + reserved 5, 7, 9, 13; } // SignedHeader is a header with a signature and a signer. diff --git a/types/header.go b/types/header.go index 7beb9a9728..3049425ebe 100644 --- a/types/header.go +++ b/types/header.go @@ -1,7 +1,6 @@ package types import ( - "bytes" "context" "encoding" "errors" @@ -43,7 +42,8 @@ var ( // ErrNoProposerAddress is returned when the proposer address is not set. ErrNoProposerAddress = errors.New("no proposer address") - // ErrProposerVerificationFailed is returned when the proposer verification fails. 
+ // ErrProposerVerificationFailed is deprecated. Proposer authorization is + // enforced through State validation because proposer rotation is execution-owned. ErrProposerVerificationFailed = errors.New("proposer verification failed") // ErrInvalidTimestamp is returned when the timestamp is invalid. @@ -82,11 +82,6 @@ type Header struct { // pubkey can't be recovered by the signature (e.g. ed25519). ProposerAddress []byte // original proposer of the block - // NextProposerAddress is selected by executing this block and becomes the - // proposer expected for the next block. Empty means the current proposer - // remains active. - NextProposerAddress []byte - // Legacy holds fields that were removed from the canonical header JSON/Go // representation but may still be required for backwards compatible binary // serialization (e.g. legacy signing payloads). @@ -129,19 +124,9 @@ func (h *Header) Time() time.Time { // Verify verifies the header. func (h *Header) Verify(untrstH *Header) error { - expectedProposer := h.ProposerAddress - if len(h.NextProposerAddress) > 0 { - expectedProposer = h.NextProposerAddress - } - if !bytes.Equal(untrstH.ProposerAddress, expectedProposer) { - return &header.VerifyError{ - Reason: fmt.Errorf("%w: expected proposer (%X) got (%X)", - ErrProposerVerificationFailed, - expectedProposer, - untrstH.ProposerAddress, - ), - } - } + // Proposer rotation is execution/state-owned. The trusted header alone no + // longer contains enough information to authorize the signer of the next + // header, so full nodes enforce proposer validity through State validation. 
return nil } @@ -279,7 +264,6 @@ func (h Header) Clone() Header { clone.AppHash = cloneBytes(h.AppHash) clone.ValidatorHash = cloneBytes(h.ValidatorHash) clone.ProposerAddress = cloneBytes(h.ProposerAddress) - clone.NextProposerAddress = cloneBytes(h.NextProposerAddress) clone.Legacy = h.Legacy.Clone() clone.cachedHash = nil diff --git a/types/pb/evnode/v1/evnode.pb.go b/types/pb/evnode/v1/evnode.pb.go index 55ab34b0ff..d98acd10a2 100644 --- a/types/pb/evnode/v1/evnode.pb.go +++ b/types/pb/evnode/v1/evnode.pb.go @@ -102,11 +102,9 @@ type Header struct { // validatorhash for compatibility with tendermint light client. ValidatorHash []byte `protobuf:"bytes,11,opt,name=validator_hash,json=validatorHash,proto3" json:"validator_hash,omitempty"` // Chain ID the block belongs to - ChainId string `protobuf:"bytes,12,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - // Proposer address selected by this block's execution result for the next block. - NextProposerAddress []byte `protobuf:"bytes,13,opt,name=next_proposer_address,json=nextProposerAddress,proto3" json:"next_proposer_address,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + ChainId string `protobuf:"bytes,12,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Header) Reset() { @@ -202,13 +200,6 @@ func (x *Header) GetChainId() string { return "" } -func (x *Header) GetNextProposerAddress() []byte { - if x != nil { - return x.NextProposerAddress - } - return nil -} - // SignedHeader is a header with a signature and a signer. 
type SignedHeader struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -801,7 +792,7 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\x16evnode/v1/evnode.proto\x12\tevnode.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"1\n" + "\aVersion\x12\x14\n" + "\x05block\x18\x01 \x01(\x04R\x05block\x12\x10\n" + - "\x03app\x18\x02 \x01(\x04R\x03app\"\xf7\x02\n" + + "\x03app\x18\x02 \x01(\x04R\x03app\"\xc9\x02\n" + "\x06Header\x12,\n" + "\aversion\x18\x01 \x01(\v2\x12.evnode.v1.VersionR\aversion\x12\x16\n" + "\x06height\x18\x02 \x01(\x04R\x06height\x12\x12\n" + @@ -812,9 +803,8 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\x10proposer_address\x18\n" + " \x01(\fR\x0fproposerAddress\x12%\n" + "\x0evalidator_hash\x18\v \x01(\fR\rvalidatorHash\x12\x19\n" + - "\bchain_id\x18\f \x01(\tR\achainId\x122\n" + - "\x15next_proposer_address\x18\r \x01(\fR\x13nextProposerAddressJ\x04\b\x05\x10\x06J\x04\b\a\x10\bJ\x04\b\t\x10\n" + - "\"\x88\x01\n" + + "\bchain_id\x18\f \x01(\tR\achainIdJ\x04\b\x05\x10\x06J\x04\b\a\x10\bJ\x04\b\t\x10\n" + + "J\x04\b\r\x10\x0e\"\x88\x01\n" + "\fSignedHeader\x12)\n" + "\x06header\x18\x01 \x01(\v2\x11.evnode.v1.HeaderR\x06header\x12\x1c\n" + "\tsignature\x18\x02 \x01(\fR\tsignature\x12)\n" + diff --git a/types/serialization.go b/types/serialization.go index 114de41194..b16e7d549d 100644 --- a/types/serialization.go +++ b/types/serialization.go @@ -89,7 +89,6 @@ func (h *Header) MarshalBinary() ([]byte, error) { ph.AppHash = h.AppHash ph.ProposerAddress = h.ProposerAddress ph.ValidatorHash = h.ValidatorHash - ph.NextProposerAddress = h.NextProposerAddress if unknown := encodeLegacyUnknownFields(h.Legacy); len(unknown) > 0 { ph.ProtoReflect().SetUnknown(unknown) } @@ -239,7 +238,6 @@ func (sh *SignedHeader) MarshalBinary() ([]byte, error) { ph.DataHash = sh.DataHash ph.AppHash = sh.AppHash ph.ProposerAddress = sh.ProposerAddress - ph.NextProposerAddress = sh.NextProposerAddress ph.ValidatorHash = sh.ValidatorHash if unknown := 
encodeLegacyUnknownFields(sh.Legacy); len(unknown) > 0 { ph.ProtoReflect().SetUnknown(unknown) @@ -380,15 +378,14 @@ func (h *Header) ToProto() *pb.Header { Block: h.Version.Block, App: h.Version.App, }, - Height: h.BaseHeader.Height, - Time: h.BaseHeader.Time, - LastHeaderHash: h.LastHeaderHash[:], - DataHash: h.DataHash[:], - AppHash: h.AppHash[:], - ProposerAddress: h.ProposerAddress[:], - ChainId: h.BaseHeader.ChainID, - ValidatorHash: h.ValidatorHash, - NextProposerAddress: h.NextProposerAddress, + Height: h.BaseHeader.Height, + Time: h.BaseHeader.Time, + LastHeaderHash: h.LastHeaderHash[:], + DataHash: h.DataHash[:], + AppHash: h.AppHash[:], + ProposerAddress: h.ProposerAddress[:], + ChainId: h.BaseHeader.ChainID, + ValidatorHash: h.ValidatorHash, } if unknown := encodeLegacyUnknownFields(h.Legacy); len(unknown) > 0 { pHeader.ProtoReflect().SetUnknown(unknown) @@ -439,12 +436,6 @@ func (h *Header) FromProto(other *pb.Header) error { } else { h.ValidatorHash = nil } - if other.NextProposerAddress != nil { - h.NextProposerAddress = append([]byte(nil), other.NextProposerAddress...) - } else { - h.NextProposerAddress = nil - } - legacy, err := decodeLegacyHeaderFields(other) if err != nil { return err @@ -903,10 +894,5 @@ func marshalLegacyHeader(h *Header) ([]byte, error) { payload = append(payload, clone.BaseHeader.ChainID...) } - // next proposer address - if len(clone.NextProposerAddress) > 0 { - payload = appendBytesField(payload, 13, clone.NextProposerAddress) - } - return payload, nil } diff --git a/types/signed_header_test.go b/types/signed_header_test.go index c159e674cc..c89299964f 100644 --- a/types/signed_header_test.go +++ b/types/signed_header_test.go @@ -70,32 +70,16 @@ func testVerify(t *testing.T, trusted *SignedHeader, untrustedAdj *SignedHeader, }, err: nil, }, - // 4. Test proposer verification - // changes the proposed address to a random address - // Expect failure + // 4. Test proposer rotation at the header layer. 
+ // Proposer authorization is state-owned, so header verification only + // checks the chain link and allows a different proposer address. { prepare: func() (*SignedHeader, bool) { untrusted := *untrustedAdj untrusted.ProposerAddress = GetRandomBytes(32) return &untrusted, true }, - err: &header.VerifyError{ - Reason: ErrProposerVerificationFailed, - }, - }, - // 5. Test proposer verification for non-adjacent headers - // changes the proposed address to a random address and updates height - // Expect failure - { - prepare: func() (*SignedHeader, bool) { - untrusted := *untrustedAdj - untrusted.ProposerAddress = GetRandomBytes(32) - untrusted.BaseHeader.Height++ - return &untrusted, true - }, - err: &header.VerifyError{ - Reason: ErrProposerVerificationFailed, - }, + err: nil, }, } diff --git a/types/state.go b/types/state.go index 2d51e761af..10f11a51ae 100644 --- a/types/state.go +++ b/types/state.go @@ -45,7 +45,7 @@ type State struct { func (s *State) NextState(header Header, stateRoot []byte, nextProposerAddress ...[]byte) (State, error) { height := header.Height() - nextProposer := header.NextProposerAddress + nextProposer := s.NextProposerAddress if len(nextProposerAddress) > 0 && len(nextProposerAddress[0]) > 0 { nextProposer = nextProposerAddress[0] } From 2fbd381adaed2decdce650dc36d1c0e968a4aec8 Mon Sep 17 00:00:00 2001 From: Marko Baricevic Date: Mon, 27 Apr 2026 08:34:19 +0200 Subject: [PATCH 07/13] deps --- apps/evm/go.sum | 2 - apps/grpc/go.sum | 2 - apps/testapp/go.sum | 2 - execution/evm/go.sum | 88 +++++++++++++++++++-------------------- execution/evm/test/go.sum | 2 - execution/grpc/go.sum | 4 -- go.sum | 2 - test/e2e/go.sum | 2 - 8 files changed, 42 insertions(+), 62 deletions(-) diff --git a/apps/evm/go.sum b/apps/evm/go.sum index 9e21ac1ff7..15aa5441d2 100644 --- a/apps/evm/go.sum +++ b/apps/evm/go.sum @@ -228,8 +228,6 @@ github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJ github.com/ethereum/go-bigmodexpfix 
v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= github.com/ethereum/go-ethereum v1.17.2 h1:ag6geu0kn8Hv5FLKTpH+Hm2DHD+iuFtuqKxEuwUsDOI= github.com/ethereum/go-ethereum v1.17.2/go.mod h1:KHcRXfGOUfUmKg51IhQ0IowiqZ6PqZf08CMtk0g5K1o= -github.com/evstack/ev-node/core v1.0.0 h1:s0Tx0uWHme7SJn/ZNEtee4qNM8UO6PIxXnHhPbbKTz8= -github.com/evstack/ev-node/core v1.0.0/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= diff --git a/apps/grpc/go.sum b/apps/grpc/go.sum index f3d0c05095..fb7d076ef7 100644 --- a/apps/grpc/go.sum +++ b/apps/grpc/go.sum @@ -188,8 +188,6 @@ github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9O github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= -github.com/evstack/ev-node/core v1.0.0 h1:s0Tx0uWHme7SJn/ZNEtee4qNM8UO6PIxXnHhPbbKTz8= -github.com/evstack/ev-node/core v1.0.0/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= diff --git a/apps/testapp/go.sum b/apps/testapp/go.sum index f3d0c05095..fb7d076ef7 100644 --- a/apps/testapp/go.sum +++ b/apps/testapp/go.sum @@ -188,8 +188,6 @@ github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9O github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= -github.com/evstack/ev-node/core v1.0.0 h1:s0Tx0uWHme7SJn/ZNEtee4qNM8UO6PIxXnHhPbbKTz8= -github.com/evstack/ev-node/core v1.0.0/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= diff --git a/execution/evm/go.sum b/execution/evm/go.sum index 22f6a2794b..d296895842 100644 --- a/execution/evm/go.sum +++ b/execution/evm/go.sum @@ -6,12 +6,12 @@ cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIi cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= -cloud.google.com/go/iam v1.6.0 h1:JiSIcEi38dWBKhB3BtfKCW+dMvCZJEhBA2BsaGJgoxs= -cloud.google.com/go/iam v1.6.0/go.mod h1:ZS6zEy7QHmcNO18mjO2viYv/n+wOUkhJqGNkPPGueGU= -cloud.google.com/go/kms v1.27.0 h1:iYYgoD0HJIqz35A+He1G0dS5qTQzQsDXFsyXwzkUCXM= -cloud.google.com/go/kms v1.27.0/go.mod h1:KPxrdf61iYEOZ86uPwR86muBpSik2y4Ion6e83fVl1Q= -cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8= -cloud.google.com/go/longrunning v0.8.0/go.mod h1:UmErU2Onzi+fKDg2gR7dusz11Pe26aknR4kHmJJqIfk= +cloud.google.com/go/iam v1.7.0 h1:JD3zh0C6LHl16aCn5Akff0+GELdp1+4hmh6ndoFLl8U= +cloud.google.com/go/iam v1.7.0/go.mod h1:tetWZW1PD/m6vcuY2Zj/aU0eCHNPuxedbnbRTyKXvdY= +cloud.google.com/go/kms v1.29.0 h1:bAW1C5FQf+6GhPkywQzPlsULALCG7c16qpXLFGV9ivY= 
+cloud.google.com/go/kms v1.29.0/go.mod h1:YIyXZym11R5uovJJt4oN5eUL3oPmirF3yKeIh6QAf4U= +cloud.google.com/go/longrunning v0.9.0 h1:0EzbDEGsAvOZNbqXopgniY0w0a1phvu5IdUFq8grmqY= +cloud.google.com/go/longrunning v0.9.0/go.mod h1:pkTz846W7bF4o2SzdWJ40Hu0Re+UoNT6Q5t+igIcb8E= filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5 h1:JA0fFr+kxpqTdxR9LOBiTWpGNchqmkcsgmdeJZRclZ0= filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5/go.mod h1:OjOXDNlClLblvXdwgFFOQFJEocLhhtai8vGLy0JCZlI= filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b h1:REI1FbdW71yO56Are4XAxD+OS/e+BQsB3gE4mZRQEXY= @@ -26,36 +26,36 @@ github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDO github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU= -github.com/aws/aws-sdk-go-v2 v1.41.5 h1:dj5kopbwUsVUVFgO4Fi5BIT3t4WyqIDjGKCangnV/yY= -github.com/aws/aws-sdk-go-v2 v1.41.5/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= -github.com/aws/aws-sdk-go-v2/config v1.32.14 h1:opVIRo/ZbbI8OIqSOKmpFaY7IwfFUOCCXBsUpJOwDdI= -github.com/aws/aws-sdk-go-v2/config v1.32.14/go.mod h1:U4/V0uKxh0Tl5sxmCBZ3AecYny4UNlVmObYjKuuaiOo= -github.com/aws/aws-sdk-go-v2/credentials v1.19.14 h1:n+UcGWAIZHkXzYt87uMFBv/l8THYELoX6gVcUvgl6fI= -github.com/aws/aws-sdk-go-v2/credentials v1.19.14/go.mod h1:cJKuyWB59Mqi0jM3nFYQRmnHVQIcgoxjEMAbLkpr62w= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 h1:NUS3K4BTDArQqNu2ih7yeDLaS3bmHD0YndtA6UP884g= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21/go.mod h1:YWNWJQNjKigKY1RHVJCuupeWDrrHjRqHm0N9rdrWzYI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 h1:Rgg6wvjjtX8bNHcvi9OnXWwcE0a2vGpbwmtICOsvcf4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21/go.mod h1:A/kJFst/nm//cyqonihbdpQZwiUhhzpqTsdbhDdRF9c= 
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 h1:PEgGVtPoB6NTpPrBgqSE5hE/o47Ij9qk/SEZFbUOe9A= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21/go.mod h1:p+hz+PRAYlY3zcpJhPwXlLC4C+kqn70WIHwnzAfs6ps= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 h1:qYQ4pzQ2Oz6WpQ8T3HvGHnZydA72MnLuFK9tJwmrbHw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 h1:c31//R3xgIJMSC8S6hEVq+38DcvUlgFY0FM6mSI5oto= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21/go.mod h1:r6+pf23ouCB718FUxaqzZdbpYFyDtehyZcmP5KL9FkA= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.4 h1:PgD1y0ZagPokGIZPmejCBUySBzOFDN+leZxCOfb1OEQ= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.4/go.mod h1:FfXDb5nXrsoGgxsBFxwxr3vdHXheC2tV+6lmuLghhjQ= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 h1:QKZH0S178gCmFEgst8hN0mCX1KxLgHBKKY/CLqwP8lg= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.9/go.mod h1:7yuQJoT+OoH8aqIxw9vwF+8KpvLZ8AWmvmUWHsGQZvI= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.15 h1:lFd1+ZSEYJZYvv9d6kXzhkZu07si3f+GQ1AaYwa2LUM= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.15/go.mod h1:WSvS1NLr7JaPunCXqpJnWk1Bjo7IxzZXrZi1QQCkuqM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.19 h1:dzztQ1YmfPrxdrOiuZRMF6fuOwWlWpD2StNLTceKpys= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.19/go.mod h1:YO8TrYtFdl5w/4vmjL8zaBSsiNp3w0L1FfKVKenZT7w= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.10 h1:p8ogvvLugcR/zLBXTXrTkj0RYBUdErbMnAFFp12Lm/U= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.10/go.mod h1:60dv0eZJfeVXfbT1tFJinbHrDfSJ2GZl4Q//OSSNAVw= -github.com/aws/smithy-go v1.24.3 
h1:XgOAaUgx+HhVBoP4v8n6HCQoTRDhoMghKqw4LNHsDNg= -github.com/aws/smithy-go v1.24.3/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/aws/aws-sdk-go-v2 v1.41.6 h1:1AX0AthnBQzMx1vbmir3Y4WsnJgiydmnJjiLu+LvXOg= +github.com/aws/aws-sdk-go-v2 v1.41.6/go.mod h1:dy0UzBIfwSeot4grGvY1AqFWN5zgziMmWGzysDnHFcQ= +github.com/aws/aws-sdk-go-v2/config v1.32.16 h1:Q0iQ7quUgJP0F/SCRTieScnaMdXr9h/2+wze1u3cNeM= +github.com/aws/aws-sdk-go-v2/config v1.32.16/go.mod h1:duCCnJEFqpt2RC6no1iK6q+8HpwOAkiUua0pY507dQc= +github.com/aws/aws-sdk-go-v2/credentials v1.19.15 h1:fyvgWTszojq8hEnMi8PPBTvZdTtEVmAVyo+NFLHBhH4= +github.com/aws/aws-sdk-go-v2/credentials v1.19.15/go.mod h1:gJiYyMOjNg8OEdRWOf3CrFQxM2a98qmrtjx1zuiQfB8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.22 h1:IOGsJ1xVWhsi+ZO7/NW8OuZZBtMJLZbk4P5HDjJO0jQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.22/go.mod h1:b+hYdbU+jGKfXE8kKM6g1+h+L/Go3vMvzlxBsiuGsxg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22 h1:GmLa5Kw1ESqtFpXsx5MmC84QWa/ZrLZvlJGa2y+4kcQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22/go.mod h1:6sW9iWm9DK9YRpRGga/qzrzNLgKpT2cIxb7Vo2eNOp0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22 h1:dY4kWZiSaXIzxnKlj17nHnBcXXBfac6UlsAx2qL6XrU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22/go.mod h1:KIpEUx0JuRZLO7U6cbV204cWAEco2iC3l061IxlwLtI= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 h1:FPXsW9+gMuIeKmz7j6ENWcWtBGTe1kH8r9thNt5Uxx4= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23/go.mod h1:7J8iGMdRKk6lw2C+cMIphgAnT8uTwBwNOsGkyOCm80U= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 h1:HtOTYcbVcGABLOVuPYaIihj6IlkqubBwFj10K5fxRek= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8/go.mod h1:VsK9abqQeGlzPgUr+isNWzPlK2vKe9INMLWnY65f5Xs= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 h1:PUmZeJU6Y1Lbvt9WFuJ0ugUK2xn6hIWUBBbKuOWF30s= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22/go.mod h1:nO6egFBoAaoXze24a2C0NjQCvdpk8OueRoYimvEB9jo= +github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 h1:nEzwx/ZlpUZ2Y6WztsgYmfBh5Ixd3QiECawXMzvTMeo= +github.com/aws/aws-sdk-go-v2/service/kms v1.50.5/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 h1:a1Fq/KXn75wSzoJaPQTgZO0wHGqE9mjFnylnqEPTchA= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.10/go.mod h1:p6+MXNxW7IA6dMgHfTAzljuwSKD0NCm/4lbS4t6+7vI= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 h1:x6bKbmDhsgSZwv6q19wY/u3rLk/3FGjJWyqKcIRufpE= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.16/go.mod h1:CudnEVKRtLn0+3uMV0yEXZ+YZOKnAtUJ5DmDhilVnIw= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 h1:oK/njaL8GtyEihkWMD4k3VgHCT64RQKkZwh0DG5j8ak= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20/go.mod h1:JHs8/y1f3zY7U5WcuzoJ/yAYGYtNIVPKLIbp61euvmg= +github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 h1:ks8KBcZPh3PYISr5dAiXCM5/Thcuxk8l+PG4+A0exds= +github.com/aws/aws-sdk-go-v2/service/sts v1.42.0/go.mod h1:pFw33T0WLvXU3rw1WBkpMlkgIn54eCB5FYLhjDc9Foo= +github.com/aws/smithy-go v1.25.0 h1:Sz/XJ64rwuiKtB6j98nDIPyYrV1nVNJ4YU74gttcl5U= +github.com/aws/smithy-go v1.25.0/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -121,10 +121,6 @@ github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJ github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= github.com/ethereum/go-ethereum v1.17.2 h1:ag6geu0kn8Hv5FLKTpH+Hm2DHD+iuFtuqKxEuwUsDOI= github.com/ethereum/go-ethereum v1.17.2/go.mod h1:KHcRXfGOUfUmKg51IhQ0IowiqZ6PqZf08CMtk0g5K1o= 
-github.com/evstack/ev-node v1.1.0 h1:UGupPg6DwylhI+P1UOdRlTdZynCe1qdMtZXsXgMq+4A= -github.com/evstack/ev-node v1.1.0/go.mod h1:5lIACV0hQGO5Btdb1b3fSw2Vz7Jvrg2yvMefalfWguA= -github.com/evstack/ev-node/core v1.0.0 h1:s0Tx0uWHme7SJn/ZNEtee4qNM8UO6PIxXnHhPbbKTz8= -github.com/evstack/ev-node/core v1.0.0/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= @@ -198,8 +194,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8= github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= -github.com/googleapis/gax-go/v2 v2.20.0 h1:NIKVuLhDlIV74muWlsMM4CcQZqN6JJ20Qcxd9YMuYcs= -github.com/googleapis/gax-go/v2 v2.20.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4= +github.com/googleapis/gax-go/v2 v2.21.0 h1:h45NjjzEO3faG9Lg/cFrBh2PgegVVgzqKzuZl/wMbiI= +github.com/googleapis/gax-go/v2 v2.21.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/pyroscope-go v1.2.7 h1:VWBBlqxjyR0Cwk2W6UrE8CdcdD80GOFNutj0Kb1T8ac= @@ -268,8 +264,8 @@ github.com/libp2p/go-libp2p v0.48.0 h1:h2BrLAgrj7X8bEN05K7qmrjpNHYA+6tnsGRdprjTn github.com/libp2p/go-libp2p v0.48.0/go.mod h1:Q1fBZNdmC2Hf82husCTfkKJVfHm2we5zk+NWmOGEmWk= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod 
h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= -github.com/libp2p/go-libp2p-kad-dht v0.39.0 h1:mww38eBYiUvdsu+Xl/GLlBC0Aa8M+5HAwvafkFOygAM= -github.com/libp2p/go-libp2p-kad-dht v0.39.0/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= +github.com/libp2p/go-libp2p-kad-dht v0.39.1 h1:9RzUBc7zywT4ZNamRSgEvPZmVlK3Y6xdlCYfXXvlR/Q= +github.com/libp2p/go-libp2p-kad-dht v0.39.1/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= @@ -571,8 +567,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= -google.golang.org/api v0.273.1 h1:L7G/TmpAMz0nKx/ciAVssVmWQiOF6+pOuXeKrWVsquY= -google.golang.org/api v0.273.1/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= +google.golang.org/api v0.274.0 h1:aYhycS5QQCwxHLwfEHRRLf9yNsfvp1JadKKWBE54RFA= +google.golang.org/api v0.274.0/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 h1:XzmzkmB14QhVhgnawEVsOn6OFsnpyxNPRY9QV01dNB0= google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:L43LFes82YgSonw6iTXTxXUX1OlULt4AQtkik4ULL/I= google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= diff --git a/execution/evm/test/go.sum b/execution/evm/test/go.sum index f56fd2ff67..01ee1c1743 100644 --- a/execution/evm/test/go.sum +++ b/execution/evm/test/go.sum @@ -244,8 +244,6 @@ github.com/ethereum/go-bigmodexpfix 
v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJ github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= github.com/ethereum/go-ethereum v1.17.2 h1:ag6geu0kn8Hv5FLKTpH+Hm2DHD+iuFtuqKxEuwUsDOI= github.com/ethereum/go-ethereum v1.17.2/go.mod h1:KHcRXfGOUfUmKg51IhQ0IowiqZ6PqZf08CMtk0g5K1o= -github.com/evstack/ev-node/core v1.0.0 h1:s0Tx0uWHme7SJn/ZNEtee4qNM8UO6PIxXnHhPbbKTz8= -github.com/evstack/ev-node/core v1.0.0/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= diff --git a/execution/grpc/go.sum b/execution/grpc/go.sum index 7966cda474..2139063fa3 100644 --- a/execution/grpc/go.sum +++ b/execution/grpc/go.sum @@ -2,10 +2,6 @@ connectrpc.com/connect v1.19.2 h1:McQ83FGdzL+t60peksi0gXC7MQ/iLKgLduAnThbM0mo= connectrpc.com/connect v1.19.2/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w= connectrpc.com/grpcreflect v1.3.0 h1:Y4V+ACf8/vOb1XOc251Qun7jMB75gCUNw6llvB9csXc= connectrpc.com/grpcreflect v1.3.0/go.mod h1:nfloOtCS8VUQOQ1+GTdFzVg2CJo4ZGaat8JIovCtDYs= -github.com/evstack/ev-node v1.1.0 h1:UGupPg6DwylhI+P1UOdRlTdZynCe1qdMtZXsXgMq+4A= -github.com/evstack/ev-node v1.1.0/go.mod h1:5lIACV0hQGO5Btdb1b3fSw2Vz7Jvrg2yvMefalfWguA= -github.com/evstack/ev-node/core v1.0.0 h1:s0Tx0uWHme7SJn/ZNEtee4qNM8UO6PIxXnHhPbbKTz8= -github.com/evstack/ev-node/core v1.0.0/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= diff --git a/go.sum b/go.sum index f3d0c05095..fb7d076ef7 100644 --- a/go.sum +++ b/go.sum @@ -188,8 +188,6 @@ 
github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9O github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= -github.com/evstack/ev-node/core v1.0.0 h1:s0Tx0uWHme7SJn/ZNEtee4qNM8UO6PIxXnHhPbbKTz8= -github.com/evstack/ev-node/core v1.0.0/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= diff --git a/test/e2e/go.sum b/test/e2e/go.sum index 4bc7248c8d..e3a706a171 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -407,8 +407,6 @@ github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJ github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= github.com/ethereum/go-ethereum v1.17.2 h1:ag6geu0kn8Hv5FLKTpH+Hm2DHD+iuFtuqKxEuwUsDOI= github.com/ethereum/go-ethereum v1.17.2/go.mod h1:KHcRXfGOUfUmKg51IhQ0IowiqZ6PqZf08CMtk0g5K1o= -github.com/evstack/ev-node/core v1.0.0 h1:s0Tx0uWHme7SJn/ZNEtee4qNM8UO6PIxXnHhPbbKTz8= -github.com/evstack/ev-node/core v1.0.0/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= From e505b3659177dd680e8b4fd364859400ab9310c3 Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Mon, 27 Apr 2026 08:48:32 +0200 Subject: [PATCH 08/13] fix docker and proto --- apps/testapp/Dockerfile | 1 + 
proto/evnode/v1/evnode.proto | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/apps/testapp/Dockerfile b/apps/testapp/Dockerfile index e41335f7b6..dd21ab5697 100644 --- a/apps/testapp/Dockerfile +++ b/apps/testapp/Dockerfile @@ -21,6 +21,7 @@ WORKDIR /ev-node # Dependencies are only re-downloaded when go.mod or go.sum change. COPY go.mod go.sum ./ COPY apps/testapp/go.mod apps/testapp/go.sum ./apps/testapp/ +COPY core/go.mod core/go.sum ./core/ RUN go mod download && (cd apps/testapp && go mod download) # Copy the rest of the source and build. diff --git a/proto/evnode/v1/evnode.proto b/proto/evnode/v1/evnode.proto index a86f234998..8d7ec8cf4c 100644 --- a/proto/evnode/v1/evnode.proto +++ b/proto/evnode/v1/evnode.proto @@ -38,7 +38,7 @@ message Header { bytes validator_hash = 11; // Chain ID the block belongs to string chain_id = 12; - reserved 5, 7, 9, 13; + reserved 5, 7, 9; } // SignedHeader is a header with a signature and a signer. From 7b4011c32c36c43bb211f3117dd4aef9156e10db Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Mon, 27 Apr 2026 09:39:17 +0200 Subject: [PATCH 09/13] ammendments from commnets --- block/internal/common/replay.go | 9 +++++++-- block/internal/submitting/da_submitter.go | 6 +++--- .../submitting/da_submitter_integration_test.go | 2 +- block/internal/submitting/da_submitter_test.go | 14 +++++++------- .../internal/submitting/da_submitter_tracing.go | 5 ++--- .../submitting/da_submitter_tracing_test.go | 15 +++++++-------- block/internal/submitting/submitter.go | 4 ++-- block/internal/submitting/submitter_test.go | 2 +- block/internal/syncing/p2p_handler.go | 10 ++++++++++ block/internal/syncing/syncer.go | 5 +++++ core/execution/execution.go | 16 +++++++++++----- 11 files changed, 56 insertions(+), 32 deletions(-) diff --git a/block/internal/common/replay.go b/block/internal/common/replay.go index 426961422d..23bf6f7a6a 100644 --- a/block/internal/common/replay.go +++ b/block/internal/common/replay.go @@ -150,14 +150,19 
@@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { // Get the previous state var prevState types.State if height == s.genesis.InitialHeight { - // For the first block, use genesis state. + // For the first block, use genesis state. Mirror Syncer.initializeState(): + // prefer the execution layer's view of the next proposer, fall back to genesis. + nextProposer := append([]byte(nil), s.genesis.ProposerAddress...) + if info, infoErr := s.exec.GetExecutionInfo(ctx); infoErr == nil && len(info.NextProposerAddress) > 0 { + nextProposer = append([]byte(nil), info.NextProposerAddress...) + } prevState = types.State{ ChainID: s.genesis.ChainID, InitialHeight: s.genesis.InitialHeight, LastBlockHeight: s.genesis.InitialHeight - 1, LastBlockTime: s.genesis.StartTime, AppHash: header.AppHash, // Genesis app hash (input to first block execution) - NextProposerAddress: append([]byte(nil), s.genesis.ProposerAddress...), + NextProposerAddress: nextProposer, } } else { // GetStateAtHeight(height-1) returns the state AFTER block height-1 was executed, diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index f5f4a829bf..0a71a347ce 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -425,7 +425,7 @@ func (s *DASubmitter) setCachedEnvelope(height uint64, envelope []byte) { } // SubmitData submits pending data to DA layer -func (s *DASubmitter) SubmitData(ctx context.Context, unsignedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error { +func (s *DASubmitter) SubmitData(ctx context.Context, unsignedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer) error { if len(unsignedDataList) == 0 { return nil } @@ -435,7 +435,7 @@ func (s *DASubmitter) SubmitData(ctx context.Context, unsignedDataList []*types. 
} // Sign the data (cache returns unsigned SignedData structs) - signedDataList, signedDataListBz, err := s.signData(ctx, unsignedDataList, marshalledData, signer, genesis) + signedDataList, signedDataListBz, err := s.signData(ctx, unsignedDataList, marshalledData, signer) if err != nil { return fmt.Errorf("failed to sign data: %w", err) } @@ -469,7 +469,7 @@ func (s *DASubmitter) SubmitData(ctx context.Context, unsignedDataList []*types. } // signData signs unsigned SignedData structs returned from cache -func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.SignedData, unsignedDataListBz [][]byte, signer signer.Signer, _ genesis.Genesis) ([]*types.SignedData, [][]byte, error) { +func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.SignedData, unsignedDataListBz [][]byte, signer signer.Signer) ([]*types.SignedData, [][]byte, error) { if signer == nil { return nil, nil, fmt.Errorf("signer is nil") } diff --git a/block/internal/submitting/da_submitter_integration_test.go b/block/internal/submitting/da_submitter_integration_test.go index b2c4efcd20..09f9d8aa43 100644 --- a/block/internal/submitting/da_submitter_integration_test.go +++ b/block/internal/submitting/da_submitter_integration_test.go @@ -105,7 +105,7 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( dataList, marshalledData, err := cm.GetPendingData(context.Background()) require.NoError(t, err) - require.NoError(t, daSubmitter.SubmitData(context.Background(), dataList, marshalledData, cm, n, gen)) + require.NoError(t, daSubmitter.SubmitData(context.Background(), dataList, marshalledData, cm, n)) // After submission, inclusion markers should be set _, ok := cm.GetHeaderDAIncludedByHeight(1) diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index d25786018b..72102b090c 100644 --- a/block/internal/submitting/da_submitter_test.go +++ 
b/block/internal/submitting/da_submitter_test.go @@ -333,7 +333,7 @@ func TestDASubmitter_SubmitData_Success(t *testing.T) { // Get data from cache and submit signedDataList, marshalledData, err := cm.GetPendingData(ctx) require.NoError(t, err) - err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, signer, gen) + err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, signer) require.NoError(t, err) // Verify data is marked as DA included @@ -387,7 +387,7 @@ func TestDASubmitter_SubmitData_SkipsEmptyData(t *testing.T) { // Get data from cache and submit signedDataList, marshalledData, err := cm.GetPendingData(ctx) require.NoError(t, err) - err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, signer, gen) + err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, signer) require.NoError(t, err) mockDA.AssertNotCalled(t, "Submit", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) @@ -397,7 +397,7 @@ func TestDASubmitter_SubmitData_SkipsEmptyData(t *testing.T) { } func TestDASubmitter_SubmitData_NoPendingData(t *testing.T) { - submitter, _, cm, mockDA, gen := setupDASubmitterTest(t) + submitter, _, cm, mockDA, _ := setupDASubmitterTest(t) ctx := context.Background() // Create test signer @@ -406,7 +406,7 @@ func TestDASubmitter_SubmitData_NoPendingData(t *testing.T) { // Get data from cache (should be empty) and submit dataList, marshalledData, err := cm.GetPendingData(ctx) require.NoError(t, err) - err = submitter.SubmitData(ctx, dataList, marshalledData, cm, signer, gen) + err = submitter.SubmitData(ctx, dataList, marshalledData, cm, signer) require.NoError(t, err) // Should succeed with no action mockDA.AssertNotCalled(t, "Submit", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) } @@ -447,7 +447,7 @@ func TestDASubmitter_SubmitData_NilSigner(t *testing.T) { // Get data from cache and submit with nil signer - should fail signedDataList, marshalledData, err := 
cm.GetPendingData(ctx) require.NoError(t, err) - err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, nil, gen) + err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, nil) require.Error(t, err) assert.Contains(t, err.Error(), "signer is nil") mockDA.AssertNotCalled(t, "Submit", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) @@ -503,7 +503,7 @@ func TestDASubmitter_SignData(t *testing.T) { } // Create signed data - resultData, resultDataBz, err := submitter.signData(t.Context(), dataList, dataListBz, signer, gen) + resultData, resultDataBz, err := submitter.signData(t.Context(), dataList, dataListBz, signer) require.NoError(t, err) // Should have 2 items (empty data skipped) @@ -542,7 +542,7 @@ func TestDASubmitter_SignData_NilSigner(t *testing.T) { } // Create signed data with nil signer - should fail - _, _, err := submitter.signData(t.Context(), dataList, dataListBz, nil, gen) + _, _, err := submitter.signData(t.Context(), dataList, dataListBz, nil) require.Error(t, err) assert.Contains(t, err.Error(), "signer is nil") } diff --git a/block/internal/submitting/da_submitter_tracing.go b/block/internal/submitting/da_submitter_tracing.go index e3c531fcf8..6d0ab1a9cb 100644 --- a/block/internal/submitting/da_submitter_tracing.go +++ b/block/internal/submitting/da_submitter_tracing.go @@ -9,7 +9,6 @@ import ( "go.opentelemetry.io/otel/trace" "github.com/evstack/ev-node/block/internal/cache" - "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/types" ) @@ -63,7 +62,7 @@ func (t *tracedDASubmitter) SubmitHeaders(ctx context.Context, headers []*types. 
return nil } -func (t *tracedDASubmitter) SubmitData(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error { +func (t *tracedDASubmitter) SubmitData(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer) error { ctx, span := t.tracer.Start(ctx, "DASubmitter.SubmitData", trace.WithAttributes( attribute.Int("data.count", len(signedDataList)), @@ -86,7 +85,7 @@ func (t *tracedDASubmitter) SubmitData(ctx context.Context, signedDataList []*ty ) } - err := t.inner.SubmitData(ctx, signedDataList, marshalledData, cache, signer, genesis) + err := t.inner.SubmitData(ctx, signedDataList, marshalledData, cache, signer) if err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) diff --git a/block/internal/submitting/da_submitter_tracing_test.go b/block/internal/submitting/da_submitter_tracing_test.go index 6edc5c5ec1..a6049aadd2 100644 --- a/block/internal/submitting/da_submitter_tracing_test.go +++ b/block/internal/submitting/da_submitter_tracing_test.go @@ -12,7 +12,6 @@ import ( "go.opentelemetry.io/otel/sdk/trace/tracetest" "github.com/evstack/ev-node/block/internal/cache" - "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/pkg/telemetry/testutil" "github.com/evstack/ev-node/types" @@ -20,7 +19,7 @@ import ( type mockDASubmitterAPI struct { submitHeadersFn func(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer) error - submitDataFn func(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error + submitDataFn func(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer) error } func (m 
*mockDASubmitterAPI) SubmitHeaders(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer) error { @@ -30,9 +29,9 @@ func (m *mockDASubmitterAPI) SubmitHeaders(ctx context.Context, headers []*types return nil } -func (m *mockDASubmitterAPI) SubmitData(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error { +func (m *mockDASubmitterAPI) SubmitData(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer) error { if m.submitDataFn != nil { - return m.submitDataFn(ctx, signedDataList, marshalledData, cache, signer, genesis) + return m.submitDataFn(ctx, signedDataList, marshalledData, cache, signer) } return nil } @@ -131,7 +130,7 @@ func TestTracedDASubmitter_SubmitHeaders_Empty(t *testing.T) { func TestTracedDASubmitter_SubmitData_Success(t *testing.T) { mock := &mockDASubmitterAPI{ - submitDataFn: func(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error { + submitDataFn: func(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer) error { return nil }, } @@ -147,7 +146,7 @@ func TestTracedDASubmitter_SubmitData_Success(t *testing.T) { []byte("data2data2"), } - err := submitter.SubmitData(ctx, signedDataList, marshalledData, nil, nil, genesis.Genesis{}) + err := submitter.SubmitData(ctx, signedDataList, marshalledData, nil, nil) require.NoError(t, err) spans := sr.Ended() @@ -166,7 +165,7 @@ func TestTracedDASubmitter_SubmitData_Success(t *testing.T) { func TestTracedDASubmitter_SubmitData_Error(t *testing.T) { expectedErr := errors.New("data submission failed") mock := &mockDASubmitterAPI{ - submitDataFn: func(ctx context.Context, signedDataList 
[]*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error { + submitDataFn: func(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer) error { return expectedErr }, } @@ -178,7 +177,7 @@ func TestTracedDASubmitter_SubmitData_Error(t *testing.T) { } marshalledData := [][]byte{[]byte("data1")} - err := submitter.SubmitData(ctx, signedDataList, marshalledData, nil, nil, genesis.Genesis{}) + err := submitter.SubmitData(ctx, signedDataList, marshalledData, nil, nil) require.Error(t, err) require.Equal(t, expectedErr, err) diff --git a/block/internal/submitting/submitter.go b/block/internal/submitting/submitter.go index 25dcd781a1..fc4a72fe49 100644 --- a/block/internal/submitting/submitter.go +++ b/block/internal/submitting/submitter.go @@ -26,7 +26,7 @@ import ( // DASubmitterAPI defines minimal methods needed by Submitter for DA submissions. type DASubmitterAPI interface { SubmitHeaders(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer) error - SubmitData(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error + SubmitData(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer) error } // Submitter handles DA submission and inclusion processing for both sync and aggregator nodes @@ -291,7 +291,7 @@ func (s *Submitter) daSubmissionLoop() { Dur("time_since_last", timeSinceLastSubmit). 
Msg("batching strategy triggered data submission") - if err := s.daSubmitter.SubmitData(s.ctx, signedDataList, marshalledData, s.cache, s.signer, s.genesis); err != nil { + if err := s.daSubmitter.SubmitData(s.ctx, signedDataList, marshalledData, s.cache, s.signer); err != nil { // Check for unrecoverable errors that indicate a critical issue if errors.Is(err, common.ErrOversizedItem) { s.logger.Error().Err(err). diff --git a/block/internal/submitting/submitter_test.go b/block/internal/submitting/submitter_test.go index b1e2d2e988..8e6ec29abf 100644 --- a/block/internal/submitting/submitter_test.go +++ b/block/internal/submitting/submitter_test.go @@ -429,7 +429,7 @@ func (f *fakeDASubmitter) SubmitHeaders(ctx context.Context, _ []*types.SignedHe return nil } -func (f *fakeDASubmitter) SubmitData(ctx context.Context, _ []*types.SignedData, _ [][]byte, _ cache.Manager, _ signer.Signer, _ genesis.Genesis) error { +func (f *fakeDASubmitter) SubmitData(ctx context.Context, _ []*types.SignedData, _ [][]byte, _ cache.Manager, _ signer.Signer) error { select { case f.chData <- struct{}{}: default: diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 87a8b6a093..c15c042286 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -81,6 +81,11 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC } return err } + if got := p2pHeader.SignedHeader.Height(); got != height { + err := fmt.Errorf("header height mismatch: requested %d, got %d", height, got) + h.logger.Warn().Uint64("requested_height", height).Uint64("header_height", got).Err(err).Msg("discarding mismatched header from P2P") + return err + } p2pData, err := h.dataStore.GetByHeight(ctx, height) if err != nil { @@ -89,6 +94,11 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC } return err } + if got := p2pData.Height(); got != height { + err := fmt.Errorf("data height 
mismatch: requested %d, got %d", height, got) + h.logger.Warn().Uint64("requested_height", height).Uint64("data_height", got).Err(err).Msg("discarding mismatched data from P2P") + return err + } dataCommitment := p2pData.DACommitment() if !bytes.Equal(p2pHeader.DataHash[:], dataCommitment[:]) { err := fmt.Errorf("data hash mismatch: header %x, data %x", p2pHeader.DataHash, dataCommitment) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index c615c1f38d..ae67c20a20 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -331,6 +331,11 @@ func (s *Syncer) initializeState() error { } if len(state.NextProposerAddress) == 0 { state.NextProposerAddress = s.initialProposerAddress(s.ctx) + if state.LastBlockHeight > s.genesis.InitialHeight-1 { + s.logger.Warn(). + Uint64("height", state.LastBlockHeight). + Msg("loaded state without NextProposerAddress; repaired from execution/genesis. Verify chain has not rotated proposer before this upgrade") + } } if state.DAHeight != 0 && state.DAHeight < s.genesis.DAStartHeight { return fmt.Errorf("DA height (%d) is lower than DA start height (%d)", state.DAHeight, s.genesis.DAStartHeight) diff --git a/core/execution/execution.go b/core/execution/execution.go index 477407e1da..38ba2be328 100644 --- a/core/execution/execution.go +++ b/core/execution/execution.go @@ -133,9 +133,12 @@ type ExecutionInfo struct { // For non-gas-based execution layers, this should be 0. MaxGas uint64 - // NextProposerAddress is the proposer address that should sign the next - // block from the execution layer's current view. Empty means unchanged or - // unavailable, and callers should fall back to their current proposer. + // NextProposerAddress is the execution layer's best-effort view of the + // proposer address for the next block. 
It is advisory and is consulted + // only at startup/replay seeding when no prior consensus state is + // available; the authoritative source for the next proposer is + // ExecuteResult.NextProposerAddress. Empty means unchanged/unavailable, + // and callers must fall back to the genesis proposer. NextProposerAddress []byte } @@ -144,8 +147,11 @@ type ExecuteResult struct { // UpdatedStateRoot is the new state root after executing transactions. UpdatedStateRoot []byte - // NextProposerAddress is the proposer address selected by execution for the - // next block. Empty means the current proposer remains active. + // NextProposerAddress is the authoritative proposer address selected by + // the execution layer to sign block blockHeight+1 (the block immediately + // after the one just executed). An empty value means the current proposer + // remains active; execution layers that do not support proposer rotation + // MUST leave this field empty. NextProposerAddress []byte } From 68997e7e4e44f33749b29ff2e3770dd3277ef65c Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Mon, 27 Apr 2026 10:18:57 +0200 Subject: [PATCH 10/13] linting --- block/internal/submitting/da_submitter_test.go | 2 -- block/internal/syncing/p2p_handler.go | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index 72102b090c..5c2342539e 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -261,7 +261,6 @@ func TestDASubmitter_SubmitData_Success(t *testing.T) { // Create test signer addr, pub, signer := createTestSigner(t) - gen.ProposerAddress = addr // Update submitter genesis to use correct proposer submitter.genesis.ProposerAddress = addr @@ -349,7 +348,6 @@ func TestDASubmitter_SubmitData_SkipsEmptyData(t *testing.T) { // Create test signer addr, pub, signer := createTestSigner(t) - gen.ProposerAddress = addr // Create empty 
data emptyData := &types.Data{ diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index c15c042286..e2aa9c6a3b 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -81,7 +81,7 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC } return err } - if got := p2pHeader.SignedHeader.Height(); got != height { + if got := p2pHeader.Height(); got != height { err := fmt.Errorf("header height mismatch: requested %d, got %d", height, got) h.logger.Warn().Uint64("requested_height", height).Uint64("header_height", got).Err(err).Msg("discarding mismatched header from P2P") return err From 04f6b1230bb1c4726dd4aad529dfa448f46f70d3 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Tue, 5 May 2026 11:16:03 +0200 Subject: [PATCH 11/13] test: cover unexpected DA proposer rejection --- block/internal/syncing/syncer_test.go | 56 +++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 2b4bce1aa6..a581ed562a 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -304,6 +304,62 @@ func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { assert.Equal(t, uint64(1), st1.LastBlockHeight) } +func TestProcessHeightEvent_UnexpectedProposerFromDAIsNotCriticalStateError(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + expectedAddr, _, _ := buildSyncTestSigner(t) + wrongAddr, wrongPub, wrongSigner := buildSyncTestSigner(t) + + cfg := config.DefaultConfig() + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: expectedAddr} + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, 
uint64(1), "tchain").Return([]byte("app0"), nil).Once() + + mockHeaderStore := extmocks.NewMockStore[*types.P2PSignedHeader](t) + mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() + mockDataStore := extmocks.NewMockStore[*types.P2PData](t) + mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() + + errChan := make(chan error, 1) + s := NewSyncer( + st, + mockExec, + nil, + cm, + common.NopMetrics(), + cfg, + gen, + mockHeaderStore, + mockDataStore, + zerolog.Nop(), + common.DefaultBlockOptions(), + errChan, + nil, + ) + + require.NoError(t, s.initializeState()) + s.ctx = t.Context() + + lastState := s.getLastState() + data := makeData(gen.ChainID, 1, 0) + _, hdr := makeSignedHeaderBytes(t, gen.ChainID, 1, wrongAddr, wrongPub, wrongSigner, lastState.AppHash, data, nil) + + evt := common.DAHeightEvent{Header: hdr, Data: data, Source: common.SourceDA, DaHeight: 1} + s.processHeightEvent(t.Context(), &evt) + + requireEmptyChan(t, errChan) + assert.False(t, s.hasCriticalError.Load(), "unexpected proposer from DA should be treated as an invalid external block") + + h, err := st.Height(t.Context()) + require.NoError(t, err) + assert.Equal(t, uint64(0), h) +} + func TestSequentialBlockSync(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) From 0b7bb5a631a7373c9d16c67cd9003fe777c3d445 Mon Sep 17 00:00:00 2001 From: Randy Grok <98407738+randygrok@users.noreply.github.com> Date: Fri, 8 May 2026 09:38:22 +0200 Subject: [PATCH 12/13] fix: resolve key rotation conflicts with main (#3313) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * build(deps): Bump the all-go group across 8 directories with 7 updates (#3291) * build(deps): Bump the all-go group across 8 directories with 7 updates Bumps the all-go group with 5 updates in the / directory: | Package | From | To | | --- | --- | --- | | [github.com/aws/aws-sdk-go-v2/service/kms](https://github.com/aws/aws-sdk-go-v2) | `1.50.5` | 
`1.51.0` | | [github.com/aws/smithy-go](https://github.com/aws/smithy-go) | `1.25.0` | `1.25.1` | | [github.com/libp2p/go-libp2p-pubsub](https://github.com/libp2p/go-libp2p-pubsub) | `0.15.0` | `0.16.0` | | [github.com/rs/zerolog](https://github.com/rs/zerolog) | `1.35.0` | `1.35.1` | | [google.golang.org/api](https://github.com/googleapis/google-api-go-client) | `0.274.0` | `0.276.0` | Bumps the all-go group with 1 update in the /apps/evm directory: [github.com/rs/zerolog](https://github.com/rs/zerolog). Bumps the all-go group with 1 update in the /apps/grpc directory: [github.com/rs/zerolog](https://github.com/rs/zerolog). Bumps the all-go group with 1 update in the /apps/testapp directory: [github.com/rs/zerolog](https://github.com/rs/zerolog). Bumps the all-go group with 2 updates in the /execution/evm directory: [github.com/rs/zerolog](https://github.com/rs/zerolog) and [github.com/evstack/ev-node](https://github.com/evstack/ev-node). Bumps the all-go group with 1 update in the /execution/grpc directory: [github.com/evstack/ev-node](https://github.com/evstack/ev-node). Bumps the all-go group with 1 update in the /test/docker-e2e directory: [github.com/celestiaorg/tastora](https://github.com/celestiaorg/tastora). Bumps the all-go group with 2 updates in the /test/e2e directory: [github.com/rs/zerolog](https://github.com/rs/zerolog) and [github.com/celestiaorg/tastora](https://github.com/celestiaorg/tastora). 
Updates `github.com/aws/aws-sdk-go-v2/service/kms` from 1.50.5 to 1.51.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ssm/v1.50.5...service/s3/v1.51.0) Updates `github.com/aws/smithy-go` from 1.25.0 to 1.25.1 - [Release notes](https://github.com/aws/smithy-go/releases) - [Changelog](https://github.com/aws/smithy-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/smithy-go/compare/v1.25.0...v1.25.1) Updates `github.com/libp2p/go-libp2p-pubsub` from 0.15.0 to 0.16.0 - [Release notes](https://github.com/libp2p/go-libp2p-pubsub/releases) - [Commits](https://github.com/libp2p/go-libp2p-pubsub/compare/v0.15.0...v0.16.0) Updates `github.com/rs/zerolog` from 1.35.0 to 1.35.1 - [Commits](https://github.com/rs/zerolog/compare/v1.35.0...v1.35.1) Updates `google.golang.org/api` from 0.274.0 to 0.276.0 - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.274.0...v0.276.0) Updates `github.com/rs/zerolog` from 1.35.0 to 1.35.1 - [Commits](https://github.com/rs/zerolog/compare/v1.35.0...v1.35.1) Updates `github.com/rs/zerolog` from 1.35.0 to 1.35.1 - [Commits](https://github.com/rs/zerolog/compare/v1.35.0...v1.35.1) Updates `github.com/rs/zerolog` from 1.35.0 to 1.35.1 - [Commits](https://github.com/rs/zerolog/compare/v1.35.0...v1.35.1) Updates `github.com/rs/zerolog` from 1.35.0 to 1.35.1 - [Commits](https://github.com/rs/zerolog/compare/v1.35.0...v1.35.1) Updates `github.com/evstack/ev-node` from 1.1.0 to 1.1.1 - [Release notes](https://github.com/evstack/ev-node/releases) - [Changelog](https://github.com/evstack/ev-node/blob/main/CHANGELOG.md) - [Commits](https://github.com/evstack/ev-node/compare/v1.1.0...v1.1.1) Updates `github.com/evstack/ev-node` from 1.1.0 to 1.1.1 - [Release 
notes](https://github.com/evstack/ev-node/releases) - [Changelog](https://github.com/evstack/ev-node/blob/main/CHANGELOG.md) - [Commits](https://github.com/evstack/ev-node/compare/v1.1.0...v1.1.1) Updates `github.com/celestiaorg/tastora` from 0.17.0 to 0.19.0 - [Release notes](https://github.com/celestiaorg/tastora/releases) - [Commits](https://github.com/celestiaorg/tastora/compare/v0.17.0...v0.19.0) Updates `github.com/rs/zerolog` from 1.35.0 to 1.35.1 - [Commits](https://github.com/rs/zerolog/compare/v1.35.0...v1.35.1) Updates `github.com/celestiaorg/tastora` from 0.16.1-0.20260312082036-2ee1b0a2ac4e to 0.19.0 - [Release notes](https://github.com/celestiaorg/tastora/releases) - [Commits](https://github.com/celestiaorg/tastora/compare/v0.17.0...v0.19.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/service/kms dependency-version: 1.51.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: all-go - dependency-name: github.com/aws/smithy-go dependency-version: 1.25.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all-go - dependency-name: github.com/libp2p/go-libp2p-pubsub dependency-version: 0.16.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: all-go - dependency-name: github.com/rs/zerolog dependency-version: 1.35.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all-go - dependency-name: google.golang.org/api dependency-version: 0.276.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: all-go - dependency-name: github.com/rs/zerolog dependency-version: 1.35.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all-go - dependency-name: github.com/rs/zerolog dependency-version: 1.35.1 dependency-type: direct:production update-type: version-update:semver-patch 
dependency-group: all-go - dependency-name: github.com/rs/zerolog dependency-version: 1.35.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all-go - dependency-name: github.com/rs/zerolog dependency-version: 1.35.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all-go - dependency-name: github.com/evstack/ev-node dependency-version: 1.1.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all-go - dependency-name: github.com/evstack/ev-node dependency-version: 1.1.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all-go - dependency-name: github.com/celestiaorg/tastora dependency-version: 0.19.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: all-go - dependency-name: github.com/rs/zerolog dependency-version: 1.35.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all-go - dependency-name: github.com/celestiaorg/tastora dependency-version: 0.19.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: all-go ... Signed-off-by: dependabot[bot] * tidy --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Julien Robert * build(deps): Bump postcss from 8.5.8 to 8.5.12 in /docs in the npm_and_yarn group across 1 directory (#3292) build(deps): Bump postcss Bumps the npm_and_yarn group with 1 update in the /docs directory: [postcss](https://github.com/postcss/postcss). 
Updates `postcss` from 8.5.8 to 8.5.12 - [Release notes](https://github.com/postcss/postcss/releases) - [Changelog](https://github.com/postcss/postcss/blob/main/CHANGELOG.md) - [Commits](https://github.com/postcss/postcss/compare/8.5.8...8.5.12) --- updated-dependencies: - dependency-name: postcss dependency-version: 8.5.12 dependency-type: indirect dependency-group: npm_and_yarn ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * ci: skip code jobs on docs-only changes (#3295) Add a `changes` job using dorny/paths-filter to detect whether any non-documentation files were modified. All heavy jobs (lint, docker, test, docker-tests, proto) are gated behind this check and skipped when the PR only touches docs/** or markdown files. Co-authored-by: Claude Sonnet 4.6 * docs: brand-aligned syntax theme for code blocks (#3294) * docs: better code readability * chore: restore yarn.lock to main Co-Authored-By: Claude Sonnet 4.6 * docs(style): address PR review feedback - Add `"type": "dark"` to ev-dark.json theme manifest - Raise punctuation token contrast from #505050 to #767676 (WCAG AA) - Align --vp-code-block-color CSS var with ev-dark default text (#dbd7ca) - Use ThemeRegistration type instead of `as any` in config.ts Co-Authored-By: Claude Sonnet 4.6 --------- Co-authored-by: Claude Sonnet 4.6 * fix(cache): reduce tx cache retention (#3299) * docs: high availability sequencer guide (#3293) * docs: ev-node high availability * docs: node placement * docs(ha): address PR review feedback Critical fixes: - Fix snapshot_threshold math: 5000 ÷ 10 = 500s ≈ 8.3 min (not 83s) - Fix trailing_logs math: 18000 ÷ 10 = 1800s = 30 min (not 5 min) Medium fixes: - Fix heartbeat_timeout description: it is a follower-side election trigger, not the interval at which the leader sends heartbeats - Add explicit restart instruction after Step 5 data copy in single-to-ha.md so the chain keeps producing blocks during 
preparation (Steps 6-8) - Replace priv_validator_key.json with signer.json in single-to-ha.md to match cluster-setup.md and the E2E tests Minor fixes: - Exclude self from raft.peers in all examples (cluster-setup.md node-1 yaml/CLI/systemd, single-to-ha.md node-1 and node-2) - Add "exclude local node" note to raft.peers description in overview.md - Fix P2P port in overview.md Interaction with P2P section (7676 → 26656) - Add text language tag to all bare fenced blocks (MD040): multiaddr example, RTT equations, and all log snippets Co-Authored-By: Claude Sonnet 4.6 * docs(ha): absorb raft_production.md into ha/overview.md raft_production.md had no sidebar entry and its content was fully superseded by the new ha/ guides. Extract the three pieces that were unique to it — bootstrap flag docs, auto-detection startup mode explanation, and static-membership limitation note — into ha/overview.md, then delete the file. Co-Authored-By: Claude Sonnet 4.6 * docs(ha): use EnvironmentFile for signer passphrase Passing --evnode.signer.passphrase inline exposes the secret in ps aux, journalctl, and shell history. 
- Add EnvironmentFile=/etc/ev-node/env (chmod 600) to the systemd unit in cluster-setup.md with setup instructions - Replace all inline occurrences with $EV_SIGNER_PASSPHRASE sourced from /etc/ev-node/env in every evm start / evm init snippet across both guides Co-Authored-By: Claude Sonnet 4.6 * docs(ha): explicit node-2 peers and action-based rolling restart - Replace "peers list is identical" stub in node-2 config with an explicit peers list that excludes node-2 itself, and add a note that each node must omit itself from raft.peers - Replace "Wait ~30 seconds" in rolling restart with journalctl one-liners that exit as soon as the node logs follower/leader state, giving a deterministic signal instead of an arbitrary timeout Co-Authored-By: Claude Sonnet 4.6 * docs(ha): fix raft.peers self-inclusion startup bug The abbreviated node-2 snippet with "# peers list is identical" caused a startup failure: with raft_addr=0.0.0.0:5001 the bootstrap code's literal address comparison does not recognise node-2@10.0.0.2:5001 as self, so node-2 is appended twice and deduplicateServers returns "duplicate peers found in config". - Fix intro text: "only raft.node_id and raft_addr differ" → "raft.node_id is unique; raft.peers and p2p.peers must exclude self" - Expand node-2 snippet to a full evnode.yaml with the correct peers list (node-1, node-3, node-4, node-5 — no node-2) and an inline explanation of the wildcard address pitfall - Align overview.md trailing_logs example to 1 block/s (matching block_time: "1s" used throughout) and note the 10 block/s rate too Co-Authored-By: Claude Sonnet 4.6 * docs(ha): fix passphrase flag and failover kill cardinality check Replace non-existent --evnode.signer.passphrase with the actual --evnode.signer.passphrase_file flag throughout cluster-setup and single-to-ha guides. Update passphrase setup to create a chmod 600 file at /etc/ev-node/passphrase referenced directly by the flag. 
Add mapfile-based cardinality check in the failover test fallback kill command to guard against killing the wrong process. Co-Authored-By: Claude Sonnet 4.6 * docs(ha): fix RPC endpoints, init ordering, and snap_count CLI flag Replace incorrect CometBFT RPC calls (port 26657/status) with the actual ev-node HTTP API (port 7331 /health/ready, /raft/node) and EVM execution layer (cast block latest) throughout both guides. Align single-to-ha Step 2 init ordering with cluster-setup: create passphrase file before evm init so the signer key is encrypted from the start, and pass --evnode.node.aggregator and passphrase_file flags. Fix Step 9a fallback kill in single-to-ha to use mapfile cardinality check, matching the pattern already applied in cluster-setup. Add --evnode.raft.snap_count=3 to the CLI start example to match the YAML config block. Co-Authored-By: Claude Sonnet 4.6 --------- Co-authored-by: Claude Sonnet 4.6 * perf(store): save metadata async (#3298) * perf(store): save metadata async * cl * Optimize metadata writes with batching * feedback * De-duplicate batched writes by key in cached store * fix * updates * chore(deps): security (#3296) * fix security deps * fix helpers * feat: add grpc socket and flattn tx batches to allow for lower allocations (#3297) * add grpc socket and flattn tx batches to allow for lower allocations * redo proto * docs: update changelog for grpc execution transport * remove extra txs * refactor(execution/grpc): move execution service where it belongs (#3302) * refactor(execution/grpc): move execution service where it belongs * reduce diff * fix lint * feat(execution/grpc): adding support for grpc otlp (#3300) * feat: adding support for grpc oltp * chore: fix linting * cl --------- Co-authored-by: Julien Robert * chore: fix some minor issues in comments (#3304) * build(deps): Bump dorny/paths-filter from 3 to 4 (#3308) Bumps [dorny/paths-filter](https://github.com/dorny/paths-filter) from 3 to 4. 
- [Release notes](https://github.com/dorny/paths-filter/releases) - [Changelog](https://github.com/dorny/paths-filter/blob/master/CHANGELOG.md) - [Commits](https://github.com/dorny/paths-filter/compare/v3...v4) --- updated-dependencies: - dependency-name: dorny/paths-filter dependency-version: '4' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * feat(pkg/sequencers): add queue limit in solo sequencer (#3312) * feat(pkg/sequencers): add queue limit in solo sequencer * use option * cl * move test files * fix: address key rotation CI failures * fix: repair markdown link checks * test: stabilize sync loop persistence test --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Julien Robert Co-authored-by: auricom <27022259+auricom@users.noreply.github.com> Co-authored-by: Claude Sonnet 4.6 Co-authored-by: Marko Co-authored-by: Cian Hatton Co-authored-by: criciss --- .github/workflows/ci.yml | 34 +- .github/workflows/proto.yml | 14 + .just/proto.just | 1 + .mockery.yaml | 4 +- CHANGELOG.md | 8 + apps/evm/go.mod | 16 +- apps/evm/go.sum | 32 +- apps/grpc/README.md | 17 +- apps/grpc/cmd/run.go | 7 +- apps/grpc/go.mod | 16 +- apps/grpc/go.sum | 32 +- apps/testapp/go.mod | 16 +- apps/testapp/go.sum | 32 +- block/internal/cache/generic_cache.go | 2 +- block/internal/cache/generic_cache_test.go | 2 +- block/internal/cache/manager.go | 3 +- block/internal/reaping/reaper.go | 5 +- block/internal/syncing/syncer.go | 3 + block/internal/syncing/syncer_test.go | 1 + buf.gen.grpc.yaml | 11 + buf.yaml | 1 + .../types/src/proto/evnode.v1.messages.rs | 145 --- .../types/src/proto/evnode.v1.services.rs | 835 ------------------ docs/.vitepress/config.ts | 24 + docs/.vitepress/ev-dark.json | 281 ++++++ docs/.vitepress/theme/style.css | 5 +- 
docs/concepts/block-lifecycle.md | 6 +- docs/guides/advanced/based-sequencing.md | 4 +- docs/guides/advanced/forced-inclusion.md | 2 +- docs/guides/ha/cluster-setup.md | 463 ++++++++++ docs/guides/ha/overview.md | 404 +++++++++ docs/guides/ha/single-to-ha.md | 425 +++++++++ docs/guides/operations/monitoring.md | 2 +- docs/guides/raft_production.md | 102 --- docs/index.md | 2 + docs/package-lock.json | 6 +- .../reference/configuration/ev-node-config.md | 4 +- docs/yarn.lock | 6 +- execution/evm/go.mod | 4 +- execution/evm/go.sum | 32 +- execution/evm/test/execution_test.go | 10 +- execution/evm/test/go.mod | 59 +- execution/evm/test/go.sum | 185 ++-- execution/evm/test/test_helpers.go | 127 ++- execution/grpc/README.md | 31 +- execution/grpc/client.go | 83 +- execution/grpc/client_test.go | 149 +++- execution/grpc/go.mod | 15 +- execution/grpc/go.sum | 33 + execution/grpc/handler.go | 3 +- execution/grpc/otel_propagation.go | 29 + execution/grpc/otel_propagation_test.go | 226 +++++ .../grpc/proto}/evnode/v1/execution.proto | 34 +- execution/grpc/server.go | 23 +- execution/grpc/server_test.go | 129 ++- execution/grpc/tx_batch.go | 74 ++ execution/grpc/tx_batch_test.go | 79 ++ .../grpc/types}/pb/evnode/v1/execution.pb.go | 250 ++++-- .../evnode/v1/v1connect/execution.connect.go | 2 +- execution/grpc/unix.go | 69 ++ execution/grpc/unix_test.go | 59 ++ go.mod | 16 +- go.sum | 32 +- mlc_config.json | 34 + .../forced_inclusion_retriever_mock.go | 2 +- pkg/sequencers/based/sequencer_test.go | 41 +- pkg/sequencers/common/errors.go | 10 + pkg/sequencers/single/queue.go | 5 +- pkg/sequencers/single/sequencer.go | 3 +- pkg/sequencers/solo/sequencer.go | 107 ++- pkg/sequencers/solo/sequencer_test.go | 88 ++ pkg/store/cached_store.go | 115 ++- pkg/store/cached_store_test.go | 130 +++ pkg/store/store.go | 24 + pkg/store/tracing.go | 19 + pkg/store/tracing_test.go | 8 + pkg/store/types.go | 10 + test/docker-e2e/go.mod | 19 +- test/docker-e2e/go.sum | 69 +- 
test/docker-e2e/resiliency_test.go | 12 +- test/e2e/benchmark/spamoor_node.go | 368 ++++++++ test/e2e/benchmark/suite_test.go | 20 +- test/e2e/benchmark/victoriatraces_node.go | 142 +++ test/e2e/go.mod | 28 +- test/e2e/go.sum | 60 +- test/mocks/store.go | 63 ++ types/pb/evnode/v1/evnode.pb.go | 4 +- types/state.go | 7 +- 88 files changed, 4437 insertions(+), 1677 deletions(-) create mode 100644 buf.gen.grpc.yaml create mode 100644 docs/.vitepress/ev-dark.json create mode 100644 docs/guides/ha/cluster-setup.md create mode 100644 docs/guides/ha/overview.md create mode 100644 docs/guides/ha/single-to-ha.md delete mode 100644 docs/guides/raft_production.md create mode 100644 execution/grpc/otel_propagation.go create mode 100644 execution/grpc/otel_propagation_test.go rename {proto => execution/grpc/proto}/evnode/v1/execution.proto (84%) create mode 100644 execution/grpc/tx_batch.go create mode 100644 execution/grpc/tx_batch_test.go rename {types => execution/grpc/types}/pb/evnode/v1/execution.pb.go (82%) rename {types => execution/grpc/types}/pb/evnode/v1/v1connect/execution.connect.go (99%) create mode 100644 execution/grpc/unix.go create mode 100644 execution/grpc/unix_test.go create mode 100644 mlc_config.json rename pkg/sequencers/{common => based}/forced_inclusion_retriever_mock.go (99%) create mode 100644 pkg/sequencers/common/errors.go create mode 100644 test/e2e/benchmark/spamoor_node.go create mode 100644 test/e2e/benchmark/victoriatraces_node.go diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4df57a5396..72c620f4e8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,8 +9,30 @@ name: CI permissions: {} jobs: + changes: + name: Detect code changes + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: read + outputs: + code: ${{ steps.filter.outputs.code }} + steps: + - uses: actions/checkout@v6.0.2 + - uses: dorny/paths-filter@v4 + id: filter + with: + filters: | + code: + - '**' + - '!docs/**' + 
- '!**/*.md' + - '!.github/workflows/docs_*.yml' + determine-image-tag: name: Determine Image Tag + needs: changes + if: needs.changes.outputs.code == 'true' runs-on: ubuntu-latest permissions: contents: read @@ -39,12 +61,15 @@ jobs: echo "tag=$TAG" >> $GITHUB_OUTPUT lint: + needs: changes + if: needs.changes.outputs.code == 'true' permissions: contents: read uses: ./.github/workflows/lint.yml docker: - needs: determine-image-tag + needs: [determine-image-tag, changes] + if: needs.changes.outputs.code == 'true' uses: ./.github/workflows/docker-build-push.yml secrets: inherit permissions: @@ -60,6 +85,8 @@ jobs: ] test: + needs: changes + if: needs.changes.outputs.code == 'true' permissions: actions: read contents: read @@ -68,7 +95,8 @@ jobs: secrets: inherit docker-tests: - needs: [determine-image-tag, docker] + needs: [determine-image-tag, docker, changes] + if: needs.changes.outputs.code == 'true' uses: ./.github/workflows/docker-tests.yml secrets: inherit permissions: @@ -77,6 +105,8 @@ jobs: image-tag: ${{ needs.determine-image-tag.outputs.tag }} proto: + needs: changes + if: needs.changes.outputs.code == 'true' permissions: contents: read pull-requests: write diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml index ace34104b1..361cc08520 100644 --- a/.github/workflows/proto.yml +++ b/.github/workflows/proto.yml @@ -15,3 +15,17 @@ jobs: - uses: bufbuild/buf-action@v1 with: format: false + breaking: false + - name: Check protobuf breaking changes + env: + BASE_SHA: ${{ github.event.pull_request.base.sha || github.event.merge_group.base_sha || github.event.before }} + run: | + if [ -z "$BASE_SHA" ] || [[ "$BASE_SHA" =~ ^0+$ ]]; then + echo "No base SHA available for buf breaking check" + exit 0 + fi + + buf breaking proto \ + --limit-to-input-files \ + --error-format github-actions \ + --against "https://github.com/${{ github.repository }}.git#format=git,commit=${BASE_SHA}" diff --git a/.just/proto.just b/.just/proto.just index 
722e53397c..35f7436bcb 100644 --- a/.just/proto.just +++ b/.just/proto.just @@ -4,6 +4,7 @@ proto-gen: @echo "--> Generating Protobuf files" buf generate --path="./proto/evnode" --template="buf.gen.yaml" --config="buf.yaml" buf generate --path="./proto/execution/evm" --template="buf.gen.evm.yaml" --config="buf.yaml" + buf generate --template="buf.gen.grpc.yaml" --config="buf.yaml" # Lint protobuf files (requires Docker) [group('proto')] diff --git a/.mockery.yaml b/.mockery.yaml index 106be368cb..36f4483c3f 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -97,6 +97,6 @@ packages: interfaces: ForcedInclusionRetriever: config: - dir: ./pkg/sequencers/common - pkgname: common + dir: ./pkg/sequencers/based + pkgname: based filename: forced_inclusion_retriever_mock.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 0097fb829b..de3cc9ee78 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changes +- Add max bytes contraints in simple solo sequnecer [#3312](https://github.com/evstack/ev-node/pull/3312) +- Add support for otlp in execution/grpc. [#3300](https://github.com/evstack/ev-node/pull/3300) - Optimization of mutex usage in cache for reaper [#3286](https://github.com/evstack/ev-node/pull/3286) +- Add Unix domain socket support for gRPC execution endpoints via `unix:///path/to/socket` [#3297](https://github.com/evstack/ev-node/pull/3297) +- **BREAKING:** (execution/grpc) + - Move execution service where it belongs in execution/grpc. 
[#3302](https://github.com/evstack/ev-node/pull/3302) + - Replace legacy gRPC execution `txs` payload fields with `tx_batch` so clients and servers use contiguous transaction buffers [#3297](https://github.com/evstack/ev-node/pull/3297) +- Optimize metadata writes by making it async in cache store [#3298](https://github.com/evstack/ev-node/pull/3298) +- Reduce tx cache retention to avoid OOM under (really) heavy tx load [#3299](https://github.com/evstack/ev-node/pull/3299) ## v1.1.1 diff --git a/apps/evm/go.mod b/apps/evm/go.mod index a215b11b07..ce510c830e 100644 --- a/apps/evm/go.mod +++ b/apps/evm/go.mod @@ -14,14 +14,14 @@ require ( github.com/evstack/ev-node/core v1.0.0 github.com/evstack/ev-node/execution/evm v1.0.1 github.com/ipfs/go-datastore v0.9.1 - github.com/rs/zerolog v1.35.0 + github.com/rs/zerolog v1.35.1 github.com/spf13/cobra v1.10.2 gotest.tools/v3 v3.5.2 ) require ( cloud.google.com/go v0.123.0 // indirect - cloud.google.com/go/auth v0.18.2 // indirect + cloud.google.com/go/auth v0.20.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/iam v1.7.0 // indirect @@ -44,12 +44,12 @@ require ( github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 // indirect - github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 // indirect github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 // indirect - github.com/aws/smithy-go v1.25.0 // indirect + github.com/aws/smithy-go v1.25.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // 
indirect github.com/bits-and-blooms/bitset v1.20.0 // indirect @@ -129,7 +129,7 @@ require ( github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-kad-dht v0.39.1 // indirect github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect - github.com/libp2p/go-libp2p-pubsub v0.15.0 // indirect + github.com/libp2p/go-libp2p-pubsub v0.16.0 // indirect github.com/libp2p/go-libp2p-record v0.3.1 // indirect github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect @@ -202,8 +202,8 @@ require ( github.com/wlynxg/anet v0.0.5 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect go.opentelemetry.io/otel v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect @@ -231,7 +231,7 @@ require ( golang.org/x/tools v0.43.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.17.0 // indirect - google.golang.org/api v0.274.0 // indirect + google.golang.org/api v0.276.0 // indirect google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect diff --git a/apps/evm/go.sum b/apps/evm/go.sum index 15aa5441d2..fbef5ee087 100644 --- a/apps/evm/go.sum +++ b/apps/evm/go.sum @@ -20,8 +20,8 @@ cloud.google.com/go v0.79.0/go.mod 
h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= -cloud.google.com/go/auth v0.18.2 h1:+Nbt5Ev0xEqxlNjd6c+yYUeosQ5TtEUaNcN/3FozlaM= -cloud.google.com/go/auth v0.18.2/go.mod h1:xD+oY7gcahcu7G2SG2DsBerfFxgPAJz17zz2joOFF3M= +cloud.google.com/go/auth v0.20.0 h1:kXTssoVb4azsVDoUiF8KvxAqrsQcQtB53DcSgta74CA= +cloud.google.com/go/auth v0.20.0/go.mod h1:942/yi/itH1SsmpyrbnTMDgGfdy2BUqIKyd0cyYLc5Q= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -105,8 +105,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 h1:HtOTYcb github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8/go.mod h1:VsK9abqQeGlzPgUr+isNWzPlK2vKe9INMLWnY65f5Xs= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 h1:PUmZeJU6Y1Lbvt9WFuJ0ugUK2xn6hIWUBBbKuOWF30s= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22/go.mod h1:nO6egFBoAaoXze24a2C0NjQCvdpk8OueRoYimvEB9jo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 h1:nEzwx/ZlpUZ2Y6WztsgYmfBh5Ixd3QiECawXMzvTMeo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 h1:696UM+NwOrETBCLQJyCAGtVmmZmziBT59yMwgg6Fvrw= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 h1:a1Fq/KXn75wSzoJaPQTgZO0wHGqE9mjFnylnqEPTchA= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10/go.mod h1:p6+MXNxW7IA6dMgHfTAzljuwSKD0NCm/4lbS4t6+7vI= 
github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 h1:x6bKbmDhsgSZwv6q19wY/u3rLk/3FGjJWyqKcIRufpE= @@ -115,8 +115,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 h1:oK/njaL8GtyEihkWMD4k3Vg github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20/go.mod h1:JHs8/y1f3zY7U5WcuzoJ/yAYGYtNIVPKLIbp61euvmg= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 h1:ks8KBcZPh3PYISr5dAiXCM5/Thcuxk8l+PG4+A0exds= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0/go.mod h1:pFw33T0WLvXU3rw1WBkpMlkgIn54eCB5FYLhjDc9Foo= -github.com/aws/smithy-go v1.25.0 h1:Sz/XJ64rwuiKtB6j98nDIPyYrV1nVNJ4YU74gttcl5U= -github.com/aws/smithy-go v1.25.0/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/aws/smithy-go v1.25.1 h1:J8ERsGSU7d+aCmdQur5Txg6bVoYelvQJgtZehD12GkI= +github.com/aws/smithy-go v1.25.1/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -514,8 +514,8 @@ github.com/libp2p/go-libp2p-kad-dht v0.39.1 h1:9RzUBc7zywT4ZNamRSgEvPZmVlK3Y6xdl github.com/libp2p/go-libp2p-kad-dht v0.39.1/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= -github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= -github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= +github.com/libp2p/go-libp2p-pubsub v0.16.0 h1:j7G2C8kJwkcAQqYR7Wmq3d75d3Sgw/N0Hhiv0dVx7OY= +github.com/libp2p/go-libp2p-pubsub v0.16.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= github.com/libp2p/go-libp2p-record v0.3.1 
h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= @@ -710,8 +710,8 @@ github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0t github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/zerolog v1.35.0 h1:VD0ykx7HMiMJytqINBsKcbLS+BJ4WYjz+05us+LRTdI= -github.com/rs/zerolog v1.35.0/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= +github.com/rs/zerolog v1.35.1 h1:m7xQeoiLIiV0BCEY4Hs+j2NG4Gp2o2KPKmhnnLiazKI= +github.com/rs/zerolog v1.35.1/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -827,10 +827,10 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= @@ -1195,8 +1195,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.274.0 h1:aYhycS5QQCwxHLwfEHRRLf9yNsfvp1JadKKWBE54RFA= -google.golang.org/api v0.274.0/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= +google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY= +google.golang.org/api v0.276.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/apps/grpc/README.md b/apps/grpc/README.md index a2d1f2ae11..356dc0462f 100644 --- a/apps/grpc/README.md 
+++ b/apps/grpc/README.md @@ -1,13 +1,13 @@ # gRPC Single Sequencer App -This application runs a Evolve node with a single sequencer that connects to a remote execution client via gRPC. It allows you to use any execution layer that implements the Evolve execution gRPC interface. +This application runs an Evolve node with a single sequencer that connects to an execution client via gRPC. It allows you to use any execution layer that implements the Evolve execution gRPC interface. ## Overview The gRPC single sequencer app provides: -- A Evolve consensus node with single sequencer -- Connection to remote execution clients via gRPC +- An Evolve consensus node with single sequencer +- Connection to execution clients via TCP or Unix domain socket gRPC - Full data availability layer integration - P2P networking capabilities @@ -58,11 +58,20 @@ Start the Evolve node with: --da.auth-token your-da-token ``` +For a same-machine executor, use a Unix domain socket endpoint: + +```bash +./evgrpc start \ + --root-dir ~/.evgrpc \ + --grpc-executor-url unix:///tmp/evolve-executor.sock \ + --da.address http://localhost:7980 +``` + ## Command-Line Flags ### gRPC-specific Flags -- `--grpc-executor-url`: URL of the gRPC execution service (default: `http://localhost:50051`) +- `--grpc-executor-url`: URL of the gRPC execution service, either `http://host:port` or `unix:///path/to/socket` (default: `http://localhost:50051`) ### Common Evolve Flags diff --git a/apps/grpc/cmd/run.go b/apps/grpc/cmd/run.go index 22ca71f587..026cfcd0a6 100644 --- a/apps/grpc/cmd/run.go +++ b/apps/grpc/cmd/run.go @@ -28,7 +28,7 @@ import ( const ( grpcDbName = "grpc-single" - // FlagGrpcExecutorURL is the flag for the gRPC executor endpoint + // FlagGrpcExecutorURL is the flag for the gRPC executor endpoint. 
FlagGrpcExecutorURL = "grpc-executor-url" ) @@ -163,11 +163,10 @@ func createGRPCExecutionClient(cmd *cobra.Command) (execution.Executor, error) { return nil, fmt.Errorf("%s flag is required", FlagGrpcExecutorURL) } - // Create and return the gRPC client - return executiongrpc.NewClient(executorURL), nil + return executiongrpc.NewClient(executorURL) } // addGRPCFlags adds flags specific to the gRPC execution client func addGRPCFlags(cmd *cobra.Command) { - cmd.Flags().String(FlagGrpcExecutorURL, "http://localhost:50051", "URL of the gRPC execution service") + cmd.Flags().String(FlagGrpcExecutorURL, "http://localhost:50051", "URL of the gRPC execution service, or unix:///path/to/executor.sock") } diff --git a/apps/grpc/go.mod b/apps/grpc/go.mod index 64a32c143d..1604182f1e 100644 --- a/apps/grpc/go.mod +++ b/apps/grpc/go.mod @@ -13,13 +13,13 @@ require ( github.com/evstack/ev-node/core v1.0.0 github.com/evstack/ev-node/execution/grpc v1.0.0-rc.1 github.com/ipfs/go-datastore v0.9.1 - github.com/rs/zerolog v1.35.0 + github.com/rs/zerolog v1.35.1 github.com/spf13/cobra v1.10.2 ) require ( cloud.google.com/go v0.123.0 // indirect - cloud.google.com/go/auth v0.18.2 // indirect + cloud.google.com/go/auth v0.20.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/iam v1.7.0 // indirect @@ -39,12 +39,12 @@ require ( github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 // indirect - github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 // indirect github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 // indirect 
github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 // indirect - github.com/aws/smithy-go v1.25.0 // indirect + github.com/aws/smithy-go v1.25.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/boltdb/bolt v1.3.1 // indirect @@ -111,7 +111,7 @@ require ( github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-kad-dht v0.39.1 // indirect github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect - github.com/libp2p/go-libp2p-pubsub v0.15.0 // indirect + github.com/libp2p/go-libp2p-pubsub v0.16.0 // indirect github.com/libp2p/go-libp2p-record v0.3.1 // indirect github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect @@ -179,8 +179,8 @@ require ( github.com/wlynxg/anet v0.0.5 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect go.opentelemetry.io/otel v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect @@ -208,7 +208,7 @@ require ( golang.org/x/tools v0.43.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.17.0 // indirect - google.golang.org/api v0.274.0 // indirect + google.golang.org/api v0.276.0 // indirect google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect 
diff --git a/apps/grpc/go.sum b/apps/grpc/go.sum index fb7d076ef7..fb6c0549dc 100644 --- a/apps/grpc/go.sum +++ b/apps/grpc/go.sum @@ -20,8 +20,8 @@ cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= -cloud.google.com/go/auth v0.18.2 h1:+Nbt5Ev0xEqxlNjd6c+yYUeosQ5TtEUaNcN/3FozlaM= -cloud.google.com/go/auth v0.18.2/go.mod h1:xD+oY7gcahcu7G2SG2DsBerfFxgPAJz17zz2joOFF3M= +cloud.google.com/go/auth v0.20.0 h1:kXTssoVb4azsVDoUiF8KvxAqrsQcQtB53DcSgta74CA= +cloud.google.com/go/auth v0.20.0/go.mod h1:942/yi/itH1SsmpyrbnTMDgGfdy2BUqIKyd0cyYLc5Q= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -95,8 +95,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 h1:HtOTYcb github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8/go.mod h1:VsK9abqQeGlzPgUr+isNWzPlK2vKe9INMLWnY65f5Xs= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 h1:PUmZeJU6Y1Lbvt9WFuJ0ugUK2xn6hIWUBBbKuOWF30s= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22/go.mod h1:nO6egFBoAaoXze24a2C0NjQCvdpk8OueRoYimvEB9jo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 h1:nEzwx/ZlpUZ2Y6WztsgYmfBh5Ixd3QiECawXMzvTMeo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 h1:696UM+NwOrETBCLQJyCAGtVmmZmziBT59yMwgg6Fvrw= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= 
github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 h1:a1Fq/KXn75wSzoJaPQTgZO0wHGqE9mjFnylnqEPTchA= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10/go.mod h1:p6+MXNxW7IA6dMgHfTAzljuwSKD0NCm/4lbS4t6+7vI= github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 h1:x6bKbmDhsgSZwv6q19wY/u3rLk/3FGjJWyqKcIRufpE= @@ -105,8 +105,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 h1:oK/njaL8GtyEihkWMD4k3Vg github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20/go.mod h1:JHs8/y1f3zY7U5WcuzoJ/yAYGYtNIVPKLIbp61euvmg= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 h1:ks8KBcZPh3PYISr5dAiXCM5/Thcuxk8l+PG4+A0exds= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0/go.mod h1:pFw33T0WLvXU3rw1WBkpMlkgIn54eCB5FYLhjDc9Foo= -github.com/aws/smithy-go v1.25.0 h1:Sz/XJ64rwuiKtB6j98nDIPyYrV1nVNJ4YU74gttcl5U= -github.com/aws/smithy-go v1.25.0/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/aws/smithy-go v1.25.1 h1:J8ERsGSU7d+aCmdQur5Txg6bVoYelvQJgtZehD12GkI= +github.com/aws/smithy-go v1.25.1/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -445,8 +445,8 @@ github.com/libp2p/go-libp2p-kad-dht v0.39.1 h1:9RzUBc7zywT4ZNamRSgEvPZmVlK3Y6xdl github.com/libp2p/go-libp2p-kad-dht v0.39.1/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= -github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= -github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= +github.com/libp2p/go-libp2p-pubsub v0.16.0 
h1:j7G2C8kJwkcAQqYR7Wmq3d75d3Sgw/N0Hhiv0dVx7OY= +github.com/libp2p/go-libp2p-pubsub v0.16.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= @@ -626,8 +626,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/rs/zerolog v1.35.0 h1:VD0ykx7HMiMJytqINBsKcbLS+BJ4WYjz+05us+LRTdI= -github.com/rs/zerolog v1.35.0/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= +github.com/rs/zerolog v1.35.1 h1:m7xQeoiLIiV0BCEY4Hs+j2NG4Gp2o2KPKmhnnLiazKI= +github.com/rs/zerolog v1.35.1/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -727,10 +727,10 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= @@ -1092,8 +1092,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.274.0 h1:aYhycS5QQCwxHLwfEHRRLf9yNsfvp1JadKKWBE54RFA= -google.golang.org/api v0.274.0/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= +google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY= +google.golang.org/api v0.276.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine 
v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/apps/testapp/go.mod b/apps/testapp/go.mod index 7285464eb2..c71b3ef1b9 100644 --- a/apps/testapp/go.mod +++ b/apps/testapp/go.mod @@ -11,14 +11,14 @@ require ( github.com/evstack/ev-node v1.1.1 github.com/evstack/ev-node/core v1.0.0 github.com/ipfs/go-datastore v0.9.1 - github.com/rs/zerolog v1.35.0 + github.com/rs/zerolog v1.35.1 github.com/spf13/cobra v1.10.2 github.com/stretchr/testify v1.11.1 ) require ( cloud.google.com/go v0.123.0 // indirect - cloud.google.com/go/auth v0.18.2 // indirect + cloud.google.com/go/auth v0.20.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/iam v1.7.0 // indirect @@ -38,12 +38,12 @@ require ( github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 // indirect - github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 // indirect github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 // indirect - github.com/aws/smithy-go v1.25.0 // indirect + github.com/aws/smithy-go v1.25.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/boltdb/bolt v1.3.1 // indirect @@ -110,7 +110,7 @@ require ( github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-kad-dht v0.39.1 // indirect github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect - github.com/libp2p/go-libp2p-pubsub v0.15.0 // indirect + 
github.com/libp2p/go-libp2p-pubsub v0.16.0 // indirect github.com/libp2p/go-libp2p-record v0.3.1 // indirect github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect @@ -177,8 +177,8 @@ require ( github.com/wlynxg/anet v0.0.5 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect go.opentelemetry.io/otel v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect @@ -206,7 +206,7 @@ require ( golang.org/x/tools v0.43.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.17.0 // indirect - google.golang.org/api v0.274.0 // indirect + google.golang.org/api v0.276.0 // indirect google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect diff --git a/apps/testapp/go.sum b/apps/testapp/go.sum index fb7d076ef7..fb6c0549dc 100644 --- a/apps/testapp/go.sum +++ b/apps/testapp/go.sum @@ -20,8 +20,8 @@ cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= -cloud.google.com/go/auth v0.18.2 
h1:+Nbt5Ev0xEqxlNjd6c+yYUeosQ5TtEUaNcN/3FozlaM= -cloud.google.com/go/auth v0.18.2/go.mod h1:xD+oY7gcahcu7G2SG2DsBerfFxgPAJz17zz2joOFF3M= +cloud.google.com/go/auth v0.20.0 h1:kXTssoVb4azsVDoUiF8KvxAqrsQcQtB53DcSgta74CA= +cloud.google.com/go/auth v0.20.0/go.mod h1:942/yi/itH1SsmpyrbnTMDgGfdy2BUqIKyd0cyYLc5Q= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -95,8 +95,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 h1:HtOTYcb github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8/go.mod h1:VsK9abqQeGlzPgUr+isNWzPlK2vKe9INMLWnY65f5Xs= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 h1:PUmZeJU6Y1Lbvt9WFuJ0ugUK2xn6hIWUBBbKuOWF30s= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22/go.mod h1:nO6egFBoAaoXze24a2C0NjQCvdpk8OueRoYimvEB9jo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 h1:nEzwx/ZlpUZ2Y6WztsgYmfBh5Ixd3QiECawXMzvTMeo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 h1:696UM+NwOrETBCLQJyCAGtVmmZmziBT59yMwgg6Fvrw= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 h1:a1Fq/KXn75wSzoJaPQTgZO0wHGqE9mjFnylnqEPTchA= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10/go.mod h1:p6+MXNxW7IA6dMgHfTAzljuwSKD0NCm/4lbS4t6+7vI= github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 h1:x6bKbmDhsgSZwv6q19wY/u3rLk/3FGjJWyqKcIRufpE= @@ -105,8 +105,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 h1:oK/njaL8GtyEihkWMD4k3Vg github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20/go.mod h1:JHs8/y1f3zY7U5WcuzoJ/yAYGYtNIVPKLIbp61euvmg= 
github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 h1:ks8KBcZPh3PYISr5dAiXCM5/Thcuxk8l+PG4+A0exds= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0/go.mod h1:pFw33T0WLvXU3rw1WBkpMlkgIn54eCB5FYLhjDc9Foo= -github.com/aws/smithy-go v1.25.0 h1:Sz/XJ64rwuiKtB6j98nDIPyYrV1nVNJ4YU74gttcl5U= -github.com/aws/smithy-go v1.25.0/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/aws/smithy-go v1.25.1 h1:J8ERsGSU7d+aCmdQur5Txg6bVoYelvQJgtZehD12GkI= +github.com/aws/smithy-go v1.25.1/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -445,8 +445,8 @@ github.com/libp2p/go-libp2p-kad-dht v0.39.1 h1:9RzUBc7zywT4ZNamRSgEvPZmVlK3Y6xdl github.com/libp2p/go-libp2p-kad-dht v0.39.1/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= -github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= -github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= +github.com/libp2p/go-libp2p-pubsub v0.16.0 h1:j7G2C8kJwkcAQqYR7Wmq3d75d3Sgw/N0Hhiv0dVx7OY= +github.com/libp2p/go-libp2p-pubsub v0.16.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= @@ -626,8 +626,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/rs/zerolog v1.35.0 h1:VD0ykx7HMiMJytqINBsKcbLS+BJ4WYjz+05us+LRTdI= -github.com/rs/zerolog v1.35.0/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= +github.com/rs/zerolog v1.35.1 h1:m7xQeoiLIiV0BCEY4Hs+j2NG4Gp2o2KPKmhnnLiazKI= +github.com/rs/zerolog v1.35.1/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -727,10 +727,10 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc 
v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= @@ -1092,8 +1092,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.274.0 h1:aYhycS5QQCwxHLwfEHRRLf9yNsfvp1JadKKWBE54RFA= -google.golang.org/api v0.274.0/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= +google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY= +google.golang.org/api v0.276.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/block/internal/cache/generic_cache.go b/block/internal/cache/generic_cache.go index 96ee82d8b2..dc3e1b7d14 100644 --- a/block/internal/cache/generic_cache.go +++ b/block/internal/cache/generic_cache.go @@ -102,7 +102,7 @@ func (c *Cache) setSeenBatch(hashes []string, height uint64) { return } - // currently not used, but there for compleness against setSeen + // currently not used, but there for completeness against setSeen for _, h := 
range hashes { if existing, ok := c.hashByHeight[height]; ok && existing == h { c.hashes[existing] = true diff --git a/block/internal/cache/generic_cache_test.go b/block/internal/cache/generic_cache_test.go index d3766fc437..ec4e92e7f7 100644 --- a/block/internal/cache/generic_cache_test.go +++ b/block/internal/cache/generic_cache_test.go @@ -297,7 +297,7 @@ func TestHeightPlaceholderKey(t *testing.T) { // TestCache_NoPlaceholderLeakAfterRefire verifies that when the DA retriever // re-fires setDAIncluded with the real content hash after a restart, the // snapshot placeholder that RestoreFromStore installed is evicted from -// daIncluded. Without the eviction in setDAIncluded, every restart cycle +// daIncluded. Without the eviction in setDAIncluded, every restart cycle // would leak one orphaned placeholder key per in-flight block. func TestCache_NoPlaceholderLeakAfterRefire(t *testing.T) { st := testMemStore(t) diff --git a/block/internal/cache/manager.go b/block/internal/cache/manager.go index e907002600..4d95a7d7e5 100644 --- a/block/internal/cache/manager.go +++ b/block/internal/cache/manager.go @@ -26,7 +26,8 @@ const ( DataDAIncludedPrefix = "cache/data-da-included/" // DefaultTxCacheRetention is the default time to keep transaction hashes in cache. - DefaultTxCacheRetention = 24 * time.Hour + // Keeping a too high value can lead to OOM during heavy transaction load. + DefaultTxCacheRetention = 30 * time.Minute ) // CacheManager provides thread-safe cache operations for tracking seen blocks diff --git a/block/internal/reaping/reaper.go b/block/internal/reaping/reaper.go index d35dbfff3e..1f51c08f10 100644 --- a/block/internal/reaping/reaper.go +++ b/block/internal/reaping/reaper.go @@ -21,7 +21,10 @@ import ( const ( // MaxBackoffInterval is the maximum backoff interval for retries MaxBackoffInterval = 30 * time.Second - CleanupInterval = 1 * time.Hour + + // CleanupInterval is how often the reaper sweeps expired hashes + // out of the seen-tx cache. 
+ CleanupInterval = max(cache.DefaultTxCacheRetention/10, 15*time.Second) ) // Reaper is responsible for periodically retrieving transactions from the executor, diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index ae67c20a20..69a23fdd66 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -750,6 +750,9 @@ func (s *Syncer) trySyncNextBlockWithState(ctx context.Context, event *common.DA s.cache.RemoveHeaderDAIncluded(headerHash) s.cache.RemoveDataDAIncluded(data.DACommitment().String()) + if errors.Is(err, types.ErrUnexpectedProposer) { + return errors.Join(errInvalidBlock, err) + } if !errors.Is(err, errInvalidState) && !errors.Is(err, errInvalidBlock) { return errors.Join(errInvalidBlock, err) } diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index a581ed562a..a6b371995f 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -880,6 +880,7 @@ func TestSyncLoopPersistState(t *testing.T) { eventCh <- datypes.SubscriptionEvent{Height: myFutureDAHeight} syncerInst1.startSyncWorkers(ctx) syncerInst1.wg.Wait() + follower1.Stop() requireEmptyChan(t, errorCh) t.Log("sync workers on instance1 completed") diff --git a/buf.gen.grpc.yaml b/buf.gen.grpc.yaml new file mode 100644 index 0000000000..019de76139 --- /dev/null +++ b/buf.gen.grpc.yaml @@ -0,0 +1,11 @@ +version: v2 + +plugins: + - remote: buf.build/protocolbuffers/go + out: execution/grpc/types/pb + opt: paths=source_relative + - remote: buf.build/connectrpc/go + out: execution/grpc/types/pb + opt: paths=source_relative +inputs: + - directory: execution/grpc/proto diff --git a/buf.yaml b/buf.yaml index bf7debf7cc..f3aef299bd 100644 --- a/buf.yaml +++ b/buf.yaml @@ -2,6 +2,7 @@ version: v2 modules: - path: proto + - path: execution/grpc/proto lint: use: - COMMENTS diff --git a/client/crates/types/src/proto/evnode.v1.messages.rs 
b/client/crates/types/src/proto/evnode.v1.messages.rs index e6038f54ce..3bba31f88c 100644 --- a/client/crates/types/src/proto/evnode.v1.messages.rs +++ b/client/crates/types/src/proto/evnode.v1.messages.rs @@ -278,151 +278,6 @@ pub struct BlockData { #[prost(bytes = "vec", repeated, tag = "3")] pub blobs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } -/// InitChainRequest contains the genesis parameters for chain initialization -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct InitChainRequest { - /// Timestamp marking chain start time in UTC - #[prost(message, optional, tag = "1")] - pub genesis_time: ::core::option::Option<::prost_types::Timestamp>, - /// First block height (must be > 0) - #[prost(uint64, tag = "2")] - pub initial_height: u64, - /// Unique identifier string for the blockchain - #[prost(string, tag = "3")] - pub chain_id: ::prost::alloc::string::String, -} -/// InitChainResponse contains the initial state and configuration -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct InitChainResponse { - /// Hash representing initial state - #[prost(bytes = "vec", tag = "1")] - pub state_root: ::prost::alloc::vec::Vec, -} -/// GetTxsRequest is the request for fetching transactions -/// -/// Empty for now, may include filtering criteria in the future -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct GetTxsRequest {} -/// GetTxsResponse contains the available transactions -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct GetTxsResponse { - /// Slice of valid transactions from mempool - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, -} -/// ExecuteTxsRequest contains transactions and block context for execution -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ExecuteTxsRequest { - /// Ordered list of transactions to execute - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: 
::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, - /// Height of block being created (must be > 0) - #[prost(uint64, tag = "2")] - pub block_height: u64, - /// Block creation time in UTC - #[prost(message, optional, tag = "3")] - pub timestamp: ::core::option::Option<::prost_types::Timestamp>, - /// Previous block's state root hash - #[prost(bytes = "vec", tag = "4")] - pub prev_state_root: ::prost::alloc::vec::Vec, -} -/// ExecuteTxsResponse contains the result of transaction execution -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ExecuteTxsResponse { - /// New state root after executing transactions - #[prost(bytes = "vec", tag = "1")] - pub updated_state_root: ::prost::alloc::vec::Vec, - /// Maximum allowed transaction size (may change with protocol updates) - #[prost(uint64, tag = "2")] - pub max_bytes: u64, - /// Proposer address that should sign the next block. - /// Empty means the current proposer remains active. - #[prost(bytes = "vec", tag = "3")] - pub next_proposer_address: ::prost::alloc::vec::Vec, -} -/// SetFinalRequest marks a block as finalized -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SetFinalRequest { - /// Height of block to finalize - #[prost(uint64, tag = "1")] - pub block_height: u64, -} -/// SetFinalResponse indicates whether finalization was successful -/// -/// Empty response, errors are returned via gRPC status -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SetFinalResponse {} -/// GetExecutionInfoRequest requests execution layer parameters -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct GetExecutionInfoRequest {} -/// GetExecutionInfoResponse contains execution layer parameters -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct GetExecutionInfoResponse { - /// Maximum gas allowed for transactions in a block - /// For non-gas-based execution layers, this should be 0 - #[prost(uint64, tag = "1")] - pub 
max_gas: u64, - /// Proposer address that should sign the next block from the execution - /// layer's current view. Empty means unchanged or unavailable. - #[prost(bytes = "vec", tag = "2")] - pub next_proposer_address: ::prost::alloc::vec::Vec, -} -/// FilterTxsRequest contains transactions to validate and filter -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct FilterTxsRequest { - /// All transactions (force-included + mempool) - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, - /// Maximum cumulative size allowed (0 means no size limit) - #[prost(uint64, tag = "2")] - pub max_bytes: u64, - /// Maximum cumulative gas allowed (0 means no gas limit) - #[prost(uint64, tag = "3")] - pub max_gas: u64, - /// Whether force-included transactions are present - #[prost(bool, tag = "4")] - pub has_force_included_transaction: bool, -} -/// FilterTxsResponse contains the filter status for each transaction -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct FilterTxsResponse { - /// Filter status for each transaction (same length as txs in request) - #[prost(enumeration = "FilterStatus", repeated, tag = "1")] - pub statuses: ::prost::alloc::vec::Vec, -} -/// FilterStatus represents the result of filtering a transaction -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum FilterStatus { - /// Transaction will make it to the next batch - FilterOk = 0, - /// Transaction will be filtered out because invalid (too big, malformed, etc.) - FilterRemove = 1, - /// Transaction is valid but postponed for later processing due to size/gas constraint - FilterPostpone = 2, -} -impl FilterStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
- pub fn as_str_name(&self) -> &'static str { - match self { - Self::FilterOk => "FILTER_OK", - Self::FilterRemove => "FILTER_REMOVE", - Self::FilterPostpone => "FILTER_POSTPONE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "FILTER_OK" => Some(Self::FilterOk), - "FILTER_REMOVE" => Some(Self::FilterRemove), - "FILTER_POSTPONE" => Some(Self::FilterPostpone), - _ => None, - } - } -} /// Block contains all the components of a complete block #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Block { diff --git a/client/crates/types/src/proto/evnode.v1.services.rs b/client/crates/types/src/proto/evnode.v1.services.rs index 013e96db37..608af18376 100644 --- a/client/crates/types/src/proto/evnode.v1.services.rs +++ b/client/crates/types/src/proto/evnode.v1.services.rs @@ -1023,841 +1023,6 @@ pub struct BlockData { #[prost(bytes = "vec", repeated, tag = "3")] pub blobs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } -/// InitChainRequest contains the genesis parameters for chain initialization -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct InitChainRequest { - /// Timestamp marking chain start time in UTC - #[prost(message, optional, tag = "1")] - pub genesis_time: ::core::option::Option<::prost_types::Timestamp>, - /// First block height (must be > 0) - #[prost(uint64, tag = "2")] - pub initial_height: u64, - /// Unique identifier string for the blockchain - #[prost(string, tag = "3")] - pub chain_id: ::prost::alloc::string::String, -} -/// InitChainResponse contains the initial state and configuration -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct InitChainResponse { - /// Hash representing initial state - #[prost(bytes = "vec", tag = "1")] - pub state_root: ::prost::alloc::vec::Vec, -} -/// GetTxsRequest is the request for fetching transactions -/// -/// Empty for now, may include 
filtering criteria in the future -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct GetTxsRequest {} -/// GetTxsResponse contains the available transactions -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct GetTxsResponse { - /// Slice of valid transactions from mempool - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, -} -/// ExecuteTxsRequest contains transactions and block context for execution -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ExecuteTxsRequest { - /// Ordered list of transactions to execute - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, - /// Height of block being created (must be > 0) - #[prost(uint64, tag = "2")] - pub block_height: u64, - /// Block creation time in UTC - #[prost(message, optional, tag = "3")] - pub timestamp: ::core::option::Option<::prost_types::Timestamp>, - /// Previous block's state root hash - #[prost(bytes = "vec", tag = "4")] - pub prev_state_root: ::prost::alloc::vec::Vec, -} -/// ExecuteTxsResponse contains the result of transaction execution -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ExecuteTxsResponse { - /// New state root after executing transactions - #[prost(bytes = "vec", tag = "1")] - pub updated_state_root: ::prost::alloc::vec::Vec, - /// Maximum allowed transaction size (may change with protocol updates) - #[prost(uint64, tag = "2")] - pub max_bytes: u64, - /// Proposer address that should sign the next block. - /// Empty means the current proposer remains active. 
- #[prost(bytes = "vec", tag = "3")] - pub next_proposer_address: ::prost::alloc::vec::Vec, -} -/// SetFinalRequest marks a block as finalized -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SetFinalRequest { - /// Height of block to finalize - #[prost(uint64, tag = "1")] - pub block_height: u64, -} -/// SetFinalResponse indicates whether finalization was successful -/// -/// Empty response, errors are returned via gRPC status -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SetFinalResponse {} -/// GetExecutionInfoRequest requests execution layer parameters -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct GetExecutionInfoRequest {} -/// GetExecutionInfoResponse contains execution layer parameters -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct GetExecutionInfoResponse { - /// Maximum gas allowed for transactions in a block - /// For non-gas-based execution layers, this should be 0 - #[prost(uint64, tag = "1")] - pub max_gas: u64, - /// Proposer address that should sign the next block from the execution - /// layer's current view. Empty means unchanged or unavailable. 
- #[prost(bytes = "vec", tag = "2")] - pub next_proposer_address: ::prost::alloc::vec::Vec, -} -/// FilterTxsRequest contains transactions to validate and filter -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct FilterTxsRequest { - /// All transactions (force-included + mempool) - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, - /// Maximum cumulative size allowed (0 means no size limit) - #[prost(uint64, tag = "2")] - pub max_bytes: u64, - /// Maximum cumulative gas allowed (0 means no gas limit) - #[prost(uint64, tag = "3")] - pub max_gas: u64, - /// Whether force-included transactions are present - #[prost(bool, tag = "4")] - pub has_force_included_transaction: bool, -} -/// FilterTxsResponse contains the filter status for each transaction -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct FilterTxsResponse { - /// Filter status for each transaction (same length as txs in request) - #[prost(enumeration = "FilterStatus", repeated, tag = "1")] - pub statuses: ::prost::alloc::vec::Vec, -} -/// FilterStatus represents the result of filtering a transaction -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum FilterStatus { - /// Transaction will make it to the next batch - FilterOk = 0, - /// Transaction will be filtered out because invalid (too big, malformed, etc.) - FilterRemove = 1, - /// Transaction is valid but postponed for later processing due to size/gas constraint - FilterPostpone = 2, -} -impl FilterStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
- pub fn as_str_name(&self) -> &'static str { - match self { - Self::FilterOk => "FILTER_OK", - Self::FilterRemove => "FILTER_REMOVE", - Self::FilterPostpone => "FILTER_POSTPONE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "FILTER_OK" => Some(Self::FilterOk), - "FILTER_REMOVE" => Some(Self::FilterRemove), - "FILTER_POSTPONE" => Some(Self::FilterPostpone), - _ => None, - } - } -} -/// Generated client implementations. -pub mod executor_service_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// ExecutorService defines the execution layer interface for EVNode - #[derive(Debug, Clone)] - pub struct ExecutorServiceClient { - inner: tonic::client::Grpc, - } - impl ExecutorServiceClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ExecutorServiceClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ExecutorServiceClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ExecutorServiceClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// InitChain initializes a new blockchain instance with genesis parameters - pub async fn init_chain( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/evnode.v1.ExecutorService/InitChain", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("evnode.v1.ExecutorService", "InitChain")); - self.inner.unary(req, path, codec).await - } - /// GetTxs fetches available transactions from the execution layer's mempool - pub async fn get_txs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/evnode.v1.ExecutorService/GetTxs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("evnode.v1.ExecutorService", "GetTxs")); - self.inner.unary(req, path, codec).await - } - /// ExecuteTxs processes transactions to produce a new block state - pub async fn execute_txs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - 
"/evnode.v1.ExecutorService/ExecuteTxs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("evnode.v1.ExecutorService", "ExecuteTxs")); - self.inner.unary(req, path, codec).await - } - /// SetFinal marks a block as finalized at the specified height - pub async fn set_final( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/evnode.v1.ExecutorService/SetFinal", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("evnode.v1.ExecutorService", "SetFinal")); - self.inner.unary(req, path, codec).await - } - /// GetExecutionInfo returns current execution layer parameters - pub async fn get_execution_info( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/evnode.v1.ExecutorService/GetExecutionInfo", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("evnode.v1.ExecutorService", "GetExecutionInfo"), - ); - self.inner.unary(req, path, codec).await - } - /// FilterTxs validates force-included transactions and calculates gas for all transactions - pub async fn filter_txs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = 
tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/evnode.v1.ExecutorService/FilterTxs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("evnode.v1.ExecutorService", "FilterTxs")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod executor_service_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ExecutorServiceServer. - #[async_trait] - pub trait ExecutorService: std::marker::Send + std::marker::Sync + 'static { - /// InitChain initializes a new blockchain instance with genesis parameters - async fn init_chain( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// GetTxs fetches available transactions from the execution layer's mempool - async fn get_txs( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// ExecuteTxs processes transactions to produce a new block state - async fn execute_txs( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// SetFinal marks a block as finalized at the specified height - async fn set_final( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// GetExecutionInfo returns current execution layer parameters - async fn get_execution_info( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// FilterTxs validates force-included transactions and calculates gas for all transactions - async fn filter_txs( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// ExecutorService defines the execution layer 
interface for EVNode - #[derive(Debug)] - pub struct ExecutorServiceServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ExecutorServiceServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ExecutorServiceServer - where - T: ExecutorService, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/evnode.v1.ExecutorService/InitChain" => { - #[allow(non_camel_case_types)] - struct InitChainSvc(pub Arc); - impl< - T: ExecutorService, - > tonic::server::UnaryService - for InitChainSvc { - type Response = super::InitChainResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::init_chain(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = InitChainSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/evnode.v1.ExecutorService/GetTxs" => { - #[allow(non_camel_case_types)] - struct GetTxsSvc(pub Arc); - impl< 
- T: ExecutorService, - > tonic::server::UnaryService - for GetTxsSvc { - type Response = super::GetTxsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_txs(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetTxsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/evnode.v1.ExecutorService/ExecuteTxs" => { - #[allow(non_camel_case_types)] - struct ExecuteTxsSvc(pub Arc); - impl< - T: ExecutorService, - > tonic::server::UnaryService - for ExecuteTxsSvc { - type Response = super::ExecuteTxsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::execute_txs(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = ExecuteTxsSvc(inner); - let 
codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/evnode.v1.ExecutorService/SetFinal" => { - #[allow(non_camel_case_types)] - struct SetFinalSvc(pub Arc); - impl< - T: ExecutorService, - > tonic::server::UnaryService - for SetFinalSvc { - type Response = super::SetFinalResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::set_final(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SetFinalSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/evnode.v1.ExecutorService/GetExecutionInfo" => { - #[allow(non_camel_case_types)] - struct GetExecutionInfoSvc(pub Arc); - impl< - T: ExecutorService, - > tonic::server::UnaryService - for GetExecutionInfoSvc { - type Response = super::GetExecutionInfoResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> 
Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_execution_info(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetExecutionInfoSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/evnode.v1.ExecutorService/FilterTxs" => { - #[allow(non_camel_case_types)] - struct FilterTxsSvc(pub Arc); - impl< - T: ExecutorService, - > tonic::server::UnaryService - for FilterTxsSvc { - type Response = super::FilterTxsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::filter_txs(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = FilterTxsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - 
max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ExecutorServiceServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "evnode.v1.ExecutorService"; - impl tonic::server::NamedService for ExecutorServiceServer { - const NAME: &'static str = SERVICE_NAME; - } -} /// Block contains all the components of a complete block #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Block { diff --git a/docs/.vitepress/config.ts b/docs/.vitepress/config.ts index 0cfdf5c7ae..b35026c70d 100644 --- a/docs/.vitepress/config.ts +++ b/docs/.vitepress/config.ts @@ -1,6 +1,8 @@ import { withMermaid } from "vitepress-plugin-mermaid"; import { useSidebar } from "vitepress-openapi"; import spec from "../src/openapi-rpc.json" with { type: "json" }; +import type { ThemeRegistration } from "shiki"; +import evDarkTheme from "./ev-dark.json" with { type: "json" }; const telegramSVG = ` @@ -25,6 +27,10 @@ export default withMermaid({ ignoreDeadLinks: true, appearance: false, base: base, + + markdown: { + theme: evDarkTheme as ThemeRegistration, + }, sitemap: { hostname: "https://docs.ev.xyz", }, @@ -273,6 +279,24 @@ function sidebarHome() { 
}, ], }, + { + text: "High Availability", + collapsed: true, + items: [ + { + text: "Overview & Configuration", + link: "/guides/ha/overview", + }, + { + text: "Bootstrap a 5-Node Cluster", + link: "/guides/ha/cluster-setup", + }, + { + text: "Migrate Single → HA", + link: "/guides/ha/single-to-ha", + }, + ], + }, { text: "Run a Full Node", link: "/guides/full-node", diff --git a/docs/.vitepress/ev-dark.json b/docs/.vitepress/ev-dark.json new file mode 100644 index 0000000000..6b4bd4512d --- /dev/null +++ b/docs/.vitepress/ev-dark.json @@ -0,0 +1,281 @@ +{ + "name": "ev-dark", + "displayName": "Evolve Dark", + "type": "dark", + "colors": { + "editor.background": "#000000", + "editor.foreground": "#dbd7ca" + }, + "semanticHighlighting": true, + "semanticTokenColors": { + "class": "#CDA4EB", + "interface": "#CDA4EB", + "namespace": "#CDA4EB", + "property": "#94EEFF", + "type": "#CDA4EB" + }, + "tokenColors": [ + { + "scope": ["comment", "punctuation.definition.comment", "string.comment"], + "settings": { "foreground": "#A0A0A0" } + }, + { + "scope": [ + "delimiter.bracket", "delimiter", + "invalid.illegal.character-not-allowed-here.html", + "keyword.operator.rest", "keyword.operator.spread", + "keyword.operator.type.annotation", "keyword.operator.relational", + "keyword.operator.assignment", "keyword.operator.type", + "meta.brace", "meta.tag.block.any.html", + "meta.tag.inline.any.html", "meta.tag.structure.input.void.html", + "meta.type.annotation", "meta.embedded.block.github-actions-expression", + "storage.type.function.arrow", "meta.objectliteral.ts", + "punctuation", + "punctuation.definition.string.begin.html.vue", + "punctuation.definition.string.end.html.vue" + ], + "settings": { "foreground": "#767676" } + }, + { + "scope": ["constant", "entity.name.constant", "variable.language", "meta.definition.variable"], + "settings": { "foreground": "#FFECB6" } + }, + { + "scope": ["entity", "entity.name"], + "settings": { "foreground": "#94EEFF" } + }, + { + "scope": 
"variable.parameter.function", + "settings": { "foreground": "#dbd7ca" } + }, + { + "scope": ["entity.name.tag", "tag.html"], + "settings": { "foreground": "#94EEFF" } + }, + { + "scope": "entity.name.function", + "settings": { "foreground": "#94EEFF" } + }, + { + "scope": ["keyword", "storage.type.class.jsdoc", "punctuation.definition.template-expression"], + "settings": { "foreground": "#B8A6FF" } + }, + { + "scope": [ + "storage", "storage.type", "support.type.builtin", + "constant.language.undefined", "constant.language.null", + "constant.language.import-export-all.ts" + ], + "settings": { "foreground": "#B8A6FF" } + }, + { + "scope": [ + "text.html.derivative", "storage.modifier.package", + "storage.modifier.import", "storage.type.java" + ], + "settings": { "foreground": "#dbd7ca" } + }, + { + "scope": ["string", "string punctuation.section.embedded source", "attribute.value"], + "settings": { "foreground": "#BFF3C2" } + }, + { + "scope": "punctuation.definition.string", + "settings": { "foreground": "#BFF3C280" } + }, + { + "scope": "punctuation.support.type.property-name", + "settings": { "foreground": "#94EEFF77" } + }, + { + "scope": "support", + "settings": { "foreground": "#94EEFF" } + }, + { + "scope": [ + "property", "meta.property-name", "meta.object-literal.key", + "entity.name.tag.yaml", "attribute.name" + ], + "settings": { "foreground": "#94EEFF" } + }, + { + "scope": [ + "entity.other.attribute-name", + "invalid.deprecated.entity.other.attribute-name.html" + ], + "settings": { "foreground": "#B8A6FFAA" } + }, + { + "scope": ["variable", "identifier"], + "settings": { "foreground": "#dbd7ca" } + }, + { + "scope": ["support.type.primitive", "entity.name.type"], + "settings": { "foreground": "#CDA4EB" } + }, + { + "scope": "namespace", + "settings": { "foreground": "#CDA4EB" } + }, + { + "scope": ["keyword.operator", "keyword.operator.assignment.compound", "meta.var.expr.ts"], + "settings": { "foreground": "#888888" } + }, + { + "scope": 
"invalid.broken", + "settings": { "fontStyle": "italic", "foreground": "#FFB7B7" } + }, + { + "scope": "invalid.deprecated", + "settings": { "fontStyle": "italic", "foreground": "#FFB7B7" } + }, + { + "scope": "invalid.illegal", + "settings": { "fontStyle": "italic", "foreground": "#FFB7B7" } + }, + { + "scope": "invalid.unimplemented", + "settings": { "fontStyle": "italic", "foreground": "#FFB7B7" } + }, + { + "scope": "message.error", + "settings": { "foreground": "#FFB7B7" } + }, + { + "scope": "string variable", + "settings": { "foreground": "#BFF3C2" } + }, + { + "scope": ["source.regexp", "string.regexp"], + "settings": { "foreground": "#BFF3C2" } + }, + { + "scope": [ + "string.regexp.character-class", + "string.regexp constant.character.escape", + "string.regexp source.ruby.embedded", + "string.regexp string.regexp.arbitrary-repitition" + ], + "settings": { "foreground": "#BFF3C2CC" } + }, + { + "scope": "string.regexp constant.character.escape", + "settings": { "foreground": "#FFECB6" } + }, + { + "scope": "support.constant", + "settings": { "foreground": "#FFECB6" } + }, + { + "scope": ["keyword.operator.quantifier.regexp", "constant.numeric", "number"], + "settings": { "foreground": "#FFECB6" } + }, + { + "scope": "keyword.other.unit", + "settings": { "foreground": "#FFECB6" } + }, + { + "scope": ["constant.language.boolean", "constant.language"], + "settings": { "foreground": "#FFECB6" } + }, + { + "scope": "meta.module-reference", + "settings": { "foreground": "#B8A6FF" } + }, + { + "scope": "punctuation.definition.list.begin.markdown", + "settings": { "foreground": "#888888" } + }, + { + "scope": ["markup.heading", "markup.heading entity.name"], + "settings": { "fontStyle": "bold", "foreground": "#B8A6FF" } + }, + { + "scope": "markup.quote", + "settings": { "foreground": "#CDA4EB" } + }, + { + "scope": "markup.italic", + "settings": { "fontStyle": "italic", "foreground": "#dbd7ca" } + }, + { + "scope": "markup.bold", + "settings": { "fontStyle": 
"bold", "foreground": "#dbd7ca" } + }, + { + "scope": "markup.raw", + "settings": { "foreground": "#BFF3C2" } + }, + { + "scope": ["markup.deleted", "meta.diff.header.from-file", "punctuation.definition.deleted"], + "settings": { "background": "#FFB7B722", "foreground": "#FFB7B7" } + }, + { + "scope": ["markup.inserted", "meta.diff.header.to-file", "punctuation.definition.inserted"], + "settings": { "background": "#BFF3C222", "foreground": "#BFF3C2" } + }, + { + "scope": ["markup.changed", "punctuation.definition.changed"], + "settings": { "background": "#FFECB622", "foreground": "#FFECB6" } + }, + { + "scope": ["markup.ignored", "markup.untracked"], + "settings": { "background": "#94EEFF22", "foreground": "#94EEFF" } + }, + { + "scope": "meta.diff.range", + "settings": { "fontStyle": "bold", "foreground": "#CDA4EB" } + }, + { + "scope": "meta.diff.header", + "settings": { "foreground": "#94EEFF" } + }, + { + "scope": "meta.separator", + "settings": { "fontStyle": "bold", "foreground": "#94EEFF" } + }, + { + "scope": "meta.output", + "settings": { "foreground": "#94EEFF" } + }, + { + "scope": [ + "brackethighlighter.tag", "brackethighlighter.curly", + "brackethighlighter.round", "brackethighlighter.square", + "brackethighlighter.angle", "brackethighlighter.quote" + ], + "settings": { "foreground": "#888888" } + }, + { + "scope": "brackethighlighter.unmatched", + "settings": { "foreground": "#FFB7B7" } + }, + { + "scope": [ + "constant.other.reference.link", "string.other.link", + "punctuation.definition.string.begin.markdown", + "punctuation.definition.string.end.markdown" + ], + "settings": { "foreground": "#BFF3C2" } + }, + { + "scope": [ + "markup.underline.link.markdown", + "markup.underline.link.image.markdown" + ], + "settings": { "fontStyle": "underline", "foreground": "#94EEFF" } + }, + { + "scope": ["type.identifier", "constant.other.character-class.regexp"], + "settings": { "foreground": "#CDA4EB" } + }, + { + "scope": 
"entity.other.attribute-name.html.vue", + "settings": { "foreground": "#94EEFF" } + }, + { + "scope": "invalid.illegal.unrecognized-tag.html", + "settings": { "fontStyle": "normal" } + } + ] +} diff --git a/docs/.vitepress/theme/style.css b/docs/.vitepress/theme/style.css index 7a4bb58562..6bbba19752 100644 --- a/docs/.vitepress/theme/style.css +++ b/docs/.vitepress/theme/style.css @@ -153,12 +153,13 @@ :root { --vp-code-block-bg: #000000; - --vp-code-block-color: #ffffff; + --vp-code-block-color: #dbd7ca; --vp-code-block-divider-color: rgba(255, 255, 255, 0.1); /* Inline code */ --vp-code-color: #000000; --vp-code-bg: rgba(0, 0, 0, 0.05); + } /* Inline code styling */ @@ -179,7 +180,7 @@ div[class*="language-"] pre { } div[class*="language-"] code { - color: #ffffff; + color: #dbd7ca; /* vitesse-black default text */ } /** diff --git a/docs/concepts/block-lifecycle.md b/docs/concepts/block-lifecycle.md index 91e835dea8..4d76e04ece 100644 --- a/docs/concepts/block-lifecycle.md +++ b/docs/concepts/block-lifecycle.md @@ -682,13 +682,13 @@ See [tutorial] for running a multi-node network with both aggregator and non-agg [5] [Tutorial][tutorial] -[6] [Header and Data Separation ADR](../../adr/adr-014-header-and-data-separation.md) +[6] [Header and Data Separation ADR](../adr/adr-014-header-and-data-separation.md) -[7] [Evolve Minimal Header](../../adr/adr-015-rollkit-minimal-header.md) +[7] [Evolve Minimal Header](../adr/adr-015-rollkit-minimal-header.md) [8] [Data Availability](./data-availability.md) -[9] [Lazy Aggregation with DA Layer Consistency ADR](../../adr/adr-021-lazy-aggregation.md) +[9] [Lazy Aggregation with DA Layer Consistency ADR](../adr/adr-021-lazy-aggregation.md) [defaultBlockTime]: https://github.com/evstack/ev-node/blob/main/pkg/config/defaults.go#L50 [defaultDABlockTime]: https://github.com/evstack/ev-node/blob/main/pkg/config/defaults.go#L59 diff --git a/docs/guides/advanced/based-sequencing.md b/docs/guides/advanced/based-sequencing.md index 
bf1f235fa2..e476896811 100644 --- a/docs/guides/advanced/based-sequencing.md +++ b/docs/guides/advanced/based-sequencing.md @@ -72,5 +72,5 @@ Based sequencing minimizes trust assumptions: ## Further Reading -- [Data Availability](../data-availability.md) - Understanding the DA layer -- [Transaction Flow](../transaction-flow.md) - How transactions move through the system +- [Data Availability](../../concepts/data-availability.md) - Understanding the DA layer +- [Transaction Flow](../../concepts/transaction-flow.md) - How transactions move through the system diff --git a/docs/guides/advanced/forced-inclusion.md b/docs/guides/advanced/forced-inclusion.md index b7c5199aa6..b6e89a0359 100644 --- a/docs/guides/advanced/forced-inclusion.md +++ b/docs/guides/advanced/forced-inclusion.md @@ -14,7 +14,7 @@ Forced inclusion is a censorship-resistance mechanism that allows users to submi - **With lazy mode:** the sequencer produces a block once either - enough transactions are collected - the lazy-mode block interval elapses - More info in the [lazy mode configuration guide](../config.md#lazy-mode-lazy-aggregator). + More info in the [lazy mode configuration guide](../../learn/config.md#lazy-mode-lazy-aggregator). - Each block contains a batch of ordered transactions and metadata. 4. **Data Availability Posting:** diff --git a/docs/guides/ha/cluster-setup.md b/docs/guides/ha/cluster-setup.md new file mode 100644 index 0000000000..23638e38a8 --- /dev/null +++ b/docs/guides/ha/cluster-setup.md @@ -0,0 +1,463 @@ +# Bootstrap a 5-Node HA Cluster from Scratch + +This tutorial walks you through setting up a production-ready 5-node ev-node Raft cluster from zero. By the end you will have five sequencer nodes that automatically elect a leader, replicate block state, and survive individual node failures. 
+ +## Prerequisites + +- Five machines (VMs, bare metal, or containers) with: + - Network connectivity to each other on the Raft port (we use `5001`) and P2P port (`26656`) + - Persistent storage for the Raft data directory + - A working ev-node binary (see the [quickstart guide](../quick-start.md)) +- A private network, VPN, or encrypted mesh between all nodes (Raft transport is plain TCP — never expose the Raft port publicly) +- A shared genesis file for your chain (see [Create Genesis](../create-genesis.md)) +- A signer key on each node (all nodes must share the same signing identity so block hashes match regardless of which node is the current leader) + +### Node addresses used in this guide + +| Node | Private IP | Raft address | P2P port | +|------|------------|--------------|----------| +| node-1 | 10.0.0.1 | 10.0.0.1:5001 | 26656 | +| node-2 | 10.0.0.2 | 10.0.0.2:5001 | 26656 | +| node-3 | 10.0.0.3 | 10.0.0.3:5001 | 26656 | +| node-4 | 10.0.0.4 | 10.0.0.4:5001 | 26656 | +| node-5 | 10.0.0.5 | 10.0.0.5:5001 | 26656 | + +Replace these with your actual IP addresses throughout the guide. + +P2P peers use the libp2p multiaddr format, which includes each node's peer ID: + +```text +/ip4//tcp//p2p/ +``` + +You will collect peer IDs in Step 3 after initializing each node. + +--- + +## Step 1: Measure Network RTT + +The Raft timing parameters must be sized for your network. Run the following from each node to every other node and note the highest average RTT you observe: + +```bash +# Example: from node-1, ping all peers +for ip in 10.0.0.2 10.0.0.3 10.0.0.4 10.0.0.5; do + echo -n "$ip: " + ping -c 20 $ip | tail -1 | awk -F'/' '{print $5 "ms avg"}' +done +``` + +Repeat from each node. Take the single highest value across all measurements — this is your `RTT_MAX`. + +For nodes within the same region or data center, `RTT_MAX` is typically 5–30ms. For the configuration file below we assume `RTT_MAX ≤ 25ms`. 
If your measurement is higher, adjust the timing parameters using the formulas in the [configuration reference](./overview.md#timing-parameters). + +--- + +## Step 2: Verify Network Connectivity + +Confirm that the Raft port and P2P port are reachable between nodes before starting anything: + +```bash +# From node-2, verify node-1's Raft port is reachable +nc -zv 10.0.0.1 5001 + +# From node-2, verify node-1's P2P port is reachable +nc -zv 10.0.0.1 26656 +``` + +Do this for every node pair in both directions. If any check fails, fix your firewall rules before proceeding. + +--- + +## Step 3: Initialize Each Node + +Run this on every node. Each node gets its own home directory where the config, keys, and data live. + +First, create a passphrase file that only root and the service account can read. This file is referenced by the binary at runtime — the passphrase never appears in process listings or logs. + +```bash +# Run on every node +sudo mkdir -p /etc/ev-node +echo -n "" | sudo tee /etc/ev-node/passphrase > /dev/null +sudo chmod 600 /etc/ev-node/passphrase +sudo chown ev-node:ev-node /etc/ev-node/passphrase +``` + +Then initialize the node: + +```bash +# Run on every node (the binary name depends on your chain) +./evm init --evnode.node.aggregator=true --evnode.signer.passphrase_file /etc/ev-node/passphrase +``` + +This creates the home directory structure (default `~/.evm`) with a `config/evnode.yaml` file and generates the signer key. + +After initializing each node, retrieve its peer ID — you will need all five when writing the configuration in Step 5: + +```bash +# Run on each node after init +./evm net-info +``` + +Note the `peer_id` value from each node's output. It looks like `12D3KooW...`. You will need all five peer IDs before writing the configuration files. + +> **Shared signer key:** All cluster nodes must sign blocks with the same key so that block hashes produced by any leader are identical. 
Copy the key material from node-1 to all other nodes after initialization: +> +> ```bash +> # On node-1: locate the signer key +> ls ~/.evm/config/ +> +> # Secure-copy it to each peer +> scp ~/.evm/config/signer.json user@10.0.0.2:~/.evm/config/ +> scp ~/.evm/config/signer.json user@10.0.0.3:~/.evm/config/ +> scp ~/.evm/config/signer.json user@10.0.0.4:~/.evm/config/ +> scp ~/.evm/config/signer.json user@10.0.0.5:~/.evm/config/ +> ``` + +--- + +## Step 4: Distribute the Genesis File + +Every node must start with the same genesis file. Create it on node-1 (see [Create Genesis](../create-genesis.md)) then copy it to all peers: + +```bash +scp ~/.evm/config/genesis.json user@10.0.0.2:~/.evm/config/ +scp ~/.evm/config/genesis.json user@10.0.0.3:~/.evm/config/ +scp ~/.evm/config/genesis.json user@10.0.0.4:~/.evm/config/ +scp ~/.evm/config/genesis.json user@10.0.0.5:~/.evm/config/ +``` + +--- + +## Step 5: Write the Configuration Files + +Write the following `evnode.yaml` on each node. `raft.node_id` is unique per node; `raft.peers` and `p2p.peers` must each exclude the local node — everything else is identical. + +### node-1 (`~/.evm/config/evnode.yaml`) + +```yaml +node: + aggregator: true + block_time: "1s" + +raft: + enable: true + node_id: "node-1" + raft_addr: "0.0.0.0:5001" + raft_dir: "/var/lib/ev-node/raft" + peers: "node-2@10.0.0.2:5001,node-3@10.0.0.3:5001,node-4@10.0.0.4:5001,node-5@10.0.0.5:5001" + + # Timing — tuned for RTT_MAX ≤ 25ms + heartbeat_timeout: "92ms" + election_timeout: "368ms" + leader_lease_timeout: "46ms" + send_timeout: "50ms" + + # Log retention — covers ~5 hours of absence at 1 block/s + trailing_logs: 18000 + snapshot_threshold: 5000 + snap_count: 3 + +p2p: + listen_address: "/ip4/0.0.0.0/tcp/26656" + peers: "/ip4/10.0.0.2/tcp/26656/p2p/,/ip4/10.0.0.3/tcp/26656/p2p/,/ip4/10.0.0.4/tcp/26656/p2p/,/ip4/10.0.0.5/tcp/26656/p2p/" +``` + +### node-2 (`~/.evm/config/evnode.yaml`) + +`raft.peers` must omit the local node. 
Because `raft_addr` is the wildcard `0.0.0.0:5001`, the bootstrap code's self-exclusion check compares peer addresses literally and cannot match `node-2@10.0.0.2:5001` against the local bind address; if node-2 appears in its own peers list, it is added to the cluster membership twice and startup fails. Always list only the **other** nodes.
+ +```bash +# Run this on each node, substituting the correct binary name and flags for your chain +./evm start \ + --evnode.node.aggregator=true \ + --evnode.raft.enable=true \ + --evnode.raft.node_id="node-1" \ + --evnode.raft.raft_addr="0.0.0.0:5001" \ + --evnode.raft.raft_dir="/var/lib/ev-node/raft" \ + --evnode.raft.peers="node-2@10.0.0.2:5001,node-3@10.0.0.3:5001,node-4@10.0.0.4:5001,node-5@10.0.0.5:5001" \ + --evnode.raft.heartbeat_timeout="92ms" \ + --evnode.raft.election_timeout="368ms" \ + --evnode.raft.leader_lease_timeout="46ms" \ + --evnode.raft.send_timeout="50ms" \ + --evnode.raft.trailing_logs=18000 \ + --evnode.raft.snapshot_threshold=5000 \ + --evnode.raft.snap_count=3 \ + --evnode.p2p.listen_address="/ip4/0.0.0.0/tcp/26656" \ + --evnode.p2p.peers="/ip4/10.0.0.2/tcp/26656/p2p/,/ip4/10.0.0.3/tcp/26656/p2p/,/ip4/10.0.0.4/tcp/26656/p2p/,/ip4/10.0.0.5/tcp/26656/p2p/" \ + --evnode.signer.passphrase_file=/etc/ev-node/passphrase \ + --evm.jwt-secret=$(cat /path/to/jwt.hex) \ + --evm.genesis-hash= +``` + +Adjust flags for your execution layer (e.g., remove EVM flags if you are running a Cosmos SDK chain). + +--- + +## Step 8: Verify the Cluster Is Healthy + +### Watch the logs + +Within a few seconds of starting, you should see one node win the election: + +```text +INF raft: entering candidate state node=node-1 +INF raft: election won tally=3 +INF raft: entering leader state leader=node-1 +INF block produced height=1 hash=0xabc... +``` + +The other nodes will log: + +```text +INF raft: entering follower state leader=node-1 +INF block applied from raft log height=1 hash=0xabc... 
+``` + +### Check node health + +Verify each node's HTTP API is responding: + +```bash +# ev-node exposes its health endpoint on port 7331 by default +curl http://10.0.0.1:7331/health/ready + +# Check which node is the current leader +curl http://10.0.0.1:7331/raft/node | jq '{node_id, is_leader}' +``` + +### Check block production + +For EVM chains, query the execution layer to confirm blocks are being produced: + +```bash +# Run from any node; the height should increase each time +cast block latest --rpc-url http://10.0.0.1:8545 +``` + +Repeat after a few seconds — the block number should be increasing. + +### Verify all nodes are synced + +Query each node; all five should report the same block height (or within 1–2 blocks of each other): + +```bash +for ip in 10.0.0.1 10.0.0.2 10.0.0.3 10.0.0.4 10.0.0.5; do + echo -n "$ip: height=" + cast block latest --rpc-url http://$ip:8545 | jq -r '.number' +done +``` + +--- + +## Step 9: Test Failover + +With all five nodes running and producing blocks, simulate a leader failure: + +```bash +# Identify the current leader from its logs, then on that machine. +# Preferred: use the systemd unit if ev-node runs as a service +sudo systemctl stop ev-node + +# Fallback: stop the process directly +mapfile -t PIDS < <(pgrep -f "evm start") +if [ "${#PIDS[@]}" -ne 1 ]; then + echo "Expected exactly 1 evm PID, found ${#PIDS[@]}: ${PIDS[*]}" + exit 1 +fi +echo "Stopping PID ${PIDS[0]}" +kill -SIGTERM "${PIDS[0]}" +``` + +Within `election_timeout` (368ms in this configuration), the remaining four nodes will elect a new leader and resume block production. Measure the actual gap in your logs: + +```bash +# Look for the last block before the kill and first block after +grep "block produced\|block applied" /var/log/ev-node/node-2.log | tail -20 +``` + +The gap should be well under 1 second in most cases (a few election cycles at most). + +--- + +## Running as a Systemd Service + +For production, manage each node with systemd. 
+ +### Create the passphrase file + +If you did not create the passphrase file in Step 3, do it now. The file must exist on every node before the service starts: + +```bash +# Run on every node (skip if you already did this in Step 3) +sudo mkdir -p /etc/ev-node +echo -n "" | sudo tee /etc/ev-node/passphrase > /dev/null +sudo chmod 600 /etc/ev-node/passphrase +sudo chown ev-node:ev-node /etc/ev-node/passphrase +``` + +### Unit file + +```ini +# /etc/systemd/system/ev-node.service +[Unit] +Description=ev-node HA sequencer +After=network-online.target +Wants=network-online.target + +[Service] +User=ev-node +ExecStart=/usr/local/bin/evm start \ + --evnode.node.aggregator=true \ + --evnode.raft.enable=true \ + --evnode.raft.node_id=node-1 \ + --evnode.raft.raft_addr=0.0.0.0:5001 \ + --evnode.raft.raft_dir=/var/lib/ev-node/raft \ + --evnode.raft.peers=node-2@10.0.0.2:5001,node-3@10.0.0.3:5001,node-4@10.0.0.4:5001,node-5@10.0.0.5:5001 \ + --evnode.raft.heartbeat_timeout=92ms \ + --evnode.raft.election_timeout=368ms \ + --evnode.raft.leader_lease_timeout=46ms \ + --evnode.raft.send_timeout=50ms \ + --evnode.raft.trailing_logs=18000 \ + --evnode.raft.snapshot_threshold=5000 \ + --evnode.p2p.listen_address=/ip4/0.0.0.0/tcp/26656 \ + --evnode.p2p.peers=/ip4/10.0.0.2/tcp/26656/p2p/,/ip4/10.0.0.3/tcp/26656/p2p/,/ip4/10.0.0.4/tcp/26656/p2p/,/ip4/10.0.0.5/tcp/26656/p2p/ \ + --evnode.signer.passphrase_file=/etc/ev-node/passphrase +Restart=on-failure +RestartSec=5s + +# Give the process time to transfer leadership before systemd kills it +TimeoutStopSec=30 + +[Install] +WantedBy=multi-user.target +``` + +```bash +sudo systemctl daemon-reload +sudo systemctl enable ev-node +sudo systemctl start ev-node +sudo journalctl -u ev-node -f +``` + +`TimeoutStopSec=30` gives the node enough time to perform a graceful leadership transfer on `SIGTERM` before systemd sends `SIGKILL`. Do not set this too short. 
+ +--- + +## Performing a Rolling Restart + +To restart nodes without taking the cluster offline (e.g., for a config change or binary upgrade): + +1. Restart one non-leader node at a time and wait for it to rejoin before touching the next. +2. For the leader node, restart it last. `ev-node` will transfer leadership to a peer before shutting down. + +```bash +# Restart non-leader nodes first, one at a time. +# After each restart, wait until the node confirms it has rejoined before touching the next. + +ssh user@10.0.0.2 "sudo systemctl restart ev-node" +ssh user@10.0.0.2 "sudo journalctl -u ev-node --since '1 min ago' -f | grep -m1 'follower state\|leader state'" + +ssh user@10.0.0.3 "sudo systemctl restart ev-node" +ssh user@10.0.0.3 "sudo journalctl -u ev-node --since '1 min ago' -f | grep -m1 'follower state\|leader state'" + +ssh user@10.0.0.4 "sudo systemctl restart ev-node" +ssh user@10.0.0.4 "sudo journalctl -u ev-node --since '1 min ago' -f | grep -m1 'follower state\|leader state'" + +ssh user@10.0.0.5 "sudo systemctl restart ev-node" +ssh user@10.0.0.5 "sudo journalctl -u ev-node --since '1 min ago' -f | grep -m1 'follower state\|leader state'" + +# Restart the leader last — ev-node transfers leadership before shutting down +ssh user@10.0.0.1 "sudo systemctl restart ev-node" +ssh user@10.0.0.1 "sudo journalctl -u ev-node --since '1 min ago' -f | grep -m1 'follower state\|leader state'" +``` + +The `grep -m1` exits as soon as the node logs `entering follower state` or `entering leader state`, confirming it has rejoined the cluster. Only then proceed to the next node. + +--- + +## Troubleshooting + +### Cluster does not elect a leader + +Check that: +- At least 3 out of 5 nodes are running and can reach each other on port 5001. +- The `peers` list on every node is identical and all addresses are correct. +- No firewall rule is blocking TCP on port 5001. 
+ +```bash +# Quick connectivity check from node-2 to node-1 Raft port +nc -zv 10.0.0.1 5001 +``` + +### Node panics on startup with "state divergence" + +This means the node's local block store is ahead of or behind the Raft consensus state in a way that cannot be reconciled automatically. This typically happens when a node's `raft_dir` was wiped but the block database was not (or vice versa). + +Stop the node, wipe both `raft_dir` and the node's block data directory, then restart. The node will receive a Raft snapshot and rebuild from there. + +### Spurious elections / leadership flapping + +Symptoms: frequent `election won` and `entering follower state` lines in the logs, block production pausing briefly every few seconds. + +Causes: +- `heartbeat_timeout` is too short for your network RTT — increase it. +- Network congestion or packet loss between nodes. +- Node CPU is saturated and cannot process heartbeats in time. + +As a quick diagnostic, check the RTT between nodes while the cluster is running: + +```bash +ping -c 100 10.0.0.2 | tail -5 +``` + +If the max RTT is close to or above `heartbeat_timeout`, increase `heartbeat_timeout` and `election_timeout` proportionally. diff --git a/docs/guides/ha/overview.md b/docs/guides/ha/overview.md new file mode 100644 index 0000000000..a004e080fb --- /dev/null +++ b/docs/guides/ha/overview.md @@ -0,0 +1,404 @@ +# High Availability Sequencer + +ev-node supports running your sequencer in a **High Availability (HA)** cluster using the [Raft consensus algorithm](https://raft.github.io/). Instead of a single aggregator node that is a point of failure, multiple nodes form a cluster that automatically elects a leader and recovers from individual node failures without manual intervention and without halting block production. + +## Why Raft HA + +A single sequencer node means that if the machine crashes, loses power, or needs maintenance, your chain stops producing blocks until the node is back online. 
With a Raft cluster: + +- **Automatic failover** — when the active leader fails, remaining nodes elect a new leader within seconds. +- **No double-signing** — the Raft log guarantees at most one leader at a time and synchronizes block state across all nodes before any block is committed. +- **Graceful restarts** — before shutting down, the leader transfers leadership to a healthy peer so downtime is measured in milliseconds. +- **Fault tolerance** — a 5-node cluster keeps producing blocks as long as at least 3 nodes are reachable; it can absorb 2 simultaneous failures. + +## How It Works + +Each node in the cluster runs `ev-node` in aggregator mode with Raft enabled. The nodes communicate over a private TCP transport to: + +1. **Elect a leader** — using Raft leader election. Only the elected leader produces blocks. +2. **Replicate state** — every block the leader produces is appended to the Raft log and replicated to all followers before it is considered committed. +3. **Apply to FSM** — each node applies committed log entries to its Finite State Machine (FSM), which tracks the latest committed block height, hash, and timestamp. +4. **Detect failure** — followers watch for heartbeats from the leader. If heartbeats stop arriving within the election timeout, a follower starts a new election. +5. **Catch up** — a node that was offline rejoins by receiving a Raft snapshot (fast-forward to the current head) and then fetching any missing historical blocks from peers via P2P. + +### Storage + +Raft state is stored in the directory specified by `raft.raft_dir`: + +| File | Purpose | +|------|---------| +| `raft-log.db` | Raft log entries (BoltDB) | +| `raft-stable.db` | Current term and vote state (BoltDB) | +| `*.snp` | Snapshots of the FSM state | + +These files represent the node's **cluster identity**. They must live on persistent storage — loss of this directory is equivalent to removing the node from the cluster. 
+ +## Cluster Sizing + +Always run an **odd number** of nodes. Raft requires a majority (quorum) to elect a leader and commit entries. + +| Nodes | Quorum | Tolerated failures | +|-------|--------|--------------------| +| 3 | 2 | 1 | +| **5** | **3** | **2** | +| 7 | 4 | 3 | + +**5 nodes is the recommended production configuration.** It tolerates two simultaneous node failures — enough to absorb a rolling upgrade plus an unexpected crash at the same time — while keeping the cluster size manageable. + +## Network Requirements + +Raft transport is **plain TCP** with no built-in encryption. Before deploying: + +- Run all nodes inside a **private network, VPN, or encrypted mesh** (WireGuard, Tailscale, AWS VPC, etc.). +- **Never expose the Raft port to the public internet.** An attacker with access to the Raft port can send forged messages that disrupt or hijack cluster consensus. +- Ensure low-latency connectivity between nodes. Timeouts must be sized larger than the worst-case round-trip time (RTT) between any two nodes in the cluster. + +### Node Placement + +**Run all nodes in the same region, spread across different availability zones.** + +This is the single most important infrastructure decision for cluster stability. All nodes must have roughly the same RTT to each other. The timing parameters (heartbeat timeout, election timeout) are sized for a single `RTT_MAX` value — if one node has materially higher latency than its peers, it degrades the entire cluster's ability to detect failures and elect leaders reliably. + +Specifically: +- **Same region, different AZs** gives uniform 5–30ms RTT and is the validated production topology. Nodes are isolated from AZ-level failures while keeping latency uniform. +- **Cross-region nodes** introduce higher and asymmetric RTT (100ms+). Even a single high-latency node can destabilize the cluster under network stress. 
+ +This was observed directly in load testing: a 3-node cluster where one node averaged 99ms RTT (2× higher than its peers at 45–49ms) showed election times up to 284 seconds, three undetected leader elections, and one skipped cycle when 200–500ms of additional latency was injected — the same disruption level where the two lower-latency nodes recovered in under 55 seconds. Moving to a 5-node cluster with uniform ~45ms RTT across all nodes eliminated all undetected elections, reduced the worst-case election time from 284s to 66s, and reduced cascade risk from 10% of cycles to 3%. + +If your deployment requires nodes in different regions, increase `heartbeat_timeout` and `election_timeout` to at least 4–5× the worst-case inter-node RTT, and expect slower failover. See the [timing parameters](#timing-parameters) section for tuning formulas. + +--- + +## Configuration Reference + +Raft is configured under the `raft` section of `evnode.yaml`, or via `--evnode.raft.*` CLI flags. + +### Required Parameters + +These must be set on every node for the cluster to form. + +#### `raft.enable` + +```yaml +raft: + enable: true +``` + +**CLI:** `--evnode.raft.enable` +**Default:** `false` + +Enables Raft consensus. Must be `true` on every cluster member. When disabled (the default), the node runs as a traditional single sequencer. Setting this to `true` also requires `node.aggregator: true`. + +--- + +#### `raft.node_id` + +```yaml +raft: + node_id: "node-1" +``` + +**CLI:** `--evnode.raft.node_id` +**Default:** _(none, required)_ + +A string that uniquely identifies this node within the cluster. Every node must have a different `node_id`. The ID is stored in the Raft log and used by other nodes to route messages — **never change it after the cluster is bootstrapped**, as doing so will break the cluster membership records. + +Convention: use stable, descriptive names like `node-1`, `node-2`, … `node-5` or names tied to the host (`sequencer-us-east-1`, `sequencer-eu-east-2`). 
+ +--- + +#### `raft.raft_addr` + +```yaml +raft: + raft_addr: "0.0.0.0:5001" +``` + +**CLI:** `--evnode.raft.raft_addr` +**Default:** _(none, required)_ + +The TCP address this node listens on for Raft transport messages from other cluster members. The `0.0.0.0` bind address accepts connections on all interfaces; bind to a specific private IP if you want to restrict which interface is used for cluster traffic. + +The port (here `5001`) must be reachable from every other node in the cluster. + +The address you advertise in `raft.peers` must resolve to this port from the perspective of other nodes. If you bind to `0.0.0.0` internally, advertise the node's actual private IP in the peers list. + +--- + +#### `raft.raft_dir` + +```yaml +raft: + raft_dir: "/var/lib/ev-node/raft" +``` + +**CLI:** `--evnode.raft.raft_dir` +**Default:** `/raft` + +The directory where Raft stores its persistent state: log database, stable store, and snapshots. This directory **must be on persistent storage** (not tmpfs, not ephemeral container storage). Losing this directory means the node loses its cluster identity — it cannot rejoin without being reconfigured as a new member. + +For Docker deployments, mount this as a named volume. For bare-metal or systemd services, ensure the directory survives reboots. + +--- + +#### `raft.peers` + +```yaml +raft: + peers: "node-2@10.0.0.2:5001,node-3@10.0.0.3:5001,node-4@10.0.0.4:5001,node-5@10.0.0.5:5001" +``` + +**CLI:** `--evnode.raft.peers` +**Default:** _(none, required)_ + +A comma-separated list of the **other** cluster members (exclude the local node), in the format `nodeID@host:port`. The host and port must be the Raft address (`raft_addr`) of each peer as reachable from this node. Do not list the node's own `node_id` in its own `peers` field. + +Raft uses this list to: +- Bootstrap the cluster on first start (when no persisted state exists). +- Know which addresses to dial when sending log entries or heartbeats. 
+ +> **Limitation — static membership only.** Changing the peer set at runtime (adding or removing nodes without a full cluster restart) is not currently supported. All nodes that will ever participate in the cluster must be listed in `peers` before the cluster is first bootstrapped. + +--- + +#### `raft.bootstrap` + +```yaml +raft: + bootstrap: false +``` + +**CLI:** `--evnode.raft.bootstrap` +**Default:** `false` + +Compatibility flag retained for older deployments. **You do not need to set this.** ev-node auto-detects the correct startup mode from the state of `raft_dir`: + +- If `raft_dir` contains existing Raft state → the node **rejoins** the cluster automatically. +- If `raft_dir` is empty or does not exist → the node **bootstraps** a new cluster from the `peers` list. + +Setting `bootstrap: true` explicitly has no additional effect beyond what auto-detection already does. + +--- + +### Timing Parameters + +These parameters control how quickly the cluster detects failures and elects a new leader. They must be sized relative to the **maximum round-trip time (RTT) between any two nodes** in the cluster. Too tight and the cluster experiences spurious leader changes; too loose and failover takes longer than necessary. + +**To measure your network RTT:** + +```bash +# Run from each node to every other node; note the maximum result +ping -c 20 | tail -1 +``` + +Take the maximum average RTT across all pairs — this is your `RTT_MAX`. + +#### `raft.heartbeat_timeout` + +```yaml +raft: + heartbeat_timeout: "92ms" +``` + +**CLI:** `--evnode.raft.heartbeat_timeout` +**Default:** `350ms` + +The maximum time a follower will wait without receiving a heartbeat from the leader before starting a new election. The leader sends heartbeats more frequently than this value internally; this parameter is purely a follower-side timeout that triggers a new election when crossed. + +**Tuning rule:** Set to **4–5× RTT_MAX**. 
This ensures followers can distinguish a slow network from a dead leader without triggering spurious elections. + +- Too low (< 2× RTT_MAX): followers time out due to normal network jitter and start unnecessary elections, causing leadership flapping and brief block production pauses. +- Too high: failover takes longer; the cluster is slower to react to a leader crash. + +| RTT_MAX | Recommended heartbeat_timeout | +|---------|-------------------------------| +| 10ms | 40–50ms | +| 23ms | 92ms | +| 50ms | 200–250ms | +| 100ms | 400–500ms | + +--- + +#### `raft.election_timeout` + +```yaml +raft: + election_timeout: "368ms" +``` + +**CLI:** `--evnode.raft.election_timeout` +**Default:** `1000ms` + +How long a follower waits without receiving a heartbeat before it concludes the leader is dead and starts a new election. Must be greater than or equal to `heartbeat_timeout`. + +**Tuning rule:** Set to **4× heartbeat_timeout** (or approximately 16–20× RTT_MAX). The factor of 4 gives the leader several missed heartbeat opportunities before a follower acts — enough to ride out transient packet loss without triggering unnecessary elections. + +A larger election timeout means a slower reaction to leader failure (failover takes longer). A smaller election timeout risks false positives: the cluster starts an election while the leader is merely experiencing a brief network delay, causing a term increment and a short pause in block production. + +--- + +#### `raft.leader_lease_timeout` + +```yaml +raft: + leader_lease_timeout: "46ms" +``` + +**CLI:** `--evnode.raft.leader_lease_timeout` +**Default:** `175ms` + +The duration for which a leader considers its leadership valid after the last successful heartbeat acknowledgment. Leader lease enables local reads from the leader without a round-trip to quorum. + +**Tuning rule:** Set to approximately **half of `heartbeat_timeout`** (i.e., ~2× RTT_MAX), and always **strictly less than `election_timeout`**. 
If `leader_lease_timeout` is close to or exceeds `election_timeout`, a node may believe it is still the leader after followers have already elected a replacement, which can cause split-brain reads. + +--- + +#### `raft.send_timeout` + +```yaml +raft: + send_timeout: "50ms" +``` + +**CLI:** `--evnode.raft.send_timeout` +**Default:** `200ms` + +The maximum time the leader waits for a single message (log entry, heartbeat) to be delivered to a peer before marking the delivery as failed. A failed send is retried, but repeated failures trigger follower health tracking. + +**Tuning rule:** Set to **2–3× RTT_MAX**. This allows for normal network latency plus one retransmission before giving up on a delivery attempt. + +--- + +### Snapshot and Log Retention Parameters + +These parameters control how frequently Raft snapshots the FSM state and how many log entries are kept around after a snapshot. They affect both disk usage and how quickly a lagging node can catch up. + +#### `raft.snapshot_threshold` + +```yaml +raft: + snapshot_threshold: 5000 +``` + +**CLI:** `--evnode.raft.snapshot_threshold` +**Default:** `500` + +The number of committed log entries that must accumulate before Raft automatically takes a snapshot of the FSM state. After a snapshot, log entries older than the snapshot are compacted away. + +**Effect on operations:** +- **Lower values** (e.g., `500`): snapshots are taken frequently, keeping the log small. A restarting node receives a recent snapshot and has fewer log entries to replay, but snapshot writes happen more often, adding brief I/O bursts. +- **Higher values** (e.g., `5000`): less frequent snapshots mean less I/O overhead during normal operation, but a lagging node may have more log entries to replay when catching up. + +At 10 block/second, `snapshot_threshold: 5000` takes a snapshot roughly every 8.3 minutes (500 seconds). 
+ +--- + +#### `raft.trailing_logs` + +```yaml +raft: + trailing_logs: 18000 +``` + +**CLI:** `--evnode.raft.trailing_logs` +**Default:** `200` + +The number of log entries to **retain after a snapshot** is taken. These entries act as a catch-up buffer: a node that missed fewer than `trailing_logs` entries since the last snapshot can replay from the log without needing to transfer the full snapshot. + +**Effect on operations:** +- **Lower values** (e.g., `200`): tighter disk usage; a node that misses even a few minutes of operation must receive a full snapshot on rejoin. +- **Higher values** (e.g., `18000`): a lagging node can catch up via log replay without needing a full snapshot transfer, reducing the cost of brief outages. At 1 block/second (`block_time: "1s"`), `trailing_logs: 18000` covers ~5 hours; at 10 block/second, ~30 minutes. + +Set this high enough to cover your typical maintenance window (restart, upgrade, brief network partition). Scale proportionally with your chain's block rate. + +--- + +#### `raft.snap_count` + +```yaml +raft: + snap_count: 3 +``` + +**CLI:** `--evnode.raft.snap_count` +**Default:** `3` + +The number of snapshot files to retain on disk. Older snapshots are deleted when new ones are created. Keeping 2–3 snapshots provides a rollback option in case the latest snapshot is corrupt. + +--- + +### Recommended Production Configuration + +The following configuration is recommended for a **5-node cluster on a network with RTT_MAX ≤ 25ms** (typical for nodes in the same region). It was validated by an extensive sweep of 10 configurations across 150 SIGTERM kill cycles and 50 latency-injection cycles, with zero undetected failures and zero split-brain events recorded. + +```yaml +# evnode.yaml — paste this raft section into every node's config +# Replace node_id, raft_addr, and peers with your actual values. 
+ +node: + aggregator: true + +raft: + enable: true + node_id: "node-1" # unique per node + raft_addr: "0.0.0.0:5001" + raft_dir: "/var/lib/ev-node/raft" # must be persistent + + # Remote peers list — different on every node. + # Format is strict: NodeID@Host:Port, comma-separated, no whitespace. + peers: "node-2@10.0.0.2:5001,node-3@10.0.0.3:5001,node-4@10.0.0.4:5001,node-5@10.0.0.5:5001" + + # Timing — tuned for RTT_MAX ≤ 25ms + heartbeat_timeout: "92ms" + election_timeout: "368ms" + leader_lease_timeout: "46ms" + send_timeout: "50ms" + + # Log retention + trailing_logs: 18000 + snapshot_threshold: 5000 + snap_count: 3 +``` + +**Adapting for different RTT values:** + +Measure RTT_MAX first and scale the timing parameters: + +```text +heartbeat_timeout = RTT_MAX × 4 +election_timeout = heartbeat_timeout × 4 +leader_lease_timeout = heartbeat_timeout / 2 +send_timeout = RTT_MAX × 3 +``` + +--- + +## Interaction with P2P + +Even in a Raft cluster, each node must have P2P configured. Raft handles **hot replication** — it replicates the latest block state to all followers in near real-time. But if a node falls far enough behind that the missing entries have already been compacted out of the Raft log (i.e., it missed more entries than `trailing_logs`), it receives a Raft snapshot to jump to the current head. Historical blocks between the node's last known state and the snapshot are then fetched via the **P2P network or DA layer**. + +```yaml +p2p: + listen_address: "/ip4/0.0.0.0/tcp/26656" + peers: "/ip4/<node-ip>/tcp/26656/p2p/<peer-id>,..." +``` + +Ensure P2P ports are open between nodes in addition to the Raft port.
+ +--- + +## Monitoring + +Track these metrics (available via Prometheus if `metrics.enabled: true`) to catch problems early: + +| Signal | What it means | +|--------|---------------| +| Frequent leadership changes | Network instability, asymmetric packet loss, or overloaded nodes | +| Growing applied-index lag | FSM cannot keep up with commits; check CPU and disk I/O | +| Snapshot transfers | Node fell behind `trailing_logs` entries — check network and disk | +| Election timeouts | Heartbeats are being dropped; check MTU, firewall rules, network congestion | + +See the [Monitoring guide](../operations/monitoring.md) for the full Prometheus metric list. diff --git a/docs/guides/ha/single-to-ha.md b/docs/guides/ha/single-to-ha.md new file mode 100644 index 0000000000..238b7c55ab --- /dev/null +++ b/docs/guides/ha/single-to-ha.md @@ -0,0 +1,425 @@ +# Migrate from Single Sequencer to HA Cluster + +This guide walks through converting a live single-sequencer chain into a 5-node Raft HA cluster with zero block-production downtime during the cutover window. + +## Overview + +A single sequencer stores its block production state (latest height, hash, and timestamp) only locally. A Raft cluster shares this state across all nodes via the Raft log. To migrate, you: + +1. Prepare four new nodes with the same genesis, signer key, and chain data as the existing node. +2. Reconfigure all five nodes (existing + four new) with Raft enabled. +3. Stop the existing sequencer and start all five nodes together to bootstrap the cluster. + +The chain experiences one planned downtime window — the gap between stopping the single sequencer and the Raft cluster electing its first leader, which takes a few seconds. 
+ +## Before You Start + +### Understand what changes + +| | Single sequencer | Raft cluster | +|-|-----------------|-------------| +| Produces blocks | One node always | Elected leader | +| Block production key | Local to one node | Shared across all nodes | +| Raft data directory | Not used | Required, persistent | +| Config flags | No `raft.*` flags | All `raft.*` flags required | +| Restart behavior | Manual recovery | Automatic leader election | + +### Requirements + +- Five machines that can reach each other on the Raft port (we use `5001`) and P2P port (`26656`) +- A private network or VPN between all nodes (Raft transport is unencrypted) +- The existing sequencer's: + - Binary (`evm` or your chain binary) + - Config file (`evnode.yaml`) + - Signer key + - Genesis file + - Block data directory (optional — peers can sync from DA, but copying saves time) +- A scheduled maintenance window of ~5 minutes + +--- + +## Step 1: Measure Network RTT + +Before writing any config, measure the maximum RTT between your nodes. The Raft timing parameters must be sized for your actual network: + +```bash +# From the existing node, ping each new node +# (ping's summary line is min/avg/max/mdev — field 6 is the max) +for ip in 10.0.0.2 10.0.0.3 10.0.0.4 10.0.0.5; do + echo -n "$ip: " + ping -c 20 $ip | tail -1 | awk -F'/' '{print $6 "ms max"}' +done +``` + +For RTT_MAX ≤ 25ms (same-region nodes), use the recommended values in this guide. For higher RTT, adjust using the formulas in the [configuration reference](./overview.md#timing-parameters). + +--- + +## Step 2: Provision the Four New Nodes + +On each of the four new machines, install the same ev-node binary version as the existing sequencer.
+ +```bash +# Verify the binary version matches on all machines +./evm version +``` + +Create the passphrase file before initializing so the signer key is encrypted from the start: + +```bash +# On each new node +echo -n "<your-passphrase>" | sudo tee /etc/ev-node/passphrase > /dev/null +sudo chmod 600 /etc/ev-node/passphrase +``` + +Initialize each new node's home directory: + +```bash +# On each new node +./evm init --evnode.node.aggregator=true --evnode.signer.passphrase_file /etc/ev-node/passphrase +``` + +--- + +## Step 3: Copy the Signer Key to All New Nodes + +All five nodes must sign blocks with the **same key**. The existing sequencer's key is the one to use — do not generate new keys on the new nodes. + +```bash +# On the existing sequencer (node-1) +# Locate the signer key (exact filename depends on your chain) +ls ~/.evm/config/ + +# Copy to each new node +scp ~/.evm/config/signer.json user@10.0.0.2:~/.evm/config/ +scp ~/.evm/config/signer.json user@10.0.0.3:~/.evm/config/ +scp ~/.evm/config/signer.json user@10.0.0.4:~/.evm/config/ +scp ~/.evm/config/signer.json user@10.0.0.5:~/.evm/config/ +``` + +--- + +## Step 4: Copy the Genesis File + +```bash +scp ~/.evm/config/genesis.json user@10.0.0.2:~/.evm/config/ +scp ~/.evm/config/genesis.json user@10.0.0.3:~/.evm/config/ +scp ~/.evm/config/genesis.json user@10.0.0.4:~/.evm/config/ +scp ~/.evm/config/genesis.json user@10.0.0.5:~/.evm/config/ +``` + +--- + +## Step 5: Copy Block Data to New Nodes + +New nodes can sync their block history from the DA layer or P2P peers after the cluster is running, but copying the existing chain data speeds up the initial sync significantly for long-running chains.
+ +```bash +# Stop the existing sequencer temporarily to get a consistent snapshot +# (you will start it again in step 9) +systemctl stop ev-node # or kill the process + +# Copy the data directory — adjust the path to your chain +rsync -avz ~/.evm/data/ user@10.0.0.2:~/.evm/data/ +rsync -avz ~/.evm/data/ user@10.0.0.3:~/.evm/data/ +rsync -avz ~/.evm/data/ user@10.0.0.4:~/.evm/data/ +rsync -avz ~/.evm/data/ user@10.0.0.5:~/.evm/data/ +``` + +> If your chain uses an EVM execution layer (ev-reth), copy the execution layer database as well. See the [Reth backup guide](../evm/reth-backup.md) for the correct procedure. + +After the copy, note the **latest block height** — this is your reference point: + +```bash +# Note the height before shutdown — replace 8545 with your EVM RPC port +cast block-number --rpc-url http://10.0.0.1:8545 +``` + +**Restart the existing sequencer now** so the chain keeps producing blocks while you prepare the remaining nodes (Steps 6–8). The chain will run uninterrupted until the planned cutover in Step 9. + +```bash +# On node-1 — restart with your original single-sequencer flags +systemctl start ev-node +``` + +--- + +## Step 6: Collect Peer IDs + +Before writing the configuration, collect the peer ID from each node. Peer IDs are needed to build the P2P peers list in multiaddr format. + +```bash +# Run on each node +./evm net-info +``` + +Note the `peer_id` value from each node's output — it looks like `12D3KooW...`. You need all five before writing the configuration files. + +--- + +## Step 7: Write the New Configuration on All Five Nodes + +Write the following `evnode.yaml` on every node. Only `node_id` and the two peer lists differ between nodes. + +**Existing sequencer becomes node-1.** Assign `node-2` through `node-5` to the four new machines.
+ +### node-1 (existing sequencer — `~/.evm/config/evnode.yaml`) + +```yaml +node: + aggregator: true + block_time: "1s" # keep your existing block_time + +raft: + enable: true + node_id: "node-1" + raft_addr: "0.0.0.0:5001" + raft_dir: "/var/lib/ev-node/raft" + peers: "node-2@10.0.0.2:5001,node-3@10.0.0.3:5001,node-4@10.0.0.4:5001,node-5@10.0.0.5:5001" + + # Timing — tuned for RTT_MAX ≤ 25ms + heartbeat_timeout: "92ms" + election_timeout: "368ms" + leader_lease_timeout: "46ms" + send_timeout: "50ms" + + # Log retention + trailing_logs: 18000 + snapshot_threshold: 5000 + snap_count: 3 + +p2p: + listen_address: "/ip4/0.0.0.0/tcp/26656" + peers: "/ip4/10.0.0.2/tcp/26656/p2p/<peer-id-2>,/ip4/10.0.0.3/tcp/26656/p2p/<peer-id-3>,/ip4/10.0.0.4/tcp/26656/p2p/<peer-id-4>,/ip4/10.0.0.5/tcp/26656/p2p/<peer-id-5>" +``` + +### node-2 through node-5 + +Identical except for `node_id`, the Raft peers, and the P2P peers (exclude self from both lists): + +```yaml +# node-2 +node: + aggregator: true + block_time: "1s" + +raft: + enable: true + node_id: "node-2" # change per node + raft_addr: "0.0.0.0:5001" + raft_dir: "/var/lib/ev-node/raft" + peers: "node-1@10.0.0.1:5001,node-3@10.0.0.3:5001,node-4@10.0.0.4:5001,node-5@10.0.0.5:5001" + heartbeat_timeout: "92ms" + election_timeout: "368ms" + leader_lease_timeout: "46ms" + send_timeout: "50ms" + trailing_logs: 18000 + snapshot_threshold: 5000 + snap_count: 3 + +p2p: + listen_address: "/ip4/0.0.0.0/tcp/26656" + peers: "/ip4/10.0.0.1/tcp/26656/p2p/<peer-id-1>,/ip4/10.0.0.3/tcp/26656/p2p/<peer-id-3>,/ip4/10.0.0.4/tcp/26656/p2p/<peer-id-4>,/ip4/10.0.0.5/tcp/26656/p2p/<peer-id-5>" +``` + +--- + +## Step 8: Create Raft Data Directories and Passphrase File + +Run on every node: + +```bash +sudo mkdir -p /var/lib/ev-node/raft +sudo chown $(whoami) /var/lib/ev-node/raft + +# Create the passphrase file — the binary reads this at startup +sudo mkdir -p /etc/ev-node +echo -n "<your-passphrase>" | sudo tee /etc/ev-node/passphrase > /dev/null +sudo chmod 600 /etc/ev-node/passphrase +``` + +--- + +## Step 9: The Cutover + +This is the planned maintenance window.
+ The chain pauses block production from when you stop the existing sequencer until the new Raft cluster elects its first leader (a few seconds). + +### 9a. Stop the existing single sequencer + +```bash +# On node-1 (existing sequencer) +# Preferred: use systemd if the node runs as a service +sudo systemctl stop ev-node + +# Fallback: stop the process directly +mapfile -t PIDS < <(pgrep -f "evm start") +if [ "${#PIDS[@]}" -ne 1 ]; then + echo "Expected exactly 1 evm PID, found ${#PIDS[@]}: ${PIDS[*]}" + exit 1 +fi +echo "Stopping PID ${PIDS[0]}" +kill -SIGTERM "${PIDS[0]}" +``` + +Confirm it has stopped: + +```bash +pgrep evm || echo "stopped" +``` + +### 9b. Start all five nodes simultaneously + +The key requirement here is that all nodes must start within a short window of each other. Raft needs a majority (3 out of 5) online to elect a leader. If you start only 2 nodes and wait, the cluster will not elect a leader until the 3rd node joins. + +Use a coordination mechanism — a simple approach is to open five terminals (or tmux panes) and fire the start commands in quick succession: + +```bash +# On node-1 +./evm start \ + --evnode.node.aggregator=true \ + --evnode.raft.enable=true \ + --evnode.raft.node_id="node-1" \ + --evnode.raft.raft_addr="0.0.0.0:5001" \ + --evnode.raft.raft_dir="/var/lib/ev-node/raft" \ + --evnode.raft.peers="node-2@10.0.0.2:5001,node-3@10.0.0.3:5001,node-4@10.0.0.4:5001,node-5@10.0.0.5:5001" \ + --evnode.raft.heartbeat_timeout="92ms" \ + --evnode.raft.election_timeout="368ms" \ + --evnode.raft.leader_lease_timeout="46ms" \ + --evnode.raft.send_timeout="50ms" \ + --evnode.raft.trailing_logs=18000 \ + --evnode.raft.snapshot_threshold=5000 \ + --evnode.p2p.listen_address="/ip4/0.0.0.0/tcp/26656" \ + --evnode.p2p.peers="/ip4/10.0.0.2/tcp/26656/p2p/<peer-id-2>,/ip4/10.0.0.3/tcp/26656/p2p/<peer-id-3>,/ip4/10.0.0.4/tcp/26656/p2p/<peer-id-4>,/ip4/10.0.0.5/tcp/26656/p2p/<peer-id-5>" \ + --evnode.signer.passphrase_file=/etc/ev-node/passphrase \ + --evm.jwt-secret=$(cat /path/to/jwt.hex) \ +
 --evm.genesis-hash=<genesis-hash> +``` + +```bash +# On node-2 (at the same time, or within a few seconds) +./evm start \ + --evnode.raft.node_id="node-2" \ + # ... same flags, change node_id and p2p.peers +``` + +Repeat for node-3, node-4, node-5. + +--- + +## Step 10: Verify the Migration Succeeded + +### Check leader election + +Within seconds of starting, one node will win the election. Look for: + +```text +INF raft: election won tally=3 leader=node-1 +INF raft: entering leader state +INF block produced height=<N+1> +``` + +where `N` is the last block produced by the old single sequencer. + +The followers will show: + +```text +INF raft: entering follower state leader=node-1 +INF block applied from raft log height=<N+1> +``` + +### Verify block height continuity + +The new cluster must continue from exactly where the old sequencer left off. Query the EVM execution layer: + +```bash +# From the existing sequencer's last known height (noted in step 5) +LAST_HEIGHT=<height-noted-in-step-5> + +# Query node-1 (or any node) — replace 8545 with your EVM RPC port +NEW_HEIGHT=$(cast block-number --rpc-url http://10.0.0.1:8545) + +echo "Last old height: $LAST_HEIGHT" +echo "New cluster height: $NEW_HEIGHT" + +# New height should be LAST_HEIGHT + 1 (or a few blocks ahead if it took a moment) +``` + +### Check all nodes are synced + +```bash +for ip in 10.0.0.1 10.0.0.2 10.0.0.3 10.0.0.4 10.0.0.5; do + echo -n "$ip: height=" + cast block-number --rpc-url http://$ip:8545 +done +``` + +All nodes should be at the same height (within 1–2 blocks of each other). + +--- + +## Step 11: Set Up Systemd on All Nodes + +Once you have confirmed the cluster is healthy, set up systemd for automatic restarts and service management. See the [cluster setup guide](./cluster-setup.md#running-as-a-systemd-service) for a ready-to-use unit file template. + +--- + +## Rollback Plan + +If anything goes wrong during the cutover, you can revert to the single sequencer: + +1. Stop all five nodes. +2.
Wipe the Raft data directories (`/var/lib/ev-node/raft`) on all nodes to clear any bootstrapped cluster state. +3. Remove the Raft configuration from node-1's `evnode.yaml` (or revert to the pre-migration config file). +4. Start node-1 with `raft.enable: false` — it resumes as a single sequencer from the block height it was at when you stopped it. + +```bash +# Emergency rollback — revert node-1 to single sequencer +./evm start \ + --evnode.node.aggregator=true \ + --evnode.raft.enable=false \ + --evnode.signer.passphrase_file=/etc/ev-node/passphrase \ + # ... your original flags +``` + +The chain continues from the last block committed before the cutover. No blocks are lost because the single sequencer's data was never modified. + +--- + +## New Nodes Without Existing Chain Data + +If you did not copy the block data in step 5 (or if you are adding nodes long after the chain started), the new nodes will sync historical block data via P2P and the DA layer after joining the cluster. This process runs in the background and does not prevent the cluster from electing a leader or producing new blocks. + +Monitor sync progress on a new node: + +```bash +# The node will log progress as it fetches historical blocks +journalctl -u ev-node -f | grep "sync\|height" +``` + +--- + +## Troubleshooting + +### Node-1 starts but no leader is elected + +The cluster cannot elect a leader without a quorum (3 out of 5 nodes). Ensure all five nodes are running and can reach each other on port 5001. + +### New nodes report height mismatch or divergence panic + +This happens if the block data on the new nodes was copied from a different snapshot than the final state of the old sequencer, or if the copy was done while the old sequencer was still running and produced additional blocks during the copy. + +Wipe the new nodes' block data and Raft directories, re-copy from the stopped node-1, and retry. 
+ +### Block height jumps backward or chain forks + +This should not happen if all five nodes are running the same binary version and have the same genesis file and signer key. If you see it: + +1. Stop all nodes immediately. +2. Identify which node produced the offending block. +3. Check that its genesis hash and signer key match the other nodes. + +### Old single sequencer comes back online accidentally + +If the old sequencer (without Raft) is started again after the new cluster is already producing blocks, it will attempt to produce blocks independently, creating a fork. This is why it is important to disable or remove the old single-sequencer startup scripts immediately after the cutover. + +With Raft enabled on all five nodes, only the elected leader will produce blocks — there is no risk of a sixth "rogue" leader as long as the old machine is not restarted with the old non-Raft configuration. diff --git a/docs/guides/operations/monitoring.md b/docs/guides/operations/monitoring.md index 6e47357703..0093b4371a 100644 --- a/docs/guides/operations/monitoring.md +++ b/docs/guides/operations/monitoring.md @@ -12,7 +12,7 @@ Metrics will be served under `/metrics` on port 26660 by default. The listening ## List of available metrics -You can find the full list of available metrics in the [Technical Specifications](../learn/specs/block-manager.md#metrics). +You can find the full list of available metrics in the [Technical Specifications](../../learn/specs/block-manager.md#metrics). ## Viewing Metrics diff --git a/docs/guides/raft_production.md b/docs/guides/raft_production.md deleted file mode 100644 index 9e02758da9..0000000000 --- a/docs/guides/raft_production.md +++ /dev/null @@ -1,102 +0,0 @@ -# Raft Implementation & Production Configuration - -This guide details the Raft consensus implementation in `ev-node`, used for High Availability (HA) of the Sequencer/Aggregator. It is targeted at experienced DevOps and developers configuring production environments. 
- -## Overview - -`ev-node` uses the [HashiCorp Raft](https://github.com/hashicorp/raft) implementation to manage leader election and state replication when running in **Aggregator Mode**. - -* **Role**: Ensures only one active Aggregator (Leader) produces blocks at a time. -* **Failover**: Automatically elects a new leader if the current leader fails. -* **Safety**: Synchronizes the block production state to prevent double-signing or fork divergence. - -### Architecture - -* **Transport**: TCP-based transport for inter-node communication. -* **Storage**: [BoltDB](https://github.com/etcd-io/bbolt) is used for both the Raft Log (`raft-log.db`) and Stable Store (`raft-stable.db`). Snapshots are stored as files. -* **FSM (Finite State Machine)**: The State Machine applies `RaftBlockState` (Protobuf) containing the latest block height, hash, and timestamp. -* **Safety Checks**: - * **Startup**: Nodes check for divergence between local block store and Raft state. - * **Leadership Transfer**: Before becoming leader, a node waits for its FSM to catch up (`waitForMsgsLanded`) to prevent proposing blocks from a stale state. - * **Shutdown**: The leader attempts to transfer leadership gracefully before shutting down to minimize downtime. - -## Configuration - -Raft is configured via CLI flags or the `config.toml` file under the `[raft]` (or `[evnode.raft]`) section. - -### Essential Flags - -| Flag | Config Key | Description | Production Value | -|------|------------|-------------|------------------| -| `--evnode.raft.enable` | `raft.enable` | Enable Raft consensus. | `true` | -| `--evnode.raft.node_id` | `raft.node_id` | **Unique** identifier for the node. | e.g., `node-01` | -| `--evnode.raft.raft_addr` | `raft.raft_addr` | TCP address for Raft transport. | `0.0.0.0:5001` (Bind to private IP) | -| `--evnode.raft.raft_dir` | `raft.raft_dir` | Directory for Raft data. 
| `/data/raft` (Must be persistent) | -| `--evnode.raft.peers` | `raft.peers` | Comma-separated list of peer addresses in format `nodeID@host:port`. | `node-1@10.0.0.1:5001,node-2@10.0.0.2:5001,node-3@10.0.0.3:5001` | -| `--evnode.raft.bootstrap` | `raft.bootstrap` | Compatibility flag. Startup mode is selected automatically from persisted raft configuration state. | optional | - -### Timeout Tuning - -Raft timeouts should be tuned relative to your **Block Time** (`--evnode.node.block_time`) to utilize the fast failover capabilities without causing instability. - -| Flag | Default | Recommended Tuning | -|------|---------|--------------------| -| `--evnode.raft.heartbeat_timeout` | `1s` | **10-30% of Leader Lease**. For sub-second block times, lower to `50ms-100ms`. | -| `--evnode.raft.leader_lease_timeout` | `500ms` | **Must be < Election Timeout**. Use `500ms` for 1s block times. For slower chains (e.g., 10s blocks), increase to `1s-2s` to tolerate network jitter. | -| `--evnode.raft.send_timeout` | `1s` | Should be `> 2x RTT`. | - -**Relation to Block Time**: -Ideally, a failover should complete within `2 * BlockTime` to minimize user impact. -* **Fast Chain (BlockTime < 1s)**: Tighten timeouts. Heartbeat `50ms`, Lease `250ms`. -* **Standard Chain (BlockTime = 1s)**: Heartbeat `100ms`, Lease `500ms`. -* **Slow Chain (BlockTime > 5s)**: Defaults are usually sufficient (`1s` heartbeat). - -> **Warning**: Setting timeouts too low (< RTT + Jitter) will cause leadership flapping and halted block production. - -## Production Deployment Principles - -### 1. Static Peering & Automatic Startup Mode -Use static peering with automatic mode selection from local raft configuration: -* If local raft configuration already exists in `--evnode.raft.raft_dir`, the node starts in rejoin mode. -* If no local raft configuration exists yet, the node bootstraps from configured peers. -* `--evnode.raft.bootstrap` is retained for compatibility but does not control mode selection. 
-* **All configured cluster members** should list the full set of peers in `--evnode.raft.peers`. -* The `peers` list format is strict: `NodeID@Host:Port`. -* **Limitation**: Dynamic addition of peers (run-time membership changes) via RPC/CLI is not currently exposed. -* **Not supported**: Joining an existing cluster as a brand-new node that was not part of the initial static membership. - -### 2. Infrastructure Requirements -* **Encrypted Network (CRITICAL)**: Raft traffic is **unencrypted** (plain TCP). You **MUST** run the cluster inside a private network, VPN, or encrypted mesh (e.g., WireGuard, Tailscale). **Never expose Raft ports to the public internet**; doing so allows attackers to hijack the cluster consensus. -* **Cluster Size**: Run an **odd number** of nodes (3 or 5) to tolerate failures (3 nodes tolerate 1 failure; 5 nodes tolerate 2). -* **Storage**: The `--evnode.raft.raft_dir` **MUST** be mounted on persistent storage. Loss of this directory will cause the node to lose its identity and commit history, effectively removing it from the cluster. -* **Network**: Raft requires low-latency, reliable connectivity. Ensure firewall rules allow TCP traffic on `raft_addr`. - -### 3. P2P Interaction & Catch-Up -Raft and P2P work in parallel to ensure reliability: -* **Hot Replication (Raft)**: New blocks produced by the leader are replicated via the Raft transport (Header + Data) to all followers. This ensures low-latency propagation of the chain tip. -* **Catch-Up (P2P)**: If a node falls behind (e.g., disconnected for longer than the Raft log retention), it will receive a **Raft Snapshot** to update its consensus state to the latest head. However, the *historical blocks* between its local state and the new head are fetched via the **P2P Network** (or DA). - * **Implication**: You must ensure P2P connectivity (`--p2p.listen_address` and `--p2p.peers`) is configured even for Raft nodes, to allow them to backfill missing data from peers. - -### 4. 
Lifecycle Management -* **Rolling Restarts**: You can restart nodes one by one. The `ev-node` implementation handles graceful shutdown (leadership transfer) to minimize impact. -* **State Divergence**: If a node falls too far behind or its local store conflicts with Raft (e.g., due to catastrophic disk failure), it may panic on startup to protect safety. In such cases, a manual extensive recovery (wiping state and re-syncing) may be required. - -### 4. Monitoring -Monitor the following metrics (propagated via Prometheus if enabled): -* **Leadership Changes**: Frequent changes indicate network instability or overloaded nodes. -* **Applied Index vs Commit Index**: A growing lag indicates the FSM cannot keep up. - -## Example Command - -```bash -./ev-node start \ - --evnode.node.aggregator=true \ - --evnode.raft.enable=true \ - --evnode.raft.node_id="node-1" \ - --evnode.raft.raft_addr="0.0.0.0:5001" \ - --evnode.raft.raft_dir="/var/lib/ev-node/raft" \ - --evnode.raft.bootstrap=true \ - --evnode.raft.peers="node-1@10.0.1.1:5001,node-2@10.0.1.2:5001,node-3@10.0.1.3:5001" \ - --evnode.p2p.listen_address="/ip4/0.0.0.0/tcp/26656" \ - ...other flags -``` diff --git a/docs/index.md b/docs/index.md index 04b3aff631..dbc7e4ee98 100644 --- a/docs/index.md +++ b/docs/index.md @@ -4,6 +4,8 @@ title: Evolve Documentation titleTemplate: ':title' --- + + diff --git a/docs/package-lock.json b/docs/package-lock.json index 85f62da763..e373f6e016 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -3396,9 +3396,9 @@ } }, "node_modules/postcss": { - "version": "8.5.8", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.8.tgz", - "integrity": "sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==", + "version": "8.5.12", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.12.tgz", + "integrity": "sha512-W62t/Se6rA0Az3DfCL0AqJwXuKwBeYg6nOaIgzP+xZ7N5BFCI7DYi1qs6ygUYT6rvfi6t9k65UMLJC+PHZpDAA==", "funding": [ 
{ "type": "opencollective", diff --git a/docs/reference/configuration/ev-node-config.md b/docs/reference/configuration/ev-node-config.md index 56eaadef72..425b00c901 100644 --- a/docs/reference/configuration/ev-node-config.md +++ b/docs/reference/configuration/ev-node-config.md @@ -730,7 +730,7 @@ _Example:_ `--evnode.rpc.enable_da_visualization` _Default:_ `false` _Constant:_ `FlagRPCEnableDAVisualization` -See the [DA Visualizer Guide](../guides/da/visualizer.md) for detailed information on using this feature. +See the [DA Visualizer Guide](../../guides/da/visualizer.md) for detailed information on using this feature. ### Health Endpoints @@ -797,7 +797,7 @@ _Constant:_ `FlagPrometheus` **Description:** The network address (host:port) where the Prometheus metrics server will listen for scraping requests. -See [Metrics](../guides/metrics.md) for more details on what metrics are exposed. +See [Metrics](../../guides/metrics.md) for more details on what metrics are exposed. **YAML:** diff --git a/docs/yarn.lock b/docs/yarn.lock index 7e4c071a38..bb69a1092e 100644 --- a/docs/yarn.lock +++ b/docs/yarn.lock @@ -1967,9 +1967,9 @@ points-on-path@^0.2.1: points-on-curve "0.2.0" postcss@^8.4.43, postcss@^8.5.8: - version "8.5.8" - resolved "https://registry.npmjs.org/postcss/-/postcss-8.5.8.tgz" - integrity sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg== + version "8.5.12" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.5.12.tgz#cd0c0f667f7cb0521e2313234ea6e707a9ec1ddb" + integrity sha512-W62t/Se6rA0Az3DfCL0AqJwXuKwBeYg6nOaIgzP+xZ7N5BFCI7DYi1qs6ygUYT6rvfi6t9k65UMLJC+PHZpDAA== dependencies: nanoid "^3.3.11" picocolors "^1.1.1" diff --git a/execution/evm/go.mod b/execution/evm/go.mod index 17b6831d67..5b7db9e0c4 100644 --- a/execution/evm/go.mod +++ b/execution/evm/go.mod @@ -9,11 +9,11 @@ replace ( require ( github.com/ethereum/go-ethereum v1.17.2 - github.com/evstack/ev-node v1.1.0 + github.com/evstack/ev-node 
v1.1.1 github.com/evstack/ev-node/core v1.0.0 github.com/golang-jwt/jwt/v5 v5.3.1 github.com/ipfs/go-datastore v0.9.1 - github.com/rs/zerolog v1.35.0 + github.com/rs/zerolog v1.35.1 github.com/stretchr/testify v1.11.1 go.opentelemetry.io/otel v1.43.0 go.opentelemetry.io/otel/sdk v1.43.0 diff --git a/execution/evm/go.sum b/execution/evm/go.sum index d296895842..7cfe70f669 100644 --- a/execution/evm/go.sum +++ b/execution/evm/go.sum @@ -1,7 +1,7 @@ cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= -cloud.google.com/go/auth v0.18.2 h1:+Nbt5Ev0xEqxlNjd6c+yYUeosQ5TtEUaNcN/3FozlaM= -cloud.google.com/go/auth v0.18.2/go.mod h1:xD+oY7gcahcu7G2SG2DsBerfFxgPAJz17zz2joOFF3M= +cloud.google.com/go/auth v0.20.0 h1:kXTssoVb4azsVDoUiF8KvxAqrsQcQtB53DcSgta74CA= +cloud.google.com/go/auth v0.20.0/go.mod h1:942/yi/itH1SsmpyrbnTMDgGfdy2BUqIKyd0cyYLc5Q= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= @@ -44,8 +44,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 h1:HtOTYcb github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8/go.mod h1:VsK9abqQeGlzPgUr+isNWzPlK2vKe9INMLWnY65f5Xs= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 h1:PUmZeJU6Y1Lbvt9WFuJ0ugUK2xn6hIWUBBbKuOWF30s= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22/go.mod h1:nO6egFBoAaoXze24a2C0NjQCvdpk8OueRoYimvEB9jo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 h1:nEzwx/ZlpUZ2Y6WztsgYmfBh5Ixd3QiECawXMzvTMeo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 
h1:696UM+NwOrETBCLQJyCAGtVmmZmziBT59yMwgg6Fvrw= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 h1:a1Fq/KXn75wSzoJaPQTgZO0wHGqE9mjFnylnqEPTchA= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10/go.mod h1:p6+MXNxW7IA6dMgHfTAzljuwSKD0NCm/4lbS4t6+7vI= github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 h1:x6bKbmDhsgSZwv6q19wY/u3rLk/3FGjJWyqKcIRufpE= @@ -54,8 +54,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 h1:oK/njaL8GtyEihkWMD4k3Vg github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20/go.mod h1:JHs8/y1f3zY7U5WcuzoJ/yAYGYtNIVPKLIbp61euvmg= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 h1:ks8KBcZPh3PYISr5dAiXCM5/Thcuxk8l+PG4+A0exds= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0/go.mod h1:pFw33T0WLvXU3rw1WBkpMlkgIn54eCB5FYLhjDc9Foo= -github.com/aws/smithy-go v1.25.0 h1:Sz/XJ64rwuiKtB6j98nDIPyYrV1nVNJ4YU74gttcl5U= -github.com/aws/smithy-go v1.25.0/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/aws/smithy-go v1.25.1 h1:J8ERsGSU7d+aCmdQur5Txg6bVoYelvQJgtZehD12GkI= +github.com/aws/smithy-go v1.25.1/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -268,8 +268,8 @@ github.com/libp2p/go-libp2p-kad-dht v0.39.1 h1:9RzUBc7zywT4ZNamRSgEvPZmVlK3Y6xdl github.com/libp2p/go-libp2p-kad-dht v0.39.1/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= -github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= -github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod 
h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= +github.com/libp2p/go-libp2p-pubsub v0.16.0 h1:j7G2C8kJwkcAQqYR7Wmq3d75d3Sgw/N0Hhiv0dVx7OY= +github.com/libp2p/go-libp2p-pubsub v0.16.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= @@ -404,8 +404,8 @@ github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0t github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/zerolog v1.35.0 h1:VD0ykx7HMiMJytqINBsKcbLS+BJ4WYjz+05us+LRTdI= -github.com/rs/zerolog v1.35.0/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= +github.com/rs/zerolog v1.35.1 h1:m7xQeoiLIiV0BCEY4Hs+j2NG4Gp2o2KPKmhnnLiazKI= +github.com/rs/zerolog v1.35.1/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= @@ -457,10 +457,10 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= @@ -567,8 +567,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= -google.golang.org/api v0.274.0 h1:aYhycS5QQCwxHLwfEHRRLf9yNsfvp1JadKKWBE54RFA= -google.golang.org/api v0.274.0/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= +google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY= +google.golang.org/api v0.276.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw= google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 h1:XzmzkmB14QhVhgnawEVsOn6OFsnpyxNPRY9QV01dNB0= 
google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:L43LFes82YgSonw6iTXTxXUX1OlULt4AQtkik4ULL/I= google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= diff --git a/execution/evm/test/execution_test.go b/execution/evm/test/execution_test.go index 4b6c6986c7..e556a718b4 100644 --- a/execution/evm/test/execution_test.go +++ b/execution/evm/test/execution_test.go @@ -21,8 +21,8 @@ import ( const ( CHAIN_ID = "1234" - GENESIS_HASH = "0x2b8bbb1ea1e04f9c9809b4b278a8687806edc061a356c7dbc491930d8e922503" - GENESIS_STATEROOT = "0x05e9954443da80d86f2104e56ffdfd98fe21988730684360104865b3dc8191b4" + GENESIS_HASH = "0x6699a4ef6e7b76499c6cd68443455a3584e505b1da43cf82e6efaef41f5e1d8a" + GENESIS_STATEROOT = "0x0373244015ce5fa583ed6e575b7a22c39a542233a62d2bd47c37fd1a2b035366" TEST_PRIVATE_KEY = "cece4f25ac74deb1468965160c7185e07dff413f23fcadb611b05ca37ab0a52e" TEST_TO_ADDRESS = "0x944fDcD1c868E3cC566C78023CcB38A32cDA836E" ) @@ -129,8 +129,9 @@ func TestEngineExecution(t *testing.T) { allTimestamps = append(allTimestamps, blockTimestamp) // Execute transactions and get the new state root - newStateRoot, err := executionClient.ExecuteTxs(ctx, payload, blockHeight, blockTimestamp, prevStateRoot) + executeResult, err := executionClient.ExecuteTxs(ctx, payload, blockHeight, blockTimestamp, prevStateRoot) require.NoError(tt, err) + newStateRoot := executeResult.UpdatedStateRoot err = executionClient.SetFinal(ctx, blockHeight) require.NoError(tt, err) @@ -201,8 +202,9 @@ func TestEngineExecution(t *testing.T) { // Use timestamp from build phase for each block to ensure proper ordering blockTimestamp := allTimestamps[blockHeight-1] - newStateRoot, err := executionClient.ExecuteTxs(ctx, payload, blockHeight, blockTimestamp, prevStateRoot) + executeResult, err := executionClient.ExecuteTxs(ctx, payload, blockHeight, blockTimestamp, prevStateRoot) require.NoError(t, err) + newStateRoot := 
executeResult.UpdatedStateRoot if len(payload) == 0 { require.Equal(tt, prevStateRoot, newStateRoot) } else { diff --git a/execution/evm/test/go.mod b/execution/evm/test/go.mod index 23aadab045..16db8398db 100644 --- a/execution/evm/test/go.mod +++ b/execution/evm/test/go.mod @@ -3,12 +3,12 @@ module github.com/evstack/ev-node/execution/evm/test go 1.25.7 require ( - github.com/celestiaorg/tastora v0.8.0 + github.com/celestiaorg/tastora v0.19.0 github.com/ethereum/go-ethereum v1.17.2 github.com/evstack/ev-node/execution/evm v0.0.0-00010101000000-000000000000 github.com/golang-jwt/jwt/v5 v5.3.1 github.com/ipfs/go-datastore v0.9.1 - github.com/rs/zerolog v1.35.0 + github.com/rs/zerolog v1.35.1 github.com/stretchr/testify v1.11.1 go.uber.org/zap v1.27.1 ) @@ -18,43 +18,50 @@ require ( cosmossdk.io/collections v0.4.0 // indirect cosmossdk.io/core v0.11.1 // indirect cosmossdk.io/depinject v1.1.0 // indirect - cosmossdk.io/errors v1.0.1 // indirect - cosmossdk.io/log v1.4.1 // indirect - cosmossdk.io/math v1.4.0 // indirect - cosmossdk.io/store v1.1.1 // indirect - cosmossdk.io/x/tx v0.13.7 // indirect + cosmossdk.io/errors v1.0.2 // indirect + cosmossdk.io/log v1.6.0 // indirect + cosmossdk.io/math v1.5.1 // indirect + cosmossdk.io/store v1.1.2 // indirect + cosmossdk.io/x/tx v0.13.8 // indirect filippo.io/edwards25519 v1.1.1 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect - github.com/99designs/keyring v1.2.1 // indirect + github.com/99designs/keyring v1.2.2 // indirect github.com/BurntSushi/toml v1.5.0 // indirect - github.com/DataDog/zstd v1.5.5 // indirect + github.com/DataDog/zstd v1.5.6 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/avast/retry-go/v4 v4.6.1 // indirect + github.com/bcp-innovations/hyperlane-cosmos v1.0.1 // indirect 
github.com/beorn7/perks v1.0.1 // indirect - github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect + github.com/bgentry/speakeasy v0.2.0 // indirect github.com/bits-and-blooms/bitset v1.20.0 // indirect + github.com/bytedance/gopkg v0.1.3 // indirect + github.com/bytedance/sonic v1.15.0 // indirect + github.com/bytedance/sonic/loader v0.5.0 // indirect github.com/celestiaorg/go-square/v3 v3.0.2 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudwego/base64x v0.1.6 // indirect + github.com/cockroachdb/apd/v3 v3.2.1 // indirect github.com/cockroachdb/errors v1.11.3 // indirect - github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect - github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 // indirect + github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect github.com/cockroachdb/pebble v1.1.5 // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/cometbft/cometbft v0.38.21 // indirect - github.com/cometbft/cometbft-db v0.14.1 // indirect + github.com/cometbft/cometbft-db v1.0.4 // indirect github.com/consensys/gnark-crypto v0.18.2 // indirect github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/cosmos/btcutil v1.0.5 // indirect github.com/cosmos/cosmos-db v1.1.1 // indirect github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect github.com/cosmos/cosmos-sdk v0.50.15 // indirect github.com/cosmos/go-bip39 v1.0.0 // indirect github.com/cosmos/gogoproto v1.7.0 // indirect - github.com/cosmos/iavl v1.2.2 // indirect + github.com/cosmos/iavl v1.2.6 // indirect github.com/cosmos/ics23/go v0.11.0 // indirect github.com/cosmos/ledger-cosmos-go v0.15.0 // indirect 
github.com/crate-crypto/go-eth-kzg v1.5.0 // indirect @@ -65,25 +72,25 @@ require ( github.com/dgraph-io/badger/v4 v4.5.1 // indirect github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v28.4.0+incompatible // indirect - github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/dvsekhvalnov/jose2go v1.7.0 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/ethereum/c-kzg-4844/v2 v2.1.6 // indirect - github.com/evstack/ev-node v1.1.0 // indirect + github.com/evstack/ev-node v1.1.1 // indirect github.com/evstack/ev-node/core v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/ferranbt/fastssz v0.1.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/getsentry/sentry-go v0.31.1 // indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/go-viper/mapstructure/v2 v2.5.0 // indirect github.com/goccy/go-yaml v1.19.2 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect @@ -103,7 +110,7 @@ require ( github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-metrics v0.5.4 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect - github.com/hdevalence/ed25519consensus v0.1.0 // indirect + github.com/hdevalence/ed25519consensus v0.2.0 // indirect github.com/holiman/uint256 v1.3.2 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ 
-113,15 +120,15 @@ require ( github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/linxGnu/grocksdb v1.8.14 // indirect + github.com/linxGnu/grocksdb v1.9.8 // indirect github.com/magiconair/properties v1.8.10 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/moby v27.5.1+incompatible // indirect - github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/moby/api v1.54.1 // indirect + github.com/moby/moby/client v0.4.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect @@ -161,12 +168,13 @@ require ( github.com/tidwall/btree v1.7.0 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v1.0.0 // indirect - go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 // indirect + go.etcd.io/bbolt v1.4.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect go.opentelemetry.io/otel v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect @@ -177,6 +185,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.yaml.in/yaml/v2 v2.4.4 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/arch v0.15.0 // indirect 
golang.org/x/crypto v0.50.0 // indirect golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 // indirect golang.org/x/net v0.53.0 // indirect @@ -193,7 +202,7 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect lukechampine.com/blake3 v1.4.1 // indirect - pgregory.net/rapid v1.1.0 // indirect + pgregory.net/rapid v1.2.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/execution/evm/test/go.sum b/execution/evm/test/go.sum index 01ee1c1743..0b1f89609c 100644 --- a/execution/evm/test/go.sum +++ b/execution/evm/test/go.sum @@ -2,8 +2,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= -cloud.google.com/go/auth v0.18.2 h1:+Nbt5Ev0xEqxlNjd6c+yYUeosQ5TtEUaNcN/3FozlaM= -cloud.google.com/go/auth v0.18.2/go.mod h1:xD+oY7gcahcu7G2SG2DsBerfFxgPAJz17zz2joOFF3M= +cloud.google.com/go/auth v0.20.0 h1:kXTssoVb4azsVDoUiF8KvxAqrsQcQtB53DcSgta74CA= +cloud.google.com/go/auth v0.20.0/go.mod h1:942/yi/itH1SsmpyrbnTMDgGfdy2BUqIKyd0cyYLc5Q= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/compute v1.54.0 h1:4CKmnpO+40z44bKG5bdcKxQ7ocNpRtOc9SCLLUzze1w= @@ -23,16 +23,16 @@ cosmossdk.io/core v0.11.1 h1:h9WfBey7NAiFfIcUhDVNS503I2P2HdZLebJlUIs8LPA= cosmossdk.io/core v0.11.1/go.mod h1:OJzxcdC+RPrgGF8NJZR2uoQr56tc7gfBKhiKeDO7hH0= cosmossdk.io/depinject v1.1.0 h1:wLan7LG35VM7Yo6ov0jId3RHWCGRhe8E8bsuARorl5E= cosmossdk.io/depinject v1.1.0/go.mod h1:kkI5H9jCGHeKeYWXTqYdruogYrEeWvBQCw1Pj4/eCFI= -cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= -cosmossdk.io/errors v1.0.1/go.mod 
h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= -cosmossdk.io/log v1.4.1 h1:wKdjfDRbDyZRuWa8M+9nuvpVYxrEOwbD/CA8hvhU8QM= -cosmossdk.io/log v1.4.1/go.mod h1:k08v0Pyq+gCP6phvdI6RCGhLf/r425UT6Rk/m+o74rU= -cosmossdk.io/math v1.4.0 h1:XbgExXFnXmF/CccPPEto40gOO7FpWu9yWNAZPN3nkNQ= -cosmossdk.io/math v1.4.0/go.mod h1:O5PkD4apz2jZs4zqFdTr16e1dcaQCc5z6lkEnrrppuk= -cosmossdk.io/store v1.1.1 h1:NA3PioJtWDVU7cHHeyvdva5J/ggyLDkyH0hGHl2804Y= -cosmossdk.io/store v1.1.1/go.mod h1:8DwVTz83/2PSI366FERGbWSH7hL6sB7HbYp8bqksNwM= -cosmossdk.io/x/tx v0.13.7 h1:8WSk6B/OHJLYjiZeMKhq7DK7lHDMyK0UfDbBMxVmeOI= -cosmossdk.io/x/tx v0.13.7/go.mod h1:V6DImnwJMTq5qFjeGWpXNiT/fjgE4HtmclRmTqRVM3w= +cosmossdk.io/errors v1.0.2 h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= +cosmossdk.io/errors v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= +cosmossdk.io/log v1.6.0 h1:SJIOmJ059wi1piyRgNRXKXhlDXGqnB5eQwhcZKv2tOk= +cosmossdk.io/log v1.6.0/go.mod h1:5cXXBvfBkR2/BcXmosdCSLXllvgSjphrrDVdfVRmBGM= +cosmossdk.io/math v1.5.1 h1:c6zo52nBRlqOeSIIQrn/zbxwcNwhaLjTMRn6e4vD7uc= +cosmossdk.io/math v1.5.1/go.mod h1:ToembcWID/wR94cucsMD+2gq6xrlBBOfWcGwC7ZdwZA= +cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o= +cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A= +cosmossdk.io/x/tx v0.13.8 h1:dQwC8jMe7awx/edi1HPPZ40AjHnsix6KSO/jbKMUYKk= +cosmossdk.io/x/tx v0.13.8/go.mod h1:V6DImnwJMTq5qFjeGWpXNiT/fjgE4HtmclRmTqRVM3w= cosmossdk.io/x/upgrade v0.1.4 h1:/BWJim24QHoXde8Bc64/2BSEB6W4eTydq0X/2f8+g38= cosmossdk.io/x/upgrade v0.1.4/go.mod h1:9v0Aj+fs97O+Ztw+tG3/tp5JSlrmT7IcFhAebQHmOPo= filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5 h1:JA0fFr+kxpqTdxR9LOBiTWpGNchqmkcsgmdeJZRclZ0= @@ -43,17 +43,16 @@ filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b h1:REI1FbdW71yO56Are4XAxD+O filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b/go.mod h1:9nnw1SlYHYuPSo/3wjQzNjSbeHlq2NsKo5iEtfJPWP0= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 
h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= -github.com/99designs/keyring v1.2.1 h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo87o= -github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= -github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= -github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= +github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= +github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime 
v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU= @@ -91,8 +90,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 h1:HtOTYcb github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8/go.mod h1:VsK9abqQeGlzPgUr+isNWzPlK2vKe9INMLWnY65f5Xs= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 h1:PUmZeJU6Y1Lbvt9WFuJ0ugUK2xn6hIWUBBbKuOWF30s= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22/go.mod h1:nO6egFBoAaoXze24a2C0NjQCvdpk8OueRoYimvEB9jo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 h1:nEzwx/ZlpUZ2Y6WztsgYmfBh5Ixd3QiECawXMzvTMeo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 h1:696UM+NwOrETBCLQJyCAGtVmmZmziBT59yMwgg6Fvrw= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 h1:a1Fq/KXn75wSzoJaPQTgZO0wHGqE9mjFnylnqEPTchA= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10/go.mod h1:p6+MXNxW7IA6dMgHfTAzljuwSKD0NCm/4lbS4t6+7vI= github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 h1:x6bKbmDhsgSZwv6q19wY/u3rLk/3FGjJWyqKcIRufpE= @@ -101,28 +100,36 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 h1:oK/njaL8GtyEihkWMD4k3Vg github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20/go.mod h1:JHs8/y1f3zY7U5WcuzoJ/yAYGYtNIVPKLIbp61euvmg= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 h1:ks8KBcZPh3PYISr5dAiXCM5/Thcuxk8l+PG4+A0exds= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0/go.mod h1:pFw33T0WLvXU3rw1WBkpMlkgIn54eCB5FYLhjDc9Foo= -github.com/aws/smithy-go v1.25.0 h1:Sz/XJ64rwuiKtB6j98nDIPyYrV1nVNJ4YU74gttcl5U= -github.com/aws/smithy-go v1.25.0/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/aws/smithy-go v1.25.1 h1:J8ERsGSU7d+aCmdQur5Txg6bVoYelvQJgtZehD12GkI= +github.com/aws/smithy-go v1.25.1/go.mod 
h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/bcp-innovations/hyperlane-cosmos v1.0.1 h1:gT8OqyJ866Q6AHOlIXKxSdLjd0p8crKG9XXERIWoh4c= +github.com/bcp-innovations/hyperlane-cosmos v1.0.1/go.mod h1:3yfa0io5Ii6GmhHWsWl2LEAOEHsqWuMgw2R02+LPogw= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= -github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bgentry/speakeasy v0.2.0 h1:tgObeVOf8WAvtuAX6DhJ4xks4CFNwPDZiqzGqIHE51E= +github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd/btcec/v2 v2.3.5 h1:dpAlnAwmT1yIBm3exhT1/8iUSD98RDJM5vqJVQDQLiU= github.com/btcsuite/btcd/btcec/v2 v2.3.5/go.mod h1:m22FrOAiuxl/tht9wIqAoGHcbnCCaPWyauO8y2LGGtQ= github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c= github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= +github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= +github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM= +github.com/bytedance/sonic v1.15.0 
h1:/PXeWFaR5ElNcVE84U0dOHjiMHQOwNIx3K4ymzh/uSE= +github.com/bytedance/sonic v1.15.0/go.mod h1:tFkWrPz0/CUCLEF4ri4UkHekCIcdnkqXw9VduqpJh0k= +github.com/bytedance/sonic/loader v0.5.0 h1:gXH3KVnatgY7loH5/TkeVyXPfESoqSBSBEiDd5VjlgE= +github.com/bytedance/sonic/loader v0.5.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo= github.com/celestiaorg/go-header v0.8.5 h1:MkzlioiSeybKVNDa0805fS3mS3NG8ub93Gs2xaKwSZ4= github.com/celestiaorg/go-header v0.8.5/go.mod h1:DKl6pcKCJ0ehGUgDmfxBNz6Lv0Ky4E1Oyrcx96eQm/4= github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= -github.com/celestiaorg/tastora v0.8.0 h1:+FWAIsP2onwwqPTGzBLIBtx8B1h9sImdx4msv2N4DsI= -github.com/celestiaorg/tastora v0.8.0/go.mod h1:9b5GsL/+pKEw3HZG/nd3qhnGadUnNNoTBygy9HeGIyw= +github.com/celestiaorg/tastora v0.19.0 h1:m5MiDxYqlEdeY2i+vIpPZzjn5iA7U0JUM1PxE7Vs/UA= +github.com/celestiaorg/tastora v0.19.0/go.mod h1:uhEz7v8YJmJuVgsJaCe0M0Q/HJiQAQNMu3w/OtmFIQY= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= @@ -140,15 +147,19 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= +github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod 
h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= +github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= -github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= -github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 h1:pU88SPhIFid6/k0egdR5V6eALQYq2qbSmukrkgIh/0A= +github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 h1:ASDL+UJcILMqgNeV5jiqR4j+sTuvQNHdf2chuKj1M5k= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506/go.mod h1:Mw7HqKr2kdtu6aYGn3tPmAftiP3QPX63LdK/zcariIo= github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= @@ -157,14 +168,14 @@ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAK github.com/cockroachdb/tokenbucket 
v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/cometbft/cometbft v0.38.21 h1:qcIJSH9LiwU5s6ZgKR5eRbsLNucbubfraDs5bzgjtOI= github.com/cometbft/cometbft v0.38.21/go.mod h1:UCu8dlHqvkAsmAFmWDRWNZJPlu6ya2fTWZlDrWsivwo= -github.com/cometbft/cometbft-db v0.14.1 h1:SxoamPghqICBAIcGpleHbmoPqy+crij/++eZz3DlerQ= -github.com/cometbft/cometbft-db v0.14.1/go.mod h1:KHP1YghilyGV/xjD5DP3+2hyigWx0WTp9X+0Gnx0RxQ= +github.com/cometbft/cometbft-db v1.0.4 h1:cezb8yx/ZWcF124wqUtAFjAuDksS1y1yXedvtprUFxs= +github.com/cometbft/cometbft-db v1.0.4/go.mod h1:M+BtHAGU2XLrpUxo3Nn1nOCcnVCiLM9yx5OuT0u5SCA= github.com/consensys/gnark-crypto v0.18.2 h1:+unEU7+M6vc9JszZPNTcRTwtrJg85tb57+5Gkyrz3hU= github.com/consensys/gnark-crypto v0.18.2/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= -github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= -github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNCM= @@ -179,8 +190,8 @@ github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiK github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= github.com/cosmos/gogoproto v1.7.0 h1:79USr0oyXAbxg3rspGh/m4SWNyoz/GLaAh0QlCe2fro= github.com/cosmos/gogoproto v1.7.0/go.mod h1:yWChEv5IUEYURQasfyBW5ffkMHR/90hiHgbNgrtp4j0= 
-github.com/cosmos/iavl v1.2.2 h1:qHhKW3I70w+04g5KdsdVSHRbFLgt3yY3qTMd4Xa4rC8= -github.com/cosmos/iavl v1.2.2/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= +github.com/cosmos/iavl v1.2.6 h1:Hs3LndJbkIB+rEvToKJFXZvKo6Vy0Ex1SJ54hhtioIs= +github.com/cosmos/iavl v1.2.6/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= github.com/cosmos/ibc-go/modules/capability v1.0.1 h1:ibwhrpJ3SftEEZRxCRkH0fQZ9svjthrX2+oXdZvzgGI= github.com/cosmos/ibc-go/modules/capability v1.0.1/go.mod h1:rquyOV262nGJplkumH+/LeYs04P3eV8oB7ZM4Ygqk4E= github.com/cosmos/ibc-go/v8 v8.7.0 h1:HqhVOkO8bDpClXE81DFQgFjroQcTvtpm0tCS7SQVKVY= @@ -210,8 +221,8 @@ github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1 h1:5RVFMOWjMyRy8cARdy79nAmgYw3hK/4HUq48LQ6Wwqo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= -github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= -github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/desertbit/timer v1.0.1 h1:yRpYNn5Vaaj6QXecdLMPMJsW81JLiI1eokUft5nBmeo= +github.com/desertbit/timer v1.0.1/go.mod h1:htRrYeY5V/t4iu1xCJ5XsQvp4xve8QulXXctAzxqcwE= github.com/dgraph-io/badger/v4 v4.5.1 h1:7DCIXrQjo1LKmM96YD+hLVJ2EEsyyoWxJfpdd56HLps= github.com/dgraph-io/badger/v4 v4.5.1/go.mod h1:qn3Be0j3TfV4kPbVoK0arXCD1/nr1ftth6sbL5jxdoA= github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I= @@ -220,10 +231,8 @@ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WA github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= 
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v28.4.0+incompatible h1:KVC7bz5zJY/4AZe/78BIvCnPsLaC9T/zh72xnlrTTOk= -github.com/docker/docker v28.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dunglas/httpsfv v1.1.0 h1:Jw76nAyKWKZKFrpMMcL76y35tOpYHqQPzHQiwDvpe54= @@ -263,8 +272,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= -github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/getsentry/sentry-go v0.31.1 h1:ELVc0h7gwyhnXHDouXkhqTFSO5oslsRDk0++eyE0KJ4= +github.com/getsentry/sentry-go v0.31.1/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= @@ -290,6 +299,8 @@ github.com/go-ole/go-ole v1.3.0 
h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro= github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM= @@ -358,6 +369,8 @@ github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -395,8 +408,8 @@ github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJ github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= github.com/hashicorp/go-metrics 
v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= -github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= -github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= +github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= +github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= @@ -406,10 +419,10 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= -github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= +github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= +github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db h1:IZUYC/xb3giYwBLMnr8d0TGTzPKFGNTCGgGLoyeX330= github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db/go.mod 
h1:xTEYN9KCHxuYHs+NmrmzFcnvHMzLLNiGFafCb1n3Mfg= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= @@ -417,8 +430,8 @@ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iU github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= -github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/huandu/skiplist v1.2.1 h1:dTi93MgjwErA/8idWTzIw4Y1kZsMWx35fmI2c8Rij7w= +github.com/huandu/skiplist v1.2.1/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= @@ -493,8 +506,8 @@ github.com/libp2p/go-libp2p-kad-dht v0.39.1 h1:9RzUBc7zywT4ZNamRSgEvPZmVlK3Y6xdl github.com/libp2p/go-libp2p-kad-dht v0.39.1/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= -github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= -github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= +github.com/libp2p/go-libp2p-pubsub v0.16.0 h1:j7G2C8kJwkcAQqYR7Wmq3d75d3Sgw/N0Hhiv0dVx7OY= +github.com/libp2p/go-libp2p-pubsub v0.16.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= github.com/libp2p/go-libp2p-record v0.3.1/go.mod 
h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= @@ -507,8 +520,8 @@ github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQsc github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg= github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= -github.com/linxGnu/grocksdb v1.8.14 h1:HTgyYalNwBSG/1qCQUIott44wU5b2Y9Kr3z7SK5OfGQ= -github.com/linxGnu/grocksdb v1.8.14/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA= +github.com/linxGnu/grocksdb v1.9.8 h1:vOIKv9/+HKiqJAElJIEYv3ZLcihRxyP7Suu/Mu8Dxjs= +github.com/linxGnu/grocksdb v1.9.8/go.mod h1:C3CNe9UYc9hlEM2pC82AqiGS3LRW537u9LFV4wIZuHk= github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= @@ -528,28 +541,20 @@ github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= 
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/moby v27.5.1+incompatible h1:/pN59F/t3U7Q4FPzV88nzqf7Fp0qqCSL2KzhZaiKcKw= -github.com/moby/moby v27.5.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= -github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= -github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= -github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= -github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= -github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/moby/moby/api v1.54.1 h1:TqVzuJkOLsgLDDwNLmYqACUuTehOHRGKiPhvH8V3Nn4= +github.com/moby/moby/api v1.54.1/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs= +github.com/moby/moby/client v0.4.0 h1:S+2XegzHQrrvTCvF6s5HFzcrywWQmuVnhOXe2kiWjIw= +github.com/moby/moby/client v0.4.0/go.mod h1:QWPbvWchQbxBNdaLSpoKpCdf5E+WxFAgNHogCWDoa7g= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mr-tron/base58 
v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= @@ -592,12 +597,14 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= -github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -704,8 +711,8 @@ github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0t github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= 
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/zerolog v1.35.0 h1:VD0ykx7HMiMJytqINBsKcbLS+BJ4WYjz+05us+LRTdI= -github.com/rs/zerolog v1.35.0/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= +github.com/rs/zerolog v1.35.1 h1:m7xQeoiLIiV0BCEY4Hs+j2NG4Gp2o2KPKmhnnLiazKI= +github.com/rs/zerolog v1.35.1/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -718,8 +725,6 @@ github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= @@ -750,6 +755,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod 
h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= @@ -767,6 +774,8 @@ github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0h github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -781,16 +790,16 @@ github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v1.0.0 h1:BvNoksIyRqyQTW78rIZP9A44WwAminKiomQa7jXp9EI= github.com/zondax/ledger-go v1.0.0/go.mod h1:HpgkgFh3Jkwi9iYLDATdyRxc8CxqxcywsFj6QerWzvo= -go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 h1:qxen9oVGzDdIRP6ejyAJc760RwW4SnVDiTYTzwnXuxo= -go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5/go.mod h1:eW0HG9/oHQhvRCvb1/pIXW4cOvtDqeQK+XSi3TnwaXY= +go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= +go.etcd.io/bbolt v1.4.0/go.mod 
h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= @@ -823,6 +832,8 @@ go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ= go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod 
h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw= +golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -942,8 +953,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= -google.golang.org/api v0.274.0 h1:aYhycS5QQCwxHLwfEHRRLf9yNsfvp1JadKKWBE54RFA= -google.golang.org/api v0.274.0/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= +google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY= +google.golang.org/api v0.276.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -1006,9 +1017,9 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= -nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= 
-nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= -pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +nhooyr.io/websocket v1.8.17 h1:KEVeLJkUywCKVsnLIDlD/5gtayKp8VoCkksHCGGfT9Y= +nhooyr.io/websocket v1.8.17/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= +pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/execution/evm/test/test_helpers.go b/execution/evm/test/test_helpers.go index b7e96a923d..64ff6cafa8 100644 --- a/execution/evm/test/test_helpers.go +++ b/execution/evm/test/test_helpers.go @@ -5,6 +5,7 @@ package test import ( "context" "encoding/hex" + "encoding/json" "fmt" mathrand "math/rand" "net/http" @@ -79,51 +80,115 @@ func SetupTestRethNode(t testing.TB, client types.TastoraDockerClient, networkID // waitForRethContainer waits for the Reth container to be ready by polling the provided endpoints with JWT authentication. 
func waitForRethContainer(t testing.TB, jwtSecret, ethURL, engineURL string) error { t.Helper() - client := &http.Client{Timeout: 100 * time.Millisecond} + + secret, err := decodeSecret(jwtSecret) + if err != nil { + return err + } + authToken, err := getAuthToken(secret) + if err != nil { + return err + } + + client := &http.Client{Timeout: 500 * time.Millisecond} timer := time.NewTimer(30 * time.Second) defer timer.Stop() + var lastErr error for { select { case <-timer.C: + if lastErr != nil { + return fmt.Errorf("timeout waiting for reth container to be ready: %w", lastErr) + } return fmt.Errorf("timeout waiting for reth container to be ready") default: - rpcReq := strings.NewReader(`{"jsonrpc":"2.0","method":"net_version","params":[],"id":1}`) - resp, err := client.Post(ethURL, "application/json", rpcReq) + genesisHash, err := getGenesisHash(client, ethURL) + if err == nil { + err = waitForEngineForkchoice(client, engineURL, authToken, genesisHash) + } if err == nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("failed to close response body: %w", err) - } - if resp.StatusCode == http.StatusOK { - req, err := http.NewRequest("POST", engineURL, strings.NewReader(`{"jsonrpc":"2.0","method":"engine_getClientVersionV1","params":[],"id":1}`)) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/json") - secret, err := decodeSecret(jwtSecret) - if err != nil { - return err - } - authToken, err := getAuthToken(secret) - if err != nil { - return err - } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", authToken)) - resp, err := client.Do(req) - if err == nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("failed to close response body: %w", err) - } - if resp.StatusCode == http.StatusOK { - return nil - } - } - } + return nil } + lastErr = err time.Sleep(100 * time.Millisecond) } } } +type jsonRPCError struct { + Code int `json:"code"` + Message string `json:"message"` +} + +func 
getGenesisHash(client *http.Client, ethURL string) (string, error) { + rpcReq := strings.NewReader(`{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x0",false],"id":1}`) + resp, err := client.Post(ethURL, "application/json", rpcReq) + if err != nil { + return "", err + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("eth endpoint returned status %d", resp.StatusCode) + } + + var rpcResp struct { + Result struct { + Hash string `json:"hash"` + } `json:"result"` + Error *jsonRPCError `json:"error,omitempty"` + } + if err := json.NewDecoder(resp.Body).Decode(&rpcResp); err != nil { + return "", fmt.Errorf("decode genesis block response: %w", err) + } + if rpcResp.Error != nil { + return "", fmt.Errorf("eth_getBlockByNumber failed: %s", rpcResp.Error.Message) + } + if rpcResp.Result.Hash == "" { + return "", fmt.Errorf("eth_getBlockByNumber returned empty genesis hash") + } + return rpcResp.Result.Hash, nil +} + +func waitForEngineForkchoice(client *http.Client, engineURL, authToken, genesisHash string) error { + body := fmt.Sprintf(`{"jsonrpc":"2.0","method":"engine_forkchoiceUpdatedV3","params":[{"headBlockHash":%q,"safeBlockHash":%q,"finalizedBlockHash":%q},null],"id":1}`, genesisHash, genesisHash, genesisHash) + req, err := http.NewRequest("POST", engineURL, strings.NewReader(body)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", authToken)) + + resp, err := client.Do(req) + if err != nil { + return err + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("engine endpoint returned status %d", resp.StatusCode) + } + + var rpcResp struct { + Result struct { + PayloadStatus struct { + Status string `json:"status"` + } `json:"payloadStatus"` + } `json:"result"` + Error *jsonRPCError `json:"error,omitempty"` + } + if err := 
json.NewDecoder(resp.Body).Decode(&rpcResp); err != nil { + return fmt.Errorf("decode engine forkchoice response: %w", err) + } + if rpcResp.Error != nil { + return fmt.Errorf("engine_forkchoiceUpdatedV3 failed: %s", rpcResp.Error.Message) + } + if rpcResp.Result.PayloadStatus.Status != "VALID" { + return fmt.Errorf("engine forkchoice status %s", rpcResp.Result.PayloadStatus.Status) + } + return nil +} + // decodeSecret decodes a hex-encoded JWT secret string into a byte slice. func decodeSecret(jwtSecret string) ([]byte, error) { secret, err := hex.DecodeString(strings.TrimPrefix(jwtSecret, "0x")) diff --git a/execution/grpc/README.md b/execution/grpc/README.md index 8647b2ad90..4b3742bc4a 100644 --- a/execution/grpc/README.md +++ b/execution/grpc/README.md @@ -1,10 +1,10 @@ # gRPC Execution Client -This package provides a gRPC-based implementation of the Evolve execution interface. It allows Evolve to communicate with remote execution clients via gRPC using the Connect-RPC framework. +This package provides a gRPC-based implementation of the Evolve execution interface. It allows Evolve to communicate with execution clients via gRPC using the Connect-RPC framework. ## Overview -The gRPC execution client enables separation between the consensus layer (Evolve) and the execution layer by providing a network interface for communication. This allows execution clients to run in separate processes or even on different machines. +The gRPC execution client enables separation between the consensus layer (Evolve) and the execution layer by providing a process boundary for communication. Execution clients can run on different machines over TCP, or on the same machine over a Unix domain socket to avoid TCP/IP overhead. 
## Usage @@ -17,12 +17,21 @@ import ( "github.com/evstack/ev-node/execution/grpc" ) -// Create a new gRPC client -client := grpc.NewClient("http://localhost:50051") +// Create a new gRPC client over TCP +client, err := grpc.NewClient("http://localhost:50051") +if err != nil { + return err +} + +// Or connect to an executor on the same machine over a Unix domain socket +client, err = grpc.NewClient("unix:///tmp/evolve-executor.sock") +if err != nil { + return err +} // Use the client as an execution.Executor ctx := context.Background() -stateRoot, maxBytes, err := client.InitChain(ctx, time.Now(), 1, "my-chain") +stateRoot, err := client.InitChain(ctx, time.Now(), 1, "my-chain") ``` ### Server @@ -42,6 +51,14 @@ handler := grpc.NewExecutorServiceHandler(myExecutor) http.ListenAndServe(":50051", handler) ``` +To serve on a Unix domain socket: + +```go +import "github.com/evstack/ev-node/execution/grpc" + +err := grpc.ListenAndServeUnix("/tmp/evolve-executor.sock", myExecutor) +``` + ## Protocol The gRPC service is defined in `proto/evnode/v1/execution.proto` and provides the following methods: @@ -50,13 +67,17 @@ The gRPC service is defined in `proto/evnode/v1/execution.proto` and provides th - `GetTxs`: Fetch transactions from the mempool - `ExecuteTxs`: Execute transactions and update state - `SetFinal`: Mark a block as finalized +- `GetExecutionInfo`: Return current execution limits +- `FilterTxs`: Validate and filter force-included transactions ## Features - Full implementation of the `execution.Executor` interface - Support for HTTP/1.1 and HTTP/2 (via h2c) +- Support for Unix domain socket connections with `unix:///path/to/socket` - gRPC reflection for debugging and service discovery - Compression for efficient data transfer +- Contiguous `tx_batch` transaction encoding to reduce per-transaction protobuf overhead - Comprehensive error handling and validation ## Testing diff --git a/execution/grpc/client.go b/execution/grpc/client.go index fcc805a1c2..2fadabd83f 
100644 --- a/execution/grpc/client.go +++ b/execution/grpc/client.go @@ -3,9 +3,11 @@ package grpc import ( "context" "crypto/tls" + "errors" "fmt" "net" "net/http" + "strings" "time" "connectrpc.com/connect" @@ -13,8 +15,8 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "github.com/evstack/ev-node/core/execution" - pb "github.com/evstack/ev-node/types/pb/evnode/v1" - "github.com/evstack/ev-node/types/pb/evnode/v1/v1connect" + pb "github.com/evstack/ev-node/execution/grpc/types/pb/evnode/v1" + "github.com/evstack/ev-node/execution/grpc/types/pb/evnode/v1/v1connect" ) // Ensure Client implements the execution.Executor interface @@ -26,6 +28,11 @@ type Client struct { client v1connect.ExecutorServiceClient } +const ( + unixURLPrefix = "unix://" + unixHTTPBaseURL = "http://unix" +) + // newHTTP2Client creates an HTTP/2 client that supports cleartext (h2c) connections. // This is required to connect to native gRPC servers without TLS. func newHTTP2Client() *http.Client { @@ -40,24 +47,65 @@ func newHTTP2Client() *http.Client { } } +// newUnixHTTP2Client creates an HTTP/2 client that speaks h2c over a Unix domain socket. 
+func newUnixHTTP2Client(socketPath string) (*http.Client, error) { + if socketPath == "" { + return nil, errors.New("unix socket path is required") + } + return &http.Client{ + Transport: &http2.Transport{ + AllowHTTP: true, + DialTLSContext: func(ctx context.Context, _, _ string, _ *tls.Config) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, "unix", socketPath) + }, + }, + }, nil +} + +func clientTransportForTarget(target string) (*http.Client, string, error) { + socketPath, ok := unixSocketPath(target) + if ok { + httpClient, err := newUnixHTTP2Client(socketPath) + if err != nil { + return nil, "", err + } + return httpClient, unixHTTPBaseURL, nil + } + return newHTTP2Client(), target, nil +} + +func unixSocketPath(target string) (string, bool) { + if !strings.HasPrefix(target, unixURLPrefix) { + return "", false + } + return strings.TrimPrefix(target, unixURLPrefix), true +} + // NewClient creates a new gRPC execution client. // // Parameters: -// - url: The URL of the gRPC server (e.g., "http://localhost:50051") +// - url: The URL of the gRPC server (e.g., "http://localhost:50051" or "unix:///tmp/executor.sock") // - opts: Optional Connect client options for configuring the connection // // Returns: // - *Client: The initialized gRPC client -func NewClient(url string, opts ...connect.ClientOption) *Client { +// - error: Any client construction error +func NewClient(url string, opts ...connect.ClientOption) (*Client, error) { // Prepend WithGRPC to use the native gRPC protocol (required for tonic/gRPC servers) opts = append([]connect.ClientOption{connect.WithGRPC()}, opts...) + httpClient, targetURL, err := clientTransportForTarget(url) + if err != nil { + return nil, err + } + opts = append([]connect.ClientOption{connect.WithInterceptors(outboundPropagationInterceptor())}, opts...) 
return &Client{ client: v1connect.NewExecutorServiceClient( - newHTTP2Client(), - url, + httpClient, + targetURL, opts..., ), - } + }, nil } // InitChain initializes a new blockchain instance with genesis parameters. @@ -91,7 +139,12 @@ func (c *Client) GetTxs(ctx context.Context) ([][]byte, error) { return nil, fmt.Errorf("grpc client: failed to get txs: %w", err) } - return resp.Msg.Txs, nil + txs, err := decodeTxBatch(resp.Msg.TxBatch) + if err != nil { + return nil, fmt.Errorf("grpc client: invalid get txs response: %w", err) + } + + return txs, nil } // ExecuteTxs processes transactions to produce a new block state. @@ -100,8 +153,13 @@ func (c *Client) GetTxs(ctx context.Context) ([][]byte, error) { // returns the updated state root after execution. The execution service ensures // deterministic execution and validates the state transition. func (c *Client) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { + txBatch, err := encodeTxBatch(txs) + if err != nil { + return execution.ExecuteResult{}, fmt.Errorf("grpc client: failed to encode tx batch: %w", err) + } + req := connect.NewRequest(&pb.ExecuteTxsRequest{ - Txs: txs, + TxBatch: txBatch, BlockHeight: blockHeight, Timestamp: timestamppb.New(timestamp), PrevStateRoot: prevStateRoot, @@ -158,8 +216,13 @@ func (c *Client) GetExecutionInfo(ctx context.Context) (execution.ExecutionInfo, // This method sends transactions to the remote execution service for validation. // Returns a slice of FilterStatus for each transaction. 
func (c *Client) FilterTxs(ctx context.Context, txs [][]byte, maxBytes, maxGas uint64, hasForceIncludedTransaction bool) ([]execution.FilterStatus, error) { + txBatch, err := encodeTxBatch(txs) + if err != nil { + return nil, fmt.Errorf("grpc client: failed to encode tx batch: %w", err) + } + req := connect.NewRequest(&pb.FilterTxsRequest{ - Txs: txs, + TxBatch: txBatch, MaxBytes: maxBytes, MaxGas: maxGas, HasForceIncludedTransaction: hasForceIncludedTransaction, diff --git a/execution/grpc/client_test.go b/execution/grpc/client_test.go index 3a9477823d..aad53ef0fd 100644 --- a/execution/grpc/client_test.go +++ b/execution/grpc/client_test.go @@ -2,7 +2,11 @@ package grpc import ( "context" + "errors" + "net" + "net/http" "net/http/httptest" + "strings" "testing" "time" @@ -66,6 +70,16 @@ func (m *mockExecutor) FilterTxs(ctx context.Context, txs [][]byte, maxBytes, ma return result, nil } +func newTestClient(t *testing.T, url string) *Client { + t.Helper() + + client, err := NewClient(url) + if err != nil { + t.Fatalf("new client: %v", err) + } + return client +} + func TestClient_InitChain(t *testing.T) { ctx := context.Background() expectedStateRoot := []byte("test_state_root") @@ -94,7 +108,7 @@ func TestClient_InitChain(t *testing.T) { defer server.Close() // Create client - client := NewClient(server.URL) + client := newTestClient(t, server.URL) // Test InitChain stateRoot, err := client.InitChain(ctx, genesisTime, initialHeight, chainID) @@ -123,7 +137,7 @@ func TestClient_GetTxs(t *testing.T) { defer server.Close() // Create client - client := NewClient(server.URL) + client := newTestClient(t, server.URL) // Test GetTxs txs, err := client.GetTxs(ctx) @@ -174,7 +188,7 @@ func TestClient_ExecuteTxs(t *testing.T) { defer server.Close() // Create client - client := NewClient(server.URL) + client := newTestClient(t, server.URL) // Test ExecuteTxs result, err := client.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) @@ -206,7 +220,7 @@ func 
TestClient_SetFinal(t *testing.T) { defer server.Close() // Create client - client := NewClient(server.URL) + client := newTestClient(t, server.URL) // Test SetFinal err := client.SetFinal(ctx, blockHeight) @@ -214,3 +228,130 @@ func TestClient_SetFinal(t *testing.T) { t.Fatalf("unexpected error: %v", err) } } + +func TestClient_FilterTxs(t *testing.T) { + ctx := context.Background() + txs := [][]byte{[]byte("tx1"), []byte{}, []byte("tx3")} + maxBytes := uint64(100) + maxGas := uint64(200) + hasForced := true + expectedStatuses := []execution.FilterStatus{ + execution.FilterOK, + execution.FilterRemove, + execution.FilterPostpone, + } + + mockExec := &mockExecutor{ + filterTxsFunc: func(ctx context.Context, txsIn [][]byte, mb, mg uint64, forced bool) ([]execution.FilterStatus, error) { + if len(txsIn) != len(txs) { + t.Fatalf("expected %d txs, got %d", len(txs), len(txsIn)) + } + for i, tx := range txsIn { + if string(tx) != string(txs[i]) { + t.Fatalf("tx %d: expected %q, got %q", i, txs[i], tx) + } + } + if mb != maxBytes { + t.Fatalf("expected max bytes %d, got %d", maxBytes, mb) + } + if mg != maxGas { + t.Fatalf("expected max gas %d, got %d", maxGas, mg) + } + if forced != hasForced { + t.Fatalf("expected forced=%t, got %t", hasForced, forced) + } + return expectedStatuses, nil + }, + } + + handler := NewExecutorServiceHandler(mockExec) + server := httptest.NewServer(handler) + defer server.Close() + + client := newTestClient(t, server.URL) + + statuses, err := client.FilterTxs(ctx, txs, maxBytes, maxGas, hasForced) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(statuses) != len(expectedStatuses) { + t.Fatalf("expected %d statuses, got %d", len(expectedStatuses), len(statuses)) + } + for i, status := range statuses { + if status != expectedStatuses[i] { + t.Fatalf("status %d: expected %v, got %v", i, expectedStatuses[i], status) + } + } +} + +func TestClient_UnixSocket(t *testing.T) { + ctx := context.Background() + socketPath := 
testUnixSocketPath(t) + expectedTxs := [][]byte{[]byte("tx1"), []byte("tx2")} + + mockExec := &mockExecutor{ + getTxsFunc: func(ctx context.Context) ([][]byte, error) { + return expectedTxs, nil + }, + } + + startUnixTestServer(t, mockExec, socketPath) + + client := newTestClient(t, "unix://"+socketPath) + txs, err := client.GetTxs(ctx) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(txs) != len(expectedTxs) { + t.Fatalf("expected %d txs, got %d", len(expectedTxs), len(txs)) + } + for i, tx := range txs { + if string(tx) != string(expectedTxs[i]) { + t.Fatalf("tx %d: expected %q, got %q", i, expectedTxs[i], tx) + } + } +} + +func TestNewClientRejectsEmptyUnixSocketPath(t *testing.T) { + client, err := NewClient("unix://") + if err == nil { + t.Fatalf("expected empty unix socket path error") + } + if client != nil { + t.Fatalf("expected nil client, got %v", client) + } + if !strings.Contains(err.Error(), "unix socket path is required") { + t.Fatalf("expected unix socket path error, got %v", err) + } +} + +func startUnixTestServer(t *testing.T, executor execution.Executor, socketPath string) { + t.Helper() + + listener, err := ListenUnix(socketPath) + if err != nil { + t.Fatalf("listen unix socket: %v", err) + } + + server := &http.Server{Handler: NewExecutorServiceHandler(executor)} + done := make(chan error, 1) + go func() { + err := server.Serve(listener) + if errors.Is(err, http.ErrServerClosed) || errors.Is(err, net.ErrClosed) { + err = nil + } + done <- err + }() + + t.Cleanup(func() { + _ = server.Close() + select { + case err := <-done: + if err != nil { + t.Errorf("unix socket server error: %v", err) + } + case <-time.After(time.Second): + t.Error("unix socket server did not stop") + } + }) +} diff --git a/execution/grpc/go.mod b/execution/grpc/go.mod index 7817bb8f91..24066c7604 100644 --- a/execution/grpc/go.mod +++ b/execution/grpc/go.mod @@ -10,10 +10,21 @@ replace ( require ( connectrpc.com/connect v1.19.2 
connectrpc.com/grpcreflect v1.3.0 - github.com/evstack/ev-node v1.1.0 github.com/evstack/ev-node/core v1.0.0 + go.opentelemetry.io/otel v1.43.0 + go.opentelemetry.io/otel/sdk v1.43.0 + go.opentelemetry.io/otel/trace v1.43.0 golang.org/x/net v0.53.0 google.golang.org/protobuf v1.36.11 ) -require golang.org/x/text v0.36.0 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/uuid v1.6.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel/metric v1.43.0 // indirect + golang.org/x/sys v0.43.0 // indirect + golang.org/x/text v0.36.0 // indirect +) diff --git a/execution/grpc/go.sum b/execution/grpc/go.sum index 2139063fa3..afe63e0b95 100644 --- a/execution/grpc/go.sum +++ b/execution/grpc/go.sum @@ -2,11 +2,44 @@ connectrpc.com/connect v1.19.2 h1:McQ83FGdzL+t60peksi0gXC7MQ/iLKgLduAnThbM0mo= connectrpc.com/connect v1.19.2/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w= connectrpc.com/grpcreflect v1.3.0 h1:Y4V+ACf8/vOb1XOc251Qun7jMB75gCUNw6llvB9csXc= connectrpc.com/grpcreflect v1.3.0/go.mod h1:nfloOtCS8VUQOQ1+GTdFzVg2CJo4ZGaat8JIovCtDYs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= +go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= +go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/net v0.53.0 
h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/execution/grpc/handler.go b/execution/grpc/handler.go index 8c785987fc..76db8e87f2 100644 --- a/execution/grpc/handler.go +++ b/execution/grpc/handler.go @@ -10,7 +10,7 @@ import ( "golang.org/x/net/http2/h2c" "github.com/evstack/ev-node/core/execution" - "github.com/evstack/ev-node/types/pb/evnode/v1/v1connect" + "github.com/evstack/ev-node/execution/grpc/types/pb/evnode/v1/v1connect" ) // NewExecutorServiceHandler creates a new HTTP handler for the ExecutorService. @@ -25,6 +25,7 @@ import ( // - http.Handler: The configured HTTP handler func NewExecutorServiceHandler(executor execution.Executor, opts ...connect.HandlerOption) http.Handler { server := NewServer(executor) + opts = append([]connect.HandlerOption{connect.WithInterceptors(inboundPropagationInterceptor())}, opts...) 
mux := http.NewServeMux() diff --git a/execution/grpc/otel_propagation.go b/execution/grpc/otel_propagation.go new file mode 100644 index 0000000000..da4d1595ed --- /dev/null +++ b/execution/grpc/otel_propagation.go @@ -0,0 +1,29 @@ +package grpc + +import ( + "context" + + "connectrpc.com/connect" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/propagation" +) + +func inboundPropagationInterceptor() connect.UnaryInterceptorFunc { + return connect.UnaryInterceptorFunc(func(next connect.UnaryFunc) connect.UnaryFunc { + return func(ctx context.Context, req connect.AnyRequest) (connect.AnyResponse, error) { + prop := otel.GetTextMapPropagator() + ctx = prop.Extract(ctx, propagation.HeaderCarrier(req.Header())) + return next(ctx, req) + } + }) +} + +func outboundPropagationInterceptor() connect.UnaryInterceptorFunc { + return connect.UnaryInterceptorFunc(func(next connect.UnaryFunc) connect.UnaryFunc { + return func(ctx context.Context, req connect.AnyRequest) (connect.AnyResponse, error) { + prop := otel.GetTextMapPropagator() + prop.Inject(ctx, propagation.HeaderCarrier(req.Header())) + return next(ctx, req) + } + }) +} diff --git a/execution/grpc/otel_propagation_test.go b/execution/grpc/otel_propagation_test.go new file mode 100644 index 0000000000..5815732f2a --- /dev/null +++ b/execution/grpc/otel_propagation_test.go @@ -0,0 +1,226 @@ +package grpc + +import ( + "context" + "net/http/httptest" + "testing" + "time" + + "connectrpc.com/connect" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/propagation" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + "go.opentelemetry.io/otel/trace" + + "github.com/evstack/ev-node/core/execution" +) + +func setupTracer(t *testing.T) (*tracetest.SpanRecorder, func()) { + t.Helper() + rec := tracetest.NewSpanRecorder() + tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(rec)) + oldTP := otel.GetTracerProvider() + 
oldProp := otel.GetTextMapPropagator() + otel.SetTracerProvider(tp) + otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) + return rec, func() { + _ = tp.Shutdown(context.Background()) + otel.SetTracerProvider(oldTP) + otel.SetTextMapPropagator(oldProp) + } +} + +func TestInboundMetadataCreatesChildSpanWithSameTraceID(t *testing.T) { + rec, cleanup := setupTracer(t) + defer cleanup() + + tracer := otel.Tracer("test") + parentCtx, parent := tracer.Start(context.Background(), "parent") + defer parent.End() + parentTraceID := parent.SpanContext().TraceID() + + mockExec := &mockExecutor{getTxsFunc: func(ctx context.Context) ([][]byte, error) { + _, span := tracer.Start(ctx, "server-child") + span.End() + return [][]byte{}, nil + }} + + handler := NewExecutorServiceHandler(mockExec) + ts := httptest.NewServer(handler) + defer ts.Close() + + client, err := NewClient(ts.URL) + if err != nil { + t.Fatalf("NewClient failed: %v", err) + } + + _, err = client.GetTxs(parentCtx) + if err != nil { + t.Fatalf("GetTxs failed: %v", err) + } + + var found bool + for _, s := range rec.Ended() { + if s.Name() == "server-child" { + found = true + if s.SpanContext().TraceID() != parentTraceID { + t.Fatalf("trace id mismatch: got %s want %s", s.SpanContext().TraceID(), parentTraceID) + } + } + } + if !found { + t.Fatalf("server-child span not found") + } +} + +func TestOutboundGRPCCallCarriesTraceparentMetadata(t *testing.T) { + rec, cleanup := setupTracer(t) + _ = rec + defer cleanup() + + tracer := otel.Tracer("test") + ctx, parent := tracer.Start(context.Background(), "parent") + defer parent.End() + + gotTraceparent := "" + captureHeader := connect.UnaryInterceptorFunc(func(next connect.UnaryFunc) connect.UnaryFunc { + return func(ctx context.Context, req connect.AnyRequest) (connect.AnyResponse, error) { + gotTraceparent = req.Header().Get("traceparent") + return next(ctx, req) + } + }) + + mockExec := 
&mockExecutor{} + handler := NewExecutorServiceHandler(mockExec, connect.WithInterceptors(captureHeader)) + ts := httptest.NewServer(handler) + defer ts.Close() + + client, err := NewClient(ts.URL) + if err != nil { + t.Fatalf("NewClient failed: %v", err) + } + + if _, err = client.GetTxs(ctx); err != nil { + t.Fatalf("GetTxs failed: %v", err) + } + if gotTraceparent == "" { + t.Fatalf("expected traceparent metadata to be propagated") + } +} + +func TestOutboundGRPCCallCarriesPropagationHeaders(t *testing.T) { + rec, cleanup := setupTracer(t) + _ = rec + defer cleanup() + + tracer := otel.Tracer("test") + ctx, parent := tracer.Start(context.Background(), "parent") + defer parent.End() + member, err := baggage.NewMember("tenant", "alpha") + if err != nil { + t.Fatalf("failed to create baggage member: %v", err) + } + bg, err := baggage.New(member) + if err != nil { + t.Fatalf("failed to create baggage: %v", err) + } + ctx = baggage.ContextWithBaggage(ctx, bg) + + var gotTraceparent string + var gotBaggage string + captureHeader := connect.UnaryInterceptorFunc(func(next connect.UnaryFunc) connect.UnaryFunc { + return func(ctx context.Context, req connect.AnyRequest) (connect.AnyResponse, error) { + gotTraceparent = req.Header().Get("traceparent") + gotBaggage = req.Header().Get("baggage") + return next(ctx, req) + } + }) + + mockExec := &mockExecutor{} + handler := NewExecutorServiceHandler(mockExec, connect.WithInterceptors(captureHeader)) + ts := httptest.NewServer(handler) + defer ts.Close() + + client, err := NewClient(ts.URL) + if err != nil { + t.Fatalf("NewClient failed: %v", err) + } + + if _, err = client.GetTxs(ctx); err != nil { + t.Fatalf("GetTxs failed: %v", err) + } + + if gotTraceparent == "" { + t.Fatalf("expected traceparent metadata to be propagated") + } + if gotBaggage == "" { + t.Fatalf("expected baggage metadata to be propagated") + } +} + +func TestEndToEndParentChildAcrossServerClientHop(t *testing.T) { + rec, cleanup := setupTracer(t) + defer 
cleanup() + + tracer := otel.Tracer("test") + var midSpan trace.Span + + downstreamExec := &mockExecutor{getExecutionInfoFunc: func(ctx context.Context) (executionInfo execution.ExecutionInfo, err error) { + _, span := tracer.Start(ctx, "downstream-child") + span.End() + return execution.ExecutionInfo{MaxGas: 1}, nil + }} + downstreamHandler := NewExecutorServiceHandler(downstreamExec) + downstreamSrv := httptest.NewServer(downstreamHandler) + defer downstreamSrv.Close() + downstreamClient, err := NewClient(downstreamSrv.URL) + if err != nil { + t.Fatalf("NewClient failed: %v", err) + } + + upstreamExec := &mockExecutor{getTxsFunc: func(ctx context.Context) ([][]byte, error) { + ctx, span := tracer.Start(ctx, "upstream-mid") + midSpan = span + defer span.End() + _, err := downstreamClient.GetExecutionInfo(ctx) + if err != nil { + return nil, err + } + return [][]byte{}, nil + }} + upstreamHandler := NewExecutorServiceHandler(upstreamExec) + upstreamSrv := httptest.NewServer(upstreamHandler) + defer upstreamSrv.Close() + + client, err := NewClient(upstreamSrv.URL) + if err != nil { + t.Fatalf("NewClient failed: %v", err) + } + + rootCtx, root := tracer.Start(context.Background(), "root") + defer root.End() + if _, err := client.GetTxs(rootCtx); err != nil { + t.Fatalf("GetTxs failed: %v", err) + } + + time.Sleep(10 * time.Millisecond) + + rootTraceID := root.SpanContext().TraceID() + if midSpan.SpanContext().TraceID() != rootTraceID { + t.Fatalf("mid span trace id mismatch") + } + var found bool + for _, s := range rec.Ended() { + if s.Name() == "downstream-child" { + found = true + if s.SpanContext().TraceID() != rootTraceID { + t.Fatalf("downstream trace id mismatch") + } + } + } + if !found { + t.Fatalf("downstream-child span not found") + } +} diff --git a/proto/evnode/v1/execution.proto b/execution/grpc/proto/evnode/v1/execution.proto similarity index 84% rename from proto/evnode/v1/execution.proto rename to execution/grpc/proto/evnode/v1/execution.proto index 
13d19db336..c982f2f6af 100644 --- a/proto/evnode/v1/execution.proto +++ b/execution/grpc/proto/evnode/v1/execution.proto @@ -3,9 +3,9 @@ package evnode.v1; import "google/protobuf/timestamp.proto"; -option go_package = "github.com/evstack/ev-node/types/pb/evnode/v1"; +option go_package = "github.com/evstack/ev-node/execution/grpc/types/pb/evnode/v1"; -// ExecutorService defines the execution layer interface for EVNode +// ExecutorService defines the execution layer interface for execution/grpc service ExecutorService { // InitChain initializes a new blockchain instance with genesis parameters rpc InitChain(InitChainRequest) returns (InitChainResponse) {} @@ -49,16 +49,28 @@ message GetTxsRequest { // Empty for now, may include filtering criteria in the future } +// TxBatch stores ordered transactions in one contiguous bytes buffer. +message TxBatch { + // Concatenated transaction bytes. + bytes data = 1; + + // Byte length for each transaction in order. + repeated uint32 tx_sizes = 2; +} + // GetTxsResponse contains the available transactions message GetTxsResponse { - // Slice of valid transactions from mempool - repeated bytes txs = 1; + reserved 1; + reserved "txs"; + + // Valid transactions from mempool in contiguous batch form. + TxBatch tx_batch = 2; } // ExecuteTxsRequest contains transactions and block context for execution message ExecuteTxsRequest { - // Ordered list of transactions to execute - repeated bytes txs = 1; + reserved 1; + reserved "txs"; // Height of block being created (must be > 0) uint64 block_height = 2; @@ -68,6 +80,9 @@ message ExecuteTxsRequest { // Previous block's state root hash bytes prev_state_root = 4; + + // Ordered transactions to execute in contiguous batch form. 
+ TxBatch tx_batch = 5; } // ExecuteTxsResponse contains the result of transaction execution @@ -120,8 +135,8 @@ enum FilterStatus { // FilterTxsRequest contains transactions to validate and filter message FilterTxsRequest { - // All transactions (force-included + mempool) - repeated bytes txs = 1; + reserved 1; + reserved "txs"; // Maximum cumulative size allowed (0 means no size limit) uint64 max_bytes = 2; @@ -131,6 +146,9 @@ message FilterTxsRequest { // Whether force-included transactions are present bool has_force_included_transaction = 4; + + // All transactions (force-included + mempool) in contiguous batch form. + TxBatch tx_batch = 5; } // FilterTxsResponse contains the filter status for each transaction diff --git a/execution/grpc/server.go b/execution/grpc/server.go index 1123d60fe7..18b5558b3a 100644 --- a/execution/grpc/server.go +++ b/execution/grpc/server.go @@ -8,7 +8,7 @@ import ( "connectrpc.com/connect" "github.com/evstack/ev-node/core/execution" - pb "github.com/evstack/ev-node/types/pb/evnode/v1" + pb "github.com/evstack/ev-node/execution/grpc/types/pb/evnode/v1" ) // Server is a gRPC server that wraps an execution.Executor implementation. 
@@ -77,8 +77,13 @@ func (s *Server) GetTxs( return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to get txs: %w", err)) } + txBatch, err := encodeTxBatch(txs) + if err != nil { + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to encode tx batch: %w", err)) + } + return connect.NewResponse(&pb.GetTxsResponse{ - Txs: txs, + TxBatch: txBatch, }), nil } @@ -102,9 +107,14 @@ func (s *Server) ExecuteTxs( return nil, connect.NewError(connect.CodeInvalidArgument, errors.New("prev_state_root is required")) } + txs, err := decodeTxBatch(req.Msg.TxBatch) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("invalid tx_batch: %w", err)) + } + result, err := s.executor.ExecuteTxs( ctx, - req.Msg.Txs, + txs, req.Msg.BlockHeight, req.Msg.Timestamp.AsTime(), req.Msg.PrevStateRoot, @@ -164,7 +174,12 @@ func (s *Server) FilterTxs( ctx context.Context, req *connect.Request[pb.FilterTxsRequest], ) (*connect.Response[pb.FilterTxsResponse], error) { - result, err := s.executor.FilterTxs(ctx, req.Msg.Txs, req.Msg.MaxBytes, req.Msg.MaxGas, req.Msg.HasForceIncludedTransaction) + txs, err := decodeTxBatch(req.Msg.TxBatch) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("invalid tx_batch: %w", err)) + } + + result, err := s.executor.FilterTxs(ctx, txs, req.Msg.MaxBytes, req.Msg.MaxGas, req.Msg.HasForceIncludedTransaction) if err != nil { return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to filter transactions: %w", err)) } diff --git a/execution/grpc/server_test.go b/execution/grpc/server_test.go index 559d8457b2..0fde2d2800 100644 --- a/execution/grpc/server_test.go +++ b/execution/grpc/server_test.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "github.com/evstack/ev-node/core/execution" - pb "github.com/evstack/ev-node/types/pb/evnode/v1" + pb "github.com/evstack/ev-node/execution/grpc/types/pb/evnode/v1" ) func 
TestServer_InitChain(t *testing.T) { @@ -173,8 +173,17 @@ func TestServer_GetTxs(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - if len(resp.Msg.Txs) != len(expectedTxs) { - t.Fatalf("expected %d txs, got %d", len(expectedTxs), len(resp.Msg.Txs)) + txs, err := decodeTxBatch(resp.Msg.TxBatch) + if err != nil { + t.Fatalf("unexpected tx batch decode error: %v", err) + } + if len(txs) != len(expectedTxs) { + t.Fatalf("expected %d txs, got %d", len(expectedTxs), len(txs)) + } + for i := range expectedTxs { + if string(txs[i]) != string(expectedTxs[i]) { + t.Fatalf("tx batch tx %d: expected %q, got %q", i, expectedTxs[i], txs[i]) + } } }) } @@ -198,7 +207,7 @@ func TestServer_ExecuteTxs(t *testing.T) { { name: "success", req: &pb.ExecuteTxsRequest{ - Txs: txs, + TxBatch: mustEncodeTxBatch(t, txs), BlockHeight: blockHeight, Timestamp: timestamppb.New(timestamp), PrevStateRoot: prevStateRoot, @@ -211,7 +220,7 @@ func TestServer_ExecuteTxs(t *testing.T) { { name: "missing block height", req: &pb.ExecuteTxsRequest{ - Txs: txs, + TxBatch: mustEncodeTxBatch(t, txs), Timestamp: timestamppb.New(timestamp), PrevStateRoot: prevStateRoot, }, @@ -221,7 +230,7 @@ func TestServer_ExecuteTxs(t *testing.T) { { name: "missing timestamp", req: &pb.ExecuteTxsRequest{ - Txs: txs, + TxBatch: mustEncodeTxBatch(t, txs), BlockHeight: blockHeight, PrevStateRoot: prevStateRoot, }, @@ -231,17 +240,28 @@ func TestServer_ExecuteTxs(t *testing.T) { { name: "missing prev state root", req: &pb.ExecuteTxsRequest{ - Txs: txs, + TxBatch: mustEncodeTxBatch(t, txs), BlockHeight: blockHeight, Timestamp: timestamppb.New(timestamp), }, wantErr: true, wantCode: connect.CodeInvalidArgument, }, + { + name: "invalid tx batch", + req: &pb.ExecuteTxsRequest{ + TxBatch: &pb.TxBatch{Data: []byte("tx"), TxSizes: []uint32{3}}, + BlockHeight: blockHeight, + Timestamp: timestamppb.New(timestamp), + PrevStateRoot: prevStateRoot, + }, + wantErr: true, + wantCode: connect.CodeInvalidArgument, + }, { name: 
"executor error", req: &pb.ExecuteTxsRequest{ - Txs: txs, + TxBatch: mustEncodeTxBatch(t, txs), BlockHeight: blockHeight, Timestamp: timestamppb.New(timestamp), PrevStateRoot: prevStateRoot, @@ -363,3 +383,96 @@ func TestServer_SetFinal(t *testing.T) { }) } } + +func TestServer_FilterTxs(t *testing.T) { + ctx := context.Background() + txs := [][]byte{[]byte("tx1"), []byte("tx2")} + expectedStatuses := []execution.FilterStatus{execution.FilterOK, execution.FilterPostpone} + + tests := []struct { + name string + req *pb.FilterTxsRequest + mockFunc func(ctx context.Context, txs [][]byte, maxBytes, maxGas uint64, hasForceIncludedTransaction bool) ([]execution.FilterStatus, error) + wantErr bool + wantCode connect.Code + }{ + { + name: "success", + req: &pb.FilterTxsRequest{ + TxBatch: mustEncodeTxBatch(t, txs), + MaxBytes: 100, + MaxGas: 200, + HasForceIncludedTransaction: true, + }, + mockFunc: func(ctx context.Context, txsIn [][]byte, maxBytes, maxGas uint64, forced bool) ([]execution.FilterStatus, error) { + if len(txsIn) != len(txs) { + t.Fatalf("expected %d txs, got %d", len(txs), len(txsIn)) + } + if maxBytes != 100 { + t.Fatalf("expected max bytes 100, got %d", maxBytes) + } + if maxGas != 200 { + t.Fatalf("expected max gas 200, got %d", maxGas) + } + if !forced { + t.Fatalf("expected forced transaction flag") + } + return expectedStatuses, nil + }, + wantErr: false, + }, + { + name: "invalid tx batch", + req: &pb.FilterTxsRequest{ + TxBatch: &pb.TxBatch{Data: []byte("tx"), TxSizes: []uint32{3}}, + }, + wantErr: true, + wantCode: connect.CodeInvalidArgument, + }, + { + name: "executor error", + req: &pb.FilterTxsRequest{ + TxBatch: mustEncodeTxBatch(t, txs), + }, + mockFunc: func(ctx context.Context, txs [][]byte, maxBytes, maxGas uint64, hasForceIncludedTransaction bool) ([]execution.FilterStatus, error) { + return nil, errors.New("filter failed") + }, + wantErr: true, + wantCode: connect.CodeInternal, + }, + } + + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + mockExec := &mockExecutor{ + filterTxsFunc: tt.mockFunc, + } + server := NewServer(mockExec) + + req := connect.NewRequest(tt.req) + resp, err := server.FilterTxs(ctx, req) + + if tt.wantErr { + if err == nil { + t.Fatalf("expected error but got none") + } + var connectErr *connect.Error + if errors.As(err, &connectErr) { + if connectErr.Code() != tt.wantCode { + t.Errorf("expected error code %v, got %v", tt.wantCode, connectErr.Code()) + } + } else { + t.Errorf("expected connect error, got %v", err) + } + return + } + + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(resp.Msg.Statuses) != len(expectedStatuses) { + t.Fatalf("expected %d statuses, got %d", len(expectedStatuses), len(resp.Msg.Statuses)) + } + }) + } +} diff --git a/execution/grpc/tx_batch.go b/execution/grpc/tx_batch.go new file mode 100644 index 0000000000..e80d7abbc6 --- /dev/null +++ b/execution/grpc/tx_batch.go @@ -0,0 +1,74 @@ +package grpc + +import ( + "fmt" + + pb "github.com/evstack/ev-node/execution/grpc/types/pb/evnode/v1" +) + +// maxTxBatchTxSize is the largest transaction length representable in TxBatch.TxSizes: +// 4 GiB - 1 byte, or 4,294,967,295 bytes. +const maxTxBatchTxSize = uint64(1<<32 - 1) + +func encodeTxBatch(txs [][]byte) (*pb.TxBatch, error) { + if len(txs) == 0 { + return &pb.TxBatch{}, nil + } + + maxInt := uint64(int(^uint(0) >> 1)) + var total uint64 + txSizes := make([]uint32, len(txs)) + for i, tx := range txs { + txLen := uint64(len(tx)) + if txLen > maxTxBatchTxSize { + return nil, fmt.Errorf("tx %d size %d exceeds uint32", i, txLen) + } + total += txLen + if total > maxInt { + return nil, fmt.Errorf("tx batch size %d exceeds int", total) + } + txSizes[i] = uint32(txLen) + } + + data := make([]byte, 0, int(total)) + for _, tx := range txs { + data = append(data, tx...) 
+ } + + return &pb.TxBatch{ + Data: data, + TxSizes: txSizes, + }, nil +} + +func decodeTxBatch(batch *pb.TxBatch) ([][]byte, error) { + if batch == nil { + return nil, nil + } + if len(batch.TxSizes) == 0 { + if len(batch.Data) != 0 { + return nil, fmt.Errorf("tx batch has %d data bytes but no tx sizes", len(batch.Data)) + } + return nil, nil + } + + var total uint64 + for i, txSize := range batch.TxSizes { + total += uint64(txSize) + if total > uint64(len(batch.Data)) { + return nil, fmt.Errorf("tx sizes exceed data length at index %d", i) + } + } + if total != uint64(len(batch.Data)) { + return nil, fmt.Errorf("tx sizes total %d does not match data length %d", total, len(batch.Data)) + } + + txs := make([][]byte, len(batch.TxSizes)) + offset := 0 + for i, txSize := range batch.TxSizes { + end := offset + int(txSize) + txs[i] = batch.Data[offset:end:end] + offset = end + } + return txs, nil +} diff --git a/execution/grpc/tx_batch_test.go b/execution/grpc/tx_batch_test.go new file mode 100644 index 0000000000..7e21055651 --- /dev/null +++ b/execution/grpc/tx_batch_test.go @@ -0,0 +1,79 @@ +package grpc + +import ( + "bytes" + "testing" + + pb "github.com/evstack/ev-node/execution/grpc/types/pb/evnode/v1" +) + +func mustEncodeTxBatch(t *testing.T, txs [][]byte) *pb.TxBatch { + t.Helper() + + batch, err := encodeTxBatch(txs) + if err != nil { + t.Fatalf("encode tx batch: %v", err) + } + return batch +} + +func TestEncodeDecodeTxBatch(t *testing.T) { + txs := [][]byte{[]byte("tx1"), nil, []byte("tx3"), []byte{}} + + batch := mustEncodeTxBatch(t, txs) + decoded, err := decodeTxBatch(batch) + if err != nil { + t.Fatalf("decode tx batch: %v", err) + } + if len(decoded) != len(txs) { + t.Fatalf("expected %d txs, got %d", len(txs), len(decoded)) + } + for i := range txs { + if !bytes.Equal(decoded[i], txs[i]) { + t.Fatalf("tx %d: expected %q, got %q", i, txs[i], decoded[i]) + } + } + + decoded[0] = append(decoded[0], 'x') + if !bytes.Equal(decoded[2], txs[2]) { + 
t.Fatalf("decoded tx slices should not have capacity overlap") + } +} + +func TestDecodeTxBatchRejectsMalformedInput(t *testing.T) { + tests := []struct { + name string + batch *pb.TxBatch + }{ + { + name: "data without sizes", + batch: &pb.TxBatch{Data: []byte("tx")}, + }, + { + name: "sizes exceed data", + batch: &pb.TxBatch{Data: []byte("tx"), TxSizes: []uint32{3}}, + }, + { + name: "sizes do not consume data", + batch: &pb.TxBatch{Data: []byte("tx"), TxSizes: []uint32{1}}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if _, err := decodeTxBatch(tt.batch); err == nil { + t.Fatalf("expected decode error") + } + }) + } +} + +func TestDecodeTxBatchNil(t *testing.T) { + txs, err := decodeTxBatch(nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(txs) != 0 { + t.Fatalf("expected nil tx_batch to decode to empty txs, got %d txs", len(txs)) + } +} diff --git a/types/pb/evnode/v1/execution.pb.go b/execution/grpc/types/pb/evnode/v1/execution.pb.go similarity index 82% rename from types/pb/evnode/v1/execution.pb.go rename to execution/grpc/types/pb/evnode/v1/execution.pb.go index 86d2ae8031..49d8f53b45 100644 --- a/types/pb/evnode/v1/execution.pb.go +++ b/execution/grpc/types/pb/evnode/v1/execution.pb.go @@ -222,18 +222,73 @@ func (*GetTxsRequest) Descriptor() ([]byte, []int) { return file_evnode_v1_execution_proto_rawDescGZIP(), []int{2} } +// TxBatch stores ordered transactions in one contiguous bytes buffer. +type TxBatch struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Concatenated transaction bytes. + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // Byte length for each transaction in order. 
+ TxSizes []uint32 `protobuf:"varint,2,rep,packed,name=tx_sizes,json=txSizes,proto3" json:"tx_sizes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TxBatch) Reset() { + *x = TxBatch{} + mi := &file_evnode_v1_execution_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TxBatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TxBatch) ProtoMessage() {} + +func (x *TxBatch) ProtoReflect() protoreflect.Message { + mi := &file_evnode_v1_execution_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TxBatch.ProtoReflect.Descriptor instead. +func (*TxBatch) Descriptor() ([]byte, []int) { + return file_evnode_v1_execution_proto_rawDescGZIP(), []int{3} +} + +func (x *TxBatch) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *TxBatch) GetTxSizes() []uint32 { + if x != nil { + return x.TxSizes + } + return nil +} + // GetTxsResponse contains the available transactions type GetTxsResponse struct { state protoimpl.MessageState `protogen:"open.v1"` - // Slice of valid transactions from mempool - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + // Valid transactions from mempool in contiguous batch form. 
+ TxBatch *TxBatch `protobuf:"bytes,2,opt,name=tx_batch,json=txBatch,proto3" json:"tx_batch,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *GetTxsResponse) Reset() { *x = GetTxsResponse{} - mi := &file_evnode_v1_execution_proto_msgTypes[3] + mi := &file_evnode_v1_execution_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -245,7 +300,7 @@ func (x *GetTxsResponse) String() string { func (*GetTxsResponse) ProtoMessage() {} func (x *GetTxsResponse) ProtoReflect() protoreflect.Message { - mi := &file_evnode_v1_execution_proto_msgTypes[3] + mi := &file_evnode_v1_execution_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -258,12 +313,12 @@ func (x *GetTxsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTxsResponse.ProtoReflect.Descriptor instead. func (*GetTxsResponse) Descriptor() ([]byte, []int) { - return file_evnode_v1_execution_proto_rawDescGZIP(), []int{3} + return file_evnode_v1_execution_proto_rawDescGZIP(), []int{4} } -func (x *GetTxsResponse) GetTxs() [][]byte { +func (x *GetTxsResponse) GetTxBatch() *TxBatch { if x != nil { - return x.Txs + return x.TxBatch } return nil } @@ -271,21 +326,21 @@ func (x *GetTxsResponse) GetTxs() [][]byte { // ExecuteTxsRequest contains transactions and block context for execution type ExecuteTxsRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - // Ordered list of transactions to execute - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` // Height of block being created (must be > 0) BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` // Block creation time in UTC Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Previous block's state root hash PrevStateRoot []byte 
`protobuf:"bytes,4,opt,name=prev_state_root,json=prevStateRoot,proto3" json:"prev_state_root,omitempty"` + // Ordered transactions to execute in contiguous batch form. + TxBatch *TxBatch `protobuf:"bytes,5,opt,name=tx_batch,json=txBatch,proto3" json:"tx_batch,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *ExecuteTxsRequest) Reset() { *x = ExecuteTxsRequest{} - mi := &file_evnode_v1_execution_proto_msgTypes[4] + mi := &file_evnode_v1_execution_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -297,7 +352,7 @@ func (x *ExecuteTxsRequest) String() string { func (*ExecuteTxsRequest) ProtoMessage() {} func (x *ExecuteTxsRequest) ProtoReflect() protoreflect.Message { - mi := &file_evnode_v1_execution_proto_msgTypes[4] + mi := &file_evnode_v1_execution_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -310,14 +365,7 @@ func (x *ExecuteTxsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteTxsRequest.ProtoReflect.Descriptor instead. 
func (*ExecuteTxsRequest) Descriptor() ([]byte, []int) { - return file_evnode_v1_execution_proto_rawDescGZIP(), []int{4} -} - -func (x *ExecuteTxsRequest) GetTxs() [][]byte { - if x != nil { - return x.Txs - } - return nil + return file_evnode_v1_execution_proto_rawDescGZIP(), []int{5} } func (x *ExecuteTxsRequest) GetBlockHeight() uint64 { @@ -341,6 +389,13 @@ func (x *ExecuteTxsRequest) GetPrevStateRoot() []byte { return nil } +func (x *ExecuteTxsRequest) GetTxBatch() *TxBatch { + if x != nil { + return x.TxBatch + } + return nil +} + // ExecuteTxsResponse contains the result of transaction execution type ExecuteTxsResponse struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -357,7 +412,7 @@ type ExecuteTxsResponse struct { func (x *ExecuteTxsResponse) Reset() { *x = ExecuteTxsResponse{} - mi := &file_evnode_v1_execution_proto_msgTypes[5] + mi := &file_evnode_v1_execution_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -369,7 +424,7 @@ func (x *ExecuteTxsResponse) String() string { func (*ExecuteTxsResponse) ProtoMessage() {} func (x *ExecuteTxsResponse) ProtoReflect() protoreflect.Message { - mi := &file_evnode_v1_execution_proto_msgTypes[5] + mi := &file_evnode_v1_execution_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -382,7 +437,7 @@ func (x *ExecuteTxsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteTxsResponse.ProtoReflect.Descriptor instead. 
func (*ExecuteTxsResponse) Descriptor() ([]byte, []int) { - return file_evnode_v1_execution_proto_rawDescGZIP(), []int{5} + return file_evnode_v1_execution_proto_rawDescGZIP(), []int{6} } func (x *ExecuteTxsResponse) GetUpdatedStateRoot() []byte { @@ -417,7 +472,7 @@ type SetFinalRequest struct { func (x *SetFinalRequest) Reset() { *x = SetFinalRequest{} - mi := &file_evnode_v1_execution_proto_msgTypes[6] + mi := &file_evnode_v1_execution_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -429,7 +484,7 @@ func (x *SetFinalRequest) String() string { func (*SetFinalRequest) ProtoMessage() {} func (x *SetFinalRequest) ProtoReflect() protoreflect.Message { - mi := &file_evnode_v1_execution_proto_msgTypes[6] + mi := &file_evnode_v1_execution_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -442,7 +497,7 @@ func (x *SetFinalRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetFinalRequest.ProtoReflect.Descriptor instead. 
func (*SetFinalRequest) Descriptor() ([]byte, []int) { - return file_evnode_v1_execution_proto_rawDescGZIP(), []int{6} + return file_evnode_v1_execution_proto_rawDescGZIP(), []int{7} } func (x *SetFinalRequest) GetBlockHeight() uint64 { @@ -461,7 +516,7 @@ type SetFinalResponse struct { func (x *SetFinalResponse) Reset() { *x = SetFinalResponse{} - mi := &file_evnode_v1_execution_proto_msgTypes[7] + mi := &file_evnode_v1_execution_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -473,7 +528,7 @@ func (x *SetFinalResponse) String() string { func (*SetFinalResponse) ProtoMessage() {} func (x *SetFinalResponse) ProtoReflect() protoreflect.Message { - mi := &file_evnode_v1_execution_proto_msgTypes[7] + mi := &file_evnode_v1_execution_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -486,7 +541,7 @@ func (x *SetFinalResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SetFinalResponse.ProtoReflect.Descriptor instead. 
func (*SetFinalResponse) Descriptor() ([]byte, []int) { - return file_evnode_v1_execution_proto_rawDescGZIP(), []int{7} + return file_evnode_v1_execution_proto_rawDescGZIP(), []int{8} } // GetExecutionInfoRequest requests execution layer parameters @@ -498,7 +553,7 @@ type GetExecutionInfoRequest struct { func (x *GetExecutionInfoRequest) Reset() { *x = GetExecutionInfoRequest{} - mi := &file_evnode_v1_execution_proto_msgTypes[8] + mi := &file_evnode_v1_execution_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -510,7 +565,7 @@ func (x *GetExecutionInfoRequest) String() string { func (*GetExecutionInfoRequest) ProtoMessage() {} func (x *GetExecutionInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_evnode_v1_execution_proto_msgTypes[8] + mi := &file_evnode_v1_execution_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -523,7 +578,7 @@ func (x *GetExecutionInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetExecutionInfoRequest.ProtoReflect.Descriptor instead. 
func (*GetExecutionInfoRequest) Descriptor() ([]byte, []int) { - return file_evnode_v1_execution_proto_rawDescGZIP(), []int{8} + return file_evnode_v1_execution_proto_rawDescGZIP(), []int{9} } // GetExecutionInfoResponse contains execution layer parameters @@ -541,7 +596,7 @@ type GetExecutionInfoResponse struct { func (x *GetExecutionInfoResponse) Reset() { *x = GetExecutionInfoResponse{} - mi := &file_evnode_v1_execution_proto_msgTypes[9] + mi := &file_evnode_v1_execution_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -553,7 +608,7 @@ func (x *GetExecutionInfoResponse) String() string { func (*GetExecutionInfoResponse) ProtoMessage() {} func (x *GetExecutionInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_evnode_v1_execution_proto_msgTypes[9] + mi := &file_evnode_v1_execution_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -566,7 +621,7 @@ func (x *GetExecutionInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetExecutionInfoResponse.ProtoReflect.Descriptor instead. 
func (*GetExecutionInfoResponse) Descriptor() ([]byte, []int) { - return file_evnode_v1_execution_proto_rawDescGZIP(), []int{9} + return file_evnode_v1_execution_proto_rawDescGZIP(), []int{10} } func (x *GetExecutionInfoResponse) GetMaxGas() uint64 { @@ -586,21 +641,21 @@ func (x *GetExecutionInfoResponse) GetNextProposerAddress() []byte { // FilterTxsRequest contains transactions to validate and filter type FilterTxsRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - // All transactions (force-included + mempool) - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` // Maximum cumulative size allowed (0 means no size limit) MaxBytes uint64 `protobuf:"varint,2,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` // Maximum cumulative gas allowed (0 means no gas limit) MaxGas uint64 `protobuf:"varint,3,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` // Whether force-included transactions are present HasForceIncludedTransaction bool `protobuf:"varint,4,opt,name=has_force_included_transaction,json=hasForceIncludedTransaction,proto3" json:"has_force_included_transaction,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // All transactions (force-included + mempool) in contiguous batch form. 
+ TxBatch *TxBatch `protobuf:"bytes,5,opt,name=tx_batch,json=txBatch,proto3" json:"tx_batch,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FilterTxsRequest) Reset() { *x = FilterTxsRequest{} - mi := &file_evnode_v1_execution_proto_msgTypes[10] + mi := &file_evnode_v1_execution_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -612,7 +667,7 @@ func (x *FilterTxsRequest) String() string { func (*FilterTxsRequest) ProtoMessage() {} func (x *FilterTxsRequest) ProtoReflect() protoreflect.Message { - mi := &file_evnode_v1_execution_proto_msgTypes[10] + mi := &file_evnode_v1_execution_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -625,14 +680,7 @@ func (x *FilterTxsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FilterTxsRequest.ProtoReflect.Descriptor instead. func (*FilterTxsRequest) Descriptor() ([]byte, []int) { - return file_evnode_v1_execution_proto_rawDescGZIP(), []int{10} -} - -func (x *FilterTxsRequest) GetTxs() [][]byte { - if x != nil { - return x.Txs - } - return nil + return file_evnode_v1_execution_proto_rawDescGZIP(), []int{11} } func (x *FilterTxsRequest) GetMaxBytes() uint64 { @@ -656,6 +704,13 @@ func (x *FilterTxsRequest) GetHasForceIncludedTransaction() bool { return false } +func (x *FilterTxsRequest) GetTxBatch() *TxBatch { + if x != nil { + return x.TxBatch + } + return nil +} + // FilterTxsResponse contains the filter status for each transaction type FilterTxsResponse struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -667,7 +722,7 @@ type FilterTxsResponse struct { func (x *FilterTxsResponse) Reset() { *x = FilterTxsResponse{} - mi := &file_evnode_v1_execution_proto_msgTypes[11] + mi := &file_evnode_v1_execution_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -679,7 +734,7 @@ func (x 
*FilterTxsResponse) String() string { func (*FilterTxsResponse) ProtoMessage() {} func (x *FilterTxsResponse) ProtoReflect() protoreflect.Message { - mi := &file_evnode_v1_execution_proto_msgTypes[11] + mi := &file_evnode_v1_execution_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -692,7 +747,7 @@ func (x *FilterTxsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FilterTxsResponse.ProtoReflect.Descriptor instead. func (*FilterTxsResponse) Descriptor() ([]byte, []int) { - return file_evnode_v1_execution_proto_rawDescGZIP(), []int{11} + return file_evnode_v1_execution_proto_rawDescGZIP(), []int{12} } func (x *FilterTxsResponse) GetStatuses() []FilterStatus { @@ -714,14 +769,17 @@ const file_evnode_v1_execution_proto_rawDesc = "" + "\x11InitChainResponse\x12\x1d\n" + "\n" + "state_root\x18\x01 \x01(\fR\tstateRoot\"\x0f\n" + - "\rGetTxsRequest\"\"\n" + - "\x0eGetTxsResponse\x12\x10\n" + - "\x03txs\x18\x01 \x03(\fR\x03txs\"\xaa\x01\n" + - "\x11ExecuteTxsRequest\x12\x10\n" + - "\x03txs\x18\x01 \x03(\fR\x03txs\x12!\n" + + "\rGetTxsRequest\"8\n" + + "\aTxBatch\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data\x12\x19\n" + + "\btx_sizes\x18\x02 \x03(\rR\atxSizes\"J\n" + + "\x0eGetTxsResponse\x12-\n" + + "\btx_batch\x18\x02 \x01(\v2\x12.evnode.v1.TxBatchR\atxBatchJ\x04\b\x01\x10\x02R\x03txs\"\xd2\x01\n" + + "\x11ExecuteTxsRequest\x12!\n" + "\fblock_height\x18\x02 \x01(\x04R\vblockHeight\x128\n" + "\ttimestamp\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12&\n" + - "\x0fprev_state_root\x18\x04 \x01(\fR\rprevStateRoot\"\x93\x01\n" + + "\x0fprev_state_root\x18\x04 \x01(\fR\rprevStateRoot\x12-\n" + + "\btx_batch\x18\x05 \x01(\v2\x12.evnode.v1.TxBatchR\atxBatchJ\x04\b\x01\x10\x02R\x03txs\"\x93\x01\n" + "\x12ExecuteTxsResponse\x12,\n" + "\x12updated_state_root\x18\x01 \x01(\fR\x10updatedStateRoot\x12\x1b\n" + "\tmax_bytes\x18\x02 \x01(\x04R\bmaxBytes\x122\n" + @@ -732,12 
+790,12 @@ const file_evnode_v1_execution_proto_rawDesc = "" + "\x17GetExecutionInfoRequest\"g\n" + "\x18GetExecutionInfoResponse\x12\x17\n" + "\amax_gas\x18\x01 \x01(\x04R\x06maxGas\x122\n" + - "\x15next_proposer_address\x18\x02 \x01(\fR\x13nextProposerAddress\"\x9f\x01\n" + - "\x10FilterTxsRequest\x12\x10\n" + - "\x03txs\x18\x01 \x03(\fR\x03txs\x12\x1b\n" + + "\x15next_proposer_address\x18\x02 \x01(\fR\x13nextProposerAddress\"\xc7\x01\n" + + "\x10FilterTxsRequest\x12\x1b\n" + "\tmax_bytes\x18\x02 \x01(\x04R\bmaxBytes\x12\x17\n" + "\amax_gas\x18\x03 \x01(\x04R\x06maxGas\x12C\n" + - "\x1ehas_force_included_transaction\x18\x04 \x01(\bR\x1bhasForceIncludedTransaction\"H\n" + + "\x1ehas_force_included_transaction\x18\x04 \x01(\bR\x1bhasForceIncludedTransaction\x12-\n" + + "\btx_batch\x18\x05 \x01(\v2\x12.evnode.v1.TxBatchR\atxBatchJ\x04\b\x01\x10\x02R\x03txs\"H\n" + "\x11FilterTxsResponse\x123\n" + "\bstatuses\x18\x01 \x03(\x0e2\x17.evnode.v1.FilterStatusR\bstatuses*E\n" + "\fFilterStatus\x12\r\n" + @@ -751,7 +809,7 @@ const file_evnode_v1_execution_proto_rawDesc = "" + "ExecuteTxs\x12\x1c.evnode.v1.ExecuteTxsRequest\x1a\x1d.evnode.v1.ExecuteTxsResponse\"\x00\x12E\n" + "\bSetFinal\x12\x1a.evnode.v1.SetFinalRequest\x1a\x1b.evnode.v1.SetFinalResponse\"\x00\x12]\n" + "\x10GetExecutionInfo\x12\".evnode.v1.GetExecutionInfoRequest\x1a#.evnode.v1.GetExecutionInfoResponse\"\x00\x12H\n" + - "\tFilterTxs\x12\x1b.evnode.v1.FilterTxsRequest\x1a\x1c.evnode.v1.FilterTxsResponse\"\x00B/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" + "\tFilterTxs\x12\x1b.evnode.v1.FilterTxsRequest\x1a\x1c.evnode.v1.FilterTxsResponse\"\x00B>Z google.protobuf.Timestamp - 13, // 1: evnode.v1.ExecuteTxsRequest.timestamp:type_name -> google.protobuf.Timestamp - 0, // 2: evnode.v1.FilterTxsResponse.statuses:type_name -> evnode.v1.FilterStatus - 1, // 3: evnode.v1.ExecutorService.InitChain:input_type -> evnode.v1.InitChainRequest - 3, // 4: evnode.v1.ExecutorService.GetTxs:input_type -> 
evnode.v1.GetTxsRequest - 5, // 5: evnode.v1.ExecutorService.ExecuteTxs:input_type -> evnode.v1.ExecuteTxsRequest - 7, // 6: evnode.v1.ExecutorService.SetFinal:input_type -> evnode.v1.SetFinalRequest - 9, // 7: evnode.v1.ExecutorService.GetExecutionInfo:input_type -> evnode.v1.GetExecutionInfoRequest - 11, // 8: evnode.v1.ExecutorService.FilterTxs:input_type -> evnode.v1.FilterTxsRequest - 2, // 9: evnode.v1.ExecutorService.InitChain:output_type -> evnode.v1.InitChainResponse - 4, // 10: evnode.v1.ExecutorService.GetTxs:output_type -> evnode.v1.GetTxsResponse - 6, // 11: evnode.v1.ExecutorService.ExecuteTxs:output_type -> evnode.v1.ExecuteTxsResponse - 8, // 12: evnode.v1.ExecutorService.SetFinal:output_type -> evnode.v1.SetFinalResponse - 10, // 13: evnode.v1.ExecutorService.GetExecutionInfo:output_type -> evnode.v1.GetExecutionInfoResponse - 12, // 14: evnode.v1.ExecutorService.FilterTxs:output_type -> evnode.v1.FilterTxsResponse - 9, // [9:15] is the sub-list for method output_type - 3, // [3:9] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 14, // 0: evnode.v1.InitChainRequest.genesis_time:type_name -> google.protobuf.Timestamp + 4, // 1: evnode.v1.GetTxsResponse.tx_batch:type_name -> evnode.v1.TxBatch + 14, // 2: evnode.v1.ExecuteTxsRequest.timestamp:type_name -> google.protobuf.Timestamp + 4, // 3: evnode.v1.ExecuteTxsRequest.tx_batch:type_name -> evnode.v1.TxBatch + 4, // 4: evnode.v1.FilterTxsRequest.tx_batch:type_name -> evnode.v1.TxBatch + 0, // 5: evnode.v1.FilterTxsResponse.statuses:type_name -> evnode.v1.FilterStatus + 1, // 6: evnode.v1.ExecutorService.InitChain:input_type -> evnode.v1.InitChainRequest + 3, // 7: evnode.v1.ExecutorService.GetTxs:input_type -> evnode.v1.GetTxsRequest + 6, // 8: evnode.v1.ExecutorService.ExecuteTxs:input_type -> evnode.v1.ExecuteTxsRequest + 8, // 9: 
evnode.v1.ExecutorService.SetFinal:input_type -> evnode.v1.SetFinalRequest + 10, // 10: evnode.v1.ExecutorService.GetExecutionInfo:input_type -> evnode.v1.GetExecutionInfoRequest + 12, // 11: evnode.v1.ExecutorService.FilterTxs:input_type -> evnode.v1.FilterTxsRequest + 2, // 12: evnode.v1.ExecutorService.InitChain:output_type -> evnode.v1.InitChainResponse + 5, // 13: evnode.v1.ExecutorService.GetTxs:output_type -> evnode.v1.GetTxsResponse + 7, // 14: evnode.v1.ExecutorService.ExecuteTxs:output_type -> evnode.v1.ExecuteTxsResponse + 9, // 15: evnode.v1.ExecutorService.SetFinal:output_type -> evnode.v1.SetFinalResponse + 11, // 16: evnode.v1.ExecutorService.GetExecutionInfo:output_type -> evnode.v1.GetExecutionInfoResponse + 13, // 17: evnode.v1.ExecutorService.FilterTxs:output_type -> evnode.v1.FilterTxsResponse + 12, // [12:18] is the sub-list for method output_type + 6, // [6:12] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { file_evnode_v1_execution_proto_init() } @@ -817,7 +879,7 @@ func file_evnode_v1_execution_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_evnode_v1_execution_proto_rawDesc), len(file_evnode_v1_execution_proto_rawDesc)), NumEnums: 1, - NumMessages: 12, + NumMessages: 13, NumExtensions: 0, NumServices: 1, }, diff --git a/types/pb/evnode/v1/v1connect/execution.connect.go b/execution/grpc/types/pb/evnode/v1/v1connect/execution.connect.go similarity index 99% rename from types/pb/evnode/v1/v1connect/execution.connect.go rename to execution/grpc/types/pb/evnode/v1/v1connect/execution.connect.go index 35a85ec77b..c4f7f1df64 100644 --- a/types/pb/evnode/v1/v1connect/execution.connect.go +++ b/execution/grpc/types/pb/evnode/v1/v1connect/execution.connect.go @@ -8,7 +8,7 @@ import ( connect "connectrpc.com/connect" context 
"context" errors "errors" - v1 "github.com/evstack/ev-node/types/pb/evnode/v1" + v1 "github.com/evstack/ev-node/execution/grpc/types/pb/evnode/v1" http "net/http" strings "strings" ) diff --git a/execution/grpc/unix.go b/execution/grpc/unix.go new file mode 100644 index 0000000000..69cb59daea --- /dev/null +++ b/execution/grpc/unix.go @@ -0,0 +1,69 @@ +package grpc + +import ( + "errors" + "fmt" + "net" + "net/http" + "os" + + "connectrpc.com/connect" + + "github.com/evstack/ev-node/core/execution" +) + +// ListenUnix creates a Unix domain socket listener for the gRPC execution service. +// +// If socketPath already exists, ListenUnix removes it only when it is a stale +// socket. Regular files, directories, and other path types are left untouched. +func ListenUnix(socketPath string) (net.Listener, error) { + if socketPath == "" { + return nil, errors.New("unix socket path is required") + } + if err := removeStaleUnixSocket(socketPath); err != nil { + return nil, err + } + listener, err := net.Listen("unix", socketPath) + if err != nil { + return nil, fmt.Errorf("listen unix socket %q: %w", socketPath, err) + } + return listener, nil +} + +// ListenAndServeUnix serves the gRPC execution service over a Unix domain socket. +// +// The NewExecutorServiceHandler handler is passed to http.Serve, so this +// function blocks until http.Serve returns an error. When it returns, deferred +// cleanup closes the listener with listener.Close and then removes the socket +// with removeStaleUnixSocket. Cleanup errors are currently ignored. 
+func ListenAndServeUnix(socketPath string, executor execution.Executor, opts ...connect.HandlerOption) error { + listener, err := ListenUnix(socketPath) + if err != nil { + return err + } + defer func() { + _ = listener.Close() + }() + defer func() { + _ = removeStaleUnixSocket(socketPath) + }() + + return http.Serve(listener, NewExecutorServiceHandler(executor, opts...)) +} + +func removeStaleUnixSocket(socketPath string) error { + info, err := os.Lstat(socketPath) + if errors.Is(err, os.ErrNotExist) { + return nil + } + if err != nil { + return fmt.Errorf("stat unix socket %q: %w", socketPath, err) + } + if info.Mode()&os.ModeSocket == 0 { + return fmt.Errorf("refusing to remove non-socket path %q", socketPath) + } + if err := os.Remove(socketPath); err != nil { + return fmt.Errorf("remove stale unix socket %q: %w", socketPath, err) + } + return nil +} diff --git a/execution/grpc/unix_test.go b/execution/grpc/unix_test.go new file mode 100644 index 0000000000..d90133a8e9 --- /dev/null +++ b/execution/grpc/unix_test.go @@ -0,0 +1,59 @@ +package grpc + +import ( + "fmt" + "net" + "os" + "path/filepath" + "strings" + "testing" + "time" +) + +func TestListenUnixRejectsNonSocketPath(t *testing.T) { + socketPath := filepath.Join(t.TempDir(), "executor.sock") + if err := os.WriteFile(socketPath, []byte("not a socket"), 0o600); err != nil { + t.Fatalf("write test file: %v", err) + } + + listener, err := ListenUnix(socketPath) + if err == nil { + _ = listener.Close() + t.Fatal("expected error for non-socket path") + } + if !strings.Contains(err.Error(), "refusing to remove non-socket path") { + t.Fatalf("expected non-socket refusal, got %v", err) + } +} + +func TestListenUnixRemovesStaleSocket(t *testing.T) { + socketPath := testUnixSocketPath(t) + staleListener, err := net.Listen("unix", socketPath) + if err != nil { + t.Fatalf("create stale unix socket: %v", err) + } + if err := staleListener.Close(); err != nil { + t.Fatalf("close stale unix socket: %v", err) + } + + 
listener, err := ListenUnix(socketPath) + if err != nil { + t.Fatalf("listen unix socket: %v", err) + } + if err := listener.Close(); err != nil { + t.Fatalf("close unix socket: %v", err) + } +} + +func testUnixSocketPath(t *testing.T) string { + t.Helper() + + socketPath := filepath.Join( + os.TempDir(), + fmt.Sprintf("ev-node-grpc-%d-%d.sock", os.Getpid(), time.Now().UnixNano()), + ) + t.Cleanup(func() { + _ = os.Remove(socketPath) + }) + return socketPath +} diff --git a/go.mod b/go.mod index 37b153773f..34e2549e3e 100644 --- a/go.mod +++ b/go.mod @@ -10,8 +10,8 @@ require ( connectrpc.com/grpcreflect v1.3.0 github.com/aws/aws-sdk-go-v2 v1.41.6 github.com/aws/aws-sdk-go-v2/config v1.32.16 - github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 - github.com/aws/smithy-go v1.25.0 + github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 + github.com/aws/smithy-go v1.25.1 github.com/celestiaorg/go-header v0.8.5 github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3 github.com/celestiaorg/go-square/v3 v3.0.2 @@ -30,10 +30,10 @@ require ( github.com/ipfs/go-ds-badger4 v0.1.8 github.com/libp2p/go-libp2p v0.48.0 github.com/libp2p/go-libp2p-kad-dht v0.39.1 - github.com/libp2p/go-libp2p-pubsub v0.15.0 + github.com/libp2p/go-libp2p-pubsub v0.16.0 github.com/multiformats/go-multiaddr v0.16.1 github.com/prometheus/client_golang v1.23.2 - github.com/rs/zerolog v1.35.0 + github.com/rs/zerolog v1.35.1 github.com/spf13/cobra v1.10.2 github.com/spf13/pflag v1.0.10 github.com/spf13/viper v1.21.0 @@ -45,7 +45,7 @@ require ( golang.org/x/crypto v0.50.0 golang.org/x/net v0.53.0 golang.org/x/sync v0.20.0 - google.golang.org/api v0.274.0 + google.golang.org/api v0.276.0 google.golang.org/grpc v1.80.0 google.golang.org/protobuf v1.36.11 gotest.tools/v3 v3.5.2 @@ -55,7 +55,7 @@ replace github.com/evstack/ev-node/core => ./core require ( cloud.google.com/go v0.123.0 // indirect - cloud.google.com/go/auth v0.18.2 // indirect + cloud.google.com/go/auth v0.20.0 // indirect 
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/iam v1.7.0 // indirect @@ -189,8 +189,8 @@ require ( github.com/wlynxg/anet v0.0.5 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect go.opentelemetry.io/otel/metric v1.43.0 // indirect go.opentelemetry.io/proto/otlp v1.10.0 // indirect diff --git a/go.sum b/go.sum index fb7d076ef7..fb6c0549dc 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= -cloud.google.com/go/auth v0.18.2 h1:+Nbt5Ev0xEqxlNjd6c+yYUeosQ5TtEUaNcN/3FozlaM= -cloud.google.com/go/auth v0.18.2/go.mod h1:xD+oY7gcahcu7G2SG2DsBerfFxgPAJz17zz2joOFF3M= +cloud.google.com/go/auth v0.20.0 h1:kXTssoVb4azsVDoUiF8KvxAqrsQcQtB53DcSgta74CA= +cloud.google.com/go/auth v0.20.0/go.mod h1:942/yi/itH1SsmpyrbnTMDgGfdy2BUqIKyd0cyYLc5Q= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -95,8 +95,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 
h1:HtOTYcb github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8/go.mod h1:VsK9abqQeGlzPgUr+isNWzPlK2vKe9INMLWnY65f5Xs= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 h1:PUmZeJU6Y1Lbvt9WFuJ0ugUK2xn6hIWUBBbKuOWF30s= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22/go.mod h1:nO6egFBoAaoXze24a2C0NjQCvdpk8OueRoYimvEB9jo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 h1:nEzwx/ZlpUZ2Y6WztsgYmfBh5Ixd3QiECawXMzvTMeo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 h1:696UM+NwOrETBCLQJyCAGtVmmZmziBT59yMwgg6Fvrw= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 h1:a1Fq/KXn75wSzoJaPQTgZO0wHGqE9mjFnylnqEPTchA= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10/go.mod h1:p6+MXNxW7IA6dMgHfTAzljuwSKD0NCm/4lbS4t6+7vI= github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 h1:x6bKbmDhsgSZwv6q19wY/u3rLk/3FGjJWyqKcIRufpE= @@ -105,8 +105,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 h1:oK/njaL8GtyEihkWMD4k3Vg github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20/go.mod h1:JHs8/y1f3zY7U5WcuzoJ/yAYGYtNIVPKLIbp61euvmg= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 h1:ks8KBcZPh3PYISr5dAiXCM5/Thcuxk8l+PG4+A0exds= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0/go.mod h1:pFw33T0WLvXU3rw1WBkpMlkgIn54eCB5FYLhjDc9Foo= -github.com/aws/smithy-go v1.25.0 h1:Sz/XJ64rwuiKtB6j98nDIPyYrV1nVNJ4YU74gttcl5U= -github.com/aws/smithy-go v1.25.0/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/aws/smithy-go v1.25.1 h1:J8ERsGSU7d+aCmdQur5Txg6bVoYelvQJgtZehD12GkI= +github.com/aws/smithy-go v1.25.1/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -445,8 +445,8 @@ github.com/libp2p/go-libp2p-kad-dht v0.39.1 h1:9RzUBc7zywT4ZNamRSgEvPZmVlK3Y6xdl github.com/libp2p/go-libp2p-kad-dht v0.39.1/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= -github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= -github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= +github.com/libp2p/go-libp2p-pubsub v0.16.0 h1:j7G2C8kJwkcAQqYR7Wmq3d75d3Sgw/N0Hhiv0dVx7OY= +github.com/libp2p/go-libp2p-pubsub v0.16.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= @@ -626,8 +626,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/rs/zerolog v1.35.0 h1:VD0ykx7HMiMJytqINBsKcbLS+BJ4WYjz+05us+LRTdI= -github.com/rs/zerolog v1.35.0/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= +github.com/rs/zerolog v1.35.1 h1:m7xQeoiLIiV0BCEY4Hs+j2NG4Gp2o2KPKmhnnLiazKI= +github.com/rs/zerolog v1.35.1/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= github.com/russross/blackfriday/v2 v2.0.1/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -727,10 +727,10 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= @@ -1092,8 +1092,8 @@ google.golang.org/api v0.40.0/go.mod 
h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.274.0 h1:aYhycS5QQCwxHLwfEHRRLf9yNsfvp1JadKKWBE54RFA= -google.golang.org/api v0.274.0/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= +google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY= +google.golang.org/api v0.276.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/mlc_config.json b/mlc_config.json new file mode 100644 index 0000000000..c5b3c40534 --- /dev/null +++ b/mlc_config.json @@ -0,0 +1,34 @@ +{ + "ignorePatterns": [ + { + "pattern": "^/" + }, + { + "pattern": "^http://localhost:" + }, + { + "pattern": "^https://rpc\\.your-chain\\.com$" + }, + { + "pattern": "^https://claude\\.ai/code$" + }, + { + "pattern": "^https://docs\\.celestia\\.org/" + }, + { + "pattern": "^https://docs\\.cosmos\\.network/" + }, + { + "pattern": "^https://faucet\\.celestia-arabica\\.com/" + }, + { + "pattern": "^https://medium\\.com/zeeve/" + }, + { + "pattern": "^https://github\\.com/ignite/apps/blob/main/evolve/template/init\\.go#L48-L60$" + }, + { + "pattern": "^https://github\\.com/evstack/ev-node/blob/main/specs/src/adr/adr-022-validator-network\\.md$" + } + ] +} diff --git a/pkg/sequencers/common/forced_inclusion_retriever_mock.go b/pkg/sequencers/based/forced_inclusion_retriever_mock.go similarity index 99% rename from pkg/sequencers/common/forced_inclusion_retriever_mock.go rename to pkg/sequencers/based/forced_inclusion_retriever_mock.go 
index e4dc2b0c8e..ada1bca82b 100644 --- a/pkg/sequencers/common/forced_inclusion_retriever_mock.go +++ b/pkg/sequencers/based/forced_inclusion_retriever_mock.go @@ -2,7 +2,7 @@ // github.com/vektra/mockery // template: testify -package common +package based import ( "context" diff --git a/pkg/sequencers/based/sequencer_test.go b/pkg/sequencers/based/sequencer_test.go index b8124f988b..2b947b2016 100644 --- a/pkg/sequencers/based/sequencer_test.go +++ b/pkg/sequencers/based/sequencer_test.go @@ -18,7 +18,6 @@ import ( "github.com/evstack/ev-node/pkg/config" datypes "github.com/evstack/ev-node/pkg/da/types" "github.com/evstack/ev-node/pkg/genesis" - "github.com/evstack/ev-node/pkg/sequencers/common" "github.com/evstack/ev-node/test/mocks" ) @@ -62,7 +61,7 @@ type testData struct { cancel context.CancelFunc } -func createTestSequencer(t *testing.T, mockRetriever *common.MockForcedInclusionRetriever, gen genesis.Genesis) (*BasedSequencer, testData) { +func createTestSequencer(t *testing.T, mockRetriever *MockForcedInclusionRetriever, gen genesis.Genesis) (*BasedSequencer, testData) { t.Helper() // Create in-memory datastore @@ -95,7 +94,7 @@ func createTestSequencer(t *testing.T, mockRetriever *common.MockForcedInclusion } func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) gen := genesis.Genesis{ ChainID: "test-chain", DAEpochForcedInclusion: 10, @@ -122,7 +121,7 @@ func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { testBlobs := [][]byte{[]byte("tx1"), []byte("tx2")} - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ Txs: testBlobs, StartDaHeight: 100, @@ -158,7 +157,7 @@ func 
TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { } func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ Txs: [][]byte{}, StartDaHeight: 100, @@ -189,7 +188,7 @@ func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { } func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(nil, block.ErrForceInclusionNotConfigured) gen := genesis.Genesis{ @@ -219,7 +218,7 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { tx3 := make([]byte, 200) testBlobs := [][]byte{tx1, tx2, tx3} - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ Txs: testBlobs, StartDaHeight: 100, @@ -274,7 +273,7 @@ func TestBasedSequencer_GetNextBatch_MultipleDABlocks(t *testing.T) { testBlobs1 := [][]byte{[]byte("tx1"), []byte("tx2")} testBlobs2 := [][]byte{[]byte("tx3"), []byte("tx4")} - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) // First DA block mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ Txs: testBlobs1, @@ -327,7 +326,7 @@ func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T largeTx := make([]byte, 2000) testBlobs := [][]byte{largeTx} - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) mockRetriever.On("RetrieveForcedIncludedTxs", 
mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ Txs: testBlobs, StartDaHeight: 100, @@ -358,7 +357,7 @@ func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T } func TestBasedSequencer_VerifyBatch(t *testing.T) { - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) gen := genesis.Genesis{ ChainID: "test-chain", DAEpochForcedInclusion: 10, @@ -379,7 +378,7 @@ func TestBasedSequencer_VerifyBatch(t *testing.T) { } func TestBasedSequencer_SetDAHeight(t *testing.T) { - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) gen := genesis.Genesis{ ChainID: "test-chain", DAStartHeight: 100, @@ -397,7 +396,7 @@ func TestBasedSequencer_SetDAHeight(t *testing.T) { } func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(nil, block.ErrForceInclusionNotConfigured) gen := genesis.Genesis{ @@ -421,7 +420,7 @@ func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { } func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(nil, datypes.ErrHeightFromFuture) gen := genesis.Genesis{ @@ -450,7 +449,7 @@ func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { func TestBasedSequencer_CheckpointPersistence(t *testing.T) { testBlobs := [][]byte{[]byte("tx1"), []byte("tx2")} - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ 
Txs: testBlobs, StartDaHeight: 100, @@ -525,7 +524,7 @@ func TestBasedSequencer_CrashRecoveryMidEpoch(t *testing.T) { testBlobs := [][]byte{[]byte("tx0"), []byte("tx1"), []byte("tx2"), []byte("tx3"), []byte("tx4")} - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) // The epoch will be fetched twice: once before crash, once after restart mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ Txs: testBlobs, @@ -650,7 +649,7 @@ func TestBasedSequencer_CrashRecoveryMidEpoch(t *testing.T) { } func TestBasedSequencer_GetNextBatch_EmptyDABatch_IncreasesDAHeight(t *testing.T) { - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) // First DA block returns empty transactions mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ @@ -715,7 +714,7 @@ func TestBasedSequencer_GetNextBatch_TimestampAdjustment(t *testing.T) { testBlobs := [][]byte{[]byte("tx1"), []byte("tx2"), []byte("tx3")} daEndTime := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC) - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ Txs: testBlobs, StartDaHeight: 100, @@ -759,7 +758,7 @@ func TestBasedSequencer_GetNextBatch_TimestampAdjustment_PartialBatch(t *testing testBlobs := [][]byte{tx1, tx2, tx3} daEndTime := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC) - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ Txs: testBlobs, StartDaHeight: 100, @@ -817,7 +816,7 @@ func TestBasedSequencer_GetNextBatch_TimestampAdjustment_EmptyBatch(t 
*testing.T // The checkpoint must still advance past the empty epoch. daEndTime := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC) - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ Txs: [][]byte{}, StartDaHeight: 100, @@ -869,7 +868,7 @@ func TestBasedSequencer_GetNextBatch_GasFilteringPreservesUnprocessedTxs(t *test testBlobs := [][]byte{tx0, tx1, tx2, tx3, tx4} - mockRetriever := common.NewMockForcedInclusionRetriever(t) + mockRetriever := NewMockForcedInclusionRetriever(t) // Only expect one call to retrieve - all txs come from one DA epoch mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ Txs: testBlobs, @@ -988,7 +987,7 @@ func TestBasedSequencer_GetNextBatch_GasFilteringPreservesUnprocessedTxs(t *test assert.GreaterOrEqual(t, totalTxsProcessed, 3, "should process at least 3 valid transactions from the cache") } -func replaceWithMockRetriever(seq *BasedSequencer, mockRetriever *common.MockForcedInclusionRetriever) { +func replaceWithMockRetriever(seq *BasedSequencer, mockRetriever *MockForcedInclusionRetriever) { if seq.fiRetriever != nil { seq.fiRetriever.Stop() } diff --git a/pkg/sequencers/common/errors.go b/pkg/sequencers/common/errors.go new file mode 100644 index 0000000000..b960965442 --- /dev/null +++ b/pkg/sequencers/common/errors.go @@ -0,0 +1,10 @@ +package common + +import "errors" + +var ( + // ErrInvalidId is returned when the chain id is invalid + ErrInvalidID = errors.New("invalid chain id") + // ErrQueueFull is returned when the batch queue has reached its maximum size + ErrQueueFull = errors.New("sequencer queue full") +) diff --git a/pkg/sequencers/single/queue.go b/pkg/sequencers/single/queue.go index 40dcc5997f..6940e66c7f 100644 --- a/pkg/sequencers/single/queue.go +++ b/pkg/sequencers/single/queue.go @@ -4,7 
+4,6 @@ import ( "context" "encoding/binary" "encoding/hex" - "errors" "fmt" "strconv" "sync" @@ -14,12 +13,12 @@ import ( "google.golang.org/protobuf/proto" coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/sequencers/common" "github.com/evstack/ev-node/pkg/store" pb "github.com/evstack/ev-node/types/pb/evnode/v1" ) -// ErrQueueFull is returned when the batch queue has reached its maximum size -var ErrQueueFull = errors.New("batch queue is full") +var ErrQueueFull = common.ErrQueueFull // initialSeqNum is the starting sequence number for new queues. // It is set to the middle of the uint64 range to allow for both diff --git a/pkg/sequencers/single/sequencer.go b/pkg/sequencers/single/sequencer.go index 5aec08984d..7b63334703 100644 --- a/pkg/sequencers/single/sequencer.go +++ b/pkg/sequencers/single/sequencer.go @@ -24,8 +24,7 @@ import ( "github.com/evstack/ev-node/types" ) -// ErrInvalidId is returned when the chain id is invalid -var ErrInvalidId = errors.New("invalid chain id") +var ErrInvalidId = seqcommon.ErrInvalidID // Catch-up state machine states const ( diff --git a/pkg/sequencers/solo/sequencer.go b/pkg/sequencers/solo/sequencer.go index 0fcae9f31c..8cf7249d52 100644 --- a/pkg/sequencers/solo/sequencer.go +++ b/pkg/sequencers/solo/sequencer.go @@ -3,7 +3,6 @@ package solo import ( "bytes" "context" - "errors" "sync" "sync/atomic" "time" @@ -12,12 +11,35 @@ import ( "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/sequencers/common" ) -var ErrInvalidID = errors.New("invalid chain id") +var ( + ErrInvalidID = common.ErrInvalidID + ErrQueueFull = common.ErrQueueFull +) + +var ( + emptyBatch = &coresequencer.Batch{} + submitBatchResp = &coresequencer.SubmitBatchTxsResponse{} + verifyBatchOKResp = &coresequencer.VerifyBatchResponse{Status: true} +) var _ coresequencer.Sequencer = (*SoloSequencer)(nil) +// Option configures a 
SoloSequencer. +type Option func(*SoloSequencer) + +// WithMaxQueueBytes sets a soft cap on the sequencer's in-memory tx queue. +// SubmitBatchTxs admits txs while the cap has room and returns ErrQueueFull +// when the incoming batch would exceed it. A zero value (default) disables +// the cap. +func WithMaxQueueBytes(n uint64) Option { + return func(s *SoloSequencer) { + s.maxQueueBytes = n + } +} + // SoloSequencer is a single-leader sequencer without forced inclusion // support. It maintains a simple in-memory queue of mempool transactions and // produces batches on demand. @@ -28,21 +50,38 @@ type SoloSequencer struct { daHeight atomic.Uint64 - mu sync.Mutex - queue [][]byte + mu sync.Mutex + queue [][]byte + queueBytes uint64 + maxQueueBytes uint64 } func NewSoloSequencer( logger zerolog.Logger, id []byte, executor execution.Executor, + opts ...Option, ) *SoloSequencer { - return &SoloSequencer{ + if executor == nil { + panic("solo: executor must not be nil") + } + + s := &SoloSequencer{ logger: logger, id: id, executor: executor, queue: make([][]byte, 0), } + + for _, opt := range opts { + opt(s) + } + + logger.Debug(). + Uint64("max_queue_bytes", s.maxQueueBytes). + Msg("solo sequencer initialized") + + return s } func (s *SoloSequencer) isValid(id []byte) bool { @@ -55,14 +94,38 @@ func (s *SoloSequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.Su } if req.Batch == nil || len(req.Batch.Transactions) == 0 { - return &coresequencer.SubmitBatchTxsResponse{}, nil + return submitBatchResp, nil } s.mu.Lock() defer s.mu.Unlock() + if s.maxQueueBytes == 0 { + s.queue = append(s.queue, req.Batch.Transactions...) + return submitBatchResp, nil + } + + // All-or-nothing: if the whole incoming batch doesn't fit, reject + // it untouched. Partial admission would force the caller (e.g. 
+ // the reaper bridging executor mempool → sequencer) to reason + // about which prefix was admitted and re-feed only the suffix on + // retry, which it doesn't currently do — leading to duplicate-tx + // resubmission on each retry. Rejecting the whole batch lets the + // reaper just retry with the same batch later when the queue has + // drained. + var batchBytes uint64 + for _, tx := range req.Batch.Transactions { + batchBytes += uint64(len(tx)) + } + + if s.queueBytes+batchBytes > s.maxQueueBytes { + return submitBatchResp, ErrQueueFull + } + s.queue = append(s.queue, req.Batch.Transactions...) - return &coresequencer.SubmitBatchTxsResponse{}, nil + s.queueBytes += batchBytes + + return submitBatchResp, nil } func (s *SoloSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) { @@ -73,11 +136,12 @@ func (s *SoloSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetN s.mu.Lock() txs := s.queue s.queue = nil + s.queueBytes = 0 s.mu.Unlock() if len(txs) == 0 { return &coresequencer.GetNextBatchResponse{ - Batch: &coresequencer.Batch{}, + Batch: emptyBatch, Timestamp: time.Now().UTC(), BatchData: req.LastBatchData, }, nil @@ -94,32 +158,41 @@ func (s *SoloSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetN filterStatuses, err := s.executor.FilterTxs(ctx, txs, req.MaxBytes, maxGas, false) if err != nil { s.logger.Warn().Err(err).Msg("failed to filter transactions, proceeding with unfiltered") - filterStatuses = make([]execution.FilterStatus, len(txs)) - for i := range filterStatuses { - filterStatuses[i] = execution.FilterOK - } + return &coresequencer.GetNextBatchResponse{ + Batch: &coresequencer.Batch{Transactions: txs}, + Timestamp: time.Now().UTC(), + BatchData: req.LastBatchData, + }, nil } - var validTxs [][]byte + write := 0 var postponedTxs [][]byte for i, status := range filterStatuses { switch status { case execution.FilterOK: - validTxs = append(validTxs, 
txs[i]) + txs[write] = txs[i] + write++ case execution.FilterPostpone: postponedTxs = append(postponedTxs, txs[i]) - case execution.FilterRemove: } } if len(postponedTxs) > 0 { s.mu.Lock() s.queue = append(postponedTxs, s.queue...) + // Postponed txs were already in the queue's byte count when + // SubmitBatchTxs admitted them. We zeroed queueBytes on drain + // above, so re-queuing requires re-counting whatever survived. + var bytes uint64 + for _, tx := range postponedTxs { + bytes += uint64(len(tx)) + } + s.queueBytes += bytes s.mu.Unlock() } return &coresequencer.GetNextBatchResponse{ - Batch: &coresequencer.Batch{Transactions: validTxs}, + Batch: &coresequencer.Batch{Transactions: txs[:write]}, Timestamp: time.Now().UTC(), BatchData: req.LastBatchData, }, nil @@ -130,7 +203,7 @@ func (s *SoloSequencer) VerifyBatch(ctx context.Context, req coresequencer.Verif return nil, ErrInvalidID } - return &coresequencer.VerifyBatchResponse{Status: true}, nil + return verifyBatchOKResp, nil } func (s *SoloSequencer) SetDAHeight(height uint64) { diff --git a/pkg/sequencers/solo/sequencer_test.go b/pkg/sequencers/solo/sequencer_test.go index 7f3bc9e196..1d1274a0b6 100644 --- a/pkg/sequencers/solo/sequencer_test.go +++ b/pkg/sequencers/solo/sequencer_test.go @@ -217,3 +217,91 @@ func TestSoloSequencer_DAHeight(t *testing.T) { seq.SetDAHeight(42) assert.Equal(t, uint64(42), seq.GetDAHeight()) } + +func TestSoloSequencer_SubmitBatchTxs_QueueFull(t *testing.T) { + seq := NewSoloSequencer( + zerolog.Nop(), + []byte("test"), + createDefaultMockExecutor(t), + WithMaxQueueBytes(10), + ) + + _, err := seq.SubmitBatchTxs(context.Background(), coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test"), + Batch: &coresequencer.Batch{Transactions: [][]byte{make([]byte, 6)}}, + }) + require.NoError(t, err) + + _, err = seq.SubmitBatchTxs(context.Background(), coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test"), + Batch: &coresequencer.Batch{Transactions: [][]byte{make([]byte, 5)}}, + 
}) + assert.ErrorIs(t, err, ErrQueueFull) +} + +func TestSoloSequencer_SubmitBatchTxs_QueueFull_AllOrNothing(t *testing.T) { + seq := NewSoloSequencer( + zerolog.Nop(), + []byte("test"), + createDefaultMockExecutor(t), + WithMaxQueueBytes(5), + ) + + _, err := seq.SubmitBatchTxs(context.Background(), coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test"), + Batch: &coresequencer.Batch{Transactions: [][]byte{{1}, {2}, {3}}}, + }) + require.NoError(t, err) + + _, err = seq.SubmitBatchTxs(context.Background(), coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test"), + Batch: &coresequencer.Batch{Transactions: [][]byte{{4}, {5}, {6}}}, + }) + assert.ErrorIs(t, err, ErrQueueFull) + + assert.Len(t, seq.queue, 3, "queue should contain only the first batch") +} + +func TestSoloSequencer_SubmitBatchTxs_DrainReleasesCapacity(t *testing.T) { + seq := NewSoloSequencer( + zerolog.Nop(), + []byte("test"), + createDefaultMockExecutor(t), + WithMaxQueueBytes(10), + ) + + _, err := seq.SubmitBatchTxs(context.Background(), coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test"), + Batch: &coresequencer.Batch{Transactions: [][]byte{make([]byte, 10)}}, + }) + require.NoError(t, err) + + _, err = seq.SubmitBatchTxs(context.Background(), coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test"), + Batch: &coresequencer.Batch{Transactions: [][]byte{{1}}}, + }) + assert.ErrorIs(t, err, ErrQueueFull) + + _, err = seq.GetNextBatch(context.Background(), coresequencer.GetNextBatchRequest{Id: []byte("test")}) + require.NoError(t, err) + + _, err = seq.SubmitBatchTxs(context.Background(), coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test"), + Batch: &coresequencer.Batch{Transactions: [][]byte{{1}}}, + }) + assert.NoError(t, err, "submit should succeed after queue is drained") +} + +func TestSoloSequencer_SubmitBatchTxs_UnboundedByDefault(t *testing.T) { + seq := newTestSequencer(t) + + bigTx := make([]byte, 1024*1024) + for i := 0; i < 10; i++ { + _, err := 
seq.SubmitBatchTxs(context.Background(), coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test"), + Batch: &coresequencer.Batch{Transactions: [][]byte{bigTx}}, + }) + require.NoError(t, err) + } + assert.Len(t, seq.queue, 10) +} diff --git a/pkg/store/cached_store.go b/pkg/store/cached_store.go index 3c7af2f611..39031ccde3 100644 --- a/pkg/store/cached_store.go +++ b/pkg/store/cached_store.go @@ -2,8 +2,11 @@ package store import ( "context" + "sync" + "time" lru "github.com/hashicorp/golang-lru/v2" + "github.com/rs/zerolog" "github.com/evstack/ev-node/types" ) @@ -14,15 +17,37 @@ const ( // DefaultBlockDataCacheSize is the default number of block data entries to cache. DefaultBlockDataCacheSize = 200_000 + + asyncWriteBufferSize = 8192 + + // batchWindow is the time the write goroutine waits after receiving the first + // op before flushing. This allows bursts of metadata writes (e.g. 3-4 per + // height in the submitter) to be coalesced into a single Badger WriteBatch. + batchWindow = 100 * time.Microsecond ) +type asyncWriteOp struct { + key string + value []byte + isDelete bool +} + // CachedStore wraps a Store with LRU caching for frequently accessed data. // The underlying LRU cache is thread-safe, so no additional synchronization is needed. +// Metadata writes (SetMetadata, DeleteMetadata) are processed asynchronously via a +// buffered channel to avoid blocking Badger's write pipeline for critical operations +// like block production (batch commits). 
type CachedStore struct { Store headerCache *lru.Cache[uint64, *types.SignedHeader] blockDataCache *lru.Cache[uint64, *blockDataEntry] + + writeCh chan asyncWriteOp + done chan struct{} + stopMu sync.RWMutex + stopped bool + logger zerolog.Logger } type blockDataEntry struct { @@ -73,6 +98,9 @@ func NewCachedStore(store Store, opts ...CachedStoreOption) (*CachedStore, error Store: store, headerCache: headerCache, blockDataCache: blockDataCache, + writeCh: make(chan asyncWriteOp, asyncWriteBufferSize), + done: make(chan struct{}), + logger: zerolog.Nop(), } for _, opt := range opts { @@ -81,9 +109,58 @@ func NewCachedStore(store Store, opts ...CachedStoreOption) (*CachedStore, error } } + cs.startWriteLoop() + return cs, nil } +func (cs *CachedStore) startWriteLoop() { + go func() { + defer close(cs.done) + for op := range cs.writeCh { + ops := []asyncWriteOp{op} + + timer := time.NewTimer(batchWindow) + collect: + for { + select { + case op, ok := <-cs.writeCh: + if !ok { + timer.Stop() + break collect + } + ops = append(ops, op) + case <-timer.C: + break collect + } + } + + last := make(map[string]asyncWriteOp, len(ops)) + for _, o := range ops { + last[o.key] = o + } + + var puts []MetadataKV + var deletes []string + for _, o := range last { + if o.isDelete { + deletes = append(deletes, o.key) + } else { + puts = append(puts, MetadataKV{Key: o.key, Value: o.value}) + } + } + + if err := cs.BatchMetadata(context.Background(), puts, deletes); err != nil { + for _, o := range ops { + cs.logger.Error().Err(err).Str("key", o.key). + Bool("delete", o.isDelete). + Msg("async metadata batch write failed") + } + } + } + }() +} + // GetHeader returns the header at the given height, using the cache if available. 
func (cs *CachedStore) GetHeader(ctx context.Context, height uint64) (*types.SignedHeader, error) { // Try cache first @@ -162,7 +239,7 @@ func (cs *CachedStore) Rollback(ctx context.Context, height uint64, aggregator b } // PruneBlocks wraps the underlying store's PruneBlocks and invalidates caches -// up to the heigh that we purne +// up to the height that we prune func (cs *CachedStore) PruneBlocks(ctx context.Context, height uint64) error { if err := cs.Store.PruneBlocks(ctx, height); err != nil { return err @@ -173,8 +250,42 @@ func (cs *CachedStore) PruneBlocks(ctx context.Context, height uint64) error { return nil } -// Close closes the underlying store. +// SetMetadata queues an asynchronous metadata write. The write is persisted +// by the background goroutine via BatchMetadata. If the store has been stopped, +// the write falls back to synchronous execution on the underlying store. +func (cs *CachedStore) SetMetadata(ctx context.Context, key string, value []byte) error { + cs.stopMu.RLock() + defer cs.stopMu.RUnlock() + + if cs.stopped { + return cs.Store.SetMetadata(ctx, key, value) + } + valueCopy := append([]byte(nil), value...) + cs.writeCh <- asyncWriteOp{key: key, value: valueCopy} + return nil +} + +// DeleteMetadata queues an asynchronous metadata delete. If the store has been +// stopped, the delete falls back to synchronous execution. +func (cs *CachedStore) DeleteMetadata(ctx context.Context, key string) error { + cs.stopMu.RLock() + defer cs.stopMu.RUnlock() + + if cs.stopped { + return cs.Store.DeleteMetadata(ctx, key) + } + cs.writeCh <- asyncWriteOp{key: key, isDelete: true} + return nil +} + +// Close drains pending async writes, then closes the underlying store. 
func (cs *CachedStore) Close() error { + cs.stopMu.Lock() + cs.stopped = true + close(cs.writeCh) + cs.stopMu.Unlock() + <-cs.done + cs.ClearCache() return cs.Store.Close() } diff --git a/pkg/store/cached_store_test.go b/pkg/store/cached_store_test.go index 2579eb48a6..432d90f035 100644 --- a/pkg/store/cached_store_test.go +++ b/pkg/store/cached_store_test.go @@ -2,8 +2,12 @@ package store import ( "context" + "fmt" "testing" + "time" + ds "github.com/ipfs/go-datastore" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -270,3 +274,129 @@ func TestCachedStore_Close(t *testing.T) { err = cachedStore.Close() require.NoError(t, err) } + +func TestCachedStore_AsyncSetMetadata(t *testing.T) { + t.Parallel() + ctx := context.Background() + + kv, err := NewTestInMemoryKVStore() + require.NoError(t, err) + + base := New(kv) + cs, err := NewCachedStore(base) + require.NoError(t, err) + t.Cleanup(func() { cs.Close() }) + + require.NoError(t, cs.SetMetadata(ctx, "key1", []byte("value1"))) + + require.Eventually(t, func() bool { + v, err := base.GetMetadata(ctx, "key1") + return err == nil && string(v) == "value1" + }, time.Second, 10*time.Millisecond) +} + +func TestCachedStore_AsyncDeleteMetadata(t *testing.T) { + t.Parallel() + ctx := context.Background() + + kv, err := NewTestInMemoryKVStore() + require.NoError(t, err) + + base := New(kv) + require.NoError(t, base.SetMetadata(ctx, "key1", []byte("value1"))) + + cs, err := NewCachedStore(base) + require.NoError(t, err) + t.Cleanup(func() { cs.Close() }) + + require.NoError(t, cs.DeleteMetadata(ctx, "key1")) + + require.Eventually(t, func() bool { + _, err := base.GetMetadata(ctx, "key1") + return err != nil + }, time.Second, 10*time.Millisecond) +} + +func TestCachedStore_Close_FlushesPendingWrites(t *testing.T) { + ctx := context.Background() + + dir := t.TempDir() + kv, err := NewDefaultKVStore(dir, "", "test-db") + require.NoError(t, err) + + base := New(kv) + cs, err 
:= NewCachedStore(base) + require.NoError(t, err) + + const n = 100 + for i := range n { + k := fmt.Sprintf("key-%d", i) + require.NoError(t, cs.SetMetadata(ctx, k, []byte(k))) + } + + require.NoError(t, cs.Close()) + + kv2, err := NewDefaultKVStore(dir, "", "test-db") + require.NoError(t, err) + t.Cleanup(func() { kv2.Close() }) + reopened := New(kv2) + + for i := range n { + k := fmt.Sprintf("key-%d", i) + v, err := reopened.GetMetadata(ctx, k) + require.NoError(t, err) + require.Equal(t, []byte(k), v) + } +} + +func TestCachedStore_WriteAfterClose_FallsBack(t *testing.T) { + kv, err := NewTestInMemoryKVStore() + require.NoError(t, err) + + base := New(kv) + cs, err := NewCachedStore(base) + require.NoError(t, err) + + ctx := context.Background() + require.NoError(t, cs.SetMetadata(ctx, "before", []byte("ok"))) + + require.NoError(t, cs.Close()) + + err = cs.SetMetadata(ctx, "after", []byte("sync")) + require.Error(t, err) +} + +func TestCachedStore_CoalescesSameKeyOps(t *testing.T) { + ctx := context.Background() + + kv, err := NewTestInMemoryKVStore() + require.NoError(t, err) + + require.NoError(t, kv.Put(ctx, ds.NewKey(GetMetaKey("k")), []byte("original"))) + + base := New(kv) + + writeCh := make(chan asyncWriteOp, asyncWriteBufferSize) + done := make(chan struct{}) + cs := &CachedStore{ + Store: base, + writeCh: writeCh, + done: done, + logger: zerolog.Nop(), + } + cs.startWriteLoop() + + require.NoError(t, cs.SetMetadata(ctx, "k", []byte("v1"))) + require.NoError(t, cs.DeleteMetadata(ctx, "k")) + require.NoError(t, cs.SetMetadata(ctx, "k", []byte("v2"))) + + cs.stopMu.Lock() + cs.stopped = true + close(writeCh) + cs.stopMu.Unlock() + <-done + + v, err := base.GetMetadata(ctx, "k") + require.NoError(t, err) + require.Equal(t, []byte("v2"), v, "last write (Set) should win over delete") +} diff --git a/pkg/store/store.go b/pkg/store/store.go index 975db4e163..4c045d1d89 100644 --- a/pkg/store/store.go +++ b/pkg/store/store.go @@ -190,6 +190,30 @@ func (s 
*DefaultStore) SetMetadata(ctx context.Context, key string, value []byte return nil } +func (s *DefaultStore) BatchMetadata(ctx context.Context, puts []MetadataKV, deletes []string) error { + if len(puts) == 0 && len(deletes) == 0 { + return nil + } + batch, err := s.db.Batch(ctx) + if err != nil { + return fmt.Errorf("failed to create metadata batch: %w", err) + } + for _, kv := range puts { + if err := batch.Put(ctx, ds.NewKey(GetMetaKey(kv.Key)), kv.Value); err != nil { + return fmt.Errorf("failed to batch-put metadata key '%s': %w", kv.Key, err) + } + } + for _, key := range deletes { + if err := batch.Delete(ctx, ds.NewKey(GetMetaKey(key))); err != nil { + return fmt.Errorf("failed to batch-delete metadata key '%s': %w", key, err) + } + } + if err := batch.Commit(ctx); err != nil { + return fmt.Errorf("failed to commit metadata batch: %w", err) + } + return nil +} + // GetMetadata returns values stored for given key with SetMetadata. func (s *DefaultStore) GetMetadata(ctx context.Context, key string) ([]byte, error) { data, err := s.db.Get(ctx, ds.NewKey(GetMetaKey(key))) diff --git a/pkg/store/tracing.go b/pkg/store/tracing.go index 259c6cb600..e94de6dad1 100644 --- a/pkg/store/tracing.go +++ b/pkg/store/tracing.go @@ -211,6 +211,25 @@ func (t *tracedStore) DeleteMetadata(ctx context.Context, key string) error { return nil } +func (t *tracedStore) BatchMetadata(ctx context.Context, puts []MetadataKV, deletes []string) error { + ctx, span := t.tracer.Start(ctx, "Store.BatchMetadata", + trace.WithAttributes( + attribute.Int("puts", len(puts)), + attribute.Int("deletes", len(deletes)), + ), + ) + defer span.End() + + err := t.inner.BatchMetadata(ctx, puts, deletes) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return err + } + + return nil +} + func (t *tracedStore) DeleteStateAtHeight(ctx context.Context, height uint64) error { ctx, span := t.tracer.Start(ctx, "Store.DeleteStateAtHeight", 
trace.WithAttributes(attribute.Int64("height", int64(height))), diff --git a/pkg/store/tracing_test.go b/pkg/store/tracing_test.go index 3ae8d8902e..66211a2624 100644 --- a/pkg/store/tracing_test.go +++ b/pkg/store/tracing_test.go @@ -28,6 +28,7 @@ type tracingMockStore struct { getMetadataFn func(ctx context.Context, key string) ([]byte, error) setMetadataFn func(ctx context.Context, key string, value []byte) error + batchMetadataFn func(ctx context.Context, puts []MetadataKV, deletes []string) error deleteMetadataFn func(ctx context.Context, key string) error rollbackFn func(ctx context.Context, height uint64, aggregator bool) error pruneBlocksFn func(ctx context.Context, height uint64) error @@ -105,6 +106,13 @@ func (m *tracingMockStore) SetMetadata(ctx context.Context, key string, value [] return nil } +func (m *tracingMockStore) BatchMetadata(ctx context.Context, puts []MetadataKV, deletes []string) error { + if m.batchMetadataFn != nil { + return m.batchMetadataFn(ctx, puts, deletes) + } + return nil +} + func (m *tracingMockStore) DeleteMetadata(ctx context.Context, key string) error { if m.deleteMetadataFn != nil { return m.deleteMetadataFn(ctx, key) diff --git a/pkg/store/types.go b/pkg/store/types.go index b1b1f2bd5e..d37b5debf9 100644 --- a/pkg/store/types.go +++ b/pkg/store/types.go @@ -54,6 +54,12 @@ type Store interface { NewBatch(ctx context.Context) (Batch, error) } +// MetadataKV is a key-value pair for batched metadata operations. +type MetadataKV struct { + Key string + Value []byte +} + type Metadata interface { // SetMetadata saves arbitrary value in the store. // @@ -62,6 +68,10 @@ type Metadata interface { // DeleteMetadata removes a metadata key from the store. DeleteMetadata(ctx context.Context, key string) error + + // BatchMetadata writes and deletes metadata keys in a single Badger + // WriteBatch transaction, reducing contention on the write pipeline. 
+ BatchMetadata(ctx context.Context, puts []MetadataKV, deletes []string) error } type Reader interface { diff --git a/test/docker-e2e/go.mod b/test/docker-e2e/go.mod index dd754f337f..80078d8095 100644 --- a/test/docker-e2e/go.mod +++ b/test/docker-e2e/go.mod @@ -4,15 +4,15 @@ go 1.25.7 require ( cosmossdk.io/math v1.5.3 - github.com/celestiaorg/tastora v0.17.0 + github.com/celestiaorg/tastora v0.19.0 github.com/ethereum/go-ethereum v1.17.2 github.com/evstack/ev-node/execution/evm v1.0.1 github.com/libp2p/go-libp2p v0.48.0 + github.com/moby/moby/client v0.4.0 github.com/stretchr/testify v1.11.1 ) require ( - github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 // indirect @@ -26,11 +26,12 @@ require ( github.com/cloudwego/base64x v0.1.6 // indirect github.com/consensys/gnark-crypto v0.18.2 // indirect github.com/containerd/continuity v0.4.5 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/crate-crypto/go-eth-kzg v1.5.0 // indirect github.com/deckarep/golang-set/v2 v2.6.0 // indirect github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect github.com/ethereum/c-kzg-4844/v2 v2.1.6 // indirect - github.com/evstack/ev-node v1.1.0 // indirect + github.com/evstack/ev-node v1.1.1 // indirect github.com/evstack/ev-node/core v1.0.0 // indirect github.com/ferranbt/fastssz v0.1.4 // indirect github.com/go-ole/go-ole v1.3.0 // indirect @@ -48,8 +49,7 @@ require ( github.com/magiconair/properties v1.8.10 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/sys/sequential v0.6.0 // indirect - github.com/morikuni/aec v1.1.0 // indirect + github.com/moby/moby/api v1.54.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect 
github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect @@ -82,7 +82,7 @@ require ( cosmossdk.io/log v1.6.0 // indirect cosmossdk.io/store v1.1.2 // indirect cosmossdk.io/x/tx v0.13.8 // indirect - filippo.io/edwards25519 v1.1.0 // indirect + filippo.io/edwards25519 v1.1.1 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect github.com/BurntSushi/toml v1.5.0 // indirect @@ -106,7 +106,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 // indirect - github.com/aws/smithy-go v1.25.0 // indirect + github.com/aws/smithy-go v1.25.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.2.0 // indirect github.com/celestiaorg/nmt v0.24.3 // indirect @@ -137,7 +137,6 @@ require ( github.com/desertbit/timer v1.0.1 // indirect github.com/dgraph-io/badger/v4 v4.5.1 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v28.5.2+incompatible github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -196,7 +195,6 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/minio/highwayhash v1.0.3 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/moby v28.3.3+incompatible // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect @@ -214,7 +212,7 @@ require ( github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/rs/cors v1.11.1 // indirect - 
github.com/rs/zerolog v1.35.0 // indirect + github.com/rs/zerolog v1.35.1 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sasha-s/go-deadlock v0.3.5 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect @@ -268,5 +266,4 @@ replace ( github.com/cometbft/cometbft => github.com/celestiaorg/celestia-core v0.39.4 github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v0.51.2 github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2 - github.com/moby/moby => github.com/moby/moby v27.5.1+incompatible ) diff --git a/test/docker-e2e/go.sum b/test/docker-e2e/go.sum index 4cc1b63de8..4fd8c768e4 100644 --- a/test/docker-e2e/go.sum +++ b/test/docker-e2e/go.sum @@ -9,12 +9,12 @@ cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3R cloud.google.com/go/compute v1.54.0 h1:4CKmnpO+40z44bKG5bdcKxQ7ocNpRtOc9SCLLUzze1w= cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= -cloud.google.com/go/iam v1.6.0 h1:JiSIcEi38dWBKhB3BtfKCW+dMvCZJEhBA2BsaGJgoxs= -cloud.google.com/go/iam v1.6.0/go.mod h1:ZS6zEy7QHmcNO18mjO2viYv/n+wOUkhJqGNkPPGueGU= -cloud.google.com/go/kms v1.27.0 h1:iYYgoD0HJIqz35A+He1G0dS5qTQzQsDXFsyXwzkUCXM= -cloud.google.com/go/kms v1.27.0/go.mod h1:KPxrdf61iYEOZ86uPwR86muBpSik2y4Ion6e83fVl1Q= -cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8= -cloud.google.com/go/longrunning v0.8.0/go.mod h1:UmErU2Onzi+fKDg2gR7dusz11Pe26aknR4kHmJJqIfk= +cloud.google.com/go/iam v1.7.0 h1:JD3zh0C6LHl16aCn5Akff0+GELdp1+4hmh6ndoFLl8U= +cloud.google.com/go/iam v1.7.0/go.mod h1:tetWZW1PD/m6vcuY2Zj/aU0eCHNPuxedbnbRTyKXvdY= +cloud.google.com/go/kms v1.29.0 h1:bAW1C5FQf+6GhPkywQzPlsULALCG7c16qpXLFGV9ivY= +cloud.google.com/go/kms v1.29.0/go.mod h1:YIyXZym11R5uovJJt4oN5eUL3oPmirF3yKeIh6QAf4U= +cloud.google.com/go/longrunning 
v0.9.0 h1:0EzbDEGsAvOZNbqXopgniY0w0a1phvu5IdUFq8grmqY= +cloud.google.com/go/longrunning v0.9.0/go.mod h1:pkTz846W7bF4o2SzdWJ40Hu0Re+UoNT6Q5t+igIcb8E= cosmossdk.io/api v0.7.6 h1:PC20PcXy1xYKH2KU4RMurVoFjjKkCgYRbVAD4PdqUuY= cosmossdk.io/api v0.7.6/go.mod h1:IcxpYS5fMemZGqyYtErK7OqvdM0C8kdW3dq8Q/XIG38= cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= @@ -36,8 +36,8 @@ cosmossdk.io/x/tx v0.13.8/go.mod h1:V6DImnwJMTq5qFjeGWpXNiT/fjgE4HtmclRmTqRVM3w= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5 h1:JA0fFr+kxpqTdxR9LOBiTWpGNchqmkcsgmdeJZRclZ0= filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5/go.mod h1:OjOXDNlClLblvXdwgFFOQFJEocLhhtai8vGLy0JCZlI= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +filippo.io/edwards25519 v1.1.1 h1:YpjwWWlNmGIDyXOn8zLzqiD+9TyIlPhGFG96P39uBpw= +filippo.io/edwards25519 v1.1.1/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b h1:REI1FbdW71yO56Are4XAxD+OS/e+BQsB3gE4mZRQEXY= filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b/go.mod h1:9nnw1SlYHYuPSo/3wjQzNjSbeHlq2NsKo5iEtfJPWP0= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= @@ -115,8 +115,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 h1:PUmZeJU6 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22/go.mod h1:nO6egFBoAaoXze24a2C0NjQCvdpk8OueRoYimvEB9jo= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21 h1:ZlvrNcHSFFWURB8avufQq9gFsheUgjVD9536obIknfM= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21/go.mod h1:cv3TNhVrssKR0O/xxLJVRfd2oazSnZnkUeTf6ctUwfQ= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.4 
h1:PgD1y0ZagPokGIZPmejCBUySBzOFDN+leZxCOfb1OEQ= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.4/go.mod h1:FfXDb5nXrsoGgxsBFxwxr3vdHXheC2tV+6lmuLghhjQ= +github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 h1:nEzwx/ZlpUZ2Y6WztsgYmfBh5Ixd3QiECawXMzvTMeo= +github.com/aws/aws-sdk-go-v2/service/kms v1.50.5/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= github.com/aws/aws-sdk-go-v2/service/s3 v1.99.0 h1:hlSuz394kV0vhv9drL5lhuEFbEOEP1VyQpy15qWh1Pk= github.com/aws/aws-sdk-go-v2/service/s3 v1.99.0/go.mod h1:uoA43SdFwacedBfSgfFSjjCvYe8aYBS7EnU5GZ/YKMM= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 h1:a1Fq/KXn75wSzoJaPQTgZO0wHGqE9mjFnylnqEPTchA= @@ -127,8 +127,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 h1:oK/njaL8GtyEihkWMD4k3Vg github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20/go.mod h1:JHs8/y1f3zY7U5WcuzoJ/yAYGYtNIVPKLIbp61euvmg= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 h1:ks8KBcZPh3PYISr5dAiXCM5/Thcuxk8l+PG4+A0exds= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0/go.mod h1:pFw33T0WLvXU3rw1WBkpMlkgIn54eCB5FYLhjDc9Foo= -github.com/aws/smithy-go v1.25.0 h1:Sz/XJ64rwuiKtB6j98nDIPyYrV1nVNJ4YU74gttcl5U= -github.com/aws/smithy-go v1.25.0/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/aws/smithy-go v1.25.1 h1:J8ERsGSU7d+aCmdQur5Txg6bVoYelvQJgtZehD12GkI= +github.com/aws/smithy-go v1.25.1/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/bcp-innovations/hyperlane-cosmos v1.0.1 h1:gT8OqyJ866Q6AHOlIXKxSdLjd0p8crKG9XXERIWoh4c= github.com/bcp-innovations/hyperlane-cosmos v1.0.1/go.mod h1:3yfa0io5Ii6GmhHWsWl2LEAOEHsqWuMgw2R02+LPogw= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -168,8 +168,8 @@ github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY7 github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= github.com/celestiaorg/nmt v0.24.3 h1:ylQnRlXkVoTtq36CxtCyXYZX4JISBsHgKlAAUAnf7ig= 
github.com/celestiaorg/nmt v0.24.3/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= -github.com/celestiaorg/tastora v0.17.0 h1:pqhAkR9lmaAG5OXI0cs2Al2LraAtXyT3Ku34tqamEvE= -github.com/celestiaorg/tastora v0.17.0/go.mod h1:C867PBm6Ne6e/1JlmsRqcLeJ6RHAuMoMRCvwJzV/q8g= +github.com/celestiaorg/tastora v0.19.0 h1:m5MiDxYqlEdeY2i+vIpPZzjn5iA7U0JUM1PxE7Vs/UA= +github.com/celestiaorg/tastora v0.19.0/go.mod h1:uhEz7v8YJmJuVgsJaCe0M0Q/HJiQAQNMu3w/OtmFIQY= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= @@ -225,8 +225,8 @@ github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= -github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= -github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -288,8 +288,6 @@ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WA github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod 
h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= -github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -321,8 +319,8 @@ github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJ github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= github.com/ethereum/go-ethereum v1.17.2 h1:ag6geu0kn8Hv5FLKTpH+Hm2DHD+iuFtuqKxEuwUsDOI= github.com/ethereum/go-ethereum v1.17.2/go.mod h1:KHcRXfGOUfUmKg51IhQ0IowiqZ6PqZf08CMtk0g5K1o= -github.com/evstack/ev-node v1.1.0 h1:UGupPg6DwylhI+P1UOdRlTdZynCe1qdMtZXsXgMq+4A= -github.com/evstack/ev-node v1.1.0/go.mod h1:5lIACV0hQGO5Btdb1b3fSw2Vz7Jvrg2yvMefalfWguA= +github.com/evstack/ev-node v1.1.1 h1:J9h5PKx177XdvNWLZCDOkWJEGRIrPzYxkCFhbGkVUm8= +github.com/evstack/ev-node v1.1.1/go.mod h1:/d/i+SSTDFnxffoijcrwmlt0LgfUU8D4S3HQqucwtu8= github.com/evstack/ev-node/core v1.0.0 h1:s0Tx0uWHme7SJn/ZNEtee4qNM8UO6PIxXnHhPbbKTz8= github.com/evstack/ev-node/core v1.0.0/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/evstack/ev-node/execution/evm v1.0.1 h1:cpFCcrAajTRoptAaSbaqkU6bIRc7hnmXK346zwqk/Lo= @@ -480,8 +478,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.14 
h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8= github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= -github.com/googleapis/gax-go/v2 v2.20.0 h1:NIKVuLhDlIV74muWlsMM4CcQZqN6JJ20Qcxd9YMuYcs= -github.com/googleapis/gax-go/v2 v2.20.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4= +github.com/googleapis/gax-go/v2 v2.21.0 h1:h45NjjzEO3faG9Lg/cFrBh2PgegVVgzqKzuZl/wMbiI= +github.com/googleapis/gax-go/v2 v2.21.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= @@ -650,8 +648,8 @@ github.com/libp2p/go-libp2p v0.48.0 h1:h2BrLAgrj7X8bEN05K7qmrjpNHYA+6tnsGRdprjTn github.com/libp2p/go-libp2p v0.48.0/go.mod h1:Q1fBZNdmC2Hf82husCTfkKJVfHm2we5zk+NWmOGEmWk= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= -github.com/libp2p/go-libp2p-kad-dht v0.39.0 h1:mww38eBYiUvdsu+Xl/GLlBC0Aa8M+5HAwvafkFOygAM= -github.com/libp2p/go-libp2p-kad-dht v0.39.0/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= +github.com/libp2p/go-libp2p-kad-dht v0.39.1 h1:9RzUBc7zywT4ZNamRSgEvPZmVlK3Y6xdlCYfXXvlR/Q= +github.com/libp2p/go-libp2p-kad-dht v0.39.1/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= @@ -714,20 +712,14 @@ github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjU 
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/moby v27.5.1+incompatible h1:/pN59F/t3U7Q4FPzV88nzqf7Fp0qqCSL2KzhZaiKcKw= -github.com/moby/moby v27.5.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= -github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= -github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= -github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= -github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= -github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/moby/moby/api v1.54.1 h1:TqVzuJkOLsgLDDwNLmYqACUuTehOHRGKiPhvH8V3Nn4= +github.com/moby/moby/api v1.54.1/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs= +github.com/moby/moby/client v0.4.0 h1:S+2XegzHQrrvTCvF6s5HFzcrywWQmuVnhOXe2kiWjIw= +github.com/moby/moby/client v0.4.0/go.mod h1:QWPbvWchQbxBNdaLSpoKpCdf5E+WxFAgNHogCWDoa7g= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ= -github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw= github.com/mr-tron/base58 
v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= @@ -932,8 +924,8 @@ github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/zerolog v1.35.0 h1:VD0ykx7HMiMJytqINBsKcbLS+BJ4WYjz+05us+LRTdI= -github.com/rs/zerolog v1.35.0/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= +github.com/rs/zerolog v1.35.1 h1:m7xQeoiLIiV0BCEY4Hs+j2NG4Gp2o2KPKmhnnLiazKI= +github.com/rs/zerolog v1.35.1/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -1230,7 +1222,6 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1295,8 +1286,8 @@ golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNq gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.273.1 h1:L7G/TmpAMz0nKx/ciAVssVmWQiOF6+pOuXeKrWVsquY= -google.golang.org/api v0.273.1/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= +google.golang.org/api v0.274.0 h1:aYhycS5QQCwxHLwfEHRRLf9yNsfvp1JadKKWBE54RFA= +google.golang.org/api v0.274.0/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/test/docker-e2e/resiliency_test.go b/test/docker-e2e/resiliency_test.go index a2b95c6e73..c1744259fa 100644 --- a/test/docker-e2e/resiliency_test.go +++ b/test/docker-e2e/resiliency_test.go @@ -13,7 +13,7 @@ import ( da "github.com/celestiaorg/tastora/framework/docker/dataavailability" "github.com/celestiaorg/tastora/framework/docker/evstack" tastoratypes "github.com/celestiaorg/tastora/framework/types" - "github.com/docker/docker/api/types/container" + mobyclient "github.com/moby/moby/client" ) // TestEvNodeRestart tests the ability to stop and restart an EV node, @@ -889,27 +889,27 @@ func (s *DockerTestSuite) monitorDARecoveryInLogs(ctx context.Context, node *evs // execCommandInContainer executes a command inside a Docker container func (s *DockerTestSuite) execCommandInContainer(ctx context.Context, containerID string, cmd []string) ([]byte, error) { // Create exec configuration - execConfig := container.ExecOptions{ + execConfig := mobyclient.ExecCreateOptions{ Cmd: 
cmd, AttachStdout: true, AttachStderr: true, } // Create the exec instance - exec, err := s.dockerClient.ContainerExecCreate(ctx, containerID, execConfig) + exec, err := s.dockerClient.ExecCreate(ctx, containerID, execConfig) if err != nil { return nil, fmt.Errorf("failed to create exec: %w", err) } // Attach to exec to get output - resp, err := s.dockerClient.ContainerExecAttach(ctx, exec.ID, container.ExecStartOptions{}) + resp, err := s.dockerClient.ExecAttach(ctx, exec.ID, mobyclient.ExecAttachOptions{}) if err != nil { return nil, fmt.Errorf("failed to attach to exec: %w", err) } defer resp.Close() // Start the exec - err = s.dockerClient.ContainerExecStart(ctx, exec.ID, container.ExecStartOptions{}) + _, err = s.dockerClient.ExecStart(ctx, exec.ID, mobyclient.ExecStartOptions{}) if err != nil { return nil, fmt.Errorf("failed to start exec: %w", err) } @@ -926,7 +926,7 @@ func (s *DockerTestSuite) execCommandInContainer(ctx context.Context, containerI // getContainerLogs retrieves logs from a Docker container func (s *DockerTestSuite) getContainerLogs(ctx context.Context, containerID string) (string, error) { // Get container logs - containerLogs, err := s.dockerClient.ContainerLogs(ctx, containerID, container.LogsOptions{ + containerLogs, err := s.dockerClient.ContainerLogs(ctx, containerID, mobyclient.ContainerLogsOptions{ ShowStdout: true, ShowStderr: true, Tail: "100", // Get last 100 lines diff --git a/test/e2e/benchmark/spamoor_node.go b/test/e2e/benchmark/spamoor_node.go new file mode 100644 index 0000000000..9ddc2e9778 --- /dev/null +++ b/test/e2e/benchmark/spamoor_node.go @@ -0,0 +1,368 @@ +//go:build evm + +package benchmark + +import ( + "context" + "fmt" + "net/http" + "net/netip" + "strings" + "sync" + "time" + + "github.com/celestiaorg/tastora/framework/docker/consts" + tastoracontainer "github.com/celestiaorg/tastora/framework/docker/container" + "github.com/celestiaorg/tastora/framework/docker/evstack/spamoor" + 
"github.com/celestiaorg/tastora/framework/docker/port" + tastoratypes "github.com/celestiaorg/tastora/framework/types" + "github.com/containerd/errdefs" + mobycontainer "github.com/moby/moby/api/types/container" + mobynetwork "github.com/moby/moby/api/types/network" + mobyclient "github.com/moby/moby/client" + "go.uber.org/zap" +) + +type spamoorBenchmarkNodeType int + +func (spamoorBenchmarkNodeType) String() string { return "spamoor" } + +const defaultSpamoorHTTPPort = "8080" + +type spamoorBenchmarkConfig struct { + DockerClient tastoratypes.TastoraDockerClient + DockerNetworkID string + Logger *zap.Logger + Image tastoracontainer.Image + + RPCHosts []string + PrivateKey string + AdditionalStartArgs []string + HostNetwork bool +} + +type spamoorBenchmarkBuilder struct { + testName string + dockerClient tastoratypes.TastoraDockerClient + dockerNetwork string + logger *zap.Logger + image tastoracontainer.Image + + rpcHosts []string + privateKey string + nameSuffix string + additionalStartArgs []string + hostNetwork bool +} + +func newSpamoorNodeBuilder(testName string) *spamoorBenchmarkBuilder { + return &spamoorBenchmarkBuilder{ + testName: testName, + image: tastoracontainer.NewImage("ethpandaops/spamoor", "latest", ""), + } +} + +func (b *spamoorBenchmarkBuilder) WithDockerClient(c tastoratypes.TastoraDockerClient) *spamoorBenchmarkBuilder { + b.dockerClient = c + return b +} + +func (b *spamoorBenchmarkBuilder) WithDockerNetworkID(id string) *spamoorBenchmarkBuilder { + b.dockerNetwork = id + return b +} + +func (b *spamoorBenchmarkBuilder) WithLogger(l *zap.Logger) *spamoorBenchmarkBuilder { + b.logger = l + return b +} + +func (b *spamoorBenchmarkBuilder) WithImage(img tastoracontainer.Image) *spamoorBenchmarkBuilder { + b.image = img + return b +} + +func (b *spamoorBenchmarkBuilder) WithRPCHosts(hosts ...string) *spamoorBenchmarkBuilder { + b.rpcHosts = hosts + return b +} + +func (b *spamoorBenchmarkBuilder) WithPrivateKey(pk string) 
*spamoorBenchmarkBuilder { + b.privateKey = pk + return b +} + +func (b *spamoorBenchmarkBuilder) WithNameSuffix(s string) *spamoorBenchmarkBuilder { + b.nameSuffix = s + return b +} + +func (b *spamoorBenchmarkBuilder) WithAdditionalStartArgs(args ...string) *spamoorBenchmarkBuilder { + b.additionalStartArgs = args + return b +} + +func (b *spamoorBenchmarkBuilder) WithHostNetwork() *spamoorBenchmarkBuilder { + b.hostNetwork = true + return b +} + +func (b *spamoorBenchmarkBuilder) Build(ctx context.Context) (*spamoorBenchmarkNode, error) { + cfg := spamoorBenchmarkConfig{ + DockerClient: b.dockerClient, + DockerNetworkID: b.dockerNetwork, + Logger: b.logger, + Image: b.image, + RPCHosts: b.rpcHosts, + PrivateKey: b.privateKey, + AdditionalStartArgs: b.additionalStartArgs, + HostNetwork: b.hostNetwork, + } + return newSpamoorBenchmarkNode(ctx, cfg, b.testName, 0, b.nameSuffix) +} + +type spamoorBenchmarkNode struct { + *tastoracontainer.Node + + cfg spamoorBenchmarkConfig + logger *zap.Logger + started bool + mu sync.Mutex + external tastoratypes.Ports + name string + containerID string + preStartListeners port.Listeners +} + +func newSpamoorBenchmarkNode(ctx context.Context, cfg spamoorBenchmarkConfig, testName string, index int, name string) (*spamoorBenchmarkNode, error) { + if cfg.Image.Repository == "" { + cfg.Image = tastoracontainer.NewImage("ethpandaops/spamoor", "latest", "") + } + if cfg.Logger == nil { + cfg.Logger = zap.NewNop() + } + + log := cfg.Logger.With(zap.String("component", "spamoor-daemon"), zap.Int("i", index)) + n := &spamoorBenchmarkNode{cfg: cfg, logger: log, name: name} + n.Node = tastoracontainer.NewNode(cfg.DockerNetworkID, cfg.DockerClient, testName, cfg.Image, "/home/spamoor", index, spamoorBenchmarkNodeType(0), log) + if err := n.CreateAndSetupVolume(ctx, n.Name()); err != nil { + return nil, err + } + return n, nil +} + +func (n *spamoorBenchmarkNode) Name() string { + if n.name != "" { + return fmt.Sprintf("spamoor-%s-%d-%s", 
n.name, n.Index, sanitizeDockerResourceName(n.TestName)) + } + return fmt.Sprintf("spamoor-%d-%s", n.Index, sanitizeDockerResourceName(n.TestName)) +} + +func (n *spamoorBenchmarkNode) HostName() string { + return condenseHostName(n.Name()) +} + +func (n *spamoorBenchmarkNode) GetNetworkInfo(ctx context.Context) (tastoratypes.NetworkInfo, error) { + internalIP, err := n.containerInternalIP(ctx) + if err != nil { + return tastoratypes.NetworkInfo{}, err + } + return tastoratypes.NetworkInfo{ + Internal: tastoratypes.Network{Hostname: n.HostName(), IP: internalIP, Ports: tastoratypes.Ports{HTTP: defaultSpamoorHTTPPort}}, + External: tastoratypes.Network{Hostname: "0.0.0.0", Ports: n.external}, + }, nil +} + +func (n *spamoorBenchmarkNode) Start(ctx context.Context) error { + n.mu.Lock() + defer n.mu.Unlock() + if n.started { + return n.startContainer(ctx) + } + if err := n.createContainer(ctx); err != nil { + return err + } + if err := n.startContainer(ctx); err != nil { + return err + } + + mapped := defaultSpamoorHTTPPort + if !n.cfg.HostNetwork { + hostPort, err := n.hostPort(ctx, defaultSpamoorHTTPPort+"/tcp") + if err != nil { + return err + } + mapped, err = extractPort(hostPort) + if err != nil { + return err + } + } + n.external = tastoratypes.Ports{HTTP: mapped} + n.started = true + waitHTTP("http://127.0.0.1:"+n.external.HTTP+"/metrics", 20*time.Second) + return nil +} + +func (n *spamoorBenchmarkNode) API() *spamoor.API { + return spamoor.NewAPI("http://127.0.0.1:" + n.external.HTTP) +} + +func (n *spamoorBenchmarkNode) Remove(ctx context.Context, opts ...tastoratypes.RemoveOption) error { + removeOpts := mobyclient.ContainerRemoveOptions{ + Force: true, + RemoveVolumes: true, + } + for _, opt := range opts { + opt(&removeOpts) + } + + if n.containerID != "" { + if err := n.stopContainer(ctx); err != nil { + return err + } + if _, err := n.DockerClient.ContainerRemove(ctx, n.containerID, removeOpts); err != nil && !errdefs.IsNotFound(err) { + return 
fmt.Errorf("remove container %s: %w", n.Name(), err) + } + } + if removeOpts.RemoveVolumes && n.VolumeName != "" { + if _, err := n.DockerClient.VolumeRemove(ctx, n.VolumeName, mobyclient.VolumeRemoveOptions{Force: true}); err != nil && !errdefs.IsNotFound(err) { + return fmt.Errorf("remove volume %s: %w", n.VolumeName, err) + } + } + return nil +} + +func (n *spamoorBenchmarkNode) createContainer(ctx context.Context) error { + imageRef := n.cfg.Image.Ref() + if err := n.cfg.Image.PullImage(ctx, n.DockerClient); err != nil { + return err + } + + cmd := []string{ + "--privkey", n.cfg.PrivateKey, + "--port", defaultSpamoorHTTPPort, + "--db", fmt.Sprintf("%s/%s", n.HomeDir(), "spamoor.db"), + } + for _, host := range n.cfg.RPCHosts { + if host = strings.TrimSpace(host); host != "" { + cmd = append(cmd, "--rpchost", host) + } + } + cmd = append(cmd, n.cfg.AdditionalStartArgs...) + + containerCfg := &mobycontainer.Config{ + Image: imageRef, + Entrypoint: []string{"/app/spamoor-daemon"}, + Cmd: cmd, + Hostname: n.HostName(), + Labels: map[string]string{consts.CleanupLabel: n.DockerClient.CleanupLabel()}, + } + hostCfg := &mobycontainer.HostConfig{ + Binds: n.Bind(), + AutoRemove: false, + DNS: []netip.Addr{}, + } + var netCfg *mobynetwork.NetworkingConfig + if n.cfg.HostNetwork { + hostCfg.NetworkMode = mobycontainer.NetworkMode(mobynetwork.NetworkHost) + } else { + ports := mobynetwork.PortMap{ + mobynetwork.MustParsePort(defaultSpamoorHTTPPort + "/tcp"): {}, + } + portBindings, listeners, err := port.GenerateBindings(ports) + if err != nil { + return fmt.Errorf("generate port bindings: %w", err) + } + n.preStartListeners = listeners + containerCfg.ExposedPorts = portSet(ports) + hostCfg.PortBindings = portBindings + hostCfg.PublishAllPorts = true + netCfg = &mobynetwork.NetworkingConfig{ + EndpointsConfig: map[string]*mobynetwork.EndpointSettings{ + n.NetworkID: &mobynetwork.EndpointSettings{}, + }, + } + } + + n.logger.Info("Will run spamoor daemon", 
zap.String("image", imageRef), zap.String("container", n.Name())) + created, err := n.DockerClient.ContainerCreate(ctx, mobyclient.ContainerCreateOptions{ + Name: n.Name(), + Config: containerCfg, + HostConfig: hostCfg, + NetworkingConfig: netCfg, + }) + if err != nil { + n.preStartListeners.CloseAll() + n.preStartListeners = nil + return err + } + n.containerID = created.ID + return nil +} + +func (n *spamoorBenchmarkNode) startContainer(ctx context.Context) error { + n.preStartListeners.CloseAll() + n.preStartListeners = nil + if _, err := n.DockerClient.ContainerStart(ctx, n.containerID, mobyclient.ContainerStartOptions{}); err != nil { + return err + } + n.logger.Info("Container started", zap.String("container", n.Name())) + return nil +} + +func (n *spamoorBenchmarkNode) stopContainer(ctx context.Context) error { + timeoutSec := 30 + if _, err := n.DockerClient.ContainerStop(ctx, n.containerID, mobyclient.ContainerStopOptions{Timeout: &timeoutSec}); err != nil && !errdefs.IsNotFound(err) && !errdefs.IsNotModified(err) { + return fmt.Errorf("stop container %s: %w", n.Name(), err) + } + return nil +} + +func (n *spamoorBenchmarkNode) hostPort(ctx context.Context, portID string) (string, error) { + inspectResult, err := n.DockerClient.ContainerInspect(ctx, n.containerID, mobyclient.ContainerInspectOptions{}) + if err != nil { + return "", err + } + return port.GetForHost(inspectResult.Container, portID), nil +} + +func (n *spamoorBenchmarkNode) containerInternalIP(ctx context.Context) (string, error) { + inspectResult, err := n.DockerClient.ContainerInspect(ctx, n.containerID, mobyclient.ContainerInspectOptions{}) + if err != nil { + return "", fmt.Errorf("inspecting container: %w", err) + } + inspect := inspectResult.Container + if inspect.NetworkSettings == nil || inspect.NetworkSettings.Networks == nil { + return "", nil + } + for _, network := range inspect.NetworkSettings.Networks { + if network.IPAddress.IsValid() { + return network.IPAddress.String(), nil 
+ } + } + return "", nil +} + +func portSet(ports mobynetwork.PortMap) mobynetwork.PortSet { + portSet := mobynetwork.PortSet{} + for port := range ports { + portSet[port] = struct{}{} + } + return portSet +} + +func waitHTTP(url string, timeout time.Duration) { + deadline := time.Now().Add(timeout) + client := &http.Client{Timeout: 500 * time.Millisecond} + for time.Now().Before(deadline) { + resp, err := client.Get(url) + if err == nil && resp.StatusCode >= 200 && resp.StatusCode < 500 { + _ = resp.Body.Close() + return + } + if resp != nil && resp.Body != nil { + _ = resp.Body.Close() + } + time.Sleep(100 * time.Millisecond) + } +} diff --git a/test/e2e/benchmark/suite_test.go b/test/e2e/benchmark/suite_test.go index b58c496acf..dec6810f9a 100644 --- a/test/e2e/benchmark/suite_test.go +++ b/test/e2e/benchmark/suite_test.go @@ -11,7 +11,6 @@ import ( tastoradocker "github.com/celestiaorg/tastora/framework/docker" "github.com/celestiaorg/tastora/framework/docker/evstack/reth" "github.com/celestiaorg/tastora/framework/docker/evstack/spamoor" - "github.com/celestiaorg/tastora/framework/docker/victoriatraces" "github.com/celestiaorg/tastora/framework/testutil/maps" tastoratypes "github.com/celestiaorg/tastora/framework/types" "github.com/ethereum/go-ethereum/ethclient" @@ -40,11 +39,11 @@ func (s *SpamoorSuite) SetupTest() { // env holds a fully-wired environment created by setupEnv. 
type env struct { - traces traceProvider - spamoorAPI *spamoor.API - ethClient *ethclient.Client - evNodeServiceName string - evRethServiceName string + traces traceProvider + spamoorAPI *spamoor.API + ethClient *ethclient.Client + evNodeServiceName string + evRethServiceName string } // TODO: temporary hardcoded tag, will be replaced with a proper release tag @@ -75,8 +74,8 @@ func (s *SpamoorSuite) setupLocalEnv(cfg benchConfig) *env { sut := e2e.NewSystemUnderTest(t) // victoriatraces - vtCfg := victoriatraces.Config{Logger: zaptest.NewLogger(t), DockerClient: s.dockerCli, DockerNetworkID: s.networkID} - vt, err := victoriatraces.New(ctx, vtCfg, t.Name(), 0) + vtCfg := victoriaTracesConfig{Logger: zaptest.NewLogger(t), DockerClient: s.dockerCli, DockerNetworkID: s.networkID} + vt, err := newVictoriaTracesNode(ctx, vtCfg, t.Name(), 0) s.Require().NoError(err, "failed to create victoriatraces node") t.Cleanup(func() { _ = vt.Remove(t.Context()) }) s.Require().NoError(vt.Start(ctx), "failed to start victoriatraces node") @@ -129,7 +128,7 @@ func (s *SpamoorSuite) setupLocalEnv(cfg benchConfig) *env { s.Require().NoError(err, "failed to get reth network info") internalRPC := "http://" + ni.Internal.RPCAddress() - spBuilder := spamoor.NewNodeBuilder(t.Name()). + spBuilder := newSpamoorNodeBuilder(t.Name()). WithDockerClient(evmEnv.RethNode.DockerClient). WithDockerNetworkID(evmEnv.RethNode.NetworkID). WithLogger(evmEnv.RethNode.Logger). @@ -179,7 +178,7 @@ func (s *SpamoorSuite) setupExternalEnv(cfg benchConfig, rpcURL string) *env { // spamoor — connects to the external RPC via host networking so it can // resolve the same hostnames as the host machine. - spBuilder := spamoor.NewNodeBuilder(t.Name()). + spBuilder := newSpamoorNodeBuilder(t.Name()). WithDockerClient(s.dockerCli). WithDockerNetworkID(s.networkID). WithLogger(zaptest.NewLogger(t)). 
@@ -252,4 +251,3 @@ func (s *SpamoorSuite) collectTraces(e *env) *traceResult { tr.displayFlowcharts(t, e.evNodeServiceName) return tr } - diff --git a/test/e2e/benchmark/victoriatraces_node.go b/test/e2e/benchmark/victoriatraces_node.go new file mode 100644 index 0000000000..cb5f1a5ed7 --- /dev/null +++ b/test/e2e/benchmark/victoriatraces_node.go @@ -0,0 +1,142 @@ +//go:build evm + +package benchmark + +import ( + "context" + "fmt" + "net" + "regexp" + "sync" + + "github.com/celestiaorg/tastora/framework/docker/container" + tastoratypes "github.com/celestiaorg/tastora/framework/types" + "github.com/moby/moby/api/types/network" + "go.uber.org/zap" +) + +type victoriaTracesNodeType int + +func (victoriaTracesNodeType) String() string { return "victoriatraces" } + +const defaultVictoriaTracesHTTPPort = "10428" + +type victoriaTracesConfig struct { + Logger *zap.Logger + DockerClient tastoratypes.TastoraDockerClient + DockerNetworkID string + Image container.Image +} + +type victoriaTracesNode struct { + *container.Node + + cfg victoriaTracesConfig + logger *zap.Logger + started bool + mu sync.Mutex + + internalHTTPPort string + externalHTTPPort string + + Internal victoriaTracesScope + External victoriaTracesScope +} + +func newVictoriaTracesNode(ctx context.Context, cfg victoriaTracesConfig, testName string, index int) (*victoriaTracesNode, error) { + img := cfg.Image + if img.Repository == "" { + img = container.NewImage("victoriametrics/victoria-traces", "latest", "") + } + + log := cfg.Logger.With(zap.String("component", "victoriatraces"), zap.Int("i", index)) + home := "/home/victoriatraces" + n := &victoriaTracesNode{cfg: cfg, logger: log} + n.Node = container.NewNode(cfg.DockerNetworkID, cfg.DockerClient, testName, img, home, index, victoriaTracesNodeType(0), log) + n.SetContainerLifecycle(container.NewLifecycle(cfg.Logger, cfg.DockerClient, n.Name())) + if err := n.CreateAndSetupVolume(ctx, n.Name()); err != nil { + return nil, err + } + n.Internal = 
victoriaTracesScope{hostname: func() string { return n.Name() }, port: &n.internalHTTPPort} + n.External = victoriaTracesScope{hostname: func() string { return "0.0.0.0" }, port: &n.externalHTTPPort} + return n, nil +} + +func (n *victoriaTracesNode) Name() string { + return fmt.Sprintf("victoriatraces-%d-%s", n.Index, sanitizeDockerResourceName(n.TestName)) +} + +func (n *victoriaTracesNode) HostName() string { + return condenseHostName(n.Name()) +} + +func (n *victoriaTracesNode) Start(ctx context.Context) error { + n.mu.Lock() + defer n.mu.Unlock() + if n.started { + return n.StartContainer(ctx) + } + if err := n.createContainer(ctx); err != nil { + return err + } + if err := n.ContainerLifecycle.StartContainer(ctx); err != nil { + return err + } + hostPorts, err := n.ContainerLifecycle.GetHostPorts(ctx, n.internalHTTPPort+"/tcp") + if err != nil { + return err + } + n.externalHTTPPort, err = extractPort(hostPorts[0]) + if err != nil { + return err + } + n.started = true + return nil +} + +func (n *victoriaTracesNode) createContainer(ctx context.Context) error { + if n.internalHTTPPort == "" { + n.internalHTTPPort = defaultVictoriaTracesHTTPPort + } + + ports := network.PortMap{ + network.MustParsePort(n.internalHTTPPort + "/tcp"): {}, + } + cmd := []string{"-storageDataPath", n.HomeDir() + "/data"} + return n.CreateContainer(ctx, n.TestName, n.NetworkID, n.Image, ports, "", n.Bind(), nil, n.HostName(), cmd, nil, nil) +} + +type victoriaTracesScope struct { + hostname func() string + port *string +} + +func (s victoriaTracesScope) IngestHTTPEndpoint() string { + return fmt.Sprintf("http://%s:%s/insert/opentelemetry/v1/traces", s.hostname(), *s.port) +} + +func (s victoriaTracesScope) OTLPBaseEndpoint() string { + return fmt.Sprintf("http://%s:%s/insert/opentelemetry", s.hostname(), *s.port) +} + +func (s victoriaTracesScope) QueryURL() string { + return fmt.Sprintf("http://%s:%s", s.hostname(), *s.port) +} + +func extractPort(address string) (string, error) { + 
_, port, err := net.SplitHostPort(address) + return port, err +} + +func condenseHostName(name string) string { + if len(name) < 64 { + return name + } + return name[:30] + "_._" + name[len(name)-30:] +} + +var validContainerCharsRE = regexp.MustCompile(`[^a-zA-Z0-9_.-]`) + +func sanitizeDockerResourceName(name string) string { + return validContainerCharsRE.ReplaceAllLiteralString(name, "_") +} diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 6d58d40d17..9b74fb6548 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -5,16 +5,19 @@ go 1.25.7 require ( cosmossdk.io/math v1.5.3 github.com/celestiaorg/go-square/v3 v3.0.2 - github.com/celestiaorg/tastora v0.16.1-0.20260312082036-2ee1b0a2ac4e + github.com/celestiaorg/tastora v0.19.0 + github.com/containerd/errdefs v1.0.0 github.com/cosmos/cosmos-sdk v0.53.6 github.com/cosmos/ibc-go/v8 v8.8.0 github.com/ethereum/go-ethereum v1.17.2 - github.com/evstack/ev-node v1.1.0 + github.com/evstack/ev-node v1.1.1 github.com/evstack/ev-node/execution/evm v0.0.0-20250602130019-2a732cf903a5 github.com/evstack/ev-node/execution/evm/test v0.0.0-00010101000000-000000000000 github.com/libp2p/go-libp2p v0.48.0 + github.com/moby/moby/api v1.54.1 + github.com/moby/moby/client v0.4.0 github.com/prometheus/client_model v0.6.2 - github.com/rs/zerolog v1.35.0 + github.com/rs/zerolog v1.35.1 github.com/stretchr/testify v1.11.1 go.opentelemetry.io/proto/otlp v1.10.0 go.uber.org/zap v1.27.1 @@ -30,7 +33,7 @@ replace ( require ( cloud.google.com/go v0.123.0 // indirect - cloud.google.com/go/auth v0.18.2 // indirect + cloud.google.com/go/auth v0.20.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/iam v1.7.0 // indirect @@ -69,12 +72,12 @@ require ( github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url 
v1.13.22 // indirect - github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 // indirect github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 // indirect - github.com/aws/smithy-go v1.25.0 // indirect + github.com/aws/smithy-go v1.25.1 // indirect github.com/bcp-innovations/hyperlane-cosmos v1.0.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -102,7 +105,7 @@ require ( github.com/cometbft/cometbft v0.38.21 // indirect github.com/cometbft/cometbft-db v1.0.4 // indirect github.com/consensys/gnark-crypto v0.18.2 // indirect - github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/cosmos/btcutil v1.0.5 // indirect github.com/cosmos/cosmos-db v1.1.3 // indirect github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect @@ -123,7 +126,6 @@ require ( github.com/dgraph-io/badger/v4 v4.5.1 // indirect github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v28.5.2+incompatible // indirect github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dunglas/httpsfv v1.1.0 // indirect @@ -211,7 +213,7 @@ require ( github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-kad-dht v0.39.1 // indirect github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect - github.com/libp2p/go-libp2p-pubsub v0.15.0 // indirect + github.com/libp2p/go-libp2p-pubsub v0.16.0 // indirect github.com/libp2p/go-libp2p-record v0.3.1 // indirect github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect @@ -229,8 +231,6 @@ require ( 
github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/moby v27.5.1+incompatible // indirect - github.com/morikuni/aec v1.1.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect @@ -308,8 +308,8 @@ require ( go.etcd.io/bbolt v1.4.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect go.opentelemetry.io/otel v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect @@ -337,7 +337,7 @@ require ( golang.org/x/tools v0.43.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.17.0 // indirect - google.golang.org/api v0.274.0 // indirect + google.golang.org/api v0.276.0 // indirect google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect diff --git a/test/e2e/go.sum b/test/e2e/go.sum index e3a706a171..a07f83ec77 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -22,8 +22,8 @@ cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.123.0 
h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= -cloud.google.com/go/auth v0.18.2 h1:+Nbt5Ev0xEqxlNjd6c+yYUeosQ5TtEUaNcN/3FozlaM= -cloud.google.com/go/auth v0.18.2/go.mod h1:xD+oY7gcahcu7G2SG2DsBerfFxgPAJz17zz2joOFF3M= +cloud.google.com/go/auth v0.20.0 h1:kXTssoVb4azsVDoUiF8KvxAqrsQcQtB53DcSgta74CA= +cloud.google.com/go/auth v0.20.0/go.mod h1:942/yi/itH1SsmpyrbnTMDgGfdy2BUqIKyd0cyYLc5Q= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -101,8 +101,8 @@ github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMb github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= @@ -175,8 +175,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 h1:HtOTYcb 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8/go.mod h1:VsK9abqQeGlzPgUr+isNWzPlK2vKe9INMLWnY65f5Xs= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 h1:PUmZeJU6Y1Lbvt9WFuJ0ugUK2xn6hIWUBBbKuOWF30s= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22/go.mod h1:nO6egFBoAaoXze24a2C0NjQCvdpk8OueRoYimvEB9jo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5 h1:nEzwx/ZlpUZ2Y6WztsgYmfBh5Ixd3QiECawXMzvTMeo= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.5/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 h1:696UM+NwOrETBCLQJyCAGtVmmZmziBT59yMwgg6Fvrw= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 h1:a1Fq/KXn75wSzoJaPQTgZO0wHGqE9mjFnylnqEPTchA= github.com/aws/aws-sdk-go-v2/service/signin v1.0.10/go.mod h1:p6+MXNxW7IA6dMgHfTAzljuwSKD0NCm/4lbS4t6+7vI= github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 h1:x6bKbmDhsgSZwv6q19wY/u3rLk/3FGjJWyqKcIRufpE= @@ -185,8 +185,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 h1:oK/njaL8GtyEihkWMD4k3Vg github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20/go.mod h1:JHs8/y1f3zY7U5WcuzoJ/yAYGYtNIVPKLIbp61euvmg= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 h1:ks8KBcZPh3PYISr5dAiXCM5/Thcuxk8l+PG4+A0exds= github.com/aws/aws-sdk-go-v2/service/sts v1.42.0/go.mod h1:pFw33T0WLvXU3rw1WBkpMlkgIn54eCB5FYLhjDc9Foo= -github.com/aws/smithy-go v1.25.0 h1:Sz/XJ64rwuiKtB6j98nDIPyYrV1nVNJ4YU74gttcl5U= -github.com/aws/smithy-go v1.25.0/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/aws/smithy-go v1.25.1 h1:J8ERsGSU7d+aCmdQur5Txg6bVoYelvQJgtZehD12GkI= +github.com/aws/smithy-go v1.25.1/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/bcp-innovations/hyperlane-cosmos v1.0.1 h1:gT8OqyJ866Q6AHOlIXKxSdLjd0p8crKG9XXERIWoh4c= github.com/bcp-innovations/hyperlane-cosmos v1.0.1/go.mod 
h1:3yfa0io5Ii6GmhHWsWl2LEAOEHsqWuMgw2R02+LPogw= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -231,8 +231,8 @@ github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY7 github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= github.com/celestiaorg/nmt v0.24.3 h1:ylQnRlXkVoTtq36CxtCyXYZX4JISBsHgKlAAUAnf7ig= github.com/celestiaorg/nmt v0.24.3/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= -github.com/celestiaorg/tastora v0.16.1-0.20260312082036-2ee1b0a2ac4e h1:B8VoB7COih6c8vSJR4MvREHIB7CrJhzVZoIJw8Ho6tY= -github.com/celestiaorg/tastora v0.16.1-0.20260312082036-2ee1b0a2ac4e/go.mod h1:C867PBm6Ne6e/1JlmsRqcLeJ6RHAuMoMRCvwJzV/q8g= +github.com/celestiaorg/tastora v0.19.0 h1:m5MiDxYqlEdeY2i+vIpPZzjn5iA7U0JUM1PxE7Vs/UA= +github.com/celestiaorg/tastora v0.19.0/go.mod h1:uhEz7v8YJmJuVgsJaCe0M0Q/HJiQAQNMu3w/OtmFIQY= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= @@ -297,8 +297,8 @@ github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= -github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= -github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/coreos/go-semver 
v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -367,8 +367,6 @@ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WA github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= -github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -808,8 +806,8 @@ github.com/libp2p/go-libp2p-kad-dht v0.39.1 h1:9RzUBc7zywT4ZNamRSgEvPZmVlK3Y6xdl github.com/libp2p/go-libp2p-kad-dht v0.39.1/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= -github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= -github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= +github.com/libp2p/go-libp2p-pubsub v0.16.0 h1:j7G2C8kJwkcAQqYR7Wmq3d75d3Sgw/N0Hhiv0dVx7OY= +github.com/libp2p/go-libp2p-pubsub v0.16.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= 
github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= @@ -881,21 +879,15 @@ github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjU github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/moby v27.5.1+incompatible h1:/pN59F/t3U7Q4FPzV88nzqf7Fp0qqCSL2KzhZaiKcKw= -github.com/moby/moby v27.5.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= -github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= -github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= -github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= -github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= -github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/moby/moby/api v1.54.1 h1:TqVzuJkOLsgLDDwNLmYqACUuTehOHRGKiPhvH8V3Nn4= +github.com/moby/moby/api v1.54.1/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs= +github.com/moby/moby/client v0.4.0 h1:S+2XegzHQrrvTCvF6s5HFzcrywWQmuVnhOXe2kiWjIw= +github.com/moby/moby/client v0.4.0/go.mod h1:QWPbvWchQbxBNdaLSpoKpCdf5E+WxFAgNHogCWDoa7g= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ= -github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -1113,8 +1105,8 @@ github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/zerolog v1.35.0 h1:VD0ykx7HMiMJytqINBsKcbLS+BJ4WYjz+05us+LRTdI= -github.com/rs/zerolog v1.35.0/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= +github.com/rs/zerolog v1.35.1 h1:m7xQeoiLIiV0BCEY4Hs+j2NG4Gp2o2KPKmhnnLiazKI= +github.com/rs/zerolog v1.35.1/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -1277,10 +1269,10 @@ go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk= 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= @@ -1701,8 +1693,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.274.0 h1:aYhycS5QQCwxHLwfEHRRLf9yNsfvp1JadKKWBE54RFA= -google.golang.org/api v0.274.0/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= +google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY= +google.golang.org/api v0.276.0/go.mod 
h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/test/mocks/store.go b/test/mocks/store.go index 911832ebab..75024718a5 100644 --- a/test/mocks/store.go +++ b/test/mocks/store.go @@ -39,6 +39,69 @@ func (_m *MockStore) EXPECT() *MockStore_Expecter { return &MockStore_Expecter{mock: &_m.Mock} } +// BatchMetadata provides a mock function for the type MockStore +func (_mock *MockStore) BatchMetadata(ctx context.Context, puts []store.MetadataKV, deletes []string) error { + ret := _mock.Called(ctx, puts, deletes) + + if len(ret) == 0 { + panic("no return value specified for BatchMetadata") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context, []store.MetadataKV, []string) error); ok { + r0 = returnFunc(ctx, puts, deletes) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockStore_BatchMetadata_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BatchMetadata' +type MockStore_BatchMetadata_Call struct { + *mock.Call +} + +// BatchMetadata is a helper method to define mock.On call +// - ctx context.Context +// - puts []store.MetadataKV +// - deletes []string +func (_e *MockStore_Expecter) BatchMetadata(ctx interface{}, puts interface{}, deletes interface{}) *MockStore_BatchMetadata_Call { + return &MockStore_BatchMetadata_Call{Call: _e.mock.On("BatchMetadata", ctx, puts, deletes)} +} + +func (_c *MockStore_BatchMetadata_Call) Run(run func(ctx context.Context, puts []store.MetadataKV, deletes []string)) *MockStore_BatchMetadata_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 []store.MetadataKV + if args[1] != nil { + arg1 = 
args[1].([]store.MetadataKV) + } + var arg2 []string + if args[2] != nil { + arg2 = args[2].([]string) + } + run( + arg0, + arg1, + arg2, + ) + }) + return _c +} + +func (_c *MockStore_BatchMetadata_Call) Return(err error) *MockStore_BatchMetadata_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockStore_BatchMetadata_Call) RunAndReturn(run func(ctx context.Context, puts []store.MetadataKV, deletes []string) error) *MockStore_BatchMetadata_Call { + _c.Call.Return(run) + return _c +} + // Close provides a mock function for the type MockStore func (_mock *MockStore) Close() error { ret := _mock.Called() diff --git a/types/pb/evnode/v1/evnode.pb.go b/types/pb/evnode/v1/evnode.pb.go index d98acd10a2..b0a866e76e 100644 --- a/types/pb/evnode/v1/evnode.pb.go +++ b/types/pb/evnode/v1/evnode.pb.go @@ -792,7 +792,7 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\x16evnode/v1/evnode.proto\x12\tevnode.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"1\n" + "\aVersion\x12\x14\n" + "\x05block\x18\x01 \x01(\x04R\x05block\x12\x10\n" + - "\x03app\x18\x02 \x01(\x04R\x03app\"\xc9\x02\n" + + "\x03app\x18\x02 \x01(\x04R\x03app\"\xc3\x02\n" + "\x06Header\x12,\n" + "\aversion\x18\x01 \x01(\v2\x12.evnode.v1.VersionR\aversion\x12\x16\n" + "\x06height\x18\x02 \x01(\x04R\x06height\x12\x12\n" + @@ -804,7 +804,7 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + " \x01(\fR\x0fproposerAddress\x12%\n" + "\x0evalidator_hash\x18\v \x01(\fR\rvalidatorHash\x12\x19\n" + "\bchain_id\x18\f \x01(\tR\achainIdJ\x04\b\x05\x10\x06J\x04\b\a\x10\bJ\x04\b\t\x10\n" + - "J\x04\b\r\x10\x0e\"\x88\x01\n" + + "\"\x88\x01\n" + "\fSignedHeader\x12)\n" + "\x06header\x18\x01 \x01(\v2\x11.evnode.v1.HeaderR\x06header\x12\x1c\n" + "\tsignature\x18\x02 \x01(\fR\tsignature\x12)\n" + diff --git a/types/state.go b/types/state.go index 10f11a51ae..a639bd09f6 100644 --- a/types/state.go +++ b/types/state.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "errors" "fmt" "sync" "time" @@ -16,6 +17,10 @@ var 
InitStateVersion = Version{ App: 0, } +// ErrUnexpectedProposer is returned when a block was signed by a proposer +// different from the proposer expected by the current state. +var ErrUnexpectedProposer = errors.New("unexpected proposer") + // State contains information about current state of the blockchain. type State struct { Version Version @@ -77,7 +82,7 @@ func (s State) AssertValidForNextState(header *SignedHeader, data *Data) error { return fmt.Errorf("header-data validation failed: %w", err) } if len(s.NextProposerAddress) > 0 && !bytes.Equal(header.ProposerAddress, s.NextProposerAddress) { - return fmt.Errorf("unexpected proposer - got: %x, want: %x", header.ProposerAddress, s.NextProposerAddress) + return fmt.Errorf("%w - got: %x, want: %x", ErrUnexpectedProposer, header.ProposerAddress, s.NextProposerAddress) } return nil } From e8ce3b915ed2f56b9e4e0545f058766c92099347 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 8 May 2026 09:55:11 +0200 Subject: [PATCH 13/13] fix merge --- .../types/src/proto/evnode.v1.messages.rs | 28 +++++++++++++------ .../types/src/proto/evnode.v1.services.rs | 28 +++++++++++++------ 2 files changed, 38 insertions(+), 18 deletions(-) diff --git a/client/crates/types/src/proto/evnode.v1.messages.rs b/client/crates/types/src/proto/evnode.v1.messages.rs index e6038f54ce..2e3d53e0af 100644 --- a/client/crates/types/src/proto/evnode.v1.messages.rs +++ b/client/crates/types/src/proto/evnode.v1.messages.rs @@ -303,19 +303,26 @@ pub struct InitChainResponse { /// Empty for now, may include filtering criteria in the future #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetTxsRequest {} +/// TxBatch stores ordered transactions in one contiguous bytes buffer. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct TxBatch { + /// Concatenated transaction bytes. + #[prost(bytes = "vec", tag = "1")] + pub data: ::prost::alloc::vec::Vec, + /// Byte length for each transaction in order. 
+ #[prost(uint32, repeated, tag = "2")] + pub tx_sizes: ::prost::alloc::vec::Vec, +} /// GetTxsResponse contains the available transactions #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetTxsResponse { - /// Slice of valid transactions from mempool - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// Valid transactions from mempool in contiguous batch form. + #[prost(message, optional, tag = "2")] + pub tx_batch: ::core::option::Option, } /// ExecuteTxsRequest contains transactions and block context for execution #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ExecuteTxsRequest { - /// Ordered list of transactions to execute - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, /// Height of block being created (must be > 0) #[prost(uint64, tag = "2")] pub block_height: u64, @@ -325,6 +332,9 @@ pub struct ExecuteTxsRequest { /// Previous block's state root hash #[prost(bytes = "vec", tag = "4")] pub prev_state_root: ::prost::alloc::vec::Vec, + /// Ordered transactions to execute in contiguous batch form. 
+ #[prost(message, optional, tag = "5")] + pub tx_batch: ::core::option::Option, } /// ExecuteTxsResponse contains the result of transaction execution #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] @@ -370,9 +380,6 @@ pub struct GetExecutionInfoResponse { /// FilterTxsRequest contains transactions to validate and filter #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct FilterTxsRequest { - /// All transactions (force-included + mempool) - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, /// Maximum cumulative size allowed (0 means no size limit) #[prost(uint64, tag = "2")] pub max_bytes: u64, @@ -382,6 +389,9 @@ pub struct FilterTxsRequest { /// Whether force-included transactions are present #[prost(bool, tag = "4")] pub has_force_included_transaction: bool, + /// All transactions (force-included + mempool) in contiguous batch form. + #[prost(message, optional, tag = "5")] + pub tx_batch: ::core::option::Option, } /// FilterTxsResponse contains the filter status for each transaction #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] diff --git a/client/crates/types/src/proto/evnode.v1.services.rs b/client/crates/types/src/proto/evnode.v1.services.rs index 013e96db37..c6fc86dd4a 100644 --- a/client/crates/types/src/proto/evnode.v1.services.rs +++ b/client/crates/types/src/proto/evnode.v1.services.rs @@ -1048,19 +1048,26 @@ pub struct InitChainResponse { /// Empty for now, may include filtering criteria in the future #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetTxsRequest {} +/// TxBatch stores ordered transactions in one contiguous bytes buffer. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct TxBatch { + /// Concatenated transaction bytes. + #[prost(bytes = "vec", tag = "1")] + pub data: ::prost::alloc::vec::Vec, + /// Byte length for each transaction in order. 
+ #[prost(uint32, repeated, tag = "2")] + pub tx_sizes: ::prost::alloc::vec::Vec, +} /// GetTxsResponse contains the available transactions #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetTxsResponse { - /// Slice of valid transactions from mempool - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// Valid transactions from mempool in contiguous batch form. + #[prost(message, optional, tag = "2")] + pub tx_batch: ::core::option::Option, } /// ExecuteTxsRequest contains transactions and block context for execution #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ExecuteTxsRequest { - /// Ordered list of transactions to execute - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, /// Height of block being created (must be > 0) #[prost(uint64, tag = "2")] pub block_height: u64, @@ -1070,6 +1077,9 @@ pub struct ExecuteTxsRequest { /// Previous block's state root hash #[prost(bytes = "vec", tag = "4")] pub prev_state_root: ::prost::alloc::vec::Vec, + /// Ordered transactions to execute in contiguous batch form. 
+ #[prost(message, optional, tag = "5")] + pub tx_batch: ::core::option::Option, } /// ExecuteTxsResponse contains the result of transaction execution #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] @@ -1115,9 +1125,6 @@ pub struct GetExecutionInfoResponse { /// FilterTxsRequest contains transactions to validate and filter #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct FilterTxsRequest { - /// All transactions (force-included + mempool) - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, /// Maximum cumulative size allowed (0 means no size limit) #[prost(uint64, tag = "2")] pub max_bytes: u64, @@ -1127,6 +1134,9 @@ pub struct FilterTxsRequest { /// Whether force-included transactions are present #[prost(bool, tag = "4")] pub has_force_included_transaction: bool, + /// All transactions (force-included + mempool) in contiguous batch form. + #[prost(message, optional, tag = "5")] + pub tx_batch: ::core::option::Option, } /// FilterTxsResponse contains the filter status for each transaction #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]