
Commit

CV Cleanup dead code
chillyvee committed Oct 28, 2022
1 parent e879e9f commit 994cb64
Showing 7 changed files with 19 additions and 85 deletions.
5 changes: 5 additions & 0 deletions blocksync/reactor.go
@@ -64,9 +64,14 @@ type Reactor struct {
func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
blockSync bool) *Reactor {

// Mismatch is okay if tendermint can get new store via RPC
if state.LastBlockHeight != store.Height() {
// TODO: use logger before SetLogger?
// TODO: Panic if statesync RPC/Trust* are not set?
fmt.Sprintf("WARN: state (%v) != store (%v) height mismatch\n", state.LastBlockHeight, store.Height())
}

// Tendermint can only find future heights to match app state. Tendermint is not able to fetch lower heights.
if state.LastBlockHeight < store.Height() {
panic(fmt.Sprintf("Unable to recover via RPC when state (%v) < store (%v) height ", state.LastBlockHeight,
store.Height()))
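Note that the added check only builds the warning string — fmt.Sprintf by itself discards its result, which is what the "TODO: use logger before SetLogger?" points at. A minimal sketch of how that warning could actually be surfaced, using a hypothetical warnHeightMismatch helper (not part of this commit) and plain stderr output until a logger is wired up:

package blocksync

import (
	"fmt"
	"os"
)

// warnHeightMismatch is a hypothetical helper illustrating the TODO above:
// fmt.Sprintf alone never emits anything, so the mismatch is written to
// stderr here until SetLogger has provided a proper logger.
func warnHeightMismatch(stateHeight, storeHeight int64) {
	if stateHeight != storeHeight {
		fmt.Fprintf(os.Stderr, "WARN: state (%v) != store (%v) height mismatch\n",
			stateHeight, storeHeight)
	}
}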
1 change: 0 additions & 1 deletion config/config.go
@@ -817,7 +817,6 @@ type StateSyncConfig struct {
DiscoveryTime time.Duration `mapstructure:"discovery_time"`
ChunkRequestTimeout time.Duration `mapstructure:"chunk_request_timeout"`
ChunkFetchers int32 `mapstructure:"chunk_fetchers"`
RestoreHeight uint64 `mapstructure:"restore_height"`
}

func (cfg *StateSyncConfig) TrustHashBytes() []byte {
26 changes: 7 additions & 19 deletions consensus/replay.go
@@ -210,14 +210,13 @@ type Handshaker struct {
genDoc *types.GenesisDoc
logger log.Logger
stateProvider statesync.StateProvider
// blockStore *store.BlockStore

nBlocks int // number of blocks applied to the state
}

func NewHandshaker(stateStore sm.Store, state sm.State,
store sm.BlockStore, genDoc *types.GenesisDoc,
stateProvider statesync.StateProvider /*, blockStore *store.BlockStore*/) *Handshaker {
stateProvider statesync.StateProvider) *Handshaker {

return &Handshaker{
stateStore: stateStore,
@@ -227,7 +226,7 @@ func NewHandshaker(stateStore sm.Store, state sm.State,
genDoc: genDoc,
logger: log.NewNopLogger(),
stateProvider: stateProvider,
//blockStore: blockStore,

nBlocks: 0,
}
}
@@ -306,7 +305,7 @@ func (h *Handshaker) localSync(appBlockHeight uint64) (sm.State, *types.Commit,
// Optimistically build new state, so we don't discover any light client failures at the end.
state, err = h.stateProvider.State(pctx, appBlockHeight)
if err != nil {
h.logger.Info("failed to fetch and verify tendermint state", "err", err)
h.logger.Error("failed to fetch and verify tendermint state", "err", err)
if err == light.ErrNoWitnesses {
return sm.State{}, nil, err
}
@@ -315,7 +314,7 @@

commit, err = h.stateProvider.Commit(pctx, appBlockHeight)
if err != nil {
h.logger.Info("failed to fetch and verify commit", "err", err)
h.logger.Error("failed to fetch and verify commit", "err", err)
if err == light.ErrNoWitnesses {
return sm.State{}, nil, err
}
@@ -325,11 +324,6 @@
if err := h.stateStore.Save(state); err != nil {
return sm.State{}, nil, err
}
/*
if err := h.stateStore.Bootstrap(state); err != nil {
return sm.State{}, nil, err
}
*/
if err := h.store.(*store.BlockStore).SaveSeenCommit(int64(appBlockHeight), commit); err != nil {
return sm.State{}, nil, err
}
@@ -361,19 +355,17 @@ func (h *Handshaker) ReplayBlocks(
"stateHeight",
stateBlockHeight)

fmt.Sprintf("HS: Pre switch")
// Check restorable conditions for localsync
if storeBlockHeight == 0 && appBlockHeight > 0 {
var syncErr error
if state, _, syncErr = h.localSync(uint64(appBlockHeight)); syncErr != nil {
panic("dead")
panic("Unable to use RPC to update state")
}
h.logger.Info("localSync resulting state.Version.Consensus.Block", "state", state.Version.Consensus.Block)

// Assume Heights Restored
// Assume Heights Restored, Update Heights for edge cases and constraints on the storeBlockHeight and storeBlockBase.
storeBlockHeight = appBlockHeight
stateBlockHeight = appBlockHeight

}

// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain.
@@ -449,11 +441,7 @@ func (h *Handshaker) ReplayBlocks(
case storeBlockHeight < appBlockHeight:
// the app should never be ahead of the store (but this is under app's control)
return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight}
/*
if err := h.localSync(uint64(appBlockHeight)); err != nil {
return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight}
}
*/

case storeBlockHeight < stateBlockHeight:
// the state should never be ahead of the store (this is under tendermint's control)
panic(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
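Taken together, the replay.go hunks reduce to a small decision over the three heights (store, state, app). The following is an illustrative distillation only — decideReplayAction is an assumed name, not the Handshaker API — mirroring the cases visible in this diff:

package consensus

import "fmt"

// decideReplayAction condenses the height checks shown above (illustration only):
//   store == 0 && app > 0 -> recover state/commit over RPC (localSync)
//   store < app            -> the app is ahead of the store: hard error
//   store < state          -> tendermint's own invariant is broken: panic
//   otherwise              -> replay blocks normally
func decideReplayAction(storeHeight, stateHeight, appHeight int64) (string, error) {
	switch {
	case storeHeight == 0 && appHeight > 0:
		return "localSync", nil
	case storeHeight < appHeight:
		return "", fmt.Errorf("app height %d ahead of store height %d", appHeight, storeHeight)
	case storeHeight < stateHeight:
		panic(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateHeight, storeHeight))
	default:
		return "replay", nil
	}
}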
34 changes: 7 additions & 27 deletions node/node.go
@@ -208,19 +208,8 @@ func NewNode(config *cfg.Config,
// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
// and replays any blocks as necessary to sync tendermint with the app.
consensusLogger := logger.With("module", "consensus")
// Detect when an RPC recovery is required

/*
fmt.Printf("state.LastBlockHeight %v\n", state.LastBlockHeight)
fmt.Printf("blockStore.Height() %v\n", blockStore.Height())
bsCommit := blockStore.LoadSeenCommit(state.LastBlockHeight)
if bsCommit == nil {
fmt.Printf("blockStore commit %v\n", bsCommit)
} else {
fmt.Printf("blockStore commit found %v\n", bsCommit)
}
*/

// RPC recovery is required if blockStore Height is 0
if blockStore.Height() == 0 || !stateSync {
if err := doHandshake(config.StateSync, stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
return nil, err
@@ -431,14 +420,8 @@ func (n *Node) OnStart() error {
return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
}

// Statesync requesting all data from P2P
if n.stateSync {
fmt.Printf("statesync enabled\n\n")
} else {
fmt.Printf("statesync DISABLED\n\n")
}

if n.stateSync {
// P2P stateSync
// If state and blockStore.Height are both at the same height, skip the P2P Statesync and immediately enter consensus
bcR, ok := n.bcReactor.(blockSyncReactor)
if !ok {
@@ -451,11 +434,10 @@
}
}

// Finish Local stateSync by fetching latest heights from RPC
if n.stateSyncGenesis.LastBlockHeight > 0 && n.blockStore.Height() == 0 {
// Local stateSync
// statesync will be disabled if appState.Height > 0, but if blockStore has just been RPC restored, then we must force switch to consensus
fmt.Printf("state.LastBlockHeight %v\n", n.stateSyncGenesis.LastBlockHeight)
fmt.Printf("Detected recent RPC blockStore restore\n")
// if appState.Height > 0, but blockStore has just been RPC restored, then we must force switch to consensus
n.Logger.Info("Detected recent RPC blockStore restore")

state, err := n.stateStore.Load()
if err != nil {
@@ -466,16 +448,14 @@
n.Logger.Error("Failed to bootstrap node with new state", "err", err)
return err
}
//n.consensusReactor.SwitchToConsensus(state, true)
n.consensusReactor.Metrics.StateSyncing.Set(0)
n.consensusReactor.Metrics.BlockSyncing.Set(1)

bcR, _ := n.bcReactor.(blockSyncReactor)
err = bcR.SwitchToBlockSync(state)
// might already be in fastsync if this is a second start
if err != nil {
n.Logger.Error("Failed to switch to fast sync", "err", err)
//return err
// If already fastsync, we expect an error
n.Logger.Info("Failed to switch to fast sync", "err", err)
}
}

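The OnStart hunks handle the case where the blockStore was just restored over RPC: skip P2P state sync, flip the syncing metrics, and push the blocksync reactor straight into block sync, tolerating the error SwitchToBlockSync returns when the reactor is already fast-syncing on a second start. A rough sketch of that flow — forceBlockSync is an assumed helper name, and the Node, blockSyncReactor, and metrics fields are taken from the surrounding diff:

package node

import (
	"errors"

	sm "github.com/tendermint/tendermint/state"
)

// forceBlockSync is an illustrative helper, not the actual node wiring.
// It reproduces the post-restore steps visible in this hunk.
func forceBlockSync(n *Node, state sm.State) error {
	// Mark state sync as finished and block sync as active.
	n.consensusReactor.Metrics.StateSyncing.Set(0)
	n.consensusReactor.Metrics.BlockSyncing.Set(1)

	bcR, ok := n.bcReactor.(blockSyncReactor)
	if !ok {
		return errors.New("bcReactor does not implement blockSyncReactor")
	}
	if err := bcR.SwitchToBlockSync(state); err != nil {
		// Expected when the reactor is already in fast sync (second start).
		n.Logger.Info("Failed to switch to fast sync", "err", err)
	}
	return nil
}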
2 changes: 0 additions & 2 deletions rpc/core/env.go
@@ -5,7 +5,6 @@ import (
"fmt"
"time"

abciclient "github.com/tendermint/tendermint/abci/client"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/consensus"
"github.com/tendermint/tendermint/crypto"
@@ -77,7 +76,6 @@ type Environment struct {
// external, thread safe interfaces
ProxyAppQuery proxy.AppConnQuery
ProxyAppMempool proxy.AppConnMempool
ProxyApp abciclient.Client

// interfaces defined in types and above
StateStore sm.Store
35 changes: 0 additions & 35 deletions statesync/syncer.go
@@ -59,7 +59,6 @@ type syncer struct {
tempDir string
chunkFetchers int32
retryTimeout time.Duration
restoreHeight uint64

mtx tmsync.RWMutex
chunks *chunkQueue
@@ -84,7 +83,6 @@ func newSyncer(
tempDir: tempDir,
chunkFetchers: cfg.ChunkFetchers,
retryTimeout: cfg.ChunkRequestTimeout,
restoreHeight: cfg.RestoreHeight,
}
}

@@ -319,39 +317,6 @@ func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types.
return state, commit, nil
}

// LocalSync restores stateStore and blockStore state which is not included in the snapshot
// This function assumes that the implicit trusted local snapshot chunks have already been applied
// State restoration depends on heights after the restored snapshot so the RPC dependency remains
/*
func (s *syncer) LocalSync() (sm.State, *types.Commit, error) {
pctx, pcancel := context.WithTimeout(context.TODO(), 30*time.Second)
defer pcancel()
// Optimistically build new state, so we don't discover any light client failures at the end.
state, err := s.stateProvider.State(pctx, s.restoreHeight)
if err != nil {
s.logger.Info("failed to fetch and verify tendermint state", "err", err)
if err == light.ErrNoWitnesses {
return sm.State{}, nil, err
}
return sm.State{}, nil, errRejectSnapshot
}
commit, err := s.stateProvider.Commit(pctx, s.restoreHeight)
if err != nil {
s.logger.Info("failed to fetch and verify commit", "err", err)
if err == light.ErrNoWitnesses {
return sm.State{}, nil, err
}
return sm.State{}, nil, errRejectSnapshot
}
// Done! 🎉
s.logger.Info("🎉 Local Snapshot Restored", "height", s.restoreHeight)
return state, commit, nil
}
*/

// offerSnapshot offers a snapshot to the app. It returns various errors depending on the app's
// response, or nil if the snapshot was accepted.
func (s *syncer) offerSnapshot(snapshot *snapshot) error {
1 change: 0 additions & 1 deletion store/store.go
@@ -455,7 +455,6 @@ func (bs *BlockStore) saveState() {

// SaveSeenCommit saves a seen commit, used by e.g. the state sync reactor when bootstrapping node.
func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) error {
fmt.Printf("store.BlockStore.SaveSeenCommit height %d\n", height)
pbc := seenCommit.ToProto()
seenCommitBytes, err := proto.Marshal(pbc)
if err != nil {
