7 log "github.com/sirupsen/logrus"
9 "github.com/vapor/chain"
10 "github.com/vapor/consensus"
11 "github.com/vapor/errors"
12 "github.com/vapor/mining/tensority"
13 "github.com/vapor/protocol/bc"
14 "github.com/vapor/protocol/bc/types"
// Timing and channel-capacity knobs for the sync loop (enclosing const
// header is elided from this view).
18 syncCycle = 5 * time.Second
19 blockProcessChSize = 1024
20 blocksProcessChSize = 128
21 headersProcessChSize = 1024
// Per-message caps for block/header batches, and the timeout applied to
// each require* round-trip with the sync peer.
25 maxBlockPerMsg = uint64(128)
26 maxBlockHeadersPerMsg = uint64(2048)
27 syncTimeout = 30 * time.Second
// Sentinel errors; callers compare these by identity (errors.Root/Wrap).
// NOTE(review): "Peer dropped" breaks Go's lowercase error-string convention
// used by the sibling messages, and "dismatch" is a typo for "mismatch" —
// both are runtime strings, so left untouched here; consider normalizing.
29 errAppendHeaders = errors.New("fail to append list due to order dismatch")
30 errRequestTimeout = errors.New("request timeout")
31 errPeerDropped = errors.New("Peer dropped")
32 errPeerMisbehave = errors.New("peer is misbehave")
// blockMsg pairs one received block with the ID of the peer that sent it
// (field list elided from this view).
35 type blockMsg struct {
// blocksMsg pairs a batch of received blocks with the sending peer
// (field list elided from this view).
40 type blocksMsg struct {
// headersMsg pairs a batch of received block headers with the sending peer.
45 type headersMsg struct {
46 headers []*types.BlockHeader
// blockKeeper drives block/header synchronization against a chosen sync
// peer. Inbound network messages are funneled through the buffered channels
// below and consumed synchronously by the require* helpers; remaining fields
// (chain, peers, syncPeer, headerList, ...) are elided from this view.
50 type blockKeeper struct {
55 blockProcessCh chan *blockMsg
56 blocksProcessCh chan *blocksMsg
57 headersProcessCh chan *headersMsg
// newBlockKeeper wires a blockKeeper to the chain and peer set, allocating
// the buffered processing channels sized by the *ChSize constants above
// (remaining field initialization and the return are elided from this view).
62 func newBlockKeeper(chain chain.Chain, peers *peerSet) *blockKeeper {
66 blockProcessCh: make(chan *blockMsg, blockProcessChSize),
67 blocksProcessCh: make(chan *blocksMsg, blocksProcessChSize),
68 headersProcessCh: make(chan *headersMsg, headersProcessChSize),
// headerList holds the headers-first chain; it is seeded by
// resetHeaderState before any append.
69 headerList: list.New(),
// appendHeaderList appends headers to the in-memory header list, checking
// that each new header links to the current tail via PreviousBlockHash.
// Returns errAppendHeaders on the first ordering mismatch.
// Precondition: headerList is non-empty (resetHeaderState pushes the best
// header first); otherwise Back() would be nil — TODO confirm all callers
// guarantee this.
76 func (bk *blockKeeper) appendHeaderList(headers []*types.BlockHeader) error {
77 for _, header := range headers {
78 prevHeader := bk.headerList.Back().Value.(*types.BlockHeader)
79 if prevHeader.Hash() != header.PreviousBlockHash {
80 return errAppendHeaders
82 bk.headerList.PushBack(header)
// blockLocator builds a block locator: a sample of main-chain header hashes
// starting at the best header and walking back toward genesis. The walk is
// dense at first and presumably coarsens (step growth code is elided) once
// the locator holds 9 entries — TODO confirm against the full file.
87 func (bk *blockKeeper) blockLocator() []*bc.Hash {
88 header := bk.chain.BestBlockHeader()
89 locator := []*bc.Hash{}
// Append the current header's hash; a local copy is taken so the slice does
// not alias a loop variable's address across iterations.
93 headerHash := header.Hash()
94 locator = append(locator, &headerHash)
// Genesis reached: the locator is complete.
95 if header.Height == 0 {
// Clamp the step so the walk lands exactly on genesis instead of
// underflowing the unsigned height.
100 if header.Height < step {
101 header, err = bk.chain.GetHeaderByHeight(0)
103 header, err = bk.chain.GetHeaderByHeight(header.Height - step)
// Lookup failures are logged, not returned — the locator is best-effort
// (the surrounding error-handling lines are elided from this view).
106 log.WithFields(log.Fields{"module": logModule, "err": err}).Error("blockKeeper fail on get blockLocator")
110 if len(locator) >= 9 {
// fastBlockSync performs headers-first sync up to a trusted checkpoint.
// Phase 1: repeatedly request headers from the current list tail and
// link-check them (appendHeaderList) until the checkpoint hash is the tail.
// Phase 2: request the corresponding blocks, verify each block hash against
// the already-validated header chain, and process them with a pre-seeded
// tensority cache so the expensive PoW hash is skipped — safe only because
// the header chain terminates at the hard-coded checkpoint.
117 func (bk *blockKeeper) fastBlockSync(checkPoint *consensus.Checkpoint) error {
118 bk.resetHeaderState()
119 lastHeader := bk.headerList.Back().Value.(*types.BlockHeader)
// Loop until the tail of the header list is exactly the checkpoint hash.
120 for ; lastHeader.Hash() != checkPoint.Hash; lastHeader = bk.headerList.Back().Value.(*types.BlockHeader) {
// Reaching checkpoint height without matching its hash means the peer is
// serving a different branch.
121 if lastHeader.Height >= checkPoint.Height {
122 return errors.Wrap(errPeerMisbehave, "peer is not in the checkpoint branch")
125 lastHash := lastHeader.Hash()
126 headers, err := bk.requireHeaders([]*bc.Hash{&lastHash}, &checkPoint.Hash)
// An empty batch would make this loop spin forever; treat it as misbehavior.
131 if len(headers) == 0 {
132 return errors.Wrap(errPeerMisbehave, "requireHeaders return empty list")
135 if err := bk.appendHeaderList(headers); err != nil {
// Phase 2: walk the validated header list in parallel with the downloaded
// blocks, starting from the front (best header pushed by resetHeaderState).
140 fastHeader := bk.headerList.Front()
141 for bk.chain.BestBlockHeight() < checkPoint.Height {
142 locator := bk.blockLocator()
143 blocks, err := bk.requireBlocks(locator, &checkPoint.Hash)
// Same livelock guard as for headers above.
148 if len(blocks) == 0 {
149 return errors.Wrap(errPeerMisbehave, "requireBlocks return empty list")
152 for _, block := range blocks {
// More blocks than validated headers means the peer sent data past the
// checkpoint. NOTE(review): "than is higher" is a typo for "that is higher"
// in this runtime string — left untouched in a comments-only pass.
153 if fastHeader = fastHeader.Next(); fastHeader == nil {
154 return errors.New("get block than is higher than checkpoint")
157 blockHash := block.Hash()
// The block must match the header chain we already link-checked in phase 1.
158 if blockHash != fastHeader.Value.(*types.BlockHeader).Hash() {
159 return errPeerMisbehave
162 seed, err := bk.chain.CalcNextSeed(&block.PreviousBlockHash)
164 return errors.Wrap(err, "fail on fastBlockSync calculate seed")
// Seed a dummy tensority result so ProcessBlock's PoW check is a cache hit,
// then drop the entry immediately afterwards.
167 tensority.AIHash.AddCache(&blockHash, seed, &bc.Hash{})
168 _, err = bk.chain.ProcessBlock(block)
169 tensority.AIHash.RemoveCache(&blockHash, seed)
171 return errors.Wrap(err, "fail on fastBlockSync process block")
// locateBlocks resolves a locator/stop-hash pair into at most maxBlockPerMsg
// full blocks, reusing locateHeaders to select the header range and then
// fetching each block by its header hash.
178 func (bk *blockKeeper) locateBlocks(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.Block, error) {
179 headers, err := bk.locateHeaders(locator, stopHash)
184 blocks := []*types.Block{}
185 for i, header := range headers {
// Cap the response size even though locateHeaders may return up to
// maxBlockHeadersPerMsg headers.
186 if uint64(i) >= maxBlockPerMsg {
190 headerHash := header.Hash()
191 block, err := bk.chain.GetBlockByHash(&headerHash)
196 blocks = append(blocks, block)
// locateHeaders returns up to maxBlockHeadersPerMsg main-chain headers in
// the range (start, stop], where start is the most recent locator entry
// found on the main chain (falling back to genesis) and stop is stopHash.
201 func (bk *blockKeeper) locateHeaders(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.BlockHeader, error) {
202 stopHeader, err := bk.chain.GetHeaderByHash(stopHash)
// Default the start to genesis, then improve it with the first locator
// entry that is known and on the main chain (break after match is elided).
207 startHeader, err := bk.chain.GetHeaderByHeight(0)
212 for _, hash := range locator {
213 header, err := bk.chain.GetHeaderByHash(hash)
214 if err == nil && bk.chain.InMainChain(header.Hash()) {
// NOTE(review): if stopHeader.Height < startHeader.Height this unsigned
// subtraction would wrap to a huge count — presumably guarded elsewhere or
// bounded by the loop's GetHeaderByHeight failures; confirm in full file.
220 totalHeaders := stopHeader.Height - startHeader.Height
221 if totalHeaders > maxBlockHeadersPerMsg {
222 totalHeaders = maxBlockHeadersPerMsg
// Collect headers strictly after the start header, walking the main chain
// by height.
225 headers := []*types.BlockHeader{}
226 for i := uint64(1); i <= totalHeaders; i++ {
227 header, err := bk.chain.GetHeaderByHeight(startHeader.Height + i)
232 headers = append(headers, header)
// nextCheckpoint returns the first configured checkpoint above the current
// best height, or nil when no checkpoints exist or the chain has already
// passed the last one.
237 func (bk *blockKeeper) nextCheckpoint() *consensus.Checkpoint {
238 height := bk.chain.BestBlockHeader().Height
239 checkpoints := consensus.ActiveNetParams.Checkpoints
240 if len(checkpoints) == 0 || height >= checkpoints[len(checkpoints)-1].Height {
// Scan backwards from the second-to-last checkpoint, keeping the lowest
// checkpoint that is still above height (early-exit line elided).
244 nextCheckpoint := &checkpoints[len(checkpoints)-1]
245 for i := len(checkpoints) - 2; i >= 0; i-- {
246 if height >= checkpoints[i].Height {
249 nextCheckpoint = &checkpoints[i]
251 return nextCheckpoint
254 func (bk *blockKeeper) processBlock(peerID string, block *types.Block) {
255 bk.blockProcessCh <- &blockMsg{block: block, peerID: peerID}
258 func (bk *blockKeeper) processBlocks(peerID string, blocks []*types.Block) {
259 bk.blocksProcessCh <- &blocksMsg{blocks: blocks, peerID: peerID}
262 func (bk *blockKeeper) processHeaders(peerID string, headers []*types.BlockHeader) {
263 bk.headersProcessCh <- &headersMsg{headers: headers, peerID: peerID}
// regularBlockSync fetches blocks one-by-one from the sync peer until the
// local best height reaches wantHeight. Orphan/rollback handling between
// the lines shown here is elided from this view.
266 func (bk *blockKeeper) regularBlockSync(wantHeight uint64) error {
267 i := bk.chain.BestBlockHeight() + 1
268 for i <= wantHeight {
269 block, err := bk.requireBlock(i)
274 isOrphan, err := bk.chain.ProcessBlock(block)
// Re-read the best height rather than i++: ProcessBlock may advance the
// chain by more than one block — presumably when orphans connect; confirm
// against the full file.
283 i = bk.chain.BestBlockHeight() + 1
// requireBlock requests the block at the given height from the current sync
// peer and blocks until it arrives on blockProcessCh or syncTimeout fires.
// Responses from other peers or at the wrong height are skipped (the skip
// handling between the checks is elided from this view).
// NOTE(review): the variable is named waitTicker but holds a one-shot
// time.Timer; whether Stop() is deferred is not visible here — confirm.
288 func (bk *blockKeeper) requireBlock(height uint64) (*types.Block, error) {
// A false return means the peer is gone before the request was even sent.
289 if ok := bk.syncPeer.getBlockByHeight(height); !ok {
290 return nil, errPeerDropped
293 waitTicker := time.NewTimer(syncTimeout)
296 case msg := <-bk.blockProcessCh:
297 if msg.peerID != bk.syncPeer.ID() {
300 if msg.block.Height != height {
303 return msg.block, nil
305 return nil, errors.Wrap(errRequestTimeout, "requireBlock")
// requireBlocks requests a batch of blocks (locator + stop hash) from the
// current sync peer and waits on blocksProcessCh until the batch arrives or
// syncTimeout fires; messages from other peers are skipped (skip handling
// elided from this view).
310 func (bk *blockKeeper) requireBlocks(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.Block, error) {
311 if ok := bk.syncPeer.getBlocks(locator, stopHash); !ok {
312 return nil, errPeerDropped
315 waitTicker := time.NewTimer(syncTimeout)
318 case msg := <-bk.blocksProcessCh:
319 if msg.peerID != bk.syncPeer.ID() {
322 return msg.blocks, nil
324 return nil, errors.Wrap(errRequestTimeout, "requireBlocks")
// requireHeaders requests a batch of headers (locator + stop hash) from the
// current sync peer and waits on headersProcessCh until the batch arrives or
// syncTimeout fires; messages from other peers are skipped (skip handling
// elided from this view). Mirrors requireBlocks.
329 func (bk *blockKeeper) requireHeaders(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.BlockHeader, error) {
330 if ok := bk.syncPeer.getHeaders(locator, stopHash); !ok {
331 return nil, errPeerDropped
334 waitTicker := time.NewTimer(syncTimeout)
337 case msg := <-bk.headersProcessCh:
338 if msg.peerID != bk.syncPeer.ID() {
341 return msg.headers, nil
343 return nil, errors.Wrap(errRequestTimeout, "requireHeaders")
348 // resetHeaderState sets the headers-first mode state to values appropriate for
349 // syncing from a new peer. The list is seeded with the current best header
// only while a checkpoint remains to sync toward; the list re-initialization
// presumably happens on an elided line before the push — confirm.
350 func (bk *blockKeeper) resetHeaderState() {
351 header := bk.chain.BestBlockHeader()
353 if bk.nextCheckpoint() != nil {
354 bk.headerList.PushBack(header)
// startSync runs one sync round, preferring checkpointed fast sync when a
// fast-sync-capable peer is already past the next checkpoint, otherwise
// regular sync of up to maxBlockPerMsg blocks from the best full node.
// Returns true when a sync was attempted — TODO confirm; the return
// statements are elided from this view.
358 func (bk *blockKeeper) startSync() bool {
359 checkPoint := bk.nextCheckpoint()
// Fast-sync path: requires a peer advertising both fast-sync and full-node
// services whose height covers the checkpoint.
360 peer := bk.peers.bestPeer(consensus.SFFastSync | consensus.SFFullNode)
361 if peer != nil && checkPoint != nil && peer.Height() >= checkPoint.Height {
363 if err := bk.fastBlockSync(checkPoint); err != nil {
// A failed fast sync penalizes the peer; the error itself is not fatal here.
364 log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on fastBlockSync")
365 bk.peers.errorHandler(peer.ID(), err)
// Regular path: sync a bounded window from any full node that is ahead.
371 blockHeight := bk.chain.BestBlockHeight()
372 peer = bk.peers.bestPeer(consensus.SFFullNode)
373 if peer != nil && peer.Height() > blockHeight {
// Bound the round to maxBlockPerMsg blocks, clamped to the peer's height.
375 targetHeight := blockHeight + maxBlockPerMsg
376 if targetHeight > peer.Height() {
377 targetHeight = peer.Height()
380 if err := bk.regularBlockSync(targetHeight); err != nil {
381 log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on regularBlockSync")
382 bk.peers.errorHandler(peer.ID(), err)
// syncWorker is the long-running sync loop: each syncCycle tick it attempts
// a sync round and, when the chain advanced, broadcasts the new best block
// and status to peers. The function continues past the end of this view.
390 func (bk *blockKeeper) syncWorker() {
391 genesisBlock, err := bk.chain.GetBlockByHeight(0)
// NOTE(review): this log message mentions handleStatusRequestMsg but we are
// in syncWorker — likely a copy-paste leftover; runtime string left as-is.
393 log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleStatusRequestMsg get genesis")
396 syncTicker := time.NewTicker(syncCycle)
// No progress this round: skip the broadcast work (continue line elided).
399 if update := bk.startSync(); !update {
403 block, err := bk.chain.GetBlockByHeight(bk.chain.BestBlockHeight())
405 log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker get best block")
408 if err := bk.peers.broadcastMinedBlock(block); err != nil {
409 log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker broadcast new block")
412 if err = bk.peers.broadcastNewStatus(block, genesisBlock); err != nil {
413 log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker broadcast new status")