7 log "github.com/sirupsen/logrus"
9 "github.com/vapor/consensus"
10 "github.com/vapor/errors"
11 "github.com/vapor/netsync/peers"
12 "github.com/vapor/p2p/security"
13 "github.com/vapor/protocol/bc"
14 "github.com/vapor/protocol/bc/types"
// Timing and channel-capacity constants for the sync engine.
// syncCycle is the period of syncWorker's ticker; the *ChSize values
// size the buffered reply channels created in newBlockKeeper.
18 syncCycle = 5 * time.Second
19 blockProcessChSize = 1024
20 blocksProcessChSize = 128
21 headersProcessChSize = 1024
// Per-message caps: how many blocks / headers a single p2p reply may
// carry (enforced in locateBlocks / locateHeaders).
25 maxBlockPerMsg = uint64(128)
26 maxBlockHeadersPerMsg = uint64(2048)
// syncTimeout bounds how long the require* calls wait for a peer reply.
27 syncTimeout = 30 * time.Second
// Sentinel errors returned by blockKeeper operations. Callers compare
// against the variables themselves, so the message-text fixes below
// ("dismatch" -> "mismatch"; lowercase per Go error-string convention)
// do not affect error identity.
var (
	errAppendHeaders  = errors.New("fail to append list due to order mismatch")
	errRequestTimeout = errors.New("request timeout")
	errPeerDropped    = errors.New("peer dropped")
)
// blockMsg wraps a single block received from a peer (fields elided in
// this view; constructed as {block, peerID} in processBlock).
34 type blockMsg struct {
// blocksMsg wraps a batch of blocks received from a peer.
39 type blocksMsg struct {
// headersMsg carries a batch of block headers together with the ID of
// the peer that sent them.
44 type headersMsg struct {
45 headers []*types.BlockHeader
// blockKeeper drives block/header synchronization against a chosen sync
// peer. Peer replies are delivered asynchronously on the buffered
// *ProcessCh channels below and consumed by the require* methods.
// (Additional fields — chain, peers, syncPeer, headerList — are elided
// in this view; see their uses throughout the file.)
49 type blockKeeper struct {
54 blockProcessCh chan *blockMsg
55 blocksProcessCh chan *blocksMsg
56 headersProcessCh chan *headersMsg
// newBlockKeeper builds a blockKeeper bound to the given chain and peer
// set, pre-allocating the buffered reply channels and the linked list of
// headers used by fast (checkpoint-based) sync.
61 func newBlockKeeper(chain Chain, peers *peers.PeerSet) *blockKeeper {
65 blockProcessCh: make(chan *blockMsg, blockProcessChSize),
66 blocksProcessCh: make(chan *blocksMsg, blocksProcessChSize),
67 headersProcessCh: make(chan *headersMsg, headersProcessChSize),
68 headerList: list.New(),
// appendHeaderList appends headers to bk.headerList in order, verifying
// that each header's PreviousBlockHash links to the hash of the current
// list tail. Returns errAppendHeaders on the first broken link.
75 func (bk *blockKeeper) appendHeaderList(headers []*types.BlockHeader) error {
76 for _, header := range headers {
// The list tail is the most recently accepted header.
77 prevHeader := bk.headerList.Back().Value.(*types.BlockHeader)
78 if prevHeader.Hash() != header.PreviousBlockHash {
79 return errAppendHeaders
81 bk.headerList.PushBack(header)
// blockLocator builds a block locator: a list of hashes starting at the
// best header and walking back toward genesis (height 0), clamping the
// step so it never goes below genesis. On a header-fetch error the
// function logs and (presumably) stops extending the locator.
86 func (bk *blockKeeper) blockLocator() []*bc.Hash {
87 header := bk.chain.BestBlockHeader()
88 locator := []*bc.Hash{}
92 headerHash := header.Hash()
93 locator = append(locator, &headerHash)
// Genesis reached: the locator is complete.
94 if header.Height == 0 {
// Clamp: if the next step would underflow, jump straight to genesis.
99 if header.Height < step {
100 header, err = bk.chain.GetHeaderByHeight(0)
102 header, err = bk.chain.GetHeaderByHeight(header.Height - step)
105 log.WithFields(log.Fields{"module": logModule, "err": err}).Error("blockKeeper fail on get blockLocator")
// NOTE(review): after 9 entries the step presumably starts doubling
// (the branch body is elided in this view) — confirm against full source.
109 if len(locator) >= 9 {
// fastBlockSync synchronizes up to the given checkpoint in two phases:
// first it downloads and link-verifies headers until the list tail is
// exactly the checkpoint hash, then it downloads blocks and processes
// them, matching each against the verified header list. Peers that
// serve data off the checkpoint branch are reported as misbehaving.
116 func (bk *blockKeeper) fastBlockSync(checkPoint *consensus.Checkpoint) error {
117 bk.resetHeaderState()
118 lastHeader := bk.headerList.Back().Value.(*types.BlockHeader)
// Phase 1: extend the header list until its tail equals the checkpoint.
119 for ; lastHeader.Hash() != checkPoint.Hash; lastHeader = bk.headerList.Back().Value.(*types.BlockHeader) {
// Reaching checkpoint height without matching its hash means the peer
// is on a different branch.
120 if lastHeader.Height >= checkPoint.Height {
121 return errors.Wrap(peers.ErrPeerMisbehave, "peer is not in the checkpoint branch")
124 lastHash := lastHeader.Hash()
125 headers, err := bk.requireHeaders([]*bc.Hash{&lastHash}, &checkPoint.Hash)
130 if len(headers) == 0 {
131 return errors.Wrap(peers.ErrPeerMisbehave, "requireHeaders return empty list")
134 if err := bk.appendHeaderList(headers); err != nil {
// Phase 2: fetch blocks until the chain reaches the checkpoint height.
139 fastHeader := bk.headerList.Front()
140 for bk.chain.BestBlockHeight() < checkPoint.Height {
141 locator := bk.blockLocator()
142 blocks, err := bk.requireBlocks(locator, &checkPoint.Hash)
147 if len(blocks) == 0 {
148 return errors.Wrap(peers.ErrPeerMisbehave, "requireBlocks return empty list")
151 for _, block := range blocks {
// Running past the header list means the peer sent a block above the
// checkpoint. NOTE(review): message typo — "than is" should be "that is".
152 if fastHeader = fastHeader.Next(); fastHeader == nil {
153 return errors.New("get block than is higher than checkpoint")
156 if _, err = bk.chain.ProcessBlock(block); err != nil {
157 return errors.Wrap(err, "fail on fastBlockSync process block")
// locateBlocks resolves locator/stopHash to a header sequence via
// locateHeaders, then loads the matching full blocks from the chain,
// capped at maxBlockPerMsg entries per reply.
164 func (bk *blockKeeper) locateBlocks(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.Block, error) {
165 headers, err := bk.locateHeaders(locator, stopHash)
170 blocks := []*types.Block{}
171 for i, header := range headers {
// Enforce the per-message block cap.
172 if uint64(i) >= maxBlockPerMsg {
176 headerHash := header.Hash()
177 block, err := bk.chain.GetBlockByHash(&headerHash)
182 blocks = append(blocks, block)
// locateHeaders returns main-chain headers strictly after the fork
// point identified by the locator, up to stopHash's height, capped at
// maxBlockHeadersPerMsg. The start defaults to genesis and is replaced
// by the first locator hash found on the main chain.
187 func (bk *blockKeeper) locateHeaders(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.BlockHeader, error) {
188 stopHeader, err := bk.chain.GetHeaderByHash(stopHash)
// Default start is genesis, overridden by the locator scan below.
193 startHeader, err := bk.chain.GetHeaderByHeight(0)
198 for _, hash := range locator {
199 header, err := bk.chain.GetHeaderByHash(hash)
200 if err == nil && bk.chain.InMainChain(header.Hash()) {
// Cap the size of the reply.
206 totalHeaders := stopHeader.Height - startHeader.Height
207 if totalHeaders > maxBlockHeadersPerMsg {
208 totalHeaders = maxBlockHeadersPerMsg
// Collect headers at heights startHeader.Height+1 .. +totalHeaders.
211 headers := []*types.BlockHeader{}
212 for i := uint64(1); i <= totalHeaders; i++ {
213 header, err := bk.chain.GetHeaderByHeight(startHeader.Height + i)
218 headers = append(headers, header)
// nextCheckpoint returns the first configured checkpoint whose height
// is above the current best height, or nil when the chain has already
// passed the last checkpoint (or none are configured).
223 func (bk *blockKeeper) nextCheckpoint() *consensus.Checkpoint {
224 height := bk.chain.BestBlockHeader().Height
225 checkpoints := consensus.ActiveNetParams.Checkpoints
226 if len(checkpoints) == 0 || height >= checkpoints[len(checkpoints)-1].Height {
// Start from the last checkpoint and scan downward, keeping the lowest
// checkpoint still above our height (loop exit on the first checkpoint
// at or below height is elided in this view).
230 nextCheckpoint := &checkpoints[len(checkpoints)-1]
231 for i := len(checkpoints) - 2; i >= 0; i-- {
232 if height >= checkpoints[i].Height {
235 nextCheckpoint = &checkpoints[i]
237 return nextCheckpoint
240 func (bk *blockKeeper) processBlock(peerID string, block *types.Block) {
241 bk.blockProcessCh <- &blockMsg{block: block, peerID: peerID}
244 func (bk *blockKeeper) processBlocks(peerID string, blocks []*types.Block) {
245 bk.blocksProcessCh <- &blocksMsg{blocks: blocks, peerID: peerID}
248 func (bk *blockKeeper) processHeaders(peerID string, headers []*types.BlockHeader) {
249 bk.headersProcessCh <- &headersMsg{headers: headers, peerID: peerID}
// regularBlockSync downloads blocks one height at a time from the sync
// peer until the chain reaches wantHeight. After processing a block the
// next height is re-read from the chain rather than incremented, since
// processing may advance (or reorganize) the best height.
252 func (bk *blockKeeper) regularBlockSync(wantHeight uint64) error {
253 i := bk.chain.BestBlockHeight() + 1
254 for i <= wantHeight {
255 block, err := bk.requireBlock(i)
// Orphan handling between these lines is elided in this view.
260 isOrphan, err := bk.chain.ProcessBlock(block)
269 i = bk.chain.BestBlockHeight() + 1
// requireBlock requests the block at the given height from the sync
// peer and waits up to syncTimeout for a matching reply on
// blockProcessCh. Replies from other peers or for other heights are
// skipped (select/loop scaffolding is elided in this view).
274 func (bk *blockKeeper) requireBlock(height uint64) (*types.Block, error) {
275 if ok := bk.syncPeer.GetBlockByHeight(height); !ok {
276 return nil, errPeerDropped
279 timeout := time.NewTimer(syncTimeout)
284 case msg := <-bk.blockProcessCh:
// Ignore replies that did not come from the chosen sync peer.
285 if msg.peerID != bk.syncPeer.ID() {
// Ignore blocks at the wrong height.
288 if msg.block.Height != height {
291 return msg.block, nil
293 return nil, errors.Wrap(errRequestTimeout, "requireBlock")
// requireBlocks requests a batch of blocks (bounded by locator and
// stopHash) from the sync peer and waits up to syncTimeout for the
// reply on blocksProcessCh, ignoring replies from other peers
// (select/loop scaffolding is elided in this view).
298 func (bk *blockKeeper) requireBlocks(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.Block, error) {
299 if ok := bk.syncPeer.GetBlocks(locator, stopHash); !ok {
300 return nil, errPeerDropped
303 timeout := time.NewTimer(syncTimeout)
308 case msg := <-bk.blocksProcessCh:
309 if msg.peerID != bk.syncPeer.ID() {
312 return msg.blocks, nil
314 return nil, errors.Wrap(errRequestTimeout, "requireBlocks")
// requireHeaders requests a batch of headers (bounded by locator and
// stopHash) from the sync peer and waits up to syncTimeout for the
// reply on headersProcessCh, ignoring replies from other peers
// (select/loop scaffolding is elided in this view).
319 func (bk *blockKeeper) requireHeaders(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.BlockHeader, error) {
320 if ok := bk.syncPeer.GetHeaders(locator, stopHash); !ok {
321 return nil, errPeerDropped
324 timeout := time.NewTimer(syncTimeout)
329 case msg := <-bk.headersProcessCh:
330 if msg.peerID != bk.syncPeer.ID() {
333 return msg.headers, nil
335 return nil, errors.Wrap(errRequestTimeout, "requireHeaders")
340 // resetHeaderState sets the headers-first mode state to values appropriate for
341 // syncing from a new peer.
342 func (bk *blockKeeper) resetHeaderState() {
343 header := bk.chain.BestBlockHeader()
// Seed the list with the current best header only while fast sync is
// still applicable, i.e. a checkpoint remains ahead of us.
345 if bk.nextCheckpoint() != nil {
346 bk.headerList.PushBack(header)
// startSync runs one synchronization round. If a fast-sync-capable peer
// is at or above the next checkpoint, it attempts fastBlockSync;
// otherwise, if a full node is ahead of us, it attempts regularBlockSync
// toward the peer's height, at most maxBlockPerMsg blocks per round.
// Failed syncs are reported to the peer-set error handler as
// LevelMsgIllegal. The bool result reports whether a sync ran (exact
// return paths are elided in this view).
350 func (bk *blockKeeper) startSync() bool {
351 checkPoint := bk.nextCheckpoint()
352 peer := bk.peers.BestPeer(consensus.SFFastSync | consensus.SFFullNode)
353 if peer != nil && checkPoint != nil && peer.Height() >= checkPoint.Height {
355 if err := bk.fastBlockSync(checkPoint); err != nil {
356 log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on fastBlockSync")
357 bk.peers.ErrorHandler(peer.ID(), security.LevelMsgIllegal, err)
// Fall back to regular sync against the best plain full node.
363 blockHeight := bk.chain.BestBlockHeight()
364 peer = bk.peers.BestPeer(consensus.SFFullNode)
365 if peer != nil && peer.Height() > blockHeight {
// Limit each round to maxBlockPerMsg blocks beyond our best height.
367 targetHeight := blockHeight + maxBlockPerMsg
368 if targetHeight > peer.Height() {
369 targetHeight = peer.Height()
372 if err := bk.regularBlockSync(targetHeight); err != nil {
373 log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on regularBlockSync")
// NOTE(review): gofmt — missing space after the comma below.
374 bk.peers.ErrorHandler(peer.ID(),security.LevelMsgIllegal, err)
// syncWorker is the long-running sync loop: on each syncCycle tick it
// runs startSync and, when the chain advanced, broadcasts the new best
// block status to peers. (The function tail is not visible in this
// chunk; loop/select scaffolding is elided.)
382 func (bk *blockKeeper) syncWorker() {
383 syncTicker := time.NewTicker(syncCycle)
384 defer syncTicker.Stop()
// Nothing changed this round — skip the broadcast.
388 if update := bk.startSync(); !update {
392 block, err := bk.chain.GetBlockByHeight(bk.chain.BestBlockHeight())
394 log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker get best block")
397 if err = bk.peers.BroadcastNewStatus(block); err != nil {
398 log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker broadcast new status")