netsync/block_keeper.go
package netsync

import (
        "container/list"
        "time"

        log "github.com/sirupsen/logrus"

        "github.com/vapor/chain"
        "github.com/vapor/consensus"
        "github.com/vapor/errors"
        "github.com/vapor/protocol/bc"
        "github.com/vapor/protocol/bc/types"
)

const (
        syncCycle            = 5 * time.Second
        blockProcessChSize   = 1024
        blocksProcessChSize  = 128
        headersProcessChSize = 1024
)

var (
        maxBlockPerMsg        = uint64(128)
        maxBlockHeadersPerMsg = uint64(2048)
        syncTimeout           = 30 * time.Second

        errAppendHeaders  = errors.New("fail to append list due to order mismatch")
        errRequestTimeout = errors.New("request timeout")
        errPeerDropped    = errors.New("peer dropped")
        errPeerMisbehave  = errors.New("peer is misbehaving")
)

type blockMsg struct {
        block  *types.Block
        peerID string
}

type blocksMsg struct {
        blocks []*types.Block
        peerID string
}

type headersMsg struct {
        headers []*types.BlockHeader
        peerID  string
}

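// blockKeeper tracks the current sync peer and drives block and header
// download for the local chain.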
type blockKeeper struct {
        chain chain.Chain
        peers *peerSet

        syncPeer         *peer
        blockProcessCh   chan *blockMsg
        blocksProcessCh  chan *blocksMsg
        headersProcessCh chan *headersMsg

        headerList *list.List
}

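// newBlockKeeper creates a blockKeeper bound to the given chain and peer set
// and starts its sync worker goroutine.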
func newBlockKeeper(chain chain.Chain, peers *peerSet) *blockKeeper {
        bk := &blockKeeper{
                chain:            chain,
                peers:            peers,
                blockProcessCh:   make(chan *blockMsg, blockProcessChSize),
                blocksProcessCh:  make(chan *blocksMsg, blocksProcessChSize),
                headersProcessCh: make(chan *headersMsg, headersProcessChSize),
                headerList:       list.New(),
        }
        bk.resetHeaderState()
        go bk.syncWorker()
        return bk
}

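// appendHeaderList appends headers to the in-memory header list, verifying
// that each header links to the previous one by hash.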
func (bk *blockKeeper) appendHeaderList(headers []*types.BlockHeader) error {
        for _, header := range headers {
                prevHeader := bk.headerList.Back().Value.(*types.BlockHeader)
                if prevHeader.Hash() != header.PreviousBlockHash {
                        return errAppendHeaders
                }
                bk.headerList.PushBack(header)
        }
        return nil
}

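// blockLocator builds a block-locator list of hashes, starting from the best
// header and walking backwards, doubling the step size once the locator holds
// nine or more entries.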
func (bk *blockKeeper) blockLocator() []*bc.Hash {
        header := bk.chain.BestBlockHeader()
        locator := []*bc.Hash{}

        step := uint64(1)
        for header != nil {
                headerHash := header.Hash()
                locator = append(locator, &headerHash)
                if header.Height == 0 {
                        break
                }

                var err error
                if header.Height < step {
                        header, err = bk.chain.GetHeaderByHeight(0)
                } else {
                        header, err = bk.chain.GetHeaderByHeight(header.Height - step)
                }
                if err != nil {
                        log.WithFields(log.Fields{"module": logModule, "err": err}).Error("blockKeeper fail on get blockLocator")
                        break
                }

                if len(locator) >= 9 {
                        step *= 2
                }
        }
        return locator
}

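// fastBlockSync performs headers-first sync up to the given checkpoint: it
// first downloads and links headers until the checkpoint hash is reached, then
// fetches the corresponding blocks, verifies each block hash against the
// downloaded headers, and processes the blocks in order.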
func (bk *blockKeeper) fastBlockSync(checkPoint *consensus.Checkpoint) error {
        bk.resetHeaderState()
        lastHeader := bk.headerList.Back().Value.(*types.BlockHeader)
        for ; lastHeader.Hash() != checkPoint.Hash; lastHeader = bk.headerList.Back().Value.(*types.BlockHeader) {
                if lastHeader.Height >= checkPoint.Height {
                        return errors.Wrap(errPeerMisbehave, "peer is not in the checkpoint branch")
                }

                lastHash := lastHeader.Hash()
                headers, err := bk.requireHeaders([]*bc.Hash{&lastHash}, &checkPoint.Hash)
                if err != nil {
                        return err
                }

                if len(headers) == 0 {
                        return errors.Wrap(errPeerMisbehave, "requireHeaders return empty list")
                }

                if err := bk.appendHeaderList(headers); err != nil {
                        return err
                }
        }

        fastHeader := bk.headerList.Front()
        for bk.chain.BestBlockHeight() < checkPoint.Height {
                locator := bk.blockLocator()
                blocks, err := bk.requireBlocks(locator, &checkPoint.Hash)
                if err != nil {
                        return err
                }

                if len(blocks) == 0 {
                        return errors.Wrap(errPeerMisbehave, "requireBlocks return empty list")
                }

                for _, block := range blocks {
                        if fastHeader = fastHeader.Next(); fastHeader == nil {
                                return errors.New("got block that is higher than checkpoint")
                        }

                        blockHash := block.Hash()
                        if blockHash != fastHeader.Value.(*types.BlockHeader).Hash() {
                                return errPeerMisbehave
                        }

                        _, err := bk.chain.ProcessBlock(block)
                        if err != nil {
                                return errors.Wrap(err, "fail on fastBlockSync process block")
                        }
                }
        }
        return nil
}

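// locateBlocks returns up to maxBlockPerMsg blocks that follow the locator,
// ending at stopHash.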
func (bk *blockKeeper) locateBlocks(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.Block, error) {
        headers, err := bk.locateHeaders(locator, stopHash)
        if err != nil {
                return nil, err
        }

        blocks := []*types.Block{}
        for i, header := range headers {
                if uint64(i) >= maxBlockPerMsg {
                        break
                }

                headerHash := header.Hash()
                block, err := bk.chain.GetBlockByHash(&headerHash)
                if err != nil {
                        return nil, err
                }

                blocks = append(blocks, block)
        }
        return blocks, nil
}

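// locateHeaders returns the headers between the first locator hash found in
// the main chain (falling back to the genesis header) and stopHash, capped at
// maxBlockHeadersPerMsg entries.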
func (bk *blockKeeper) locateHeaders(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.BlockHeader, error) {
        stopHeader, err := bk.chain.GetHeaderByHash(stopHash)
        if err != nil {
                return nil, err
        }

        startHeader, err := bk.chain.GetHeaderByHeight(0)
        if err != nil {
                return nil, err
        }

        for _, hash := range locator {
                header, err := bk.chain.GetHeaderByHash(hash)
                if err == nil && bk.chain.InMainChain(header.Hash()) {
                        startHeader = header
                        break
                }
        }

        totalHeaders := stopHeader.Height - startHeader.Height
        if totalHeaders > maxBlockHeadersPerMsg {
                totalHeaders = maxBlockHeadersPerMsg
        }

        headers := []*types.BlockHeader{}
        for i := uint64(1); i <= totalHeaders; i++ {
                header, err := bk.chain.GetHeaderByHeight(startHeader.Height + i)
                if err != nil {
                        return nil, err
                }

                headers = append(headers, header)
        }
        return headers, nil
}

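// nextCheckpoint returns the lowest checkpoint above the current best block
// height, or nil when there are no checkpoints or the best height has already
// reached the last one.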
func (bk *blockKeeper) nextCheckpoint() *consensus.Checkpoint {
        height := bk.chain.BestBlockHeader().Height
        checkpoints := consensus.ActiveNetParams.Checkpoints
        if len(checkpoints) == 0 || height >= checkpoints[len(checkpoints)-1].Height {
                return nil
        }

        nextCheckpoint := &checkpoints[len(checkpoints)-1]
        for i := len(checkpoints) - 2; i >= 0; i-- {
                if height >= checkpoints[i].Height {
                        break
                }
                nextCheckpoint = &checkpoints[i]
        }
        return nextCheckpoint
}

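// processBlock queues a block received from a peer for the keeper to consume.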
func (bk *blockKeeper) processBlock(peerID string, block *types.Block) {
        bk.blockProcessCh <- &blockMsg{block: block, peerID: peerID}
}

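// processBlocks queues a batch of blocks received from a peer.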
func (bk *blockKeeper) processBlocks(peerID string, blocks []*types.Block) {
        bk.blocksProcessCh <- &blocksMsg{blocks: blocks, peerID: peerID}
}

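// processHeaders queues a batch of headers received from a peer.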
func (bk *blockKeeper) processHeaders(peerID string, headers []*types.BlockHeader) {
        bk.headersProcessCh <- &headersMsg{headers: headers, peerID: peerID}
}

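// regularBlockSync requests blocks one height at a time from the sync peer and
// processes them until the chain reaches wantHeight; an orphan block causes the
// previous height to be re-requested.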
func (bk *blockKeeper) regularBlockSync(wantHeight uint64) error {
        i := bk.chain.BestBlockHeight() + 1
        for i <= wantHeight {
                block, err := bk.requireBlock(i)
                if err != nil {
                        return err
                }

                isOrphan, err := bk.chain.ProcessBlock(block)
                if err != nil {
                        return err
                }

                if isOrphan {
                        i--
                        continue
                }
                i = bk.chain.BestBlockHeight() + 1
        }
        return nil
}

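// requireBlock asks the sync peer for the block at the given height and waits
// for the matching response or a timeout.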
func (bk *blockKeeper) requireBlock(height uint64) (*types.Block, error) {
        if ok := bk.syncPeer.getBlockByHeight(height); !ok {
                return nil, errPeerDropped
        }

        waitTicker := time.NewTimer(syncTimeout)
        for {
                select {
                case msg := <-bk.blockProcessCh:
                        if msg.peerID != bk.syncPeer.ID() {
                                continue
                        }
                        if msg.block.Height != height {
                                continue
                        }
                        return msg.block, nil
                case <-waitTicker.C:
                        return nil, errors.Wrap(errRequestTimeout, "requireBlock")
                }
        }
}

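// requireBlocks asks the sync peer for the blocks identified by the locator
// and stop hash and waits for the response or a timeout.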
func (bk *blockKeeper) requireBlocks(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.Block, error) {
        if ok := bk.syncPeer.getBlocks(locator, stopHash); !ok {
                return nil, errPeerDropped
        }

        waitTicker := time.NewTimer(syncTimeout)
        for {
                select {
                case msg := <-bk.blocksProcessCh:
                        if msg.peerID != bk.syncPeer.ID() {
                                continue
                        }
                        return msg.blocks, nil
                case <-waitTicker.C:
                        return nil, errors.Wrap(errRequestTimeout, "requireBlocks")
                }
        }
}

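// requireHeaders asks the sync peer for the headers identified by the locator
// and stop hash and waits for the response or a timeout.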
func (bk *blockKeeper) requireHeaders(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.BlockHeader, error) {
        if ok := bk.syncPeer.getHeaders(locator, stopHash); !ok {
                return nil, errPeerDropped
        }

        waitTicker := time.NewTimer(syncTimeout)
        for {
                select {
                case msg := <-bk.headersProcessCh:
                        if msg.peerID != bk.syncPeer.ID() {
                                continue
                        }
                        return msg.headers, nil
                case <-waitTicker.C:
                        return nil, errors.Wrap(errRequestTimeout, "requireHeaders")
                }
        }
}

// resetHeaderState sets the headers-first mode state to values appropriate for
// syncing from a new peer.
func (bk *blockKeeper) resetHeaderState() {
        header := bk.chain.BestBlockHeader()
        bk.headerList.Init()
        if bk.nextCheckpoint() != nil {
                bk.headerList.PushBack(header)
        }
}

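// startSync picks the best available peer and runs one sync round: headers-first
// fast sync toward the next checkpoint when a fast-sync peer is tall enough,
// otherwise regular block sync (at most maxBlockPerMsg blocks) against a full
// node that is ahead of the local chain. It returns true only if a sync round
// ran and completed without error.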
func (bk *blockKeeper) startSync() bool {
        checkPoint := bk.nextCheckpoint()
        peer := bk.peers.bestPeer(consensus.SFFastSync | consensus.SFFullNode)
        if peer != nil && checkPoint != nil && peer.Height() >= checkPoint.Height {
                bk.syncPeer = peer
                if err := bk.fastBlockSync(checkPoint); err != nil {
                        log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on fastBlockSync")
                        bk.peers.errorHandler(peer.ID(), err)
                        return false
                }
                return true
        }

        blockHeight := bk.chain.BestBlockHeight()
        peer = bk.peers.bestPeer(consensus.SFFullNode)
        if peer != nil && peer.Height() > blockHeight {
                bk.syncPeer = peer
                targetHeight := blockHeight + maxBlockPerMsg
                if targetHeight > peer.Height() {
                        targetHeight = peer.Height()
                }

                if err := bk.regularBlockSync(targetHeight); err != nil {
                        log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on regularBlockSync")
                        bk.peers.errorHandler(peer.ID(), err)
                        return false
                }
                return true
        }
        return false
}

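// syncWorker loads the genesis block, then on every syncCycle tick attempts a
// sync round and, after a successful one, broadcasts the new best block and an
// updated status to peers.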
func (bk *blockKeeper) syncWorker() {
        genesisBlock, err := bk.chain.GetBlockByHeight(0)
        if err != nil {
                log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker get genesis block")
                return
        }
        syncTicker := time.NewTicker(syncCycle)
        for {
                <-syncTicker.C
                if update := bk.startSync(); !update {
                        continue
                }

                block, err := bk.chain.GetBlockByHeight(bk.chain.BestBlockHeight())
                if err != nil {
                        log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker get best block")
                        // skip broadcasting when the best block could not be loaded
                        continue
                }

                if err := bk.peers.broadcastMinedBlock(block); err != nil {
                        log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker broadcast new block")
                }

                if err = bk.peers.broadcastNewStatus(block, genesisBlock); err != nil {
                        log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker broadcast new status")
                }
        }
}