bytom/vapor.git / netsync/block_keeper.go
package netsync

import (
        "container/list"
        "time"

        log "github.com/sirupsen/logrus"

        "github.com/vapor/consensus"
        "github.com/vapor/errors"
        "github.com/vapor/mining/tensority"
        "github.com/vapor/protocol/bc"
        "github.com/vapor/protocol/bc/types"
)

const (
        syncCycle            = 5 * time.Second
        blockProcessChSize   = 1024
        blocksProcessChSize  = 128
        headersProcessChSize = 1024
)

var (
        maxBlockPerMsg        = uint64(128)
        maxBlockHeadersPerMsg = uint64(2048)
        syncTimeout           = 30 * time.Second

        errAppendHeaders  = errors.New("fail to append list due to order mismatch")
        errRequestTimeout = errors.New("request timeout")
        errPeerDropped    = errors.New("peer dropped")
        errPeerMisbehave  = errors.New("peer is misbehaving")
)

type blockMsg struct {
        block  *types.Block
        peerID string
}

type blocksMsg struct {
        blocks []*types.Block
        peerID string
}

type headersMsg struct {
        headers []*types.BlockHeader
        peerID  string
}

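// blockKeeper drives block synchronization for the node: it tracks the
// current sync peer and funnels incoming block, blocks and header messages
// through buffered channels to the goroutine that requested them.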
type blockKeeper struct {
        chain Chain
        peers *peerSet

        syncPeer         *peer
        blockProcessCh   chan *blockMsg
        blocksProcessCh  chan *blocksMsg
        headersProcessCh chan *headersMsg

        headerList *list.List
}

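// newBlockKeeper creates a blockKeeper bound to the given chain and peer set,
// resets the header-first sync state and starts the background sync worker.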
func newBlockKeeper(chain Chain, peers *peerSet) *blockKeeper {
        bk := &blockKeeper{
                chain:            chain,
                peers:            peers,
                blockProcessCh:   make(chan *blockMsg, blockProcessChSize),
                blocksProcessCh:  make(chan *blocksMsg, blocksProcessChSize),
                headersProcessCh: make(chan *headersMsg, headersProcessChSize),
                headerList:       list.New(),
        }
        bk.resetHeaderState()
        go bk.syncWorker()
        return bk
}

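// appendHeaderList appends headers to the in-memory header list, verifying
// that each header links to the hash of the one before it.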
func (bk *blockKeeper) appendHeaderList(headers []*types.BlockHeader) error {
        for _, header := range headers {
                prevHeader := bk.headerList.Back().Value.(*types.BlockHeader)
                if prevHeader.Hash() != header.PreviousBlockHash {
                        return errAppendHeaders
                }
                bk.headerList.PushBack(header)
        }
        return nil
}

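// blockLocator builds a block locator: a list of header hashes starting from
// the best block and walking back toward genesis with exponentially growing
// steps, so a peer can cheaply find the common ancestor.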
func (bk *blockKeeper) blockLocator() []*bc.Hash {
        header := bk.chain.BestBlockHeader()
        locator := []*bc.Hash{}

        step := uint64(1)
        for header != nil {
                headerHash := header.Hash()
                locator = append(locator, &headerHash)
                if header.Height == 0 {
                        break
                }

                var err error
                if header.Height < step {
                        header, err = bk.chain.GetHeaderByHeight(0)
                } else {
                        header, err = bk.chain.GetHeaderByHeight(header.Height - step)
                }
                if err != nil {
                        log.WithFields(log.Fields{"module": logModule, "err": err}).Error("blockKeeper fail on get blockLocator")
                        break
                }

                if len(locator) >= 9 {
                        step *= 2
                }
        }
        return locator
}

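// fastBlockSync downloads headers up to the given checkpoint, then fetches the
// corresponding blocks and processes them, seeding the tensority hash cache so
// the expensive proof-of-work computation is skipped for checkpointed blocks.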
func (bk *blockKeeper) fastBlockSync(checkPoint *consensus.Checkpoint) error {
        bk.resetHeaderState()
        lastHeader := bk.headerList.Back().Value.(*types.BlockHeader)
        for ; lastHeader.Hash() != checkPoint.Hash; lastHeader = bk.headerList.Back().Value.(*types.BlockHeader) {
                if lastHeader.Height >= checkPoint.Height {
                        return errors.Wrap(errPeerMisbehave, "peer is not in the checkpoint branch")
                }

                lastHash := lastHeader.Hash()
                headers, err := bk.requireHeaders([]*bc.Hash{&lastHash}, &checkPoint.Hash)
                if err != nil {
                        return err
                }

                if len(headers) == 0 {
                        return errors.Wrap(errPeerMisbehave, "requireHeaders returned an empty list")
                }

                if err := bk.appendHeaderList(headers); err != nil {
                        return err
                }
        }

        fastHeader := bk.headerList.Front()
        for bk.chain.BestBlockHeight() < checkPoint.Height {
                locator := bk.blockLocator()
                blocks, err := bk.requireBlocks(locator, &checkPoint.Hash)
                if err != nil {
                        return err
                }

                if len(blocks) == 0 {
                        return errors.Wrap(errPeerMisbehave, "requireBlocks returned an empty list")
                }

                for _, block := range blocks {
                        if fastHeader = fastHeader.Next(); fastHeader == nil {
                                return errors.New("got block that is higher than checkpoint")
                        }

                        blockHash := block.Hash()
                        if blockHash != fastHeader.Value.(*types.BlockHeader).Hash() {
                                return errPeerMisbehave
                        }

                        seed, err := bk.chain.CalcNextSeed(&block.PreviousBlockHash)
                        if err != nil {
                                return errors.Wrap(err, "fail on fastBlockSync calculate seed")
                        }

                        tensority.AIHash.AddCache(&blockHash, seed, &bc.Hash{})
                        _, err = bk.chain.ProcessBlock(block)
                        tensority.AIHash.RemoveCache(&blockHash, seed)
                        if err != nil {
                                return errors.Wrap(err, "fail on fastBlockSync process block")
                        }
                }
        }
        return nil
}

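// locateBlocks resolves the locator against the main chain via locateHeaders
// and returns up to maxBlockPerMsg of the corresponding blocks.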
func (bk *blockKeeper) locateBlocks(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.Block, error) {
        headers, err := bk.locateHeaders(locator, stopHash)
        if err != nil {
                return nil, err
        }

        blocks := []*types.Block{}
        for i, header := range headers {
                if uint64(i) >= maxBlockPerMsg {
                        break
                }

                headerHash := header.Hash()
                block, err := bk.chain.GetBlockByHash(&headerHash)
                if err != nil {
                        return nil, err
                }

                blocks = append(blocks, block)
        }
        return blocks, nil
}

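// locateHeaders finds the first locator hash that is on the main chain
// (falling back to genesis) and returns up to maxBlockHeadersPerMsg headers
// following it, bounded by the height of stopHash.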
func (bk *blockKeeper) locateHeaders(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.BlockHeader, error) {
        stopHeader, err := bk.chain.GetHeaderByHash(stopHash)
        if err != nil {
                return nil, err
        }

        startHeader, err := bk.chain.GetHeaderByHeight(0)
        if err != nil {
                return nil, err
        }

        for _, hash := range locator {
                header, err := bk.chain.GetHeaderByHash(hash)
                if err == nil && bk.chain.InMainChain(header.Hash()) {
                        startHeader = header
                        break
                }
        }

        totalHeaders := stopHeader.Height - startHeader.Height
        if totalHeaders > maxBlockHeadersPerMsg {
                totalHeaders = maxBlockHeadersPerMsg
        }

        headers := []*types.BlockHeader{}
        for i := uint64(1); i <= totalHeaders; i++ {
                header, err := bk.chain.GetHeaderByHeight(startHeader.Height + i)
                if err != nil {
                        return nil, err
                }

                headers = append(headers, header)
        }
        return headers, nil
}

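// nextCheckpoint returns the first checkpoint above the current best height,
// or nil if the chain has already passed the last configured checkpoint.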
func (bk *blockKeeper) nextCheckpoint() *consensus.Checkpoint {
        height := bk.chain.BestBlockHeader().Height
        checkpoints := consensus.ActiveNetParams.Checkpoints
        if len(checkpoints) == 0 || height >= checkpoints[len(checkpoints)-1].Height {
                return nil
        }

        nextCheckpoint := &checkpoints[len(checkpoints)-1]
        for i := len(checkpoints) - 2; i >= 0; i-- {
                if height >= checkpoints[i].Height {
                        break
                }
                nextCheckpoint = &checkpoints[i]
        }
        return nextCheckpoint
}

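// processBlock, processBlocks and processHeaders hand messages received from
// the network over to the goroutine waiting on the matching channel.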
func (bk *blockKeeper) processBlock(peerID string, block *types.Block) {
        bk.blockProcessCh <- &blockMsg{block: block, peerID: peerID}
}

func (bk *blockKeeper) processBlocks(peerID string, blocks []*types.Block) {
        bk.blocksProcessCh <- &blocksMsg{blocks: blocks, peerID: peerID}
}

func (bk *blockKeeper) processHeaders(peerID string, headers []*types.BlockHeader) {
        bk.headersProcessCh <- &headersMsg{headers: headers, peerID: peerID}
}

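// regularBlockSync requests blocks from the sync peer one height at a time and
// processes them until the chain reaches wantHeight; an orphan block causes
// the preceding height to be requested again.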
func (bk *blockKeeper) regularBlockSync(wantHeight uint64) error {
        i := bk.chain.BestBlockHeight() + 1
        for i <= wantHeight {
                block, err := bk.requireBlock(i)
                if err != nil {
                        return err
                }

                isOrphan, err := bk.chain.ProcessBlock(block)
                if err != nil {
                        return err
                }

                if isOrphan {
                        i--
                        continue
                }
                i = bk.chain.BestBlockHeight() + 1
        }
        return nil
}

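// requireBlock asks the sync peer for the block at the given height and waits
// for the matching response, giving up after syncTimeout.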
func (bk *blockKeeper) requireBlock(height uint64) (*types.Block, error) {
        if ok := bk.syncPeer.getBlockByHeight(height); !ok {
                return nil, errPeerDropped
        }

        waitTicker := time.NewTimer(syncTimeout)
        for {
                select {
                case msg := <-bk.blockProcessCh:
                        if msg.peerID != bk.syncPeer.ID() {
                                continue
                        }
                        if msg.block.Height != height {
                                continue
                        }
                        return msg.block, nil
                case <-waitTicker.C:
                        return nil, errors.Wrap(errRequestTimeout, "requireBlock")
                }
        }
}

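// requireBlocks asks the sync peer for the blocks identified by the locator
// and stop hash, waiting up to syncTimeout for the reply.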
func (bk *blockKeeper) requireBlocks(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.Block, error) {
        if ok := bk.syncPeer.getBlocks(locator, stopHash); !ok {
                return nil, errPeerDropped
        }

        waitTicker := time.NewTimer(syncTimeout)
        for {
                select {
                case msg := <-bk.blocksProcessCh:
                        if msg.peerID != bk.syncPeer.ID() {
                                continue
                        }
                        return msg.blocks, nil
                case <-waitTicker.C:
                        return nil, errors.Wrap(errRequestTimeout, "requireBlocks")
                }
        }
}

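// requireHeaders asks the sync peer for the headers identified by the locator
// and stop hash, waiting up to syncTimeout for the reply.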
func (bk *blockKeeper) requireHeaders(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.BlockHeader, error) {
        if ok := bk.syncPeer.getHeaders(locator, stopHash); !ok {
                return nil, errPeerDropped
        }

        waitTicker := time.NewTimer(syncTimeout)
        for {
                select {
                case msg := <-bk.headersProcessCh:
                        if msg.peerID != bk.syncPeer.ID() {
                                continue
                        }
                        return msg.headers, nil
                case <-waitTicker.C:
                        return nil, errors.Wrap(errRequestTimeout, "requireHeaders")
                }
        }
}

// resetHeaderState sets the headers-first mode state to values appropriate for
// syncing from a new peer.
func (bk *blockKeeper) resetHeaderState() {
        header := bk.chain.BestBlockHeader()
        bk.headerList.Init()
        if bk.nextCheckpoint() != nil {
                bk.headerList.PushBack(header)
        }
}

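// startSync picks the best available peer and runs one round of
// synchronization: checkpoint-based fast sync when a fast-sync peer above the
// next checkpoint is available, otherwise regular block-by-block sync. It
// returns true only if a sync round completed successfully.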
func (bk *blockKeeper) startSync() bool {
        checkPoint := bk.nextCheckpoint()
        peer := bk.peers.bestPeer(consensus.SFFastSync | consensus.SFFullNode)
        if peer != nil && checkPoint != nil && peer.Height() >= checkPoint.Height {
                bk.syncPeer = peer
                if err := bk.fastBlockSync(checkPoint); err != nil {
                        log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on fastBlockSync")
                        bk.peers.errorHandler(peer.ID(), err)
                        return false
                }
                return true
        }

        blockHeight := bk.chain.BestBlockHeight()
        peer = bk.peers.bestPeer(consensus.SFFullNode)
        if peer != nil && peer.Height() > blockHeight {
                bk.syncPeer = peer
                targetHeight := blockHeight + maxBlockPerMsg
                if targetHeight > peer.Height() {
                        targetHeight = peer.Height()
                }

                if err := bk.regularBlockSync(targetHeight); err != nil {
                        log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on regularBlockSync")
                        bk.peers.errorHandler(peer.ID(), err)
                        return false
                }
                return true
        }
        return false
}

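// syncWorker runs in its own goroutine, starting a sync attempt every
// syncCycle and, after a successful round, broadcasting the new best block and
// chain status to peers.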
func (bk *blockKeeper) syncWorker() {
        genesisBlock, err := bk.chain.GetBlockByHeight(0)
        if err != nil {
                log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker get genesis block")
                return
        }
        syncTicker := time.NewTicker(syncCycle)
        for {
                <-syncTicker.C
                if update := bk.startSync(); !update {
                        continue
                }

                block, err := bk.chain.GetBlockByHeight(bk.chain.BestBlockHeight())
                if err != nil {
                        log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker get best block")
                        // skip broadcasting when the best block could not be loaded
                        continue
                }

                if err := bk.peers.broadcastMinedBlock(block); err != nil {
                        log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker broadcast new block")
                }

                if err = bk.peers.broadcastNewStatus(block, genesisBlock); err != nil {
                        log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker broadcast new status")
                }
        }
}