9 "github.com/bytom/bytom/consensus"
10 dbm "github.com/bytom/bytom/database/leveldb"
11 "github.com/bytom/bytom/errors"
12 msgs "github.com/bytom/bytom/netsync/messages"
13 "github.com/bytom/bytom/netsync/peers"
14 "github.com/bytom/bytom/protocol"
15 "github.com/bytom/bytom/protocol/bc"
16 "github.com/bytom/bytom/protocol/bc/types"
17 "github.com/bytom/bytom/test/mock"
18 "github.com/bytom/bytom/testcontrol"
19 "github.com/bytom/bytom/testutil"
// TestCheckSyncType checks that blockKeeper.checkSyncType picks the expected
// sync strategy (fastSyncType vs regularSyncType) from the connected peers'
// advertised best / irreversible heights relative to a local mock chain of
// 50 blocks.
// NOTE(review): this chunk is elided (interior source lines are missing), so
// comments below describe only the visible fragments — confirm against the
// full file.
22 func TestCheckSyncType(t *testing.T) {
// Temporary on-disk leveldb backing the fast-sync state.
23 tmp, err := ioutil.TempDir(".", "")
25 t.Fatalf("failed to create temporary data folder: %v", err)
27 fastSyncDB := dbm.NewDB("testdb", "leveldb", tmp)
// Local chain: 50 mocked blocks, best header set to the last one.
33 blocks := mockBlocks(nil, 50)
34 chain := mock.NewChain()
35 chain.SetBestBlockHeader(&blocks[len(blocks)-1].BlockHeader)
36 for _, block := range blocks {
37 chain.SetBlockByHeight(block.Height, block)
// Per-case peer descriptor: service flags plus the two heights the
// checkSyncType decision is based on.
40 type syncPeer struct {
43 irreversibleHeight uint64
// Case: a peer's irreversible height (500) is far ahead of local (50)
// and the peer supports SFFastSync -> expect fast sync.
56 {peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 1000, irreversibleHeight: 500},
57 {peer: &P2PPeer{id: "peer2", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 50, irreversibleHeight: 50},
59 syncType: fastSyncType,
// Case: peers are ahead by best height but irreversible heights are close
// to local -> regular sync.
63 {peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 1000, irreversibleHeight: 100},
64 {peer: &P2PPeer{id: "peer2", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 500, irreversibleHeight: 50},
66 syncType: regularSyncType,
// Case: peer only one block ahead -> regular sync.
70 {peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 51, irreversibleHeight: 50},
72 syncType: regularSyncType,
// Case: peer is behind the local chain (visible expectation elided).
76 {peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 30, irreversibleHeight: 30},
// Case: far-ahead peer WITHOUT the SFFastSync flag -> regular sync only.
82 {peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode}, bestHeight: 1000, irreversibleHeight: 1000},
84 syncType: regularSyncType,
// Case: one fast-sync-capable peer with high irreversible height (800)
// is enough to choose fast sync even though another lags.
88 {peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 1000, irreversibleHeight: 50},
89 {peer: &P2PPeer{id: "peer2", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 800, irreversibleHeight: 800},
91 syncType: fastSyncType,
// Drive each case: build a fresh peer set, register the case's peers with
// their statuses, then compare checkSyncType's answer.
95 for i, c := range cases {
96 peers := peers.NewPeerSet(NewPeerSet())
97 blockKeeper := newBlockKeeper(chain, peers, fastSyncDB)
98 for _, syncPeer := range c.peers {
99 blockKeeper.peers.AddPeer(syncPeer.peer)
100 blockKeeper.peers.SetStatus(syncPeer.peer.id, syncPeer.bestHeight, nil)
101 blockKeeper.peers.SetJustifiedStatus(syncPeer.peer.id, syncPeer.irreversibleHeight, nil)
103 gotType := blockKeeper.checkSyncType()
104 if c.syncType != gotType {
105 t.Errorf("case %d: got %d want %d", i, gotType, c.syncType)
// TestRegularBlockSync exercises blockKeeper.regularBlockSync between two
// mocked nodes (A syncs from B) across several fork scenarios, including a
// request timeout and a chain containing an invalid block.
// NOTE(review): interior lines of this chunk are elided; comments cover only
// what is visible.
110 func TestRegularBlockSync(t *testing.T) {
111 if testcontrol.IgnoreTestTemporary {
// Four chains sharing a 50-block base: X (+60), Y (+70), Z (+200), and E
// which contains a deliberately broken block (mockErrorBlocks at offset 60).
115 baseChain := mockBlocks(nil, 50)
116 chainX := append(baseChain, mockBlocks(baseChain[50], 60)...)
117 chainY := append(baseChain, mockBlocks(baseChain[50], 70)...)
118 chainZ := append(baseChain, mockBlocks(baseChain[50], 200)...)
119 chainE := append(baseChain, mockErrorBlocks(baseChain[50], 200, 60)...)
122 syncTimeout time.Duration
123 aBlocks []*types.Block
124 bBlocks []*types.Block
// Case: A simply behind B on the same chain; A ends with B's 50 blocks.
129 syncTimeout: 30 * time.Second,
130 aBlocks: baseChain[:20],
131 bBlocks: baseChain[:50],
132 want: baseChain[:50],
136 syncTimeout: 30 * time.Second,
// Case: A and B on different forks (X vs Y) past the common base.
143 syncTimeout: 30 * time.Second,
144 aBlocks: chainX[:52],
145 bBlocks: chainY[:53],
150 syncTimeout: 30 * time.Second,
151 aBlocks: chainX[:52],
// Case: zero timeout forces errRequestTimeout from the block request.
157 syncTimeout: 0 * time.Second,
158 aBlocks: chainX[:52],
161 err: errRequestTimeout,
// Case: syncing onto the error chain surfaces protocol.ErrBadStateRoot.
164 syncTimeout: 30 * time.Second,
165 aBlocks: chainX[:52],
168 err: protocol.ErrBadStateRoot,
171 tmp, err := ioutil.TempDir(".", "")
173 t.Fatalf("failed to create temporary data folder: %v", err)
175 testDBA := dbm.NewDB("testdba", "leveldb", tmp)
176 testDBB := dbm.NewDB("testdbb", "leveldb", tmp)
// Per case: spin up both mock nodes, register them on a mock network and
// handshake so each has the other as a peer.
183 for i, c := range cases {
184 a := mockSync(c.aBlocks, nil, testDBA)
185 b := mockSync(c.bBlocks, nil, testDBB)
186 netWork := NewNetWork()
187 netWork.Register(a, "192.168.0.1", "test node A", consensus.SFFullNode)
188 netWork.Register(b, "192.168.0.2", "test node B", consensus.SFFullNode)
189 if B2A, A2B, err := netWork.HandsShake(a, b); err != nil {
190 t.Errorf("fail on peer hands shake %v", err)
// NOTE(review): requireBlockTimeout is package-level state mutated per
// case — fine while these tests run serially, but not t.Parallel()-safe.
196 requireBlockTimeout = c.syncTimeout
197 a.blockKeeper.syncPeer = a.peers.GetPeer("test node B")
198 if err := a.blockKeeper.regularBlockSync(); errors.Root(err) != c.err {
199 t.Errorf("case %d: got %v want %v", i, err, c.err)
// Collect A's resulting chain from height 0 to best and compare with want.
202 got := []*types.Block{}
203 for i := uint64(0); i <= a.chain.BestBlockHeight(); i++ {
204 block, err := a.chain.GetBlockByHeight(i)
206 t.Errorf("case %d got err %v", i, err)
208 got = append(got, block)
211 if !testutil.DeepEqual(got, c.want) {
212 t.Errorf("case %d: got %v want %v", i, got, c.want)
// TestRequireBlock verifies msgFetcher.requireBlock fetches a single block
// from the sync peer, and that an over-short timeout yields errRequestTimeout.
// NOTE(review): elided chunk — comments cover only the visible fragments.
217 func TestRequireBlock(t *testing.T) {
218 if testcontrol.IgnoreTestTemporary {
222 tmp, err := ioutil.TempDir(".", "")
224 t.Fatalf("failed to create temporary data folder: %v", err)
226 testDBA := dbm.NewDB("testdba", "leveldb", tmp)
227 testDBB := dbm.NewDB("testdbb", "leveldb", tmp)
// Node A holds only block 0; node B holds blocks 0-4, so A must request
// from B (and B's request of A can exercise the failure path).
234 blocks := mockBlocks(nil, 5)
235 a := mockSync(blocks[:1], nil, testDBA)
236 b := mockSync(blocks[:5], nil, testDBB)
237 netWork := NewNetWork()
238 netWork.Register(a, "192.168.0.1", "test node A", consensus.SFFullNode)
239 netWork.Register(b, "192.168.0.2", "test node B", consensus.SFFullNode)
240 if B2A, A2B, err := netWork.HandsShake(a, b); err != nil {
241 t.Errorf("fail on peer hands shake %v", err)
// Wire each node's sync peer to the other.
247 a.blockKeeper.syncPeer = a.peers.GetPeer("test node B")
248 b.blockKeeper.syncPeer = b.peers.GetPeer("test node A")
250 syncTimeout time.Duration
// Case: generous timeout — request should succeed.
257 syncTimeout: 30 * time.Second,
// Case: 1 ms timeout — expect errRequestTimeout.
264 syncTimeout: 1 * time.Millisecond,
268 err: errRequestTimeout,
// NOTE(review): requireBlockTimeout is package-level and overwritten per
// case below; the 20 s value here is presumably a baseline/restore —
// confirm in the full file.
273 requireBlockTimeout = 20 * time.Second
276 for i, c := range cases {
277 requireBlockTimeout = c.syncTimeout
278 got, err := c.testNode.blockKeeper.msgFetcher.requireBlock(c.testNode.blockKeeper.syncPeer.ID(), c.requireHeight)
279 if !testutil.DeepEqual(got, c.want) {
280 t.Errorf("case %d: got %v want %v", i, got, c.want)
282 if errors.Root(err) != c.err {
283 t.Errorf("case %d: got %v want %v", i, err, c.err)
// TestSendMerkleBlock checks the SPV flow: a full node answering a
// GetMerkleBlockMessage must return a merkle proof that validates against the
// block's transaction merkle root for the SPV peer's filtered addresses.
// NOTE(review): elided chunk — comments cover only the visible fragments.
288 func TestSendMerkleBlock(t *testing.T) {
289 if testcontrol.IgnoreTestTemporary {
293 tmp, err := ioutil.TempDir(".", "")
295 t.Fatalf("failed to create temporary data folder: %v", err)
298 testDBA := dbm.NewDB("testdba", "leveldb", tmp)
299 testDBB := dbm.NewDB("testdbb", "leveldb", tmp)
// Cases vary the tx count and which tx indexes are "related" (watched)
// by the SPV peer's address filter; empty slices mean no related txs.
312 relatedTxIndex: []int{0, 2, 5},
316 relatedTxIndex: []int{},
320 relatedTxIndex: []int{},
324 relatedTxIndex: []int{0, 1, 2, 3, 4},
328 relatedTxIndex: []int{1, 6, 3, 9, 10, 19},
332 for _, c := range cases {
// Build a 2-block chain and replace block 1's transactions with mocked
// txs, recomputing the merkle root so the proof can be validated later.
333 blocks := mockBlocks(nil, 2)
334 targetBlock := blocks[1]
335 txs, bcTxs := mockTxs(c.txCount)
338 targetBlock.Transactions = txs
339 if targetBlock.TransactionsMerkleRoot, err = types.TxMerkleRoot(bcTxs); err != nil {
343 spvNode := mockSync(blocks, nil, testDBA)
344 fullNode := mockSync(blocks, nil, testDBB)
345 netWork := NewNetWork()
346 netWork.Register(spvNode, "192.168.0.1", "spv_node", consensus.SFFastSync)
347 netWork.Register(fullNode, "192.168.0.2", "full_node", consensus.DefaultServices)
350 if F2S, _, err = netWork.HandsShake(spvNode, fullNode); err != nil {
351 t.Errorf("fail on peer hands shake %v", err)
// Background receiver (presumably a goroutine — start elided): reads the
// full node's reply off the F2S channel, decodes the MerkleBlockMessage,
// and reports proof validity through `completed`.
354 completed := make(chan error)
356 msgBytes := <-F2S.msgCh
357 _, msg, _ := decodeMessage(msgBytes)
358 switch m := msg.(type) {
359 case *msgs.MerkleBlockMessage:
// Decode the raw related txs to recover their IDs.
360 var relatedTxIDs []*bc.Hash
361 for _, rawTx := range m.RawTxDatas {
363 if err := tx.UnmarshalText(rawTx); err != nil {
367 relatedTxIDs = append(relatedTxIDs, &tx.ID)
// Convert proof hashes into *bc.Hash for the verifier.
369 var txHashes []*bc.Hash
370 for _, hashByte := range m.TxHashes {
371 hash := bc.NewHash(hashByte)
372 txHashes = append(txHashes, &hash)
// The proof must validate against the recomputed merkle root.
374 if ok := types.ValidateTxMerkleTreeProof(txHashes, m.Flags, relatedTxIDs, targetBlock.TransactionsMerkleRoot); !ok {
375 completed <- errors.New("validate tx fail")
// Register the related txs' control programs as the SPV peer's filter,
// then ask the full node for the merkle block and await the verdict.
381 spvPeer := fullNode.peers.GetPeer("spv_node")
382 for i := 0; i < len(c.relatedTxIndex); i++ {
383 spvPeer.AddFilterAddress(txs[c.relatedTxIndex[i]].Outputs[0].ControlProgram)
385 msg := &msgs.GetMerkleBlockMessage{RawHash: targetBlock.Hash().Byte32()}
386 fullNode.handleGetMerkleBlockMsg(spvPeer, msg)
387 if err := <-completed; err != nil {
// TestLocateBlocks verifies blockKeeper.locateBlocks returns at most
// maxNumOfBlocksPerMsg blocks starting from the first known locator hash, and
// propagates mock.ErrFoundHeaderByHash for an unknown stop hash.
// NOTE(review): elided chunk — comments cover only the visible fragments.
393 func TestLocateBlocks(t *testing.T) {
394 if testcontrol.IgnoreTestTemporary {
// NOTE(review): package-level maxNumOfBlocksPerMsg is overwritten here and
// no restore is visible in this chunk — if none exists, the value leaks
// into later tests; confirm against the full file.
398 maxNumOfBlocksPerMsg = 5
399 blocks := mockBlocks(nil, 100)
// Case: locator at height 20 with a far stop hash — capped at 5 blocks.
407 locator: []uint64{20},
408 stopHash: blocks[100].Hash(),
409 wantHeight: []uint64{20, 21, 22, 23, 24},
// Case: stop hash not in the chain -> mock.ErrFoundHeaderByHash.
413 locator: []uint64{20},
414 stopHash: bc.NewHash([32]byte{0x01, 0x02}),
415 wantHeight: []uint64{},
416 wantErr: mock.ErrFoundHeaderByHash,
// Shared mock chain populated with all 100 blocks.
420 mockChain := mock.NewChain()
421 bk := &blockKeeper{chain: mockChain}
422 for _, block := range blocks {
423 mockChain.SetBlockByHeight(block.Height, block)
426 for i, c := range cases {
// Translate locator heights into hashes.
427 locator := []*bc.Hash{}
428 for _, i := range c.locator {
429 hash := blocks[i].Hash()
430 locator = append(locator, &hash)
// Expected blocks by height.
433 want := []*types.Block{}
434 for _, i := range c.wantHeight {
435 want = append(want, blocks[i])
// Timeout callback that never fires, so locateBlocks runs to completion.
438 mockTimeout := func() bool { return false }
439 got, err := bk.locateBlocks(locator, &c.stopHash, mockTimeout)
440 if err != c.wantErr {
441 t.Errorf("case %d: got %v want err = %v", i, err, c.wantErr)
444 if !testutil.DeepEqual(got, want) {
445 t.Errorf("case %d: got %v want %v", i, got, want)
// TestLocateHeaders verifies blockKeeper.locateHeaders: header ranges from the
// first known locator hash up to a stop hash, with a per-message cap, a skip
// interval, and error propagation for stop hashes beyond the chain tip.
// NOTE(review): elided chunk, and the function is cut off at the end of this
// view — comments cover only the visible fragments.
450 func TestLocateHeaders(t *testing.T) {
451 if testcontrol.IgnoreTestTemporary {
// Package-level cap is lowered to 10 for the test; the 1000 assignment is
// presumably the deferred restore of the original value — confirm.
456 maxNumOfHeadersPerMsg = 1000
458 maxNumOfHeadersPerMsg = 10
459 blocks := mockBlocks(nil, 150)
// Precompute every block hash so cases can reference stop hashes by height.
460 blocksHash := []bc.Hash{}
461 for _, block := range blocks {
462 blocksHash = append(blocksHash, block.Hash())
// Case: range 90..100 but capped at 10 headers -> heights 90-99.
475 locator: []uint64{90},
476 stopHash: &blocksHash[100],
478 wantHeight: []uint64{90, 91, 92, 93, 94, 95, 96, 97, 98, 99},
// Case: short range fully within the cap -> inclusive of the stop header.
483 locator: []uint64{20},
484 stopHash: &blocksHash[24],
486 wantHeight: []uint64{20, 21, 22, 23, 24},
// Case: locator equals stop -> single header.
491 locator: []uint64{20},
492 stopHash: &blocksHash[20],
493 wantHeight: []uint64{20},
// Case: stop hash above the chain height -> mock.ErrFoundHeaderByHash.
498 locator: []uint64{20},
499 stopHash: &blocksHash[120],
500 wantHeight: []uint64{},
501 err: mock.ErrFoundHeaderByHash,
// Case: first locator (120) unknown to the chain; falls back to the next
// known locator (70).
505 locator: []uint64{120, 70},
506 stopHash: &blocksHash[78],
507 wantHeight: []uint64{70, 71, 72, 73, 74, 75, 76, 77, 78},
// Case: stop hash below the locator -> empty result.
512 locator: []uint64{15},
513 stopHash: &blocksHash[10],
515 wantHeight: []uint64{},
// Case: skip interval sampling — strides of 11 (skip+1, presumably) with
// the stop header included at the end.
520 locator: []uint64{15},
521 stopHash: &blocksHash[80],
523 wantHeight: []uint64{15, 26, 37, 48, 59, 70, 80},
// Case: skip sampling from genesis, capped at 10 headers.
528 locator: []uint64{0},
529 stopHash: &blocksHash[100],
531 wantHeight: []uint64{0, 10, 20, 30, 40, 50, 60, 70, 80, 90},
536 for i, c := range cases {
// Fresh mock chain per case, populated only up to c.chainHeight so the
// "stop beyond tip" case can trigger its error.
537 mockChain := mock.NewChain()
538 bk := &blockKeeper{chain: mockChain}
539 for i := uint64(0); i <= c.chainHeight; i++ {
540 mockChain.SetBlockByHeight(i, blocks[i])
// Locator heights -> hashes.
543 locator := []*bc.Hash{}
544 for _, i := range c.locator {
545 hash := blocks[i].Hash()
546 locator = append(locator, &hash)
// Expected headers by height.
549 want := []*types.BlockHeader{}
550 for _, i := range c.wantHeight {
551 want = append(want, &blocks[i].BlockHeader)
554 got, err := bk.locateHeaders(locator, c.stopHash, c.skip, maxNumOfHeadersPerMsg)
556 t.Errorf("case %d: got %v want err = %v", i, err, c.err)
558 if !testutil.DeepEqual(got, want) {
559 t.Errorf("case %d: got %v want %v", i, got, want)