OSDN Git Service

feat(net/netsync/p2p/pow): Change network lib and remove pow (#1864)
authorjacksoom <lifengliu1994@gmail.com>
Wed, 7 Apr 2021 13:39:31 +0000 (21:39 +0800)
committerGitHub <noreply@github.com>
Wed, 7 Apr 2021 13:39:31 +0000 (21:39 +0800)
* feat(network/pow): change network lib. remove pow

* remove pow

remove pow

* remove magic delete

* remove pow field

87 files changed:
Makefile
api/api.go
api/block_retrieve.go
api/miner.go [deleted file]
api/nodeinfo.go
cmd/bytomd/commands/run_node.go
cmd/miner/main.go [deleted file]
config/genesis.go
consensus/difficulty/difficulty.go [deleted file]
consensus/difficulty/difficulty_test.go [deleted file]
event/event.go
mining/cpuminer/cpuminer.go [deleted file]
mining/mining.go [deleted file]
mining/mining_test.go [deleted file]
mining/miningpool/miningpool.go [deleted file]
mining/sort.go [deleted file]
mining/tensority/ai_hash.go [deleted file]
mining/tensority/cgo_algorithm/algorithm_simd.go [deleted file]
mining/tensority/cgo_algorithm/algorithm_unsupported.go [deleted file]
mining/tensority/cgo_algorithm/lib/BytomPoW.h [deleted file]
mining/tensority/cgo_algorithm/lib/Makefile [deleted file]
mining/tensority/cgo_algorithm/lib/byte_order-allInOne.h [deleted file]
mining/tensority/cgo_algorithm/lib/cSimdTs.cpp [deleted file]
mining/tensority/cgo_algorithm/lib/cSimdTs.h [deleted file]
mining/tensority/cgo_algorithm/lib/scrypt.h [deleted file]
mining/tensority/cgo_algorithm/lib/seed.h [deleted file]
mining/tensority/cgo_algorithm/lib/sha3-allInOne.h [deleted file]
mining/tensority/cgo_algorithm/lib/ustd.h [deleted file]
mining/tensority/go_algorithm/algorithm.go [deleted file]
mining/tensority/go_algorithm/algorithm_test.go [deleted file]
mining/tensority/go_algorithm/matrix.go [deleted file]
mining/tensority/go_algorithm/seed.go [deleted file]
netsync/block_fetcher_test.go [deleted file]
netsync/block_keeper.go [deleted file]
netsync/block_keeper_test.go [deleted file]
netsync/chainmgr/block_keeper.go [new file with mode: 0644]
netsync/chainmgr/block_keeper_test.go [new file with mode: 0644]
netsync/chainmgr/block_process.go [new file with mode: 0644]
netsync/chainmgr/block_process_test.go [new file with mode: 0644]
netsync/chainmgr/fast_sync.go [new file with mode: 0644]
netsync/chainmgr/fast_sync_test.go [new file with mode: 0644]
netsync/chainmgr/handle.go [new file with mode: 0644]
netsync/chainmgr/msg_fetcher.go [new file with mode: 0644]
netsync/chainmgr/peers.go [new file with mode: 0644]
netsync/chainmgr/peers_test.go [new file with mode: 0644]
netsync/chainmgr/protocol_reactor.go [moved from netsync/protocol_reactor.go with 57% similarity]
netsync/chainmgr/storage.go [new file with mode: 0644]
netsync/chainmgr/storage_test.go [new file with mode: 0644]
netsync/chainmgr/tool_test.go [moved from netsync/tool_test.go with 58% similarity]
netsync/chainmgr/tx_keeper.go [moved from netsync/tx_keeper.go with 82% similarity]
netsync/chainmgr/tx_keeper_test.go [new file with mode: 0644]
netsync/consensusmgr/block_fetcher.go [moved from netsync/block_fetcher.go with 54% similarity]
netsync/consensusmgr/block_fetcher_test.go [new file with mode: 0644]
netsync/consensusmgr/broadcast_msg.go [new file with mode: 0644]
netsync/consensusmgr/consensus_msg.go [new file with mode: 0644]
netsync/consensusmgr/consensus_msg_test.go [new file with mode: 0644]
netsync/consensusmgr/handle.go [new file with mode: 0644]
netsync/consensusmgr/handle_test.go [new file with mode: 0644]
netsync/consensusmgr/reactor.go [new file with mode: 0644]
netsync/handle.go [deleted file]
netsync/messages/chain_msg.go [moved from netsync/message.go with 81% similarity]
netsync/messages/chain_msg_test.go [new file with mode: 0644]
netsync/peer.go [deleted file]
netsync/peers/peer.go [new file with mode: 0644]
netsync/peers/peer_test.go [new file with mode: 0644]
netsync/sync_manager.go [new file with mode: 0644]
node/node.go
p2p/peer.go
protocol/bc/types/block_header.go
protocol/bc/types/block_header_test.go
protocol/bc/types/block_test.go
protocol/bc/types/map.go
protocol/block.go
protocol/consensus.go [new file with mode: 0644]
protocol/protocol.go
protocol/state/blockindex.go
protocol/state/blockindex_test.go
protocol/validation/block.go
protocol/validation/block_test.go
test/bench_blockchain_test.go
test/block_test_util.go
test/integration/block_integration_test.go
test/mock/chain.go
test/mock/mempool.go [new file with mode: 0644]
test/performance/mining_test.go [deleted file]
test/utxo_view/utxo_view_test_util.go
wallet/wallet_test.go

index d54f632..d5cdfdd 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -9,8 +9,8 @@ $(error "$$GOOS is not defined. If you are using Windows, try to re-make using '
 endif
 endif
 
-PACKAGES    := $(shell go list ./... | grep -v '/vendor/' | grep -v '/crypto/ed25519/chainkd' | grep -v '/mining/tensority')
-PACKAGES += 'github.com/bytom/bytom/mining/tensority/go_algorithm'
+PACKAGES    := $(shell go list ./... | grep -v '/vendor/' | grep -v '/crypto/ed25519/chainkd')
+
 
 BUILD_FLAGS := -ldflags "-X github.com/bytom/bytom/version.GitCommit=`git rev-parse HEAD`"
 
index f1e2b4b..d57d8bc 100644 (file)
@@ -18,14 +18,12 @@ import (
        "github.com/bytom/bytom/dashboard/equity"
        "github.com/bytom/bytom/errors"
        "github.com/bytom/bytom/event"
-       "github.com/bytom/bytom/mining/cpuminer"
-       "github.com/bytom/bytom/mining/miningpool"
        "github.com/bytom/bytom/net/http/authn"
        "github.com/bytom/bytom/net/http/gzip"
        "github.com/bytom/bytom/net/http/httpjson"
        "github.com/bytom/bytom/net/http/static"
        "github.com/bytom/bytom/net/websocket"
-       "github.com/bytom/bytom/netsync"
+       "github.com/bytom/bytom/netsync/peers"
        "github.com/bytom/bytom/p2p"
        "github.com/bytom/bytom/protocol"
        "github.com/bytom/bytom/wallet"
@@ -114,8 +112,6 @@ type API struct {
        server          *http.Server
        handler         http.Handler
        txFeedTracker   *txfeed.Tracker
-       cpuMiner        *cpuminer.CPUMiner
-       miningPool      *miningpool.MiningPool
        notificationMgr *websocket.WSNotificationManager
        eventDispatcher *event.Dispatcher
 }
@@ -175,23 +171,20 @@ type NetSync interface {
        IsCaughtUp() bool
        PeerCount() int
        GetNetwork() string
-       BestPeer() *netsync.PeerInfo
+       BestPeer() *peers.PeerInfo
        DialPeerWithAddress(addr *p2p.NetAddress) error
-       GetPeerInfos() []*netsync.PeerInfo
+       GetPeerInfos() []*peers.PeerInfo
        StopPeer(peerID string) error
 }
 
 // NewAPI create and initialize the API
-func NewAPI(sync NetSync, wallet *wallet.Wallet, txfeeds *txfeed.Tracker, cpuMiner *cpuminer.CPUMiner, miningPool *miningpool.MiningPool, chain *protocol.Chain, config *cfg.Config, token *accesstoken.CredentialStore, dispatcher *event.Dispatcher, notificationMgr *websocket.WSNotificationManager) *API {
+func NewAPI(sync NetSync, wallet *wallet.Wallet, txfeeds *txfeed.Tracker, chain *protocol.Chain, config *cfg.Config, token *accesstoken.CredentialStore, dispatcher *event.Dispatcher, notificationMgr *websocket.WSNotificationManager) *API {
        api := &API{
-               sync:          sync,
-               wallet:        wallet,
-               chain:         chain,
-               accessTokens:  token,
-               txFeedTracker: txfeeds,
-               cpuMiner:      cpuMiner,
-               miningPool:    miningPool,
-
+               sync:            sync,
+               wallet:          wallet,
+               chain:           chain,
+               accessTokens:    token,
+               txFeedTracker:   txfeeds,
                eventDispatcher: dispatcher,
                notificationMgr: notificationMgr,
        }
@@ -224,9 +217,6 @@ func (a *API) buildHandler() {
                m.Handle("/get-mining-address", jsonHandler(a.getMiningAddress))
                m.Handle("/set-mining-address", jsonHandler(a.setMiningAddress))
 
-               m.Handle("/get-coinbase-arbitrary", jsonHandler(a.getCoinbaseArbitrary))
-               m.Handle("/set-coinbase-arbitrary", jsonHandler(a.setCoinbaseArbitrary))
-
                m.Handle("/create-asset", jsonHandler(a.createAsset))
                m.Handle("/update-asset-alias", jsonHandler(a.updateAssetAlias))
                m.Handle("/get-asset", jsonHandler(a.getAsset))
@@ -290,17 +280,6 @@ func (a *API) buildHandler() {
        m.Handle("/get-block-hash", jsonHandler(a.getBestBlockHash))
        m.Handle("/get-block-header", jsonHandler(a.getBlockHeader))
        m.Handle("/get-block-count", jsonHandler(a.getBlockCount))
-       m.Handle("/get-difficulty", jsonHandler(a.getDifficulty))
-       m.Handle("/get-hash-rate", jsonHandler(a.getHashRate))
-
-       m.Handle("/is-mining", jsonHandler(a.isMining))
-       m.Handle("/set-mining", jsonHandler(a.setMining))
-
-       m.Handle("/get-work", jsonHandler(a.getWork))
-       m.Handle("/get-work-json", jsonHandler(a.getWorkJSON))
-       m.Handle("/submit-block", jsonHandler(a.submitBlock))
-       m.Handle("/submit-work", jsonHandler(a.submitWork))
-       m.Handle("/submit-work-json", jsonHandler(a.submitWorkJSON))
 
        m.Handle("/verify-message", jsonHandler(a.verifyMessage))
        m.Handle("/compile", jsonHandler(a.compileEquity))
index 37642e2..8e90f72 100644 (file)
@@ -1,14 +1,10 @@
 package api
 
 import (
-       "math/big"
-
        "gopkg.in/fatih/set.v0"
 
        "github.com/bytom/bytom/blockchain/query"
-       "github.com/bytom/bytom/consensus/difficulty"
        chainjson "github.com/bytom/bytom/encoding/json"
-       "github.com/bytom/bytom/errors"
        "github.com/bytom/bytom/protocol/bc"
        "github.com/bytom/bytom/protocol/bc/types"
 )
@@ -52,8 +48,6 @@ type GetBlockResp struct {
        PreviousBlockHash      *bc.Hash   `json:"previous_block_hash"`
        Timestamp              uint64     `json:"timestamp"`
        Nonce                  uint64     `json:"nonce"`
-       Bits                   uint64     `json:"bits"`
-       Difficulty             string     `json:"difficulty"`
        TransactionsMerkleRoot *bc.Hash   `json:"transaction_merkle_root"`
        TransactionStatusHash  *bc.Hash   `json:"transaction_status_hash"`
        Transactions           []*BlockTx `json:"transactions"`
@@ -81,8 +75,6 @@ func (a *API) getBlock(ins BlockReq) Response {
                PreviousBlockHash:      &block.PreviousBlockHash,
                Timestamp:              block.Timestamp,
                Nonce:                  block.Nonce,
-               Bits:                   block.Bits,
-               Difficulty:             difficulty.CalcWork(block.Bits).String(),
                TransactionsMerkleRoot: &block.TransactionsMerkleRoot,
                TransactionStatusHash:  &block.TransactionStatusHash,
                Transactions:           []*BlockTx{},
@@ -181,72 +173,6 @@ func hexBytesToHash(hexBytes chainjson.HexBytes) bc.Hash {
        return bc.NewHash(b32)
 }
 
-// GetDifficultyResp is resp struct for getDifficulty API
-type GetDifficultyResp struct {
-       BlockHash   *bc.Hash `json:"hash"`
-       BlockHeight uint64   `json:"height"`
-       Bits        uint64   `json:"bits"`
-       Difficulty  string   `json:"difficulty"`
-}
-
-func (a *API) getDifficulty(ins BlockReq) Response {
-       block, err := a.getBlockHelper(ins)
-       if err != nil {
-               return NewErrorResponse(err)
-       }
-
-       blockHash := block.Hash()
-       resp := &GetDifficultyResp{
-               BlockHash:   &blockHash,
-               BlockHeight: block.Height,
-               Bits:        block.Bits,
-               Difficulty:  difficulty.CalcWork(block.Bits).String(),
-       }
-       return NewSuccessResponse(resp)
-}
-
-// getHashRateResp is resp struct for getHashRate API
-type getHashRateResp struct {
-       BlockHash   *bc.Hash `json:"hash"`
-       BlockHeight uint64   `json:"height"`
-       HashRate    uint64   `json:"hash_rate"`
-}
-
-func (a *API) getHashRate(ins BlockReq) Response {
-       if len(ins.BlockHash) != 32 && len(ins.BlockHash) != 0 {
-               err := errors.New("Block hash format error.")
-               return NewErrorResponse(err)
-       }
-       if ins.BlockHeight == 0 {
-               ins.BlockHeight = a.chain.BestBlockHeight()
-       }
-
-       block, err := a.getBlockHelper(ins)
-       if err != nil {
-               return NewErrorResponse(err)
-       }
-
-       preBlock, err := a.chain.GetBlockByHash(&block.PreviousBlockHash)
-       if err != nil {
-               return NewErrorResponse(err)
-       }
-
-       diffTime := block.Timestamp - preBlock.Timestamp
-       if preBlock.Timestamp >= block.Timestamp {
-               diffTime = 1
-       }
-       hashCount := difficulty.CalcWork(block.Bits)
-       hashRate := new(big.Int).Div(hashCount, big.NewInt(int64(diffTime)))
-
-       blockHash := block.Hash()
-       resp := &getHashRateResp{
-               BlockHash:   &blockHash,
-               BlockHeight: block.Height,
-               HashRate:    hashRate.Uint64(),
-       }
-       return NewSuccessResponse(resp)
-}
-
 // MerkleBlockReq is used to handle getTxOutProof req
 type MerkleBlockReq struct {
        TxIDs     []chainjson.HexBytes `json:"tx_ids"`
diff --git a/api/miner.go b/api/miner.go
deleted file mode 100644 (file)
index 029993e..0000000
+++ /dev/null
@@ -1,218 +0,0 @@
-package api
-
-import (
-       "context"
-       "strconv"
-
-       chainjson "github.com/bytom/bytom/encoding/json"
-       "github.com/bytom/bytom/errors"
-       "github.com/bytom/bytom/event"
-       "github.com/bytom/bytom/protocol/bc"
-       "github.com/bytom/bytom/protocol/bc/types"
-)
-
-// BlockHeaderJSON struct provides support for get work in json format, when it also follows
-// BlockHeader structure
-type BlockHeaderJSON struct {
-       Version           uint64                 `json:"version"`             // The version of the block.
-       Height            uint64                 `json:"height"`              // The height of the block.
-       PreviousBlockHash bc.Hash                `json:"previous_block_hash"` // The hash of the previous block.
-       Timestamp         uint64                 `json:"timestamp"`           // The time of the block in seconds.
-       Nonce             uint64                 `json:"nonce"`               // Nonce used to generate the block.
-       Bits              uint64                 `json:"bits"`                // Difficulty target for the block.
-       BlockCommitment   *types.BlockCommitment `json:"block_commitment"`    // Block commitment
-}
-
-type CoinbaseArbitrary struct {
-       Arbitrary chainjson.HexBytes `json:"arbitrary"`
-}
-
-func (a *API) getCoinbaseArbitrary() Response {
-       arbitrary := a.wallet.AccountMgr.GetCoinbaseArbitrary()
-       resp := &CoinbaseArbitrary{
-               Arbitrary: arbitrary,
-       }
-       return NewSuccessResponse(resp)
-}
-
-// setCoinbaseArbitrary add arbitary data to the reserved coinbase data.
-// check function createCoinbaseTx in mining/mining.go for detail.
-// arbitraryLenLimit is 107 and can be calculated by:
-//     maxHeight := ^uint64(0)
-//     reserved := append([]byte{0x00}, []byte(strconv.FormatUint(maxHeight, 10))...)
-//     arbitraryLenLimit := consensus.CoinbaseArbitrarySizeLimit - len(reserved)
-func (a *API) setCoinbaseArbitrary(ctx context.Context, req CoinbaseArbitrary) Response {
-       arbitraryLenLimit := 107
-       if len(req.Arbitrary) > arbitraryLenLimit {
-               err := errors.New("Arbitrary exceeds limit: " + strconv.FormatUint(uint64(arbitraryLenLimit), 10))
-               return NewErrorResponse(err)
-       }
-       a.wallet.AccountMgr.SetCoinbaseArbitrary(req.Arbitrary)
-       return a.getCoinbaseArbitrary()
-}
-
-// getWork gets work in compressed protobuf format
-func (a *API) getWork() Response {
-       work, err := a.GetWork()
-       if err != nil {
-               return NewErrorResponse(err)
-       }
-       return NewSuccessResponse(work)
-}
-
-// getWorkJSON gets work in json format
-func (a *API) getWorkJSON() Response {
-       work, err := a.GetWorkJSON()
-       if err != nil {
-               return NewErrorResponse(err)
-       }
-       return NewSuccessResponse(work)
-}
-
-// SubmitBlockReq is req struct for submit-block API
-type SubmitBlockReq struct {
-       Block *types.Block `json:"raw_block"`
-}
-
-// submitBlock trys to submit a raw block to the chain
-func (a *API) submitBlock(ctx context.Context, req *SubmitBlockReq) Response {
-       isOrphan, err := a.chain.ProcessBlock(req.Block)
-       if err != nil {
-               return NewErrorResponse(err)
-       }
-
-       if isOrphan {
-               return NewErrorResponse(errors.New("block submitted is orphan"))
-       }
-
-       if err = a.eventDispatcher.Post(event.NewMinedBlockEvent{Block: *req.Block}); err != nil {
-               return NewErrorResponse(err)
-       }
-
-       return NewSuccessResponse(true)
-}
-
-// SubmitWorkReq is req struct for submit-work API
-type SubmitWorkReq struct {
-       BlockHeader *types.BlockHeader `json:"block_header"`
-}
-
-// submitWork submits work in compressed protobuf format
-func (a *API) submitWork(ctx context.Context, req *SubmitWorkReq) Response {
-       if err := a.SubmitWork(req.BlockHeader); err != nil {
-               return NewErrorResponse(err)
-       }
-       return NewSuccessResponse(true)
-}
-
-// SubmitWorkJSONReq is req struct for submit-work-json API
-type SubmitWorkJSONReq struct {
-       BlockHeader *BlockHeaderJSON `json:"block_header"`
-}
-
-// submitWorkJSON submits work in json format
-func (a *API) submitWorkJSON(ctx context.Context, req *SubmitWorkJSONReq) Response {
-       bh := &types.BlockHeader{
-               Version:           req.BlockHeader.Version,
-               Height:            req.BlockHeader.Height,
-               PreviousBlockHash: req.BlockHeader.PreviousBlockHash,
-               Timestamp:         req.BlockHeader.Timestamp,
-               Nonce:             req.BlockHeader.Nonce,
-               Bits:              req.BlockHeader.Bits,
-               BlockCommitment:   *req.BlockHeader.BlockCommitment,
-       }
-
-       if err := a.SubmitWork(bh); err != nil {
-               return NewErrorResponse(err)
-       }
-       return NewSuccessResponse(true)
-}
-
-// GetWorkResp is resp struct for get-work API
-type GetWorkResp struct {
-       BlockHeader *types.BlockHeader `json:"block_header"`
-       Seed        *bc.Hash           `json:"seed"`
-}
-
-// GetWork gets work in compressed protobuf format
-func (a *API) GetWork() (*GetWorkResp, error) {
-       bh, err := a.miningPool.GetWork()
-       if err != nil {
-               return nil, err
-       }
-
-       seed, err := a.chain.CalcNextSeed(&bh.PreviousBlockHash)
-       if err != nil {
-               return nil, err
-       }
-
-       return &GetWorkResp{
-               BlockHeader: bh,
-               Seed:        seed,
-       }, nil
-}
-
-// GetWorkJSONResp is resp struct for get-work-json API
-type GetWorkJSONResp struct {
-       BlockHeader *BlockHeaderJSON `json:"block_header"`
-       Seed        *bc.Hash         `json:"seed"`
-}
-
-// GetWorkJSON gets work in json format
-func (a *API) GetWorkJSON() (*GetWorkJSONResp, error) {
-       bh, err := a.miningPool.GetWork()
-       if err != nil {
-               return nil, err
-       }
-
-       seed, err := a.chain.CalcNextSeed(&bh.PreviousBlockHash)
-       if err != nil {
-               return nil, err
-       }
-
-       return &GetWorkJSONResp{
-               BlockHeader: &BlockHeaderJSON{
-                       Version:           bh.Version,
-                       Height:            bh.Height,
-                       PreviousBlockHash: bh.PreviousBlockHash,
-                       Timestamp:         bh.Timestamp,
-                       Nonce:             bh.Nonce,
-                       Bits:              bh.Bits,
-                       BlockCommitment:   &bh.BlockCommitment,
-               },
-               Seed: seed,
-       }, nil
-}
-
-// SubmitWork tries to submit work to the chain
-func (a *API) SubmitWork(bh *types.BlockHeader) error {
-       return a.miningPool.SubmitWork(bh)
-}
-
-func (a *API) setMining(in struct {
-       IsMining bool `json:"is_mining"`
-}) Response {
-       if in.IsMining {
-               if _, err := a.wallet.AccountMgr.GetMiningAddress(); err != nil {
-                       return NewErrorResponse(errors.New("Mining address does not exist"))
-               }
-               return a.startMining()
-       }
-       return a.stopMining()
-}
-
-func (a *API) startMining() Response {
-       a.cpuMiner.Start()
-       if !a.IsMining() {
-               return NewErrorResponse(errors.New("Failed to start mining"))
-       }
-       return NewSuccessResponse("")
-}
-
-func (a *API) stopMining() Response {
-       a.cpuMiner.Stop()
-       if a.IsMining() {
-               return NewErrorResponse(errors.New("Failed to stop mining"))
-       }
-       return NewSuccessResponse("")
-}
index 42852e8..c22f034 100644 (file)
@@ -5,7 +5,7 @@ import (
        "net"
 
        "github.com/bytom/bytom/errors"
-       "github.com/bytom/bytom/netsync"
+       "github.com/bytom/bytom/netsync/peers"
        "github.com/bytom/bytom/p2p"
        "github.com/bytom/bytom/version"
 )
@@ -20,7 +20,6 @@ type VersionInfo struct {
 type NetInfo struct {
        Listening    bool         `json:"listening"`
        Syncing      bool         `json:"syncing"`
-       Mining       bool         `json:"mining"`
        PeerCount    int          `json:"peer_count"`
        CurrentBlock uint64       `json:"current_block"`
        HighestBlock uint64       `json:"highest_block"`
@@ -33,7 +32,6 @@ func (a *API) GetNodeInfo() *NetInfo {
        info := &NetInfo{
                Listening:    a.sync.IsListening(),
                Syncing:      !a.sync.IsCaughtUp(),
-               Mining:       a.cpuMiner.IsMining(),
                PeerCount:    a.sync.PeerCount(),
                CurrentBlock: a.chain.BestBlockHeight(),
                NetWorkID:    a.sync.GetNetwork(),
@@ -53,7 +51,7 @@ func (a *API) GetNodeInfo() *NetInfo {
 }
 
 // return the currently connected peers with net address
-func (a *API) getPeerInfoByAddr(addr string) *netsync.PeerInfo {
+func (a *API) getPeerInfoByAddr(addr string) *peers.PeerInfo {
        peerInfos := a.sync.GetPeerInfos()
        for _, peerInfo := range peerInfos {
                if peerInfo.RemoteAddr == addr {
@@ -69,7 +67,7 @@ func (a *API) disconnectPeerById(peerID string) error {
 }
 
 // connect peer b y net address
-func (a *API) connectPeerByIpAndPort(ip string, port uint16) (*netsync.PeerInfo, error) {
+func (a *API) connectPeerByIpAndPort(ip string, port uint16) (*peers.PeerInfo, error) {
        netIp := net.ParseIP(ip)
        if netIp == nil {
                return nil, errors.New("invalid ip address")
@@ -92,17 +90,6 @@ func (a *API) getNetInfo() Response {
        return NewSuccessResponse(a.GetNodeInfo())
 }
 
-// isMining return is in mining or not
-func (a *API) isMining() Response {
-       IsMining := map[string]bool{"is_mining": a.IsMining()}
-       return NewSuccessResponse(IsMining)
-}
-
-// IsMining return mining status
-func (a *API) IsMining() bool {
-       return a.cpuMiner.IsMining()
-}
-
 // return the peers of current node
 func (a *API) listPeers() Response {
        return NewSuccessResponse(a.sync.GetPeerInfos())
index b87e42a..8a140fd 100644 (file)
@@ -2,7 +2,6 @@ package commands
 
 import (
        "strings"
-       "time"
 
        log "github.com/sirupsen/logrus"
        "github.com/spf13/cobra"
@@ -78,7 +77,7 @@ func setLogLevel(level string) {
 }
 
 func runNode(cmd *cobra.Command, args []string) error {
-       startTime := time.Now()
+       // startTime := time.Now()
        setLogLevel(config.LogLevel)
 
        // Create & start node
@@ -87,13 +86,13 @@ func runNode(cmd *cobra.Command, args []string) error {
                log.WithFields(log.Fields{"module": logModule, "err": err}).Fatal("failed to start node")
        }
 
-       nodeInfo := n.NodeInfo()
-       log.WithFields(log.Fields{
-               "module":   logModule,
-               "version":  nodeInfo.Version,
-               "network":  nodeInfo.Network,
-               "duration": time.Since(startTime),
-       }).Info("start node complete")
+       // nodeInfo := n.NodeInfo()
+       // log.WithFields(log.Fields{
+       //      "module":   logModule,
+       //      "version":  nodeInfo,
+       //      "network":  nodeInfo.Network,
+       //      "duration": time.Since(startTime),
+       // }).Info("start node complete")
 
        // Trap signal, run forever.
        n.RunForever()
diff --git a/cmd/miner/main.go b/cmd/miner/main.go
deleted file mode 100644 (file)
index c120958..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-package main
-
-import (
-       "encoding/json"
-       "log"
-       "os"
-
-       "github.com/bytom/bytom/api"
-       "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/consensus/difficulty"
-       "github.com/bytom/bytom/protocol/bc"
-       "github.com/bytom/bytom/protocol/bc/types"
-       "github.com/bytom/bytom/util"
-)
-
-const (
-       maxNonce = ^uint64(0) // 2^64 - 1
-       isCrazy  = true
-       esHR     = 1 //estimated Hashrate
-)
-
-var (
-       lastNonce  = ^uint64(0)
-       lastHeight = uint64(0)
-)
-
-// do proof of work
-func doWork(bh *types.BlockHeader, seed *bc.Hash) bool {
-       log.Println("Start from nonce:", lastNonce+1)
-       for i := uint64(lastNonce + 1); i <= uint64(lastNonce+consensus.TargetSecondsPerBlock*esHR) && i <= maxNonce; i++ {
-               bh.Nonce = i
-               // log.Printf("nonce = %v\n", i)
-               headerHash := bh.Hash()
-               if difficulty.CheckProofOfWork(&headerHash, seed, bh.Bits) {
-                       log.Printf("Mining succeed! Proof hash: %v\n", headerHash.String())
-                       return true
-               }
-       }
-       log.Println("Stop at nonce:", bh.Nonce)
-       lastNonce = bh.Nonce
-       return false
-}
-
-func getBlockHeaderByHeight(height uint64) {
-       type Req struct {
-               BlockHeight uint64 `json:"block_height"`
-       }
-
-       type Resp struct {
-               BlockHeader *types.BlockHeader `json:"block_header"`
-               Reward      uint64             `json:"reward"`
-       }
-
-       data, _ := util.ClientCall("/get-block-header", Req{BlockHeight: height})
-       rawData, err := json.Marshal(data)
-       if err != nil {
-               log.Fatalln(err)
-       }
-
-       resp := &Resp{}
-       if err = json.Unmarshal(rawData, resp); err != nil {
-               log.Fatalln(err)
-       }
-       log.Println("Reward:", resp.Reward)
-}
-
-func main() {
-       for true {
-               data, _ := util.ClientCall("/get-work", nil)
-               if data == nil {
-                       os.Exit(1)
-               }
-               rawData, err := json.Marshal(data)
-               if err != nil {
-                       log.Fatalln(err)
-               }
-               resp := &api.GetWorkResp{}
-               if err = json.Unmarshal(rawData, resp); err != nil {
-                       log.Fatalln(err)
-               }
-
-               log.Println("Mining at height:", resp.BlockHeader.Height)
-               if lastHeight != resp.BlockHeader.Height {
-                       lastNonce = ^uint64(0)
-               }
-               if doWork(resp.BlockHeader, resp.Seed) {
-                       util.ClientCall("/submit-work", &api.SubmitWorkReq{BlockHeader: resp.BlockHeader})
-                       getBlockHeaderByHeight(resp.BlockHeader.Height)
-               }
-
-               lastHeight = resp.BlockHeader.Height
-               if !isCrazy {
-                       return
-               }
-       }
-}
index 3e8bc9c..b71b0da 100644 (file)
@@ -50,7 +50,6 @@ func mainNetGenesisBlock() *types.Block {
                        Height:    0,
                        Nonce:     9253507043297,
                        Timestamp: 1524549600,
-                       Bits:      2161727821137910632,
                        BlockCommitment: types.BlockCommitment{
                                TransactionsMerkleRoot: merkleRoot,
                                TransactionStatusHash:  txStatusHash,
@@ -83,7 +82,6 @@ func testNetGenesisBlock() *types.Block {
                        Height:    0,
                        Nonce:     9253507043297,
                        Timestamp: 1528945000,
-                       Bits:      2305843009214532812,
                        BlockCommitment: types.BlockCommitment{
                                TransactionsMerkleRoot: merkleRoot,
                                TransactionStatusHash:  txStatusHash,
@@ -116,7 +114,6 @@ func soloNetGenesisBlock() *types.Block {
                        Height:    0,
                        Nonce:     9253507043297,
                        Timestamp: 1528945000,
-                       Bits:      2305843009214532812,
                        BlockCommitment: types.BlockCommitment{
                                TransactionsMerkleRoot: merkleRoot,
                                TransactionStatusHash:  txStatusHash,
diff --git a/consensus/difficulty/difficulty.go b/consensus/difficulty/difficulty.go
deleted file mode 100644 (file)
index c030ec6..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-package difficulty
-
-import (
-       "math/big"
-
-       "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/mining/tensority"
-       "github.com/bytom/bytom/protocol/bc"
-       "github.com/bytom/bytom/protocol/bc/types"
-)
-
-var (
-       // bigOne is 1 represented as a big.Int.  It is defined here to avoid
-       // the overhead of creating it multiple times.
-       bigOne = big.NewInt(1)
-
-       // oneLsh256 is 1 shifted left 256 bits.  It is defined here to avoid
-       // the overhead of creating it multiple times.
-       oneLsh256 = new(big.Int).Lsh(bigOne, 256)
-)
-
-// HashToBig convert bc.Hash to a difficulty int
-func HashToBig(hash *bc.Hash) *big.Int {
-       // reverse the bytes of the hash (little-endian) to use it in the big
-       // package (big-endian)
-       buf := hash.Byte32()
-       blen := len(buf)
-       for i := 0; i < blen/2; i++ {
-               buf[i], buf[blen-1-i] = buf[blen-1-i], buf[i]
-       }
-
-       return new(big.Int).SetBytes(buf[:])
-}
-
-// CalcWork calculates a work value from difficulty bits.
-func CalcWork(bits uint64) *big.Int {
-       difficultyNum := CompactToBig(bits)
-       if difficultyNum.Sign() <= 0 {
-               return big.NewInt(0)
-       }
-
-       // (1 << 256) / (difficultyNum + 1)
-       denominator := new(big.Int).Add(difficultyNum, bigOne)
-       return new(big.Int).Div(oneLsh256, denominator)
-}
-
-// CompactToBig converts a compact representation of a whole unsigned integer
-// N to an big.Int. The representation is similar to IEEE754 floating point
-// numbers. Sign is not really being used.
-//
-//     -------------------------------------------------
-//     |   Exponent     |    Sign    |    Mantissa     |
-//     -------------------------------------------------
-//     | 8 bits [63-56] | 1 bit [55] | 55 bits [54-00] |
-//     -------------------------------------------------
-//
-//     N = (-1^sign) * mantissa * 256^(exponent-3)
-//  Actually it will be nicer to use 7 instead of 3 for robustness reason.
-func CompactToBig(compact uint64) *big.Int {
-       // Extract the mantissa, sign bit, and exponent.
-       mantissa := compact & 0x007fffffffffffff
-       isNegative := compact&0x0080000000000000 != 0
-       exponent := uint(compact >> 56)
-
-       var bn *big.Int
-       if exponent <= 3 {
-               mantissa >>= 8 * (3 - exponent)
-               bn = big.NewInt(int64(mantissa))
-       } else {
-               bn = big.NewInt(int64(mantissa))
-               bn.Lsh(bn, 8*(exponent-3))
-       }
-
-       if isNegative {
-               bn = bn.Neg(bn)
-       }
-
-       return bn
-}
-
-// BigToCompact converts a whole number N to a compact representation using
-// an unsigned 64-bit number. Sign is not really being used, but it's kept
-// here.
-func BigToCompact(n *big.Int) uint64 {
-       if n.Sign() == 0 {
-               return 0
-       }
-
-       var mantissa uint64
-       // Bytes() returns the absolute value of n as a big-endian byte slice
-       exponent := uint(len(n.Bytes()))
-
-       // Bits() returns the absolute value of n as a little-endian uint64 slice
-       if exponent <= 3 {
-               mantissa = uint64(n.Bits()[0])
-               mantissa <<= 8 * (3 - exponent)
-       } else {
-               tn := new(big.Int).Set(n)
-               // Since the base for the exponent is 256, the exponent can be treated
-               // as the number of bytes to represent the full 256-bit number. And as
-               // the exponent is treated as the number of bytes, Rsh 8*(exponent-3)
-               // makes sure that the shifted tn won't occupy more than 8*3=24 bits,
-               // and can be read from Bits()[0], which is 64-bit
-               mantissa = uint64(tn.Rsh(tn, 8*(exponent-3)).Bits()[0])
-       }
-
-       if mantissa&0x0080000000000000 != 0 {
-               mantissa >>= 8
-               exponent++
-       }
-
-       compact := uint64(exponent)<<56 | mantissa
-       if n.Sign() < 0 {
-               compact |= 0x0080000000000000
-       }
-       return compact
-}
-
-// CheckProofOfWork checks whether the hash is valid for a given difficulty.
-func CheckProofOfWork(hash, seed *bc.Hash, bits uint64) bool {
-       compareHash := tensority.AIHash.Hash(hash, seed)
-       return HashToBig(compareHash).Cmp(CompactToBig(bits)) <= 0
-}
-
-// CalcNextRequiredDifficulty return the difficulty using compact representation
-// for next block, when a lower difficulty Int actually reflects a more difficult
-// mining progress.
-func CalcNextRequiredDifficulty(lastBH, compareBH *types.BlockHeader) uint64 {
-       if (lastBH.Height)%consensus.BlocksPerRetarget != 0 || lastBH.Height == 0 {
-               return lastBH.Bits
-       }
-
-       targetTimeSpan := int64(consensus.BlocksPerRetarget * consensus.TargetSecondsPerBlock)
-       actualTimeSpan := int64(lastBH.Timestamp - compareBH.Timestamp)
-
-       oldTarget := CompactToBig(lastBH.Bits)
-       newTarget := new(big.Int).Mul(oldTarget, big.NewInt(actualTimeSpan))
-       newTarget.Div(newTarget, big.NewInt(targetTimeSpan))
-       newTargetBits := BigToCompact(newTarget)
-
-       return newTargetBits
-}
diff --git a/consensus/difficulty/difficulty_test.go b/consensus/difficulty/difficulty_test.go
deleted file mode 100644 (file)
index 96e2326..0000000
+++ /dev/null
@@ -1,888 +0,0 @@
-package difficulty
-
-import (
-       "math/big"
-       "reflect"
-       "strconv"
-       "testing"
-
-       "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/protocol/bc"
-       "github.com/bytom/bytom/protocol/bc/types"
-)
-
-// A lower difficulty Int actually reflects a more difficult mining progress.
-func TestCalcNextRequiredDifficulty(t *testing.T) {
-       targetTimeSpan := uint64(consensus.BlocksPerRetarget * consensus.TargetSecondsPerBlock)
-       cases := []struct {
-               lastBH    *types.BlockHeader
-               compareBH *types.BlockHeader
-               want      uint64
-       }{
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    0,
-                               Timestamp: 0,
-                       },
-                       BigToCompact(big.NewInt(1000)),
-               },
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan * 2,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    0,
-                               Timestamp: 0,
-                       },
-                       BigToCompact(big.NewInt(2000)),
-               },
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget - 1,
-                               Timestamp: targetTimeSpan*2 - consensus.TargetSecondsPerBlock,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    0,
-                               Timestamp: 0,
-                       },
-                       BigToCompact(big.NewInt(1000)),
-               },
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan / 2,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    0,
-                               Timestamp: 0,
-                       },
-                       BigToCompact(big.NewInt(500)),
-               },
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget * 2,
-                               Timestamp: targetTimeSpan + targetTimeSpan*2,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan,
-                       },
-                       BigToCompact(big.NewInt(2000)),
-               },
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget * 2,
-                               Timestamp: targetTimeSpan + targetTimeSpan/2,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan,
-                       },
-                       BigToCompact(big.NewInt(500)),
-               },
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget*2 - 1,
-                               Timestamp: targetTimeSpan + targetTimeSpan*2 - consensus.TargetSecondsPerBlock,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan,
-                       },
-                       BigToCompact(big.NewInt(1000)),
-               },
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget*2 - 1,
-                               Timestamp: targetTimeSpan + targetTimeSpan/2 - consensus.TargetSecondsPerBlock,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan,
-                       },
-                       BigToCompact(big.NewInt(1000)),
-               },
-               // lastBH.Height: 0, lastBH.Timestamp - compareBH.Timestamp: 0, lastBH.Bits: 0
-               {
-                       &types.BlockHeader{
-                               Height:    0,
-                               Timestamp: 0,
-                               Bits:      0,
-                       },
-                       &types.BlockHeader{
-                               Height:    0,
-                               Timestamp: 0,
-                       },
-                       0,
-               },
-               // lastBH.Height: 0, lastBH.Timestamp - compareBH.Timestamp: 0, lastBH.Bits: 18446744073709551615
-               {
-                       &types.BlockHeader{
-                               Height:    0,
-                               Timestamp: 0,
-                               Bits:      18446744073709551615,
-                       },
-                       &types.BlockHeader{
-                               Height:    0,
-                               Timestamp: 0,
-                       },
-                       18446744073709551615,
-               },
-               // lastBH.Height: 0, lastBH.Timestamp - compareBH.Timestamp: 0, lastBH.Bits: bigInt(1000)
-               {
-                       &types.BlockHeader{
-                               Height:    0,
-                               Timestamp: 0,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    0,
-                               Timestamp: 0,
-                       },
-                       BigToCompact(big.NewInt(1000)),
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget, lastBH.Timestamp - compareBH.Timestamp: 0, lastBH.Bits: bigInt(1000)
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget - 1,
-                               Timestamp: targetTimeSpan,
-                       },
-                       0,
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget, lastBH.Timestamp - compareBH.Timestamp: -9223372036854775808, lastBH.Bits: bigInt(1000)
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget - 1,
-                               Timestamp: targetTimeSpan + 9223372036854775808,
-                       },
-                       540431955291560988,
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget, lastBH.Timestamp - compareBH.Timestamp: 9223372036854775807, lastBH.Bits: bigInt(1000)
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan + 9223372036854775807,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget - 1,
-                               Timestamp: targetTimeSpan,
-                       },
-                       504403158272597019,
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget, lastBH.Timestamp - compareBH.Timestamp: 18446744073709551615, lastBH.Bits: bigInt(1000)
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: 18446744073709551615,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget - 1,
-                               Timestamp: 0,
-                       },
-                       108086391056957440,
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget, lastBH.Timestamp - compareBH.Timestamp: 302400, lastBH.Bits: bigInt(1000)
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan * 2,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget - 1,
-                               Timestamp: targetTimeSpan,
-                       },
-                       BigToCompact(big.NewInt(1000)),
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget, lastBH.Timestamp - compareBH.Timestamp: 604800, lastBH.Bits: bigInt(1000)
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan * 3,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget - 1,
-                               Timestamp: targetTimeSpan,
-                       },
-                       144115188076367872,
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget, lastBH.Timestamp - compareBH.Timestamp: 151200, lastBH.Bits: bigInt(1000)
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan + 9223372036854775807,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget - 1,
-                               Timestamp: targetTimeSpan,
-                       },
-                       504403158272597019,
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget, lastBH.Timestamp - compareBH.Timestamp: 302400, lastBH.Bits: 0
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan * 2,
-                               Bits:      0,
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget - 1,
-                               Timestamp: targetTimeSpan,
-                       },
-                       0,
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget, lastBH.Timestamp - compareBH.Timestamp: 302400, lastBH.Bits: 18446744073709551615
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan * 2,
-                               Bits:      18446744073709551615,
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget - 1,
-                               Timestamp: targetTimeSpan,
-                       },
-                       252201579141136384,
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget + 1, lastBH.Timestamp - compareBH.Timestamp: 302400, lastBH.Bits: bigInt(1000)
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget + 1,
-                               Timestamp: targetTimeSpan * 2,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget,
-                               Timestamp: targetTimeSpan,
-                       },
-                       BigToCompact(big.NewInt(1000)),
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget - 1, lastBH.Timestamp - compareBH.Timestamp: 302400, lastBH.Bits: bigInt(1000)
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget - 1,
-                               Timestamp: targetTimeSpan * 2,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget - 2,
-                               Timestamp: targetTimeSpan,
-                       },
-                       BigToCompact(big.NewInt(1000)),
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget * 2, lastBH.Timestamp - compareBH.Timestamp: 302400, lastBH.Bits: bigInt(1000)
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget * 2,
-                               Timestamp: targetTimeSpan * 2,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget*2 - 1,
-                               Timestamp: targetTimeSpan,
-                       },
-                       BigToCompact(big.NewInt(1000)),
-               },
-               // lastBH.Height: consensus.BlocksPerRetarget / 2, lastBH.Timestamp - compareBH.Timestamp: 302400, lastBH.Bits: bigInt(1000)
-               {
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget / 2,
-                               Timestamp: targetTimeSpan * 2,
-                               Bits:      BigToCompact(big.NewInt(1000)),
-                       },
-                       &types.BlockHeader{
-                               Height:    consensus.BlocksPerRetarget/2 - 1,
-                               Timestamp: targetTimeSpan,
-                       },
-                       BigToCompact(big.NewInt(1000)),
-               },
-       }
-
-       for i, c := range cases {
-               if got := CalcNextRequiredDifficulty(c.lastBH, c.compareBH); got != c.want {
-                       t.Errorf("Compile(%d) = %d want %d\n", i, got, c.want)
-                       return
-               }
-       }
-}
-
-func TestHashToBig(t *testing.T) {
-       cases := []struct {
-               in  [32]byte
-               out [32]byte
-       }{
-               {
-                       in: [32]byte{
-                               0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
-                               0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
-                               0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
-                               0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
-                       },
-                       out: [32]byte{
-                               0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
-                               0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
-                               0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
-                               0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
-                       },
-               },
-               {
-                       in: [32]byte{
-                               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                               0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-                               0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-                       },
-                       out: [32]byte{
-                               0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
-                               0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
-                               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                       },
-               },
-               {
-                       in: [32]byte{
-                               0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
-                               0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
-                               0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
-                               0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
-                       },
-                       out: [32]byte{
-                               0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8,
-                               0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0,
-                               0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8,
-                               0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0,
-                       },
-               },
-       }
-
-       for i, c := range cases {
-               bhash := bc.NewHash(c.in)
-               result := HashToBig(&bhash).Bytes()
-
-               var resArr [32]byte
-               copy(resArr[:], result)
-
-               if !reflect.DeepEqual(resArr, c.out) {
-                       t.Errorf("TestHashToBig test #%d failed:\n\tgot\t%x\n\twant\t%x\n", i, resArr, c.out)
-                       return
-               }
-       }
-}
-
-func TestCompactToBig(t *testing.T) {
-       cases := []struct {
-               in  string
-               out *big.Int
-       }{
-               {
-                       in: `00000000` + //Exponent
-                               `0` + //Sign
-                               `0000000000000000000000000000000000000000000000000000000`, //Mantissa
-                       out: big.NewInt(0),
-               },
-               {
-                       in: `00000000` + //Exponent
-                               `1` + //Sign
-                               `0000000000000000000000000000000000000000000000000000000`, //Mantissa
-                       out: big.NewInt(0),
-               },
-               {
-                       in: `00000001` + //Exponent
-                               `0` + //Sign
-                               `0000000000000000000000000000000000000010000000000000000`, //Mantissa
-                       out: big.NewInt(1),
-               },
-               {
-                       in: `00000001` + //Exponent
-                               `1` + //Sign
-                               `0000000000000000000000000000000000000010000000000000000`, //Mantissa
-                       out: big.NewInt(-1),
-               },
-               {
-                       in: `00000011` + //Exponent
-                               `0` + //Sign
-                               `0000000000000000000000000000000000000010000000000000000`, //Mantissa
-                       out: big.NewInt(65536),
-               },
-               {
-                       in: `00000011` + //Exponent
-                               `1` + //Sign
-                               `0000000000000000000000000000000000000010000000000000000`, //Mantissa
-                       out: big.NewInt(-65536),
-               },
-               {
-                       in: `00000100` + //Exponent
-                               `0` + //Sign
-                               `0000000000000000000000000000000000000010000000000000000`, //Mantissa
-                       out: big.NewInt(16777216),
-               },
-               {
-                       in: `00000100` + //Exponent
-                               `1` + //Sign
-                               `0000000000000000000000000000000000000010000000000000000`, //Mantissa
-                       out: big.NewInt(-16777216),
-               },
-               {
-                       //btm PowMin test
-                       // PowMinBits = 2161727821138738707, i.e 0x1e000000000dbe13, as defined
-                       // in /consensus/general.go
-                       in: `00011110` + //Exponent
-                               `0` + //Sign
-                               `0000000000000000000000000000000000011011011111000010011`, //Mantissa
-                       out: big.NewInt(0).Lsh(big.NewInt(0x0dbe13), 27*8), //2161727821138738707
-               },
-       }
-
-       for i, c := range cases {
-               compact, _ := strconv.ParseUint(c.in, 2, 64)
-               r := CompactToBig(compact)
-               if r.Cmp(c.out) != 0 {
-                       t.Error("TestCompactToBig test #", i, "failed: got", r, "want", c.out)
-                       return
-               }
-       }
-}
-
-func TestBigToCompact(t *testing.T) {
-       // basic tests
-       tests := []struct {
-               in  int64
-               out uint64
-       }{
-               {0, 0x0000000000000000},
-               {-0, 0x0000000000000000},
-               {1, 0x0100000000010000},
-               {-1, 0x0180000000010000},
-               {65536, 0x0300000000010000},
-               {-65536, 0x0380000000010000},
-               {16777216, 0x0400000000010000},
-               {-16777216, 0x0480000000010000},
-       }
-
-       for x, test := range tests {
-               n := big.NewInt(test.in)
-               r := BigToCompact(n)
-               if r != test.out {
-                       t.Errorf("TestBigToCompact test #%d failed: got 0x%016x want 0x%016x\n",
-                               x, r, test.out)
-                       return
-               }
-       }
-
-       // btm PowMin test
-       // PowMinBits = 2161727821138738707, i.e 0x1e000000000dbe13, as defined
-       // in /consensus/general.go
-       n := big.NewInt(0).Lsh(big.NewInt(0x0dbe13), 27*8)
-       out := uint64(0x1e000000000dbe13)
-       r := BigToCompact(n)
-       if r != out {
-               t.Errorf("TestBigToCompact test #%d failed: got 0x%016x want 0x%016x\n",
-                       len(tests), r, out)
-               return
-       }
-}
-
-func TestCalcWorkWithIntStr(t *testing.T) {
-       cases := []struct {
-               strBits string
-               want    *big.Int
-       }{
-               // Exponent: 0, Sign: 0, Mantissa: 0
-               {
-                       `00000000` + //Exponent
-                               `0` + //Sign
-                               `0000000000000000000000000000000000000000000000000000000`, //Mantissa
-                       big.NewInt(0),
-               },
-               // Exponent: 0, Sign: 0, Mantissa: 1 (difficultyNum = 0 and difficultyNum.Sign() = 0)
-               {
-                       `00000000` +
-                               `0` +
-                               `0000000000000000000000000000000000000000000000000000001`,
-                       big.NewInt(0),
-               },
-               // Exponent: 0, Sign: 0, Mantissa: 65536 (difficultyNum = 0 and difficultyNum.Sign() = 0)
-               {
-                       `00000000` +
-                               `0` +
-                               `0000000000000000000000000000000000000010000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 0, Sign: 0, Mantissa: 16777216 (difficultyNum = 1 and difficultyNum.Sign() = 1)
-               {
-                       `00000000` +
-                               `0` +
-                               `0000000000000000000000000000001000000000000000000000000`,
-                       new(big.Int).Div(oneLsh256, big.NewInt(2)),
-               },
-               // Exponent: 0, Sign: 0, Mantissa: 0x007fffffffffffff
-               {
-                       `00000000` +
-                               `0` +
-                               `1111111111111111111111111111111111111111111111111111111`,
-                       big.NewInt(0).Lsh(big.NewInt(0x020000), 208),
-               },
-               // Exponent: 0, Sign: 1, Mantissa: 0
-               {
-                       `00000000` +
-                               `1` +
-                               `0000000000000000000000000000000000000000000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 0, Sign: 1, Mantissa: 1 (difficultyNum = 0 and difficultyNum.Sign() = 0)
-               {
-                       `00000000` +
-                               `1` +
-                               `0000000000000000000000000000000000000000000000000000001`,
-                       big.NewInt(0),
-               },
-               // Exponent: 0, Sign: 1, Mantissa: 65536 (difficultyNum = 0 and difficultyNum.Sign() = 0)
-               {
-                       `00000000` +
-                               `1` +
-                               `0000000000000000000000000000000000000010000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 0, Sign: 0, Mantissa: 16777216 (difficultyNum = -1 and difficultyNum.Sign() = -1)
-               {
-                       `00000000` +
-                               `1` +
-                               `0000000000000000000000000000001000000000000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 0, Sign: 1, Mantissa: 0x007fffffffffffff
-               {
-                       `00000000` +
-                               `1` +
-                               `1111111111111111111111111111111111111111111111111111111`,
-                       big.NewInt(0),
-               },
-               // Exponent: 3, Sign: 0, Mantissa: 0
-               {
-                       `00000011` +
-                               `0` +
-                               `0000000000000000000000000000000000000000000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 3, Sign: 0, Mantissa: 1 (difficultyNum = 1 and difficultyNum.Sign() = 1)
-               {
-                       `00000011` +
-                               `0` +
-                               `0000000000000000000000000000000000000000000000000000001`,
-                       new(big.Int).Div(oneLsh256, big.NewInt(2)),
-               },
-               // Exponent: 3, Sign: 0, Mantissa: 65536 (difficultyNum = 65536 and difficultyNum.Sign() = 1)
-               {
-                       `00000011` +
-                               `0` +
-                               `0000000000000000000000000000000000000010000000000000000`,
-                       new(big.Int).Div(oneLsh256, big.NewInt(65537)),
-               },
-               // Exponent: 0, Sign: 0, Mantissa: 16777216 (difficultyNum = 16777216 and difficultyNum.Sign() = 1)
-               {
-                       `00000011` +
-                               `0` +
-                               `0000000000000000000000000000001000000000000000000000000`,
-                       new(big.Int).Div(oneLsh256, big.NewInt(16777217)),
-               },
-               // Exponent: 3, Sign: 0, Mantissa: 0x007fffffffffffff
-               {
-                       `00000011` +
-                               `0` +
-                               `1111111111111111111111111111111111111111111111111111111`,
-                       new(big.Int).Div(oneLsh256, big.NewInt(36028797018963968)),
-               },
-               // Exponent: 3, Sign: 1, Mantissa: 0
-               {
-                       `00000011` +
-                               `1` +
-                               `0000000000000000000000000000000000000000000000000000000`,
-                       big.NewInt(0),
-               },
-               //Exponent: 3, Sign: 1, Mantissa: 1 (difficultyNum = -1 and difficultyNum.Sign() = -1)
-               {
-                       `00000011` +
-                               `1` +
-                               `0000000000000000000000000000000000000000000000000000001`,
-                       big.NewInt(0),
-               },
-               // Exponent: 3, Sign: 1, Mantissa: 65536 (difficultyNum = -65536 and difficultyNum.Sign() = -1)
-               {
-                       `00000011` +
-                               `1` +
-                               `0000000000000000000000000000000000000010000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 3, Sign: 1, Mantissa: 16777216 (difficultyNum = -16777216 and difficultyNum.Sign() = -1)
-               {
-                       `00000011` +
-                               `1` +
-                               `0000000000000000000000000000001000000000000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 3, Sign: 1, Mantissa: 0x007fffffffffffff
-               {
-                       `00000011` +
-                               `1` +
-                               `1111111111111111111111111111111111111111111111111111111`,
-                       big.NewInt(0),
-               },
-               // Exponent: 7, Sign: 0, Mantissa: 0
-               {
-                       `00000111` +
-                               `0` +
-                               `0000000000000000000000000000000000000000000000000000000`,
-                       big.NewInt(0),
-               },
-               //Exponent: 7, Sign: 1, Mantissa: 1 (difficultyNum = 4294967296 and difficultyNum.Sign() = 1)
-               {
-                       `00000111` +
-                               `0` +
-                               `0000000000000000000000000000000000000000000000000000001`,
-                       new(big.Int).Div(oneLsh256, big.NewInt(4294967297)),
-               },
-               // Exponent: 7, Sign: 0, Mantissa: 65536 (difficultyNum = 4294967296 and difficultyNum.Sign() = 1)
-               {
-                       `00000111` +
-                               `0` +
-                               `0000000000000000000000000000000000000010000000000000000`,
-                       new(big.Int).Div(oneLsh256, big.NewInt(281474976710657)),
-               },
-               // Exponent: 7, Sign: 0, Mantissa: 16777216 (difficultyNum = 72057594037927936 and difficultyNum.Sign() = 1)
-               {
-                       `00000111` +
-                               `0` +
-                               `0000000000000000000000000000001000000000000000000000000`,
-                       new(big.Int).Div(oneLsh256, big.NewInt(72057594037927937)),
-               },
-               // Exponent: 7, Sign: 0, Mantissa: 0x007fffffffffffff
-               {
-                       `00000111` +
-                               `0` +
-                               `1111111111111111111111111111111111111111111111111111111`,
-                       new(big.Int).Div(oneLsh256, new(big.Int).Add(big.NewInt(0).Lsh(big.NewInt(36028797018963967), 32), bigOne)),
-               },
-               // Exponent: 7, Sign: 1, Mantissa: 0
-               {
-                       `00000111` +
-                               `1` +
-                               `0000000000000000000000000000000000000000000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 7, Sign: 1, Mantissa: 1 (difficultyNum = -4294967296 and difficultyNum.Sign() = -1)
-               {
-                       `00000111` +
-                               `1` +
-                               `0000000000000000000000000000000000000000000000000000001`,
-                       big.NewInt(0),
-               },
-               // Exponent: 7, Sign: 1, Mantissa: 65536 (difficultyNum = -72057594037927936 and difficultyNum.Sign() = -1)
-               {
-                       `00000111` +
-                               `1` +
-                               `0000000000000000000000000000000000000010000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 7, Sign: 1, Mantissa: 16777216 (difficultyNum = -154742504910672530067423232 and difficultyNum.Sign() = -1)
-               {
-                       `00000111` +
-                               `1` +
-                               `0000000000000000000000000000001000000000000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 7, Sign: 1, Mantissa: 0x007fffffffffffff
-               {
-                       `00000111` +
-                               `1` +
-                               `1111111111111111111111111111111111111111111111111111111`,
-                       big.NewInt(0),
-               },
-               // Exponent: 255, Sign: 0, Mantissa: 1 (difficultyNum.Sign() = 1)
-               {
-                       `11111111` +
-                               `0` +
-                               `0000000000000000000000000000000000000000000000000000001`,
-                       big.NewInt(0),
-               },
-               // Exponent: 255, Sign: 0, Mantissa: 65536 (difficultyNum.Sign() = 1)
-               {
-                       `11111111` +
-                               `0` +
-                               `0000000000000000000000000000000000000010000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 255, Sign: 0, Mantissa: 16777216 (difficultyNum.Sign() = 1)
-               {
-                       `11111111` +
-                               `0` +
-                               `0000000000000000000000000000001000000000000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 255, Sign: 0, Mantissa: 0x007fffffffffffff
-               {
-                       `11111111` +
-                               `0` +
-                               `1111111111111111111111111111111111111111111111111111111`,
-                       big.NewInt(0),
-               },
-               // Exponent: 255, Sign: 1, Mantissa: 1
-               {
-                       `11111111` +
-                               `1` +
-                               `0000000000000000000000000000000000000000000000000000001`,
-                       big.NewInt(0),
-               },
-               // Exponent: 255, Sign: 1, Mantissa: 65536
-               {
-                       `11111111` +
-                               `1` +
-                               `0000000000000000000000000000000000000010000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 255, Sign: 1, Mantissa: 16777216
-               {
-                       `11111111` +
-                               `1` +
-                               `0000000000000000000000000000001000000000000000000000000`,
-                       big.NewInt(0),
-               },
-               // Exponent: 255, Sign: 1, Mantissa: 0x007fffffffffffff
-               {
-                       `11111111` +
-                               `1` +
-                               `1111111111111111111111111111111111111111111111111111111`,
-                       big.NewInt(0),
-               },
-       }
-
-       for i, c := range cases {
-               bits, err := strconv.ParseUint(c.strBits, 2, 64)
-               if err != nil {
-                       t.Errorf("convert string into uint error: %s\n", err)
-                       return
-               }
-
-               if got := CalcWork(bits); got.Cmp(c.want) != 0 {
-                       t.Errorf("CalcWork(%d) = %s, want %s\n", i, got, c.want)
-                       return
-               }
-       }
-}
-
-func TestCalcWork(t *testing.T) {
-       testCases := []struct {
-               bits uint64
-               want *big.Int
-       }{
-               {
-                       0,
-                       big.NewInt(0),
-               },
-               {
-                       1,
-                       big.NewInt(0),
-               },
-               {
-                       65535,
-                       big.NewInt(0),
-               },
-               {
-                       16777215,
-                       big.NewInt(0),
-               },
-               {
-                       16777216,
-                       new(big.Int).Div(oneLsh256, big.NewInt(2)),
-               },
-               {
-                       4294967295,
-                       new(big.Int).Div(oneLsh256, big.NewInt(256)),
-               },
-               {
-                       36028797018963967,
-                       new(big.Int).Div(oneLsh256, big.NewInt(2147483648)),
-               },
-               {
-                       36028797018963968,
-                       big.NewInt(0),
-               },
-               {
-                       216172782113783808,
-                       big.NewInt(0),
-               },
-               {
-                       216172782113783809,
-                       new(big.Int).Div(oneLsh256, big.NewInt(2)),
-               },
-               {
-                       216172782130561024,
-                       new(big.Int).Div(oneLsh256, big.NewInt(16777217)),
-               },
-               {
-                       252201579132747775,
-                       new(big.Int).Div(oneLsh256, big.NewInt(36028797018963968)),
-               },
-               {
-                       252201579132747776,
-                       big.NewInt(0),
-               },
-               {
-                       288230376151711744,
-                       big.NewInt(0),
-               },
-               {
-                       288230376151711745,
-                       new(big.Int).Div(oneLsh256, big.NewInt(257)),
-               },
-               {
-                       540431955284459519,
-                       new(big.Int).Div(oneLsh256, new(big.Int).Add(big.NewInt(0).Lsh(big.NewInt(36028797018963967), 32), bigOne)),
-               },
-               {
-                       540431955284459520,
-                       big.NewInt(0),
-               },
-               {
-                       9223372036854775807,
-                       big.NewInt(0),
-               },
-               {
-                       18446744073709551615,
-                       big.NewInt(0),
-               },
-       }
-
-       for i, c := range testCases {
-               if got := CalcWork(c.bits); got.Cmp(c.want) != 0 {
-                       t.Errorf("test case with uint64 for CalcWork(%d) = %s, want %s\n", i, got, c.want)
-                       return
-               }
-       }
-}
index 20f525d..5506f93 100644 (file)
@@ -9,6 +9,7 @@ import (
 
        log "github.com/sirupsen/logrus"
 
+       "github.com/bytom/bytom/protocol/bc"
        "github.com/bytom/bytom/protocol/bc/types"
 )
 
@@ -24,7 +25,13 @@ var (
        ErrDuplicateSubscribe = errors.New("event: subscribe duplicate type")
 )
 
-type NewMinedBlockEvent struct{ Block types.Block }
+type NewProposedBlockEvent struct{ Block types.Block }
+
+type BlockSignatureEvent struct {
+       BlockHash bc.Hash
+       Signature []byte
+       XPub      []byte
+}
 
 // TypeMuxEvent is a time-tagged notification pushed to subscribers.
 type TypeMuxEvent struct {
diff --git a/mining/cpuminer/cpuminer.go b/mining/cpuminer/cpuminer.go
deleted file mode 100644 (file)
index 97227a1..0000000
+++ /dev/null
@@ -1,278 +0,0 @@
-package cpuminer
-
-import (
-       "sync"
-       "time"
-
-       log "github.com/sirupsen/logrus"
-
-       "github.com/bytom/bytom/account"
-       "github.com/bytom/bytom/consensus/difficulty"
-       "github.com/bytom/bytom/event"
-       "github.com/bytom/bytom/mining"
-       "github.com/bytom/bytom/protocol"
-       "github.com/bytom/bytom/protocol/bc/types"
-)
-
-const (
-       maxNonce          = ^uint64(0) // 2^64 - 1
-       defaultNumWorkers = 1
-       hashUpdateSecs    = 1
-       logModule         = "cpuminer"
-)
-
-// CPUMiner provides facilities for solving blocks (mining) using the CPU in
-// a concurrency-safe manner.
-type CPUMiner struct {
-       sync.Mutex
-       chain            *protocol.Chain
-       accountManager   *account.Manager
-       txPool           *protocol.TxPool
-       numWorkers       uint64
-       started          bool
-       discreteMining   bool
-       workerWg         sync.WaitGroup
-       updateNumWorkers chan struct{}
-       quit             chan struct{}
-       eventDispatcher  *event.Dispatcher
-}
-
-// solveBlock attempts to find some combination of a nonce, extra nonce, and
-// current timestamp which makes the passed block hash to a value less than the
-// target difficulty.
-func (m *CPUMiner) solveBlock(block *types.Block, ticker *time.Ticker, quit chan struct{}) bool {
-       header := &block.BlockHeader
-       seed, err := m.chain.CalcNextSeed(&header.PreviousBlockHash)
-       if err != nil {
-               return false
-       }
-
-       for i := uint64(0); i <= maxNonce; i++ {
-               select {
-               case <-quit:
-                       return false
-               case <-ticker.C:
-                       if m.chain.BestBlockHeight() >= header.Height {
-                               return false
-                       }
-               default:
-               }
-
-               header.Nonce = i
-               headerHash := header.Hash()
-               if difficulty.CheckProofOfWork(&headerHash, seed, header.Bits) {
-                       return true
-               }
-       }
-       return false
-}
-
-// generateBlocks is a worker that is controlled by the miningWorkerController.
-// It is self contained in that it creates block templates and attempts to solve
-// them while detecting when it is performing stale work and reacting
-// accordingly by generating a new block template.  When a block is solved, it
-// is submitted.
-//
-// It must be run as a goroutine.
-func (m *CPUMiner) generateBlocks(quit chan struct{}) {
-       ticker := time.NewTicker(time.Second * hashUpdateSecs)
-       defer ticker.Stop()
-
-out:
-       for {
-               select {
-               case <-quit:
-                       break out
-               default:
-               }
-
-               block, err := mining.NewBlockTemplate(m.chain, m.txPool, m.accountManager)
-               if err != nil {
-                       log.Errorf("Mining: failed on create NewBlockTemplate: %v", err)
-                       continue
-               }
-
-               if m.solveBlock(block, ticker, quit) {
-                       if isOrphan, err := m.chain.ProcessBlock(block); err == nil {
-                               log.WithFields(log.Fields{
-                                       "module":   logModule,
-                                       "height":   block.BlockHeader.Height,
-                                       "isOrphan": isOrphan,
-                                       "tx":       len(block.Transactions),
-                               }).Info("Miner processed block")
-
-                               // Broadcast the block and announce chain insertion event
-                               if err = m.eventDispatcher.Post(event.NewMinedBlockEvent{Block: *block}); err != nil {
-                                       log.WithFields(log.Fields{"module": logModule, "height": block.BlockHeader.Height, "error": err}).Errorf("Miner fail on post block")
-                               }
-                       } else {
-                               log.WithFields(log.Fields{"module": logModule, "height": block.BlockHeader.Height, "error": err}).Errorf("Miner fail on ProcessBlock")
-                       }
-               }
-       }
-
-       m.workerWg.Done()
-}
-
-// miningWorkerController launches the worker goroutines that are used to
-// generate block templates and solve them.  It also provides the ability to
-// dynamically adjust the number of running worker goroutines.
-//
-// It must be run as a goroutine.
-func (m *CPUMiner) miningWorkerController() {
-       // launchWorkers groups common code to launch a specified number of
-       // workers for generating blocks.
-       var runningWorkers []chan struct{}
-       launchWorkers := func(numWorkers uint64) {
-               for i := uint64(0); i < numWorkers; i++ {
-                       quit := make(chan struct{})
-                       runningWorkers = append(runningWorkers, quit)
-
-                       m.workerWg.Add(1)
-                       go m.generateBlocks(quit)
-               }
-       }
-
-       // Launch the current number of workers by default.
-       runningWorkers = make([]chan struct{}, 0, m.numWorkers)
-       launchWorkers(m.numWorkers)
-
-out:
-       for {
-               select {
-               // Update the number of running workers.
-               case <-m.updateNumWorkers:
-                       // No change.
-                       numRunning := uint64(len(runningWorkers))
-                       if m.numWorkers == numRunning {
-                               continue
-                       }
-
-                       // Add new workers.
-                       if m.numWorkers > numRunning {
-                               launchWorkers(m.numWorkers - numRunning)
-                               continue
-                       }
-
-                       // Signal the most recently created goroutines to exit.
-                       for i := numRunning - 1; i >= m.numWorkers; i-- {
-                               close(runningWorkers[i])
-                               runningWorkers[i] = nil
-                               runningWorkers = runningWorkers[:i]
-                       }
-
-               case <-m.quit:
-                       for _, quit := range runningWorkers {
-                               close(quit)
-                       }
-                       break out
-               }
-       }
-
-       m.workerWg.Wait()
-}
-
-// Start begins the CPU mining process as well as the speed monitor used to
-// track hashing metrics.  Calling this function when the CPU miner has
-// already been started will have no effect.
-//
-// This function is safe for concurrent access.
-func (m *CPUMiner) Start() {
-       m.Lock()
-       defer m.Unlock()
-
-       // Nothing to do if the miner is already running
-       if m.started {
-               return
-       }
-
-       m.quit = make(chan struct{})
-       go m.miningWorkerController()
-
-       m.started = true
-       log.Infof("CPU miner started")
-}
-
-// Stop gracefully stops the mining process by signalling all workers, and the
-// speed monitor to quit.  Calling this function when the CPU miner has not
-// already been started will have no effect.
-//
-// This function is safe for concurrent access.
-func (m *CPUMiner) Stop() {
-       m.Lock()
-       defer m.Unlock()
-
-       // Nothing to do if the miner is not currently running
-       if !m.started {
-               return
-       }
-
-       close(m.quit)
-       m.started = false
-       log.Info("CPU miner stopped")
-}
-
-// IsMining returns whether or not the CPU miner has been started and is
-// therefore currenting mining.
-//
-// This function is safe for concurrent access.
-func (m *CPUMiner) IsMining() bool {
-       m.Lock()
-       defer m.Unlock()
-
-       return m.started
-}
-
-// SetNumWorkers sets the number of workers to create which solve blocks.  Any
-// negative values will cause a default number of workers to be used which is
-// based on the number of processor cores in the system.  A value of 0 will
-// cause all CPU mining to be stopped.
-//
-// This function is safe for concurrent access.
-func (m *CPUMiner) SetNumWorkers(numWorkers int32) {
-       if numWorkers == 0 {
-               m.Stop()
-       }
-
-       // Don't lock until after the first check since Stop does its own
-       // locking.
-       m.Lock()
-       defer m.Unlock()
-
-       // Use default if provided value is negative.
-       if numWorkers < 0 {
-               m.numWorkers = defaultNumWorkers
-       } else {
-               m.numWorkers = uint64(numWorkers)
-       }
-
-       // When the miner is already running, notify the controller about the
-       // the change.
-       if m.started {
-               m.updateNumWorkers <- struct{}{}
-       }
-}
-
-// NumWorkers returns the number of workers which are running to solve blocks.
-//
-// This function is safe for concurrent access.
-func (m *CPUMiner) NumWorkers() int32 {
-       m.Lock()
-       defer m.Unlock()
-
-       return int32(m.numWorkers)
-}
-
-// NewCPUMiner returns a new instance of a CPU miner for the provided configuration.
-// Use Start to begin the mining process.  See the documentation for CPUMiner
-// type for more details.
-func NewCPUMiner(c *protocol.Chain, accountManager *account.Manager, txPool *protocol.TxPool, dispatcher *event.Dispatcher) *CPUMiner {
-       return &CPUMiner{
-               chain:            c,
-               accountManager:   accountManager,
-               txPool:           txPool,
-               numWorkers:       defaultNumWorkers,
-               updateNumWorkers: make(chan struct{}),
-               eventDispatcher:  dispatcher,
-       }
-}
diff --git a/mining/mining.go b/mining/mining.go
deleted file mode 100644 (file)
index fa736d3..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-package mining
-
-import (
-       "sort"
-       "strconv"
-       "time"
-
-       log "github.com/sirupsen/logrus"
-
-       "github.com/bytom/bytom/account"
-       "github.com/bytom/bytom/blockchain/txbuilder"
-       "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/errors"
-       "github.com/bytom/bytom/protocol"
-       "github.com/bytom/bytom/protocol/bc"
-       "github.com/bytom/bytom/protocol/bc/types"
-       "github.com/bytom/bytom/protocol/state"
-       "github.com/bytom/bytom/protocol/validation"
-       "github.com/bytom/bytom/protocol/vm/vmutil"
-)
-
-const logModule = "mining"
-
-// createCoinbaseTx returns a coinbase transaction paying an appropriate subsidy
-// based on the passed block height to the provided address.  When the address
-// is nil, the coinbase transaction will instead be redeemable by anyone.
-func createCoinbaseTx(accountManager *account.Manager, amount uint64, blockHeight uint64) (tx *types.Tx, err error) {
-       amount += consensus.BlockSubsidy(blockHeight)
-       arbitrary := append([]byte{0x00}, []byte(strconv.FormatUint(blockHeight, 10))...)
-
-       var script []byte
-       if accountManager == nil {
-               script, err = vmutil.DefaultCoinbaseProgram()
-       } else {
-               script, err = accountManager.GetCoinbaseControlProgram()
-               arbitrary = append(arbitrary, accountManager.GetCoinbaseArbitrary()...)
-       }
-       if err != nil {
-               return nil, err
-       }
-
-       if len(arbitrary) > consensus.CoinbaseArbitrarySizeLimit {
-               return nil, validation.ErrCoinbaseArbitraryOversize
-       }
-
-       builder := txbuilder.NewBuilder(time.Now())
-       if err = builder.AddInput(types.NewCoinbaseInput(arbitrary), &txbuilder.SigningInstruction{}); err != nil {
-               return nil, err
-       }
-       if err = builder.AddOutput(types.NewTxOutput(*consensus.BTMAssetID, amount, script)); err != nil {
-               return nil, err
-       }
-       _, txData, err := builder.Build()
-       if err != nil {
-               return nil, err
-       }
-
-       byteData, err := txData.MarshalText()
-       if err != nil {
-               return nil, err
-       }
-       txData.SerializedSize = uint64(len(byteData))
-
-       tx = &types.Tx{
-               TxData: *txData,
-               Tx:     types.MapTx(txData),
-       }
-       return tx, nil
-}
-
-// NewBlockTemplate returns a new block template that is ready to be solved
-func NewBlockTemplate(c *protocol.Chain, txPool *protocol.TxPool, accountManager *account.Manager) (b *types.Block, err error) {
-       view := state.NewUtxoViewpoint()
-       txStatus := bc.NewTransactionStatus()
-       if err := txStatus.SetStatus(0, false); err != nil {
-               return nil, err
-       }
-       txEntries := []*bc.Tx{nil}
-       gasUsed := uint64(0)
-       txFee := uint64(0)
-
-       // get preblock info for generate next block
-       preBlockHeader := c.BestBlockHeader()
-       preBlockHash := preBlockHeader.Hash()
-       nextBlockHeight := preBlockHeader.Height + 1
-       nextBits, err := c.CalcNextBits(&preBlockHash)
-       if err != nil {
-               return nil, err
-       }
-
-       b = &types.Block{
-               BlockHeader: types.BlockHeader{
-                       Version:           1,
-                       Height:            nextBlockHeight,
-                       PreviousBlockHash: preBlockHash,
-                       Timestamp:         uint64(time.Now().Unix()),
-                       BlockCommitment:   types.BlockCommitment{},
-                       Bits:              nextBits,
-               },
-       }
-       bcBlock := &bc.Block{BlockHeader: &bc.BlockHeader{Height: nextBlockHeight}}
-       b.Transactions = []*types.Tx{nil}
-
-       txs := txPool.GetTransactions()
-       sort.Sort(byTime(txs))
-       for _, txDesc := range txs {
-               tx := txDesc.Tx.Tx
-               gasOnlyTx := false
-
-               if err := c.GetTransactionsUtxo(view, []*bc.Tx{tx}); err != nil {
-                       blkGenSkipTxForErr(txPool, &tx.ID, err)
-                       continue
-               }
-
-               gasStatus, err := validation.ValidateTx(tx, bcBlock)
-               if err != nil {
-                       if !gasStatus.GasValid {
-                               blkGenSkipTxForErr(txPool, &tx.ID, err)
-                               continue
-                       }
-                       gasOnlyTx = true
-               }
-
-               if gasUsed+uint64(gasStatus.GasUsed) > consensus.MaxBlockGas {
-                       break
-               }
-
-               if err := view.ApplyTransaction(bcBlock, tx, gasOnlyTx); err != nil {
-                       blkGenSkipTxForErr(txPool, &tx.ID, err)
-                       continue
-               }
-
-               if err := txStatus.SetStatus(len(b.Transactions), gasOnlyTx); err != nil {
-                       return nil, err
-               }
-
-               b.Transactions = append(b.Transactions, txDesc.Tx)
-               txEntries = append(txEntries, tx)
-               gasUsed += uint64(gasStatus.GasUsed)
-               txFee += txDesc.Fee
-
-               if gasUsed == consensus.MaxBlockGas {
-                       break
-               }
-       }
-
-       // creater coinbase transaction
-       b.Transactions[0], err = createCoinbaseTx(accountManager, txFee, nextBlockHeight)
-       if err != nil {
-               return nil, errors.Wrap(err, "fail on createCoinbaseTx")
-       }
-       txEntries[0] = b.Transactions[0].Tx
-
-       b.BlockHeader.BlockCommitment.TransactionsMerkleRoot, err = types.TxMerkleRoot(txEntries)
-       if err != nil {
-               return nil, err
-       }
-
-       b.BlockHeader.BlockCommitment.TransactionStatusHash, err = types.TxStatusMerkleRoot(txStatus.VerifyStatus)
-       return b, err
-}
-
-func blkGenSkipTxForErr(txPool *protocol.TxPool, txHash *bc.Hash, err error) {
-       log.WithFields(log.Fields{"module": logModule, "error": err}).Error("mining block generation: skip tx due to")
-       txPool.RemoveTransaction(txHash)
-}
diff --git a/mining/mining_test.go b/mining/mining_test.go
deleted file mode 100644 (file)
index d558e1f..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-package mining
-
-import "testing"
-
-func TestCreateCoinbaseTx(t *testing.T) {
-       reductionInterval := uint64(840000)
-       baseSubsidy := uint64(41250000000)
-       cases := []struct {
-               height  uint64
-               txFee   uint64
-               subsidy uint64
-       }{
-               {
-                       height:  reductionInterval - 1,
-                       txFee:   100000000,
-                       subsidy: baseSubsidy + 100000000,
-               },
-               {
-                       height:  reductionInterval,
-                       txFee:   2000000000,
-                       subsidy: baseSubsidy/2 + 2000000000,
-               },
-               {
-                       height:  reductionInterval + 1,
-                       txFee:   0,
-                       subsidy: baseSubsidy / 2,
-               },
-               {
-                       height:  reductionInterval * 2,
-                       txFee:   100000000,
-                       subsidy: baseSubsidy/4 + 100000000,
-               },
-       }
-
-       for _, c := range cases {
-               coinbaseTx, err := createCoinbaseTx(nil, c.txFee, c.height)
-               if err != nil {
-                       t.Fatal(err)
-               }
-
-               outputAmount := coinbaseTx.Outputs[0].OutputCommitment.Amount
-               if outputAmount != c.subsidy {
-                       t.Fatalf("coinbase tx reward dismatch, expected: %d, have: %d", c.subsidy, outputAmount)
-               }
-       }
-}
diff --git a/mining/miningpool/miningpool.go b/mining/miningpool/miningpool.go
deleted file mode 100644 (file)
index 0df0fc4..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-package miningpool
-
-import (
-       "errors"
-       "sync"
-       "time"
-
-       log "github.com/sirupsen/logrus"
-
-       "github.com/bytom/bytom/account"
-       "github.com/bytom/bytom/event"
-       "github.com/bytom/bytom/mining"
-       "github.com/bytom/bytom/protocol"
-       "github.com/bytom/bytom/protocol/bc/types"
-)
-
-const (
-       maxSubmitChSize = 50
-)
-
-type submitBlockMsg struct {
-       blockHeader *types.BlockHeader
-       reply       chan error
-}
-
-// MiningPool is the support struct for p2p mine pool
-type MiningPool struct {
-       mutex    sync.RWMutex
-       block    *types.Block
-       submitCh chan *submitBlockMsg
-
-       chain           *protocol.Chain
-       accountManager  *account.Manager
-       txPool          *protocol.TxPool
-       eventDispatcher *event.Dispatcher
-}
-
-// NewMiningPool will create a new MiningPool
-func NewMiningPool(c *protocol.Chain, accountManager *account.Manager, txPool *protocol.TxPool, dispatcher *event.Dispatcher) *MiningPool {
-       m := &MiningPool{
-               submitCh:        make(chan *submitBlockMsg, maxSubmitChSize),
-               chain:           c,
-               accountManager:  accountManager,
-               txPool:          txPool,
-               eventDispatcher: dispatcher,
-       }
-       m.generateBlock()
-       go m.blockUpdater()
-       return m
-}
-
-// blockUpdater is the goroutine for keep update mining block
-func (m *MiningPool) blockUpdater() {
-       for {
-               select {
-               case <-m.chain.BlockWaiter(m.chain.BestBlockHeight() + 1):
-                       m.generateBlock()
-
-               case submitMsg := <-m.submitCh:
-                       err := m.submitWork(submitMsg.blockHeader)
-                       if err == nil {
-                               m.generateBlock()
-                       }
-                       submitMsg.reply <- err
-               }
-       }
-}
-
-// generateBlock generates a block template to mine
-func (m *MiningPool) generateBlock() {
-       m.mutex.Lock()
-       defer m.mutex.Unlock()
-
-       block, err := mining.NewBlockTemplate(m.chain, m.txPool, m.accountManager)
-       if err != nil {
-               log.Errorf("miningpool: failed on create NewBlockTemplate: %v", err)
-               return
-       }
-       m.block = block
-}
-
-// GetWork will return a block header for p2p mining
-func (m *MiningPool) GetWork() (*types.BlockHeader, error) {
-       if m.block != nil {
-               m.mutex.RLock()
-               defer m.mutex.RUnlock()
-
-               m.block.BlockHeader.Timestamp = uint64(time.Now().Unix())
-               bh := m.block.BlockHeader
-               return &bh, nil
-       }
-       return nil, errors.New("no block is ready for mining")
-}
-
-// SubmitWork will try to submit the result to the blockchain
-func (m *MiningPool) SubmitWork(bh *types.BlockHeader) error {
-       if bh == nil {
-               return errors.New("can't submit empty block")
-       }
-
-       reply := make(chan error, 1)
-       m.submitCh <- &submitBlockMsg{blockHeader: bh, reply: reply}
-       err := <-reply
-       if err != nil {
-               log.WithFields(log.Fields{"err": err, "height": bh.Height}).Warning("submitWork failed")
-       }
-       return err
-}
-
-func (m *MiningPool) submitWork(bh *types.BlockHeader) error {
-       m.mutex.Lock()
-       defer m.mutex.Unlock()
-
-       if m.block == nil || bh.PreviousBlockHash != m.block.PreviousBlockHash {
-               return errors.New("pending mining block has been changed")
-       }
-
-       m.block.Nonce = bh.Nonce
-       m.block.Timestamp = bh.Timestamp
-       isOrphan, err := m.chain.ProcessBlock(m.block)
-       if err != nil {
-               return err
-       }
-       if isOrphan {
-               return errors.New("submit result is orphan")
-       }
-
-       if err := m.eventDispatcher.Post(event.NewMinedBlockEvent{Block: *m.block}); err != nil {
-               return err
-       }
-
-       return nil
-}
diff --git a/mining/sort.go b/mining/sort.go
deleted file mode 100644 (file)
index a95a26f..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-package mining
-
-import "github.com/bytom/bytom/protocol"
-
-type byTime []*protocol.TxDesc
-
-func (a byTime) Len() int           { return len(a) }
-func (a byTime) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a byTime) Less(i, j int) bool { return a[i].Added.Before(a[j].Added) }
diff --git a/mining/tensority/ai_hash.go b/mining/tensority/ai_hash.go
deleted file mode 100644 (file)
index 803927e..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-package tensority
-
-import (
-       "github.com/golang/groupcache/lru"
-
-       "github.com/bytom/bytom/crypto/sha3pool"
-       "github.com/bytom/bytom/mining/tensority/cgo_algorithm"
-       "github.com/bytom/bytom/mining/tensority/go_algorithm"
-       "github.com/bytom/bytom/protocol/bc"
-)
-
-const maxAIHashCached = 64
-
-func calcCacheKey(hash, seed *bc.Hash) *bc.Hash {
-       var b32 [32]byte
-       sha3pool.Sum256(b32[:], append(hash.Bytes(), seed.Bytes()...))
-       key := bc.NewHash(b32)
-       return &key
-}
-
-// Cache is create for cache the tensority result
-type Cache struct {
-       lruCache *lru.Cache
-}
-
-// NewCache create a cache struct
-func NewCache() *Cache {
-       return &Cache{lruCache: lru.New(maxAIHashCached)}
-}
-
-// AddCache is used for add tensority calculate result
-func (a *Cache) AddCache(hash, seed, result *bc.Hash) {
-       key := calcCacheKey(hash, seed)
-       a.lruCache.Add(*key, result)
-}
-
-// RemoveCache clean the cached result
-func (a *Cache) RemoveCache(hash, seed *bc.Hash) {
-       key := calcCacheKey(hash, seed)
-       a.lruCache.Remove(key)
-}
-
-// Hash is the real entry for call tensority algorithm
-func (a *Cache) Hash(hash, seed *bc.Hash) *bc.Hash {
-       key := calcCacheKey(hash, seed)
-       if v, ok := a.lruCache.Get(*key); ok {
-               return v.(*bc.Hash)
-       }
-       return algorithm(hash, seed)
-}
-
-func algorithm(bh, seed *bc.Hash) *bc.Hash {
-       if UseSIMD {
-               return cgo_algorithm.SimdAlgorithm(bh, seed)
-       } else {
-               return go_algorithm.LegacyAlgorithm(bh, seed)
-       }
-}
-
-var (
-       AIHash  = NewCache() // AIHash is created for let different package share same cache
-       UseSIMD = false
-)
diff --git a/mining/tensority/cgo_algorithm/algorithm_simd.go b/mining/tensority/cgo_algorithm/algorithm_simd.go
deleted file mode 100644 (file)
index cdecf7c..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-// +build simd
-
-package cgo_algorithm
-
-// #cgo !darwin CFLAGS: -I.
-// #cgo !darwin LDFLAGS: -L. -l:./lib/cSimdTs.o -lstdc++ -lgomp -lpthread
-// #cgo darwin CFLAGS: -I. -I/usr/local/opt/llvm/include
-// #cgo darwin LDFLAGS: -L. -l./lib/cSimdTs.o -lstdc++ -lomp -L/usr/local/opt/llvm/lib
-// #include "./lib/cSimdTs.h"
-import "C"
-
-import (
-       "unsafe"
-
-       "github.com/bytom/bytom/protocol/bc"
-)
-
-func SimdAlgorithm(bh, seed *bc.Hash) *bc.Hash {
-       bhBytes := bh.Bytes()
-       sdBytes := seed.Bytes()
-       bhPtr := (*C.uint8_t)(unsafe.Pointer(&bhBytes[0]))
-       seedPtr := (*C.uint8_t)(unsafe.Pointer(&sdBytes[0]))
-
-       resPtr := C.SimdTs(bhPtr, seedPtr)
-       resHash := bc.NewHash(*(*[32]byte)(unsafe.Pointer(resPtr)))
-
-       return &resHash
-}
diff --git a/mining/tensority/cgo_algorithm/algorithm_unsupported.go b/mining/tensority/cgo_algorithm/algorithm_unsupported.go
deleted file mode 100644 (file)
index a5d9d97..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !simd
-
-package cgo_algorithm
-
-import (
-       log "github.com/sirupsen/logrus"
-
-       "github.com/bytom/bytom/mining/tensority/go_algorithm"
-       "github.com/bytom/bytom/protocol/bc"
-)
-
-func SimdAlgorithm(bh, seed *bc.Hash) *bc.Hash {
-       log.Warn("SIMD feature is not supported on release version, please compile the lib according to README to enable this feature.")
-       return go_algorithm.LegacyAlgorithm(bh, seed)
-}
diff --git a/mining/tensority/cgo_algorithm/lib/BytomPoW.h b/mining/tensority/cgo_algorithm/lib/BytomPoW.h
deleted file mode 100644 (file)
index 034f0e3..0000000
+++ /dev/null
@@ -1,477 +0,0 @@
-/* BytomPoW.h */\r
-#ifndef BYTOMPOW_H\r
-#define BYTOMPOW_H\r
-\r
-#include "scrypt.h"\r
-#include "sha3-allInOne.h"\r
-#include <iostream>\r
-#include <vector>\r
-#include <time.h>\r
-#include <assert.h>\r
-#include <stdint.h>\r
-#include <x86intrin.h>\r
-#include <omp.h>\r
-\r
-#define FNV(v1,v2) int32_t( ((v1)*FNV_PRIME) ^ (v2) )\r
-const int FNV_PRIME = 0x01000193;\r
-\r
-struct Mat256x256i8 {\r
-    int8_t d[256][256];\r
-\r
-    void toIdentityMatrix() {\r
-        for(int i = 0; i < 256; i++) {\r
-            for(int j = 0; j < 256; j++) {\r
-                d[i][j] = (i==j)?1:0; // diagonal\r
-            }\r
-        }\r
-    }\r
-\r
-    void copyFrom(const Mat256x256i8& other) {\r
-        for(int i = 0; i < 256; i++) {\r
-            for(int j = 0; j < 256; j++) {\r
-                this->d[j][i] = other.d[j][i];\r
-            }\r
-        }\r
-    }\r
-\r
-    Mat256x256i8() {\r
-//        this->toIdentityMatrix();\r
-    }\r
-\r
-    Mat256x256i8(const Mat256x256i8& other) {\r
-        this->copyFrom(other);\r
-    }\r
-\r
-    void copyFrom_helper(LTCMemory& ltcMem, int offset) {\r
-        for(int i = 0; i < 256; i++) {\r
-            const Words32& lo=ltcMem.get(i*4 + offset);\r
-            const Words32& hi=ltcMem.get(i*4 + 2 + offset);\r
-            for(int j = 0; j < 64; j++) {\r
-                uint32_t i32 = j>=32?hi.get(j-32):lo.get(j);\r
-                d[j*4+0][i] = (i32>> 0) & 0xFF;\r
-                d[j*4+1][i] = (i32>> 8) & 0xFF;\r
-                d[j*4+2][i] = (i32>>16) & 0xFF;\r
-                d[j*4+3][i] = (i32>>24) & 0xFF;\r
-            }\r
-        }\r
-    }\r
-\r
-    void copyFromEven(LTCMemory& ltcMem) {\r
-        copyFrom_helper(ltcMem, 0);\r
-    }\r
-\r
-    void copyFromOdd(LTCMemory& ltcMem) {\r
-        copyFrom_helper(ltcMem, 1);\r
-    }\r
-\r
-    void add(Mat256x256i8& a, Mat256x256i8& b) {\r
-        for(int i = 0; i < 256; i++) {\r
-            for(int j = 0; j < 256; j++) {\r
-                int tmp = int(a.d[i][j]) + int(b.d[i][j]);\r
-                this->d[i][j] = (tmp & 0xFF);\r
-            }\r
-        }\r
-    }\r
-};\r
-\r
-struct Mat256x256i16 {\r
-    int16_t d[256][256];\r
-\r
-    void toIdentityMatrix() {\r
-        for(int i = 0; i < 256; i++) {\r
-            for(int j = 0; j < 256; j++) {\r
-                d[i][j] = (i==j?1:0); // diagonal\r
-            }\r
-        }\r
-    }\r
-\r
-    void copyFrom(const Mat256x256i8& other) {\r
-        for(int i = 0; i < 256; i++) {\r
-            for(int j = 0; j < 256; j++) {\r
-                this->d[j][i] = int16_t(other.d[j][i]);\r
-                assert(this->d[j][i] == other.d[j][i]);\r
-            }\r
-        }\r
-    }\r
-\r
-    void copyFrom(const Mat256x256i16& other) {\r
-        for(int i = 0; i < 256; i++) {\r
-            for(int j = 0; j < 256; j++) {\r
-                this->d[j][i] = other.d[j][i];\r
-            }\r
-        }\r
-    }\r
-\r
-    Mat256x256i16() {\r
-//        this->toIdentityMatrix();\r
-    }\r
-\r
-    Mat256x256i16(const Mat256x256i16& other) {\r
-        this->copyFrom(other);\r
-    }\r
-\r
-    void copyFrom_helper(LTCMemory& ltcMem, int offset) {\r
-        for(int i = 0; i < 256; i++) {\r
-            const Words32& lo = ltcMem.get(i*4 + offset);\r
-            const Words32& hi = ltcMem.get(i*4 + 2 + offset);\r
-            for(int j = 0; j < 64; j++) {\r
-                uint32_t i32 = j>=32?hi.get(j-32):lo.get(j);\r
-                d[j*4+0][i] = int8_t((i32>> 0) & 0xFF);\r
-                d[j*4+1][i] = int8_t((i32>> 8) & 0xFF);\r
-                d[j*4+2][i] = int8_t((i32>>16) & 0xFF);\r
-                d[j*4+3][i] = int8_t((i32>>24) & 0xFF);\r
-            }\r
-        }\r
-    }\r
-\r
-    void copyFromEven(LTCMemory& ltcMem) {\r
-        copyFrom_helper(ltcMem, 0);\r
-    }\r
-\r
-    void copyFromOdd(LTCMemory& ltcMem) {\r
-        copyFrom_helper(ltcMem, 1);\r
-    }\r
-\r
-    void mul(const Mat256x256i16& a, const Mat256x256i16& b) {\r
-        for(int i = 0; i < 256; i += 16) {\r
-            for(int j = 0; j < 256; j += 16) {\r
-                for(int ii = i; ii < i+16; ii += 8) {\r
-                    __m256i r[8],s,t[8],u[8],m[8];\r
-                    r[0] = _mm256_set1_epi16(0);\r
-                    r[1] = _mm256_set1_epi16(0);\r
-                    r[2] = _mm256_set1_epi16(0);\r
-                    r[3] = _mm256_set1_epi16(0);\r
-                    r[4] = _mm256_set1_epi16(0);\r
-                    r[5] = _mm256_set1_epi16(0);\r
-                    r[6] = _mm256_set1_epi16(0);\r
-                    r[7] = _mm256_set1_epi16(0);\r
-                    for(int k = 0; k < 256; k++) {\r
-                        s = *((__m256i*)(&(b.d[k][j])));\r
-                        u[0] = _mm256_set1_epi16(a.d[ii+0][k]);\r
-                        u[1] = _mm256_set1_epi16(a.d[ii+1][k]);\r
-                        u[2] = _mm256_set1_epi16(a.d[ii+2][k]);\r
-                        u[3] = _mm256_set1_epi16(a.d[ii+3][k]);\r
-                        u[4] = _mm256_set1_epi16(a.d[ii+4][k]);\r
-                        u[5] = _mm256_set1_epi16(a.d[ii+5][k]);\r
-                        u[6] = _mm256_set1_epi16(a.d[ii+6][k]);\r
-                        u[7] = _mm256_set1_epi16(a.d[ii+7][k]);\r
-                        m[0] = _mm256_mullo_epi16(u[0],s);\r
-                        m[1] = _mm256_mullo_epi16(u[1],s);\r
-                        m[2] = _mm256_mullo_epi16(u[2],s);\r
-                        m[3] = _mm256_mullo_epi16(u[3],s);\r
-                        m[4] = _mm256_mullo_epi16(u[4],s);\r
-                        m[5] = _mm256_mullo_epi16(u[5],s);\r
-                        m[6] = _mm256_mullo_epi16(u[6],s);\r
-                        m[7] = _mm256_mullo_epi16(u[7],s);\r
-                        r[0] = _mm256_add_epi16(r[0],m[0]);\r
-                        r[1] = _mm256_add_epi16(r[1],m[1]);\r
-                        r[2] = _mm256_add_epi16(r[2],m[2]);\r
-                        r[3] = _mm256_add_epi16(r[3],m[3]);\r
-                        r[4] = _mm256_add_epi16(r[4],m[4]);\r
-                        r[5] = _mm256_add_epi16(r[5],m[5]);\r
-                        r[6] = _mm256_add_epi16(r[6],m[6]);\r
-                        r[7] = _mm256_add_epi16(r[7],m[7]);\r
-                    }\r
-                    t[0] = _mm256_slli_epi16(r[0],8);\r
-                    t[1] = _mm256_slli_epi16(r[1],8);\r
-                    t[2] = _mm256_slli_epi16(r[2],8);\r
-                    t[3] = _mm256_slli_epi16(r[3],8);\r
-                    t[4] = _mm256_slli_epi16(r[4],8);\r
-                    t[5] = _mm256_slli_epi16(r[5],8);\r
-                    t[6] = _mm256_slli_epi16(r[6],8);\r
-                    t[7] = _mm256_slli_epi16(r[7],8);\r
-                    t[0] = _mm256_add_epi16(r[0],t[0]);\r
-                    t[1] = _mm256_add_epi16(r[1],t[1]);\r
-                    t[2] = _mm256_add_epi16(r[2],t[2]);\r
-                    t[3] = _mm256_add_epi16(r[3],t[3]);\r
-                    t[4] = _mm256_add_epi16(r[4],t[4]);\r
-                    t[5] = _mm256_add_epi16(r[5],t[5]);\r
-                    t[6] = _mm256_add_epi16(r[6],t[6]);\r
-                    t[7] = _mm256_add_epi16(r[7],t[7]);\r
-                    for(int x = 0; x < 8; x++) {\r
-                        this->d[ii+x][j+0 ] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*0 +1)));\r
-                        this->d[ii+x][j+1 ] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*1 +1)));\r
-                        this->d[ii+x][j+2 ] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*2 +1)));\r
-                        this->d[ii+x][j+3 ] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*3 +1)));\r
-                        this->d[ii+x][j+4 ] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*4 +1)));\r
-                        this->d[ii+x][j+5 ] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*5 +1)));\r
-                        this->d[ii+x][j+6 ] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*6 +1)));\r
-                        this->d[ii+x][j+7 ] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*7 +1)));\r
-                        this->d[ii+x][j+8 ] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*8 +1)));\r
-                        this->d[ii+x][j+9 ] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*9 +1)));\r
-                        this->d[ii+x][j+10] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*10+1)));\r
-                        this->d[ii+x][j+11] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*11+1)));\r
-                        this->d[ii+x][j+12] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*12+1)));\r
-                        this->d[ii+x][j+13] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*13+1)));\r
-                        this->d[ii+x][j+14] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*14+1)));\r
-                        this->d[ii+x][j+15] = int16_t(int8_t(_mm256_extract_epi8(t[x],2*15+1)));\r
-                    }\r
-                }\r
-            }\r
-        }\r
-    }\r
-\r
-    void add(Mat256x256i16& a, Mat256x256i16& b) {\r
-        for(int i = 0; i < 256; i++) {\r
-            for(int j = 0; j < 256; j++) {\r
-                int tmp = int(a.d[i][j]) + int(b.d[i][j]);\r
-                this->d[i][j] = (tmp & 0xFF);\r
-            }\r
-        }\r
-    }\r
-\r
-    void toMatI8(Mat256x256i8& other) {\r
-        for(int i = 0; i < 256; i++) {\r
-            for(int j = 0; j < 256; j++) {\r
-                other.d[j][i] = (this->d[j][i]) & 0xFF;\r
-            }\r
-        }\r
-    }\r
-\r
-    void topup(Mat256x256i8& other) {\r
-        for(int i = 0; i < 256; i++) {\r
-            for(int j = 0; j < 256; j++) {\r
-                other.d[j][i] += (this->d[j][i]) & 0xFF;\r
-            }\r
-        }\r
-    }\r
-};\r
-\r
-\r
-struct Arr256x64i32 {\r
-    uint32_t d[256][64];\r
-\r
-    uint8_t* d0RawPtr() {\r
-        return (uint8_t*)(d[0]);\r
-    }\r
-\r
-    Arr256x64i32(const Mat256x256i8& mat) {\r
-        for(int j = 0; j < 256; j++) {\r
-            for(int i = 0; i < 64; i++) {\r
-                d[j][i] = ((uint32_t(uint8_t(mat.d[j][i + 192]))) << 24) |\r
-                          ((uint32_t(uint8_t(mat.d[j][i + 128]))) << 16) |\r
-                          ((uint32_t(uint8_t(mat.d[j][i +  64]))) <<  8) |\r
-                          ((uint32_t(uint8_t(mat.d[j][i]))) << 0);\r
-            }\r
-        }\r
-    }\r
-\r
-    void reduceFNV() {\r
-        for(int k = 256; k > 1; k = k/2) {\r
-            for(int j = 0; j < k/2; j++) {\r
-                for(int i = 0; i < 64; i++) {\r
-                    d[j][i] = FNV(d[j][i], d[j + k/2][i]);\r
-                }\r
-            }\r
-        }\r
-    }\r
-};\r
-\r
-// struct BytomMatList8 {\r
-//     std::vector<Mat256x256i8*> matVec;\r
-\r
-//     Mat256x256i8 at(int i) {\r
-//         return *(matVec[i]);\r
-//     }\r
-\r
-//     BytomMatList8() {\r
-//         for(int i=0; i<256; i++) {\r
-//             Mat256x256i8* ptr = new Mat256x256i8;\r
-//             assert(ptr!=NULL);\r
-//             matVec.push_back(ptr);\r
-//         }\r
-//     }\r
-\r
-//     ~BytomMatList8() {\r
-//         for(int i=0; i<256; i++) {\r
-//             delete matVec[i];\r
-//         }\r
-//     }\r
-\r
-//     void init(const Words32& X_in) {\r
-//         Words32 X = X_in;\r
-//         LTCMemory ltcMem;\r
-//         for(int i=0; i<128; i++) {\r
-//             ltcMem.scrypt(X);\r
-//             matVec[2*i]->copyFromEven(ltcMem);\r
-//             matVec[2*i+1]->copyFromOdd(ltcMem);\r
-//         }\r
-//     }\r
-// };\r
-\r
-struct BytomMatList16 {\r
-    std::vector<Mat256x256i16*> matVec;\r
-\r
-    Mat256x256i16 at(int i) {\r
-        return *(matVec[i]);\r
-    }\r
-\r
-    BytomMatList16() {\r
-        for(int i = 0; i < 256; i++) {\r
-            Mat256x256i16* ptr = new Mat256x256i16;\r
-            assert(ptr != NULL);\r
-            matVec.push_back(ptr);\r
-        }\r
-    }\r
-\r
-    ~BytomMatList16() {\r
-        for(int i = 0; i < 256; i++)\r
-            delete matVec[i];\r
-    }\r
-\r
-    void init(const Words32& X_in) {\r
-        Words32 X = X_in;\r
-        LTCMemory ltcMem;\r
-        for(int i = 0; i < 128; i++) {\r
-            ltcMem.scrypt(X);\r
-            matVec[2*i]->copyFromEven(ltcMem);\r
-            matVec[2*i + 1]->copyFromOdd(ltcMem);\r
-        }\r
-    }\r
-\r
-    // void copyFrom(BytomMatList8& other) {\r
-    //     for(int i=0; i<256; i++) {\r
-    //         matVec[i]->copyFrom(*other.matVec[i]);\r
-    //     }\r
-    // }\r
-\r
-    // void copyFrom(BytomMatList16& other) {\r
-    //     for(int i=0; i<256; i++) {\r
-    //         matVec[i]->copyFrom(*other.matVec[i]);\r
-    //     }\r
-    // }\r
-};\r
-\r
-// extern BytomMatList8* matList_int8;\r
-extern BytomMatList16* matList_int16;\r
-\r
-inline void iter_mineBytom(const uint8_t *fixedMessage,\r
-                            uint32_t len,\r
-                            // uint8_t nonce[8],\r
-                            uint8_t result[32]) {\r
-    Mat256x256i8 *resArr8 = new Mat256x256i8[4];\r
-\r
-    clock_t start, end;\r
-    start = clock();\r
-    // Itz faster using single thread ...\r
-    #pragma omp parallel for simd\r
-    for(int k = 0; k < 4; k++) { // The k-loop\r
-        sha3_ctx *ctx = new sha3_ctx;\r
-        Mat256x256i16 *mat16 = new Mat256x256i16;\r
-        Mat256x256i16 *tmp16 = new Mat256x256i16;\r
-        uint8_t sequence[32];\r
-        rhash_sha3_256_init(ctx);\r
-        rhash_sha3_update(ctx, fixedMessage + (len*k/4), len/4);//分四轮消耗掉fixedMessage\r
-        rhash_sha3_final(ctx, sequence);\r
-        tmp16->toIdentityMatrix();\r
-\r
-        for(int j = 0; j < 2; j++) {\r
-            // equivalent as tmp=tmp*matlist, i+=1 \r
-            for(int i = 0; i < 32; i += 2) {\r
-                // "mc = ma dot mb.T" in GoLang code\r
-                mat16->mul(*tmp16, matList_int16->at(sequence[i]));\r
-                // "ma = mc" in GoLang code\r
-                tmp16->mul(*mat16, matList_int16->at(sequence[i+1]));\r
-            }\r
-        }\r
-        // "res[k] = mc" in GoLang code\r
-        tmp16->toMatI8(resArr8[k]); // 0.00018s\r
-        delete mat16;\r
-        delete tmp16;\r
-        delete ctx;\r
-    }\r
-\r
-    // 3.7e-05s\r
-    Mat256x256i8 *res8 = new Mat256x256i8;\r
-    res8->add(resArr8[0], resArr8[1]);\r
-    res8->add(*res8, resArr8[2]);\r
-    res8->add(*res8, resArr8[3]);\r
-\r
-    end = clock();    \r
-    // std::cout << "\tTime for getting MulMatix: "\r
-    //           << (double)(end - start) / CLOCKS_PER_SEC * 1000 << "ms"\r
-    //           << std::endl;\r
-\r
-    Arr256x64i32 arr(*res8);\r
-    arr.reduceFNV();\r
-    sha3_ctx *ctx = new sha3_ctx;\r
-    rhash_sha3_256_init(ctx);\r
-    rhash_sha3_update(ctx, arr.d0RawPtr(), 256);\r
-    rhash_sha3_final(ctx, result);\r
-\r
-    delete res8;\r
-    delete[] resArr8;\r
-    delete ctx;\r
-}\r
-\r
-inline void incrNonce(uint8_t nonce[8]) {\r
-    for(int i = 0; i < 8; i++) {\r
-        if(nonce[i] != 255) {\r
-            nonce[i]++;\r
-            break;\r
-        } else {\r
-            nonce[i] = 0;\r
-        }\r
-    }\r
-}\r
-\r
-inline int countLeadingZero(uint8_t result[32]) {\r
-    int count = 0;\r
-    for(int i = 31; i >= 0; i--) { // NOTE: reverse\r
-        if(result[i] < 1) {\r
-            count += 8;\r
-        } else if(result[i]<2)  {\r
-            count += 7;\r
-            break;\r
-        } else if(result[i]<4)  {\r
-            count += 6;\r
-            break;\r
-        } else if(result[i]<8)  {\r
-            count += 5;\r
-            break;\r
-        } else if(result[i]<16) {\r
-            count += 4;\r
-            break;\r
-        } else if(result[i]<32) {\r
-            count += 3;\r
-            break;\r
-        } else if(result[i]<64) {\r
-            count += 2;\r
-            break;\r
-        } else if(result[i]<128) {\r
-            count += 1;\r
-            break;\r
-        }\r
-    }\r
-    return count;\r
-}\r
-\r
-// inline int test_mineBytom(\r
-//     const uint8_t *fixedMessage,\r
-//     uint32_t len,\r
-//     uint8_t nonce[32],\r
-//     int count,\r
-//     int leadingZeroThres)\r
-// {\r
-//   assert(len%4==0);\r
-//   int step;\r
-//   for(step=0; step<count; step++) {\r
-//     uint8_t result[32];\r
-//     //std::cerr<<"Mine step "<<step<<std::endl;\r
-//     iter_mineBytom(fixedMessage,100,nonce,result);\r
-//     std::cerr<<"Mine step "<<step<<std::endl;\r
-//     for (int i = 0; i < 32; i++) {\r
-//       printf("%02x ", result[i]);\r
-//       if (i % 8 == 7)\r
-//         printf("\n");\r
-//     }\r
-//     if (countLeadingZero(result) > leadingZeroThres)\r
-//       return step;\r
-//     incrNonce(nonce);\r
-//   }\r
-//   return step;\r
-// }\r
-\r
-\r
-#endif\r
-\r
diff --git a/mining/tensority/cgo_algorithm/lib/Makefile b/mining/tensority/cgo_algorithm/lib/Makefile
deleted file mode 100644 (file)
index 7453dd3..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-TARGET = cSimdTs
-
-UNAME_S := $(shell uname -s)
-ifeq ($(UNAME_S),Darwin)
-    CXX = /usr/local/opt/llvm/bin/clang++
-else
-       CXX = g++
-endif
-
-CXXFLAGS = -std=c++11 -pthread -mavx2 -O3 -fopenmp -fPIC
-
-.PHONY: clean
-
-all: $(TARGET)
-
-$(TARGET): $(TARGET).cpp
-       $(CXX) -o $@.o -c $^ $(CXXFLAGS)
-
-clean:
-       rm -f *.o *.so *.a
\ No newline at end of file
diff --git a/mining/tensority/cgo_algorithm/lib/byte_order-allInOne.h b/mining/tensority/cgo_algorithm/lib/byte_order-allInOne.h
deleted file mode 100644 (file)
index 8da3e9d..0000000
+++ /dev/null
@@ -1,327 +0,0 @@
-/* byte_order-allInOne.h */
-#ifndef BYTE_ORDER_H
-#define BYTE_ORDER_H
-#include "ustd.h"
-#include <stdlib.h>
-
-#ifdef __GLIBC__
-# include <endian.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* if x86 compatible cpu */
-#if defined(i386) || defined(__i386__) || defined(__i486__) || \
-       defined(__i586__) || defined(__i686__) || defined(__pentium__) || \
-       defined(__pentiumpro__) || defined(__pentium4__) || \
-       defined(__nocona__) || defined(prescott) || defined(__core2__) || \
-       defined(__k6__) || defined(__k8__) || defined(__athlon__) || \
-       defined(__amd64) || defined(__amd64__) || \
-       defined(__x86_64) || defined(__x86_64__) || defined(_M_IX86) || \
-       defined(_M_AMD64) || defined(_M_IA64) || defined(_M_X64)
-/* detect if x86-64 instruction set is supported */
-# if defined(_LP64) || defined(__LP64__) || defined(__x86_64) || \
-       defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
-#  define CPU_X64
-# else
-#  define CPU_IA32
-# endif
-#endif
-
-
-/* detect CPU endianness */
-#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
-               __BYTE_ORDER == __LITTLE_ENDIAN) || \
-       (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
-               __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
-       defined(CPU_IA32) || defined(CPU_X64) || \
-       defined(__ia64) || defined(__ia64__) || defined(__alpha__) || defined(_M_ALPHA) || \
-       defined(vax) || defined(MIPSEL) || defined(_ARM_) || defined(__arm__)
-# define CPU_LITTLE_ENDIAN
-# define IS_BIG_ENDIAN 0
-# define IS_LITTLE_ENDIAN 1
-#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
-               __BYTE_ORDER == __BIG_ENDIAN) || \
-       (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
-               __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
-       defined(__sparc) || defined(__sparc__) || defined(sparc) || \
-       defined(_ARCH_PPC) || defined(_ARCH_PPC64) || defined(_POWER) || \
-       defined(__POWERPC__) || defined(POWERPC) || defined(__powerpc) || \
-       defined(__powerpc__) || defined(__powerpc64__) || defined(__ppc__) || \
-       defined(__hpux)  || defined(_MIPSEB) || defined(mc68000) || \
-       defined(__s390__) || defined(__s390x__) || defined(sel)
-# define CPU_BIG_ENDIAN
-# define IS_BIG_ENDIAN 1
-# define IS_LITTLE_ENDIAN 0
-#else
-# error "Can't detect CPU architechture"
-#endif
-
-#ifndef __has_builtin
-# define __has_builtin(x) 0
-#endif
-
-#define IS_ALIGNED_32(p) (0 == (3 & ((const char*)(p) - (const char*)0)))
-#define IS_ALIGNED_64(p) (0 == (7 & ((const char*)(p) - (const char*)0)))
-
-#if defined(_MSC_VER)
-#define ALIGN_ATTR(n) __declspec(align(n))
-#elif defined(__GNUC__)
-#define ALIGN_ATTR(n) __attribute__((aligned (n)))
-#else
-#define ALIGN_ATTR(n) /* nothing */
-#endif
-
-
-#if defined(_MSC_VER) || defined(__BORLANDC__)
-#define I64(x) x##ui64
-#else
-#define I64(x) x##ULL
-#endif
-
-
-#ifndef __STRICT_ANSI__
-#define RHASH_INLINE inline
-#elif defined(__GNUC__)
-#define RHASH_INLINE __inline__
-#else
-#define RHASH_INLINE
-#endif
-
-/* define rhash_ctz - count traling zero bits */
-#if (defined(__GNUC__) && __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) || \
-    (defined(__clang__) && __has_builtin(__builtin_ctz))
-/* GCC >= 3.4 or clang */
-# define rhash_ctz(x) __builtin_ctz(x)
-#else
-unsigned rhash_ctz(unsigned); /* define as function */
-#endif
-
-/* bswap definitions */
-#if (defined(__GNUC__) && (__GNUC__ >= 4) && (__GNUC__ > 4 || __GNUC_MINOR__ >= 3)) || \
-    (defined(__clang__) && __has_builtin(__builtin_bswap32) && __has_builtin(__builtin_bswap64))
-/* GCC >= 4.3 or clang */
-# define bswap_32(x) __builtin_bswap32(x)
-# define bswap_64(x) __builtin_bswap64(x)
-#elif (_MSC_VER > 1300) && (defined(CPU_IA32) || defined(CPU_X64)) /* MS VC */
-# define bswap_32(x) _byteswap_ulong((unsigned long)x)
-# define bswap_64(x) _byteswap_uint64((__int64)x)
-#else
-/* fallback to generic bswap definition */
-static RHASH_INLINE uint32_t bswap_32(uint32_t x)
-{
-# if defined(__GNUC__) && defined(CPU_IA32) && !defined(__i386__) && !defined(RHASH_NO_ASM)
-       __asm("bswap\t%0" : "=r" (x) : "0" (x)); /* gcc x86 version */
-       return x;
-# else
-       x = ((x << 8) & 0xFF00FF00u) | ((x >> 8) & 0x00FF00FFu);
-       return (x >> 16) | (x << 16);
-# endif
-}
-static RHASH_INLINE uint64_t bswap_64(uint64_t x)
-{
-       union {
-               uint64_t ll;
-               uint32_t l[2];
-       } w, r;
-       w.ll = x;
-       r.l[0] = bswap_32(w.l[1]);
-       r.l[1] = bswap_32(w.l[0]);
-       return r.ll;
-}
-#endif /* bswap definitions */
-
-#ifdef CPU_BIG_ENDIAN
-# define be2me_32(x) (x)
-# define be2me_64(x) (x)
-# define le2me_32(x) bswap_32(x)
-# define le2me_64(x) bswap_64(x)
-
-# define be32_copy(to, index, from, length) memcpy((to) + (index), (from), (length))
-# define le32_copy(to, index, from, length) rhash_swap_copy_str_to_u32((to), (index), (from), (length))
-# define be64_copy(to, index, from, length) memcpy((to) + (index), (from), (length))
-# define le64_copy(to, index, from, length) rhash_swap_copy_str_to_u64((to), (index), (from), (length))
-# define me64_to_be_str(to, from, length) memcpy((to), (from), (length))
-# define me64_to_le_str(to, from, length) rhash_swap_copy_u64_to_str((to), (from), (length))
-
-#else /* CPU_BIG_ENDIAN */
-# define be2me_32(x) bswap_32(x)
-# define be2me_64(x) bswap_64(x)
-# define le2me_32(x) (x)
-# define le2me_64(x) (x)
-
-# define be32_copy(to, index, from, length) rhash_swap_copy_str_to_u32((to), (index), (from), (length))
-# define le32_copy(to, index, from, length) memcpy((to) + (index), (from), (length))
-# define be64_copy(to, index, from, length) rhash_swap_copy_str_to_u64((to), (index), (from), (length))
-# define le64_copy(to, index, from, length) memcpy((to) + (index), (from), (length))
-# define me64_to_be_str(to, from, length) rhash_swap_copy_u64_to_str((to), (from), (length))
-# define me64_to_le_str(to, from, length) memcpy((to), (from), (length))
-#endif /* CPU_BIG_ENDIAN */
-
-/* ROTL/ROTR macros rotate a 32/64-bit word left/right by n bits */
-#define ROTL32(dword, n) ((dword) << (n) ^ ((dword) >> (32 - (n))))
-#define ROTR32(dword, n) ((dword) >> (n) ^ ((dword) << (32 - (n))))
-#define ROTL64(qword, n) ((qword) << (n) ^ ((qword) >> (64 - (n))))
-#define ROTR64(qword, n) ((qword) >> (n) ^ ((qword) << (64 - (n))))
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif /* __cplusplus */
-
-#endif /* BYTE_ORDER_H */
-
-
-// Apdated from byte_order.c
-/* byte_order.c - byte order related platform dependent routines,
- *
- * Copyright: 2008-2012 Aleksey Kravchenko <rhash.admin@gmail.com>
- *
- * Permission is hereby granted,  free of charge,  to any person  obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction,  including without limitation
- * the rights to  use, copy, modify,  merge, publish, distribute, sublicense,
- * and/or sell copies  of  the Software,  and to permit  persons  to whom the
- * Software is furnished to do so.
- *
- * This program  is  distributed  in  the  hope  that it will be useful,  but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.  Use this program  at  your own risk!
- */
-
-#ifndef rhash_ctz
-
-#  if _MSC_VER >= 1300 && (_M_IX86 || _M_AMD64 || _M_IA64) /* if MSVC++ >= 2002 on x86/x64 */
-#  include <intrin.h>
-#  pragma intrinsic(_BitScanForward)
-
-/**
- * Returns index of the trailing bit of x.
- *
- * @param x the number to process
- * @return zero-based index of the trailing bit
- */
-inline unsigned rhash_ctz(unsigned x)
-{
-       unsigned long index;
-       unsigned char isNonzero = _BitScanForward(&index, x); /* MSVC intrinsic */
-       return (isNonzero ? (unsigned)index : 0);
-}
-#  else /* _MSC_VER >= 1300... */
-
-/**
- * Returns index of the trailing bit of a 32-bit number.
- * This is a plain C equivalent for GCC __builtin_ctz() bit scan.
- *
- * @param x the number to process
- * @return zero-based index of the trailing bit
- */
-inline unsigned rhash_ctz(unsigned x)
-{
-       /* array for conversion to bit position */
-       static unsigned char bit_pos[32] =  {
-               0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
-               31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
-       };
-
-       /* The De Bruijn bit-scan was devised in 1997, according to Donald Knuth
-        * by Martin Lauter. The constant 0x077CB531UL is a De Bruijn sequence,
-        * which produces a unique pattern of bits into the high 5 bits for each
-        * possible bit position that it is multiplied against.
-        * See http://graphics.stanford.edu/~seander/bithacks.html
-        * and http://chessprogramming.wikispaces.com/BitScan */
-       return (unsigned)bit_pos[((uint32_t)((x & -x) * 0x077CB531U)) >> 27];
-}
-#  endif /* _MSC_VER >= 1300... */
-#endif /* rhash_ctz */
-
-/**
- * Copy a memory block with simultaneous exchanging byte order.
- * The byte order is changed from little-endian 32-bit integers
- * to big-endian (or vice-versa).
- *
- * @param to the pointer where to copy memory block
- * @param index the index to start writing from
- * @param from  the source block to copy
- * @param length length of the memory block
- */
-inline void rhash_swap_copy_str_to_u32(void* to, int index, const void* from, size_t length)
-{
-       /* if all pointers and length are 32-bits aligned */
-       if ( 0 == (( (int)((char*)to - (char*)0) | ((char*)from - (char*)0) | index | length ) & 3) ) {
-               /* copy memory as 32-bit words */
-               const uint32_t* src = (const uint32_t*)from;
-               const uint32_t* end = (const uint32_t*)((const char*)src + length);
-               uint32_t* dst = (uint32_t*)((char*)to + index);
-               for (; src < end; dst++, src++)
-                       *dst = bswap_32(*src);
-       } else {
-               const char* src = (const char*)from;
-               for (length += index; (size_t)index < length; index++)
-                       ((char*)to)[index ^ 3] = *(src++);
-       }
-}
-
-/**
- * Copy a memory block with changed byte order.
- * The byte order is changed from little-endian 64-bit integers
- * to big-endian (or vice-versa).
- *
- * @param to     the pointer where to copy memory block
- * @param index  the index to start writing from
- * @param from   the source block to copy
- * @param length length of the memory block
- */
-inline void rhash_swap_copy_str_to_u64(void* to, int index, const void* from, size_t length)
-{
-       /* if all pointers and length are 64-bits aligned */
-       if ( 0 == (( (int)((char*)to - (char*)0) | ((char*)from - (char*)0) | index | length ) & 7) ) {
-               /* copy aligned memory block as 64-bit integers */
-               const uint64_t* src = (const uint64_t*)from;
-               const uint64_t* end = (const uint64_t*)((const char*)src + length);
-               uint64_t* dst = (uint64_t*)((char*)to + index);
-               while (src < end) *(dst++) = bswap_64( *(src++) );
-       } else {
-               const char* src = (const char*)from;
-               for (length += index; (size_t)index < length; index++) ((char*)to)[index ^ 7] = *(src++);
-       }
-}
-
-/**
- * Copy data from a sequence of 64-bit words to a binary string of given length,
- * while changing byte order.
- *
- * @param to     the binary string to receive data
- * @param from   the source sequence of 64-bit words
- * @param length the size in bytes of the data being copied
- */
-inline void rhash_swap_copy_u64_to_str(void* to, const void* from, size_t length)
-{
-       /* if all pointers and length are 64-bits aligned */
-       if ( 0 == (( (int)((char*)to - (char*)0) | ((char*)from - (char*)0) | length ) & 7) ) {
-               /* copy aligned memory block as 64-bit integers */
-               const uint64_t* src = (const uint64_t*)from;
-               const uint64_t* end = (const uint64_t*)((const char*)src + length);
-               uint64_t* dst = (uint64_t*)to;
-               while (src < end) *(dst++) = bswap_64( *(src++) );
-       } else {
-               size_t index;
-               char* dst = (char*)to;
-               for (index = 0; index < length; index++) *(dst++) = ((char*)from)[index ^ 7];
-       }
-}
-
-/**
- * Exchange byte order in the given array of 32-bit integers.
- *
- * @param arr    the array to process
- * @param length array length
- */
-inline void rhash_u32_mem_swap(unsigned *arr, int length)
-{
-       unsigned* end = arr + length;
-       for (; arr < end; arr++) {
-               *arr = bswap_32(*arr);
-       }
-}
diff --git a/mining/tensority/cgo_algorithm/lib/cSimdTs.cpp b/mining/tensority/cgo_algorithm/lib/cSimdTs.cpp
deleted file mode 100644 (file)
index 20e46e9..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-#include <iostream>
-#include <cstdio>
-#include <map>
-#include <mutex>
-#include <signal.h>
-#include "cSimdTs.h"
-#include "BytomPoW.h"
-#include "seed.h"
-
-using namespace std;
-
-BytomMatList16* matList_int16;
-uint8_t result[32] = {0};
-map <vector<uint8_t>, BytomMatList16*> seedCache;
-static const int cacheSize = 42; //"Answer to the Ultimate Question of Life, the Universe, and Everything"
-mutex mtx;
-
-uint8_t *SimdTs(uint8_t blockheader[32], uint8_t seed[32]){
-    mtx.lock();
-    vector<uint8_t> seedVec(seed, seed + 32);
-
-    if(seedCache.find(seedVec) != seedCache.end()) {
-        // printf("\t---%s---\n", "Seed already exists in the cache.");
-        matList_int16 = seedCache[seedVec];
-    } else {
-        uint32_t exted[32];
-        extend(exted, seed); // extends seed to exted
-        Words32 extSeed;
-        init_seed(extSeed, exted);
-
-        matList_int16 = new BytomMatList16;
-        matList_int16->init(extSeed);
-
-        seedCache.insert(make_pair(seedVec, matList_int16));
-    }
-
-    iter_mineBytom(blockheader, 32, result);
-
-    if(seedCache.size() > cacheSize) {
-        for(map<vector<uint8_t>, BytomMatList16*>::iterator it=seedCache.begin(); it!=seedCache.end(); ++it){
-            delete it->second;
-        }
-        seedCache.clear();
-    }
-
-    mtx.unlock();
-    return result;
-}
diff --git a/mining/tensority/cgo_algorithm/lib/cSimdTs.h b/mining/tensority/cgo_algorithm/lib/cSimdTs.h
deleted file mode 100644 (file)
index f61a2bf..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _C_SIMD_TENSOR_H_
-#define _C_SIMD_TENSOR_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-       #include <stdint.h>
-
-       uint8_t *SimdTs(uint8_t blockheader[32], uint8_t seed[32]);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/mining/tensority/cgo_algorithm/lib/scrypt.h b/mining/tensority/cgo_algorithm/lib/scrypt.h
deleted file mode 100644 (file)
index 24a9b4d..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/* scrypt.h */\r
-#ifndef SCRYPT_H\r
-#define SCRYPT_H\r
-\r
-#include <stdint.h>\r
-#include <assert.h>\r
-#include <stdio.h>\r
-\r
-struct Words16 {\r
-  uint32_t w[16];\r
-};\r
-\r
-#define ROTL(a, b) (((a) << (b)) | ((a) >> (32 - (b))))\r
-\r
-inline void xor_salsa8(uint32_t B[16], const uint32_t Bx[16]) {\r
-  uint32_t x00,x01,x02,x03,x04,x05,x06,x07,x08,x09,x10,x11,x12,x13,x14,x15;\r
-  int i;\r
-\r
-  x00 = (B[ 0] ^= Bx[ 0]);\r
-  x01 = (B[ 1] ^= Bx[ 1]);\r
-  x02 = (B[ 2] ^= Bx[ 2]);\r
-  x03 = (B[ 3] ^= Bx[ 3]);\r
-  x04 = (B[ 4] ^= Bx[ 4]);\r
-  x05 = (B[ 5] ^= Bx[ 5]);\r
-  x06 = (B[ 6] ^= Bx[ 6]);\r
-  x07 = (B[ 7] ^= Bx[ 7]);\r
-  x08 = (B[ 8] ^= Bx[ 8]);\r
-  x09 = (B[ 9] ^= Bx[ 9]);\r
-  x10 = (B[10] ^= Bx[10]);\r
-  x11 = (B[11] ^= Bx[11]);\r
-  x12 = (B[12] ^= Bx[12]);\r
-  x13 = (B[13] ^= Bx[13]);\r
-  x14 = (B[14] ^= Bx[14]);\r
-  x15 = (B[15] ^= Bx[15]);\r
-  for (i = 0; i < 8; i += 2) {\r
-    /* Operate on columns. */\r
-    x04 ^= ROTL(x00 + x12,  7);  x09 ^= ROTL(x05 + x01,  7);\r
-    x14 ^= ROTL(x10 + x06,  7);  x03 ^= ROTL(x15 + x11,  7);\r
-\r
-    x08 ^= ROTL(x04 + x00,  9);  x13 ^= ROTL(x09 + x05,  9);\r
-    x02 ^= ROTL(x14 + x10,  9);  x07 ^= ROTL(x03 + x15,  9);\r
-\r
-    x12 ^= ROTL(x08 + x04, 13);  x01 ^= ROTL(x13 + x09, 13);\r
-    x06 ^= ROTL(x02 + x14, 13);  x11 ^= ROTL(x07 + x03, 13);\r
-\r
-    x00 ^= ROTL(x12 + x08, 18);  x05 ^= ROTL(x01 + x13, 18);\r
-    x10 ^= ROTL(x06 + x02, 18);  x15 ^= ROTL(x11 + x07, 18);\r
-\r
-    /* Operate on rows. */\r
-    x01 ^= ROTL(x00 + x03,  7);  x06 ^= ROTL(x05 + x04,  7);\r
-    x11 ^= ROTL(x10 + x09,  7);  x12 ^= ROTL(x15 + x14,  7);\r
-\r
-    x02 ^= ROTL(x01 + x00,  9);  x07 ^= ROTL(x06 + x05,  9);\r
-    x08 ^= ROTL(x11 + x10,  9);  x13 ^= ROTL(x12 + x15,  9);\r
-\r
-    x03 ^= ROTL(x02 + x01, 13);  x04 ^= ROTL(x07 + x06, 13);\r
-    x09 ^= ROTL(x08 + x11, 13);  x14 ^= ROTL(x13 + x12, 13);\r
-\r
-    x00 ^= ROTL(x03 + x02, 18);  x05 ^= ROTL(x04 + x07, 18);\r
-    x10 ^= ROTL(x09 + x08, 18);  x15 ^= ROTL(x14 + x13, 18);\r
-  }\r
-  B[ 0] += x00;\r
-  B[ 1] += x01;\r
-  B[ 2] += x02;\r
-  B[ 3] += x03;\r
-  B[ 4] += x04;\r
-  B[ 5] += x05;\r
-  B[ 6] += x06;\r
-  B[ 7] += x07;\r
-  B[ 8] += x08;\r
-  B[ 9] += x09;\r
-  B[10] += x10;\r
-  B[11] += x11;\r
-  B[12] += x12;\r
-  B[13] += x13;\r
-  B[14] += x14;\r
-  B[15] += x15;\r
-}\r
-\r
-struct Words32 {\r
-  Words16 lo, hi;\r
-  uint32_t get(uint32_t i) const {\r
-    if(i<16) return lo.w[i];\r
-    else if(i<32) return hi.w[i-16];\r
-    else assert(false);\r
-  }\r
-  void xor_other(const Words32& other) {\r
-    for(int i=0; i<16; i++) lo.w[i]^=other.lo.w[i];\r
-    for(int i=0; i<16; i++) hi.w[i]^=other.hi.w[i];\r
-  }\r
-};\r
-\r
-struct LTCMemory {\r
-  Words32 w32[1024];\r
-  const Words32& get(uint32_t i) const {\r
-    assert(i<1024);\r
-    return w32[i];\r
-  }\r
-  void printItems() {\r
-    printf("\nprint scrypt items\n");\r
-    for(int i = 0; i < 16; i++) {\r
-      printf(" ");\r
-      printf(" %u ", uint32_t(this->get(0).lo.w[i]));\r
-    }\r
-  }\r
-  void scrypt(Words32& X) {\r
-    for (int i = 0; i < 1024; i++) {\r
-      w32[i]=X;\r
-      xor_salsa8(X.lo.w, X.hi.w);\r
-      xor_salsa8(X.hi.w, X.lo.w);\r
-    }\r
-    for (int i = 0; i < 1024; i++) {\r
-      int j = X.hi.w[0] & 1023;\r
-      X.xor_other(w32[j]);\r
-      xor_salsa8(X.lo.w, X.hi.w);\r
-      xor_salsa8(X.hi.w, X.lo.w);\r
-    }\r
-  }\r
-};\r
-\r
-#endif\r
diff --git a/mining/tensority/cgo_algorithm/lib/seed.h b/mining/tensority/cgo_algorithm/lib/seed.h
deleted file mode 100644 (file)
index cc0b5df..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/* seed.h */
-#ifndef SEED_H
-#define SEED_H
-
-inline void extend(uint32_t* exted, uint8_t *g_seed){
-    sha3_ctx *ctx = (sha3_ctx*)calloc(1, sizeof(*ctx));
-    // uint8_t seedHash[4*32];
-    uint8_t seedHash[4][32];
-
-    //  std::copy beats memcpy
-    // std::copy(g_seed, g_seed + 32, seedHash);
-    std::copy(g_seed, g_seed + 32, seedHash[0]);
-    
-    for(int i = 0; i < 3; ++i) {
-        rhash_sha3_256_init(ctx);
-        // rhash_sha3_update(ctx, seedHash+i*32, 32);
-        // rhash_sha3_final(ctx, seedHash+(i+1)*32);
-        rhash_sha3_update(ctx, seedHash[i], 32);
-        rhash_sha3_final(ctx, seedHash[i+1]);
-    }
-
-    for(int i = 0; i < 32; ++i) {
-//        exted[i] =  ((*(seedHash+i*4+3))<<24) +
-//                    ((*(seedHash+i*4+2))<<16) +
-//                    ((*(seedHash+i*4+1))<<8) +
-//                    (*(seedHash+i*4));
-        exted[i] =  (seedHash[i/8][(i*4+3)%32]<<24) +
-                    (seedHash[i/8][(i*4+2)%32]<<16) +
-                    (seedHash[i/8][(i*4+1)%32]<<8) +
-                    seedHash[i/8][(i*4)%32];
-    }
-
-    free(ctx);
-}
-
-inline void init_seed(Words32 &seed, uint32_t _seed[32])
-{
-    for (int i = 0; i < 16; i++)
-        seed.lo.w[i] = _seed[i];
-    for (int i = 0; i < 16; i++)
-        seed.hi.w[i] = _seed[16 + i];
-}
-
-#endif
\ No newline at end of file
diff --git a/mining/tensority/cgo_algorithm/lib/sha3-allInOne.h b/mining/tensority/cgo_algorithm/lib/sha3-allInOne.h
deleted file mode 100644 (file)
index e931604..0000000
+++ /dev/null
@@ -1,416 +0,0 @@
-/* sha3-allInOne.h */
-#ifndef RHASH_SHA3_H
-#define RHASH_SHA3_H
-#include "ustd.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define sha3_224_hash_size  28
-#define sha3_256_hash_size  32
-#define sha3_384_hash_size  48
-#define sha3_512_hash_size  64
-#define sha3_max_permutation_size 25
-#define sha3_max_rate_in_qwords 24
-
-/**
- * SHA3 Algorithm context.
- */
-typedef struct sha3_ctx
-{
-       /* 1600 bits algorithm hashing state */
-       uint64_t hash[sha3_max_permutation_size];
-       /* 1536-bit buffer for leftovers */
-       uint64_t message[sha3_max_rate_in_qwords];
-       /* count of bytes in the message[] buffer */
-       unsigned rest;
-       /* size of a message block processed at once */
-       unsigned block_size;
-} sha3_ctx;
-
-/* methods for calculating the hash function */
-
-// void rhash_sha3_224_init(sha3_ctx *ctx);
-// void rhash_sha3_256_init(sha3_ctx *ctx);
-// void rhash_sha3_384_init(sha3_ctx *ctx);
-// void rhash_sha3_512_init(sha3_ctx *ctx);
-// void rhash_sha3_update(sha3_ctx *ctx, const unsigned char* msg, size_t size);
-// void rhash_sha3_final(sha3_ctx *ctx, unsigned char* result);
-
-#ifdef USE_KECCAK
-#define rhash_keccak_224_init rhash_sha3_224_init
-#define rhash_keccak_256_init rhash_sha3_256_init
-#define rhash_keccak_384_init rhash_sha3_384_init
-#define rhash_keccak_512_init rhash_sha3_512_init
-#define rhash_keccak_update rhash_sha3_update
-inline void rhash_keccak_final(sha3_ctx *ctx, unsigned char* result);
-#endif
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif /* __cplusplus */
-
-
-
-// Adpated from sha3.c
-/*--------------------------------------------------------------------------*/
-/* sha3.c - an implementation of Secure Hash Algorithm 3 (Keccak).
- * based on the
- * The Keccak SHA-3 submission. Submission to NIST (Round 3), 2011
- * by Guido Bertoni, Joan Daemen, Michaël Peeters and Gilles Van Assche
- *
- * Copyright: 2013 Aleksey Kravchenko <rhash.admin@gmail.com>
- *
- * Permission is hereby granted,  free of charge,  to any person  obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction,  including without limitation
- * the rights to  use, copy, modify,  merge, publish, distribute, sublicense,
- * and/or sell copies  of  the Software,  and to permit  persons  to whom the
- * Software is furnished to do so.
- *
- * This program  is  distributed  in  the  hope  that it will be useful,  but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.  Use this program  at  your own risk!
- */
-
-#include <assert.h>
-#include <string.h>
-#include "byte_order-allInOne.h"
-
-/* constants */
-#define NumberOfRounds 24
-
-/* SHA3 (Keccak) constants for 24 rounds */
-static uint64_t keccak_round_constants[NumberOfRounds] = {
-    I64(0x0000000000000001), I64(0x0000000000008082), I64(0x800000000000808A), I64(0x8000000080008000),
-    I64(0x000000000000808B), I64(0x0000000080000001), I64(0x8000000080008081), I64(0x8000000000008009),
-    I64(0x000000000000008A), I64(0x0000000000000088), I64(0x0000000080008009), I64(0x000000008000000A),
-    I64(0x000000008000808B), I64(0x800000000000008B), I64(0x8000000000008089), I64(0x8000000000008003),
-    I64(0x8000000000008002), I64(0x8000000000000080), I64(0x000000000000800A), I64(0x800000008000000A),
-    I64(0x8000000080008081), I64(0x8000000000008080), I64(0x0000000080000001), I64(0x8000000080008008)
-};
-
-/* Initializing a sha3 context for given number of output bits */
-inline void rhash_keccak_init(sha3_ctx *ctx, unsigned bits)
-{
-    /* NB: The Keccak capacity parameter = bits * 2 */
-    unsigned rate = 1600 - bits * 2;
-
-    memset(ctx, 0, sizeof(sha3_ctx));
-    ctx->block_size = rate / 8;
-    assert(rate <= 1600 && (rate % 64) == 0);
-}
-
-/**
- * Initialize context before calculating hash.
- *
- * @param ctx context to initialize
- */
-inline void rhash_sha3_224_init(sha3_ctx *ctx)
-{
-    rhash_keccak_init(ctx, 224);
-}
-
-/**
- * Initialize context before calculating hash.
- *
- * @param ctx context to initialize
- */
-inline void rhash_sha3_256_init(sha3_ctx *ctx)
-{
-    rhash_keccak_init(ctx, 256);
-}
-
-/**
- * Initialize context before calculating hash.
- *
- * @param ctx context to initialize
- */
-inline void rhash_sha3_384_init(sha3_ctx *ctx)
-{
-    rhash_keccak_init(ctx, 384);
-}
-
-/**
- * Initialize context before calculating hash.
- *
- * @param ctx context to initialize
- */
-inline void rhash_sha3_512_init(sha3_ctx *ctx)
-{
-    rhash_keccak_init(ctx, 512);
-}
-
-/* Keccak theta() transformation */
-inline void keccak_theta(uint64_t *A)
-{
-    unsigned int x;
-    uint64_t C[5], D[5];
-
-    for (x = 0; x < 5; x++) {
-        C[x] = A[x] ^ A[x + 5] ^ A[x + 10] ^ A[x + 15] ^ A[x + 20];
-    }
-    D[0] = ROTL64(C[1], 1) ^ C[4];
-    D[1] = ROTL64(C[2], 1) ^ C[0];
-    D[2] = ROTL64(C[3], 1) ^ C[1];
-    D[3] = ROTL64(C[4], 1) ^ C[2];
-    D[4] = ROTL64(C[0], 1) ^ C[3];
-
-    for (x = 0; x < 5; x++) {
-        A[x]      ^= D[x];
-        A[x + 5]  ^= D[x];
-        A[x + 10] ^= D[x];
-        A[x + 15] ^= D[x];
-        A[x + 20] ^= D[x];
-    }
-}
-
-/* Keccak pi() transformation */
-inline void keccak_pi(uint64_t *A)
-{
-    uint64_t A1;
-    A1 = A[1];
-    A[ 1] = A[ 6];
-    A[ 6] = A[ 9];
-    A[ 9] = A[22];
-    A[22] = A[14];
-    A[14] = A[20];
-    A[20] = A[ 2];
-    A[ 2] = A[12];
-    A[12] = A[13];
-    A[13] = A[19];
-    A[19] = A[23];
-    A[23] = A[15];
-    A[15] = A[ 4];
-    A[ 4] = A[24];
-    A[24] = A[21];
-    A[21] = A[ 8];
-    A[ 8] = A[16];
-    A[16] = A[ 5];
-    A[ 5] = A[ 3];
-    A[ 3] = A[18];
-    A[18] = A[17];
-    A[17] = A[11];
-    A[11] = A[ 7];
-    A[ 7] = A[10];
-    A[10] = A1;
-    /* note: A[ 0] is left as is */
-}
-
-/* Keccak chi() transformation */
-inline void keccak_chi(uint64_t *A)
-{
-    int i;
-    for (i = 0; i < 25; i += 5) {
-        uint64_t A0 = A[0 + i], A1 = A[1 + i];
-        A[0 + i] ^= ~A1 & A[2 + i];
-        A[1 + i] ^= ~A[2 + i] & A[3 + i];
-        A[2 + i] ^= ~A[3 + i] & A[4 + i];
-        A[3 + i] ^= ~A[4 + i] & A0;
-        A[4 + i] ^= ~A0 & A1;
-    }
-}
-
-inline void rhash_sha3_permutation(uint64_t *state)
-{
-    int round;
-    for (round = 0; round < NumberOfRounds; round++)
-    {
-        keccak_theta(state);
-
-        /* apply Keccak rho() transformation */
-        state[ 1] = ROTL64(state[ 1],  1);
-        state[ 2] = ROTL64(state[ 2], 62);
-        state[ 3] = ROTL64(state[ 3], 28);
-        state[ 4] = ROTL64(state[ 4], 27);
-        state[ 5] = ROTL64(state[ 5], 36);
-        state[ 6] = ROTL64(state[ 6], 44);
-        state[ 7] = ROTL64(state[ 7],  6);
-        state[ 8] = ROTL64(state[ 8], 55);
-        state[ 9] = ROTL64(state[ 9], 20);
-        state[10] = ROTL64(state[10],  3);
-        state[11] = ROTL64(state[11], 10);
-        state[12] = ROTL64(state[12], 43);
-        state[13] = ROTL64(state[13], 25);
-        state[14] = ROTL64(state[14], 39);
-        state[15] = ROTL64(state[15], 41);
-        state[16] = ROTL64(state[16], 45);
-        state[17] = ROTL64(state[17], 15);
-        state[18] = ROTL64(state[18], 21);
-        state[19] = ROTL64(state[19],  8);
-        state[20] = ROTL64(state[20], 18);
-        state[21] = ROTL64(state[21],  2);
-        state[22] = ROTL64(state[22], 61);
-        state[23] = ROTL64(state[23], 56);
-        state[24] = ROTL64(state[24], 14);
-
-        keccak_pi(state);
-        keccak_chi(state);
-
-        /* apply iota(state, round) */
-        *state ^= keccak_round_constants[round];
-    }
-}
-
-/**
- * The core transformation. Process the specified block of data.
- *
- * @param hash the algorithm state
- * @param block the message block to process
- * @param block_size the size of the processed block in bytes
- */
-inline void rhash_sha3_process_block(uint64_t hash[25], const uint64_t *block, size_t block_size)
-{
-    /* expanded loop */
-    hash[ 0] ^= le2me_64(block[ 0]);
-    hash[ 1] ^= le2me_64(block[ 1]);
-    hash[ 2] ^= le2me_64(block[ 2]);
-    hash[ 3] ^= le2me_64(block[ 3]);
-    hash[ 4] ^= le2me_64(block[ 4]);
-    hash[ 5] ^= le2me_64(block[ 5]);
-    hash[ 6] ^= le2me_64(block[ 6]);
-    hash[ 7] ^= le2me_64(block[ 7]);
-    hash[ 8] ^= le2me_64(block[ 8]);
-    /* if not sha3-512 */
-    if (block_size > 72) {
-        hash[ 9] ^= le2me_64(block[ 9]);
-        hash[10] ^= le2me_64(block[10]);
-        hash[11] ^= le2me_64(block[11]);
-        hash[12] ^= le2me_64(block[12]);
-        /* if not sha3-384 */
-        if (block_size > 104) {
-            hash[13] ^= le2me_64(block[13]);
-            hash[14] ^= le2me_64(block[14]);
-            hash[15] ^= le2me_64(block[15]);
-            hash[16] ^= le2me_64(block[16]);
-            /* if not sha3-256 */
-            if (block_size > 136) {
-                hash[17] ^= le2me_64(block[17]);
-#ifdef FULL_SHA3_FAMILY_SUPPORT
-                /* if not sha3-224 */
-                if (block_size > 144) {
-                    hash[18] ^= le2me_64(block[18]);
-                    hash[19] ^= le2me_64(block[19]);
-                    hash[20] ^= le2me_64(block[20]);
-                    hash[21] ^= le2me_64(block[21]);
-                    hash[22] ^= le2me_64(block[22]);
-                    hash[23] ^= le2me_64(block[23]);
-                    hash[24] ^= le2me_64(block[24]);
-                }
-#endif
-            }
-        }
-    }
-    /* make a permutation of the hash */
-    rhash_sha3_permutation(hash);
-}
-
-#define SHA3_FINALIZED 0x80000000
-
-/**
- * Calculate message hash.
- * Can be called repeatedly with chunks of the message to be hashed.
- *
- * @param ctx the algorithm context containing current hashing state
- * @param msg message chunk
- * @param size length of the message chunk
- */
-inline void rhash_sha3_update(sha3_ctx *ctx, const unsigned char *msg, size_t size)
-{
-    size_t index = (size_t)ctx->rest;
-    size_t block_size = (size_t)ctx->block_size;
-
-    if (ctx->rest & SHA3_FINALIZED) return; /* too late for additional input */
-    ctx->rest = (unsigned)((ctx->rest + size) % block_size);
-
-    /* fill partial block */
-    if (index) {
-        size_t left = block_size - index;
-        memcpy((char*)ctx->message + index, msg, (size < left ? size : left));
-        if (size < left) return;
-
-        /* process partial block */
-        rhash_sha3_process_block(ctx->hash, ctx->message, block_size);
-        msg  += left;
-        size -= left;
-    }
-    while (size >= block_size) {
-        uint64_t* aligned_message_block;
-        if (IS_ALIGNED_64(msg)) {
-            /* the most common case is processing of an already aligned message
-            without copying it */
-            aligned_message_block = (uint64_t*)msg;
-        } else {
-            memcpy(ctx->message, msg, block_size);
-            aligned_message_block = ctx->message;
-        }
-
-        rhash_sha3_process_block(ctx->hash, aligned_message_block, block_size);
-        msg  += block_size;
-        size -= block_size;
-    }
-    if (size) {
-        memcpy(ctx->message, msg, size); /* save leftovers */
-    }
-}
-
-/**
- * Store calculated hash into the given array.
- *
- * @param ctx the algorithm context containing current hashing state
- * @param result calculated hash in binary form
- */
-inline void rhash_sha3_final(sha3_ctx *ctx, unsigned char* result)
-{
-    size_t digest_length = 100 - ctx->block_size / 2;
-    const size_t block_size = ctx->block_size;
-
-    if (!(ctx->rest & SHA3_FINALIZED))
-    {
-        /* clear the rest of the data queue */
-        memset((char*)ctx->message + ctx->rest, 0, block_size - ctx->rest);
-        ((char*)ctx->message)[ctx->rest] |= 0x06;
-        ((char*)ctx->message)[block_size - 1] |= 0x80;
-
-        /* process final block */
-        rhash_sha3_process_block(ctx->hash, ctx->message, block_size);
-        ctx->rest = SHA3_FINALIZED; /* mark context as finalized */
-    }
-
-    assert(block_size > digest_length);
-    if (result) me64_to_le_str(result, ctx->hash, digest_length);
-}
-
-#ifdef USE_KECCAK
-/**
-* Store calculated hash into the given array.
-*
-* @param ctx the algorithm context containing current hashing state
-* @param result calculated hash in binary form
-*/
-inline void rhash_keccak_final(sha3_ctx *ctx, unsigned char* result)
-{
-    size_t digest_length = 100 - ctx->block_size / 2;
-    const size_t block_size = ctx->block_size;
-
-    if (!(ctx->rest & SHA3_FINALIZED))
-    {
-        /* clear the rest of the data queue */
-        memset((char*)ctx->message + ctx->rest, 0, block_size - ctx->rest);
-        ((char*)ctx->message)[ctx->rest] |= 0x01;
-        ((char*)ctx->message)[block_size - 1] |= 0x80;
-
-        /* process final block */
-        rhash_sha3_process_block(ctx->hash, ctx->message, block_size);
-        ctx->rest = SHA3_FINALIZED; /* mark context as finalized */
-    }
-
-    assert(block_size > digest_length);
-    if (result) me64_to_le_str(result, ctx->hash, digest_length);
-}
-#endif /* USE_KECCAK */
-
-
-
-#endif /* RHASH_SHA3_H */
\ No newline at end of file
diff --git a/mining/tensority/cgo_algorithm/lib/ustd.h b/mining/tensority/cgo_algorithm/lib/ustd.h
deleted file mode 100644 (file)
index 94f1ae2..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/* ustd.h common macros and includes */
-#ifndef LIBRHASH_USTD_H
-#define LIBRHASH_USTD_H
-
-#if _MSC_VER >= 1300
-
-# define int64_t __int64
-# define int32_t __int32
-# define int16_t __int16
-# define int8_t  __int8
-# define uint64_t unsigned __int64
-# define uint32_t unsigned __int32
-# define uint16_t unsigned __int16
-# define uint8_t  unsigned __int8
-
-/* disable warnings: The POSIX name for this item is deprecated. Use the ISO C++ conformant name. */
-#pragma warning(disable : 4996)
-
-#else /* _MSC_VER >= 1300 */
-
-# include <stdint.h>
-# include <unistd.h>
-
-#endif /* _MSC_VER >= 1300 */
-
-#if _MSC_VER <= 1300
-# include <stdlib.h> /* size_t for vc6.0 */
-#endif /* _MSC_VER <= 1300 */
-
-#endif /* LIBRHASH_USTD_H */
diff --git a/mining/tensority/go_algorithm/algorithm.go b/mining/tensority/go_algorithm/algorithm.go
deleted file mode 100644 (file)
index 0e5f5a6..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-package go_algorithm
-
-import (
-       "github.com/bytom/bytom/protocol/bc"
-)
-
-func LegacyAlgorithm(bh, seed *bc.Hash) *bc.Hash {
-       cache := calcSeedCache(seed.Bytes())
-       data := mulMatrix(bh.Bytes(), cache)
-       return hashMatrix(data)
-}
diff --git a/mining/tensority/go_algorithm/algorithm_test.go b/mining/tensority/go_algorithm/algorithm_test.go
deleted file mode 100644 (file)
index e2ddd78..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-package go_algorithm
-
-import (
-       "reflect"
-       "runtime"
-       "testing"
-       "time"
-
-       "github.com/bytom/bytom/protocol/bc"
-)
-
-var tests = []struct {
-       blockHeader [32]byte
-       seed        [32]byte
-       hash        [32]byte
-}{
-       {
-               blockHeader: [32]byte{
-                       0xd0, 0xda, 0xd7, 0x3f, 0xb2, 0xda, 0xbf, 0x33,
-                       0x53, 0xfd, 0xa1, 0x55, 0x71, 0xb4, 0xe5, 0xf6,
-                       0xac, 0x62, 0xff, 0x18, 0x7b, 0x35, 0x4f, 0xad,
-                       0xd4, 0x84, 0x0d, 0x9f, 0xf2, 0xf1, 0xaf, 0xdf,
-               },
-               seed: [32]byte{
-                       0x07, 0x37, 0x52, 0x07, 0x81, 0x34, 0x5b, 0x11,
-                       0xb7, 0xbd, 0x0f, 0x84, 0x3c, 0x1b, 0xdd, 0x9a,
-                       0xea, 0x81, 0xb6, 0xda, 0x94, 0xfd, 0x14, 0x1c,
-                       0xc9, 0xf2, 0xdf, 0x53, 0xac, 0x67, 0x44, 0xd2,
-               },
-               hash: [32]byte{
-                       0xe3, 0x5d, 0xa5, 0x47, 0x95, 0xd8, 0x2f, 0x85,
-                       0x49, 0xc0, 0xe5, 0x80, 0xcb, 0xf2, 0xe3, 0x75,
-                       0x7a, 0xb5, 0xef, 0x8f, 0xed, 0x1b, 0xdb, 0xe4,
-                       0x39, 0x41, 0x6c, 0x7e, 0x6f, 0x8d, 0xf2, 0x27,
-               },
-       },
-       {
-               blockHeader: [32]byte{
-                       0xd0, 0xda, 0xd7, 0x3f, 0xb2, 0xda, 0xbf, 0x33,
-                       0x53, 0xfd, 0xa1, 0x55, 0x71, 0xb4, 0xe5, 0xf6,
-                       0xac, 0x62, 0xff, 0x18, 0x7b, 0x35, 0x4f, 0xad,
-                       0xd4, 0x84, 0x0d, 0x9f, 0xf2, 0xf1, 0xaf, 0xdf,
-               },
-               seed: [32]byte{
-                       0x07, 0x37, 0x52, 0x07, 0x81, 0x34, 0x5b, 0x11,
-                       0xb7, 0xbd, 0x0f, 0x84, 0x3c, 0x1b, 0xdd, 0x9a,
-                       0xea, 0x81, 0xb6, 0xda, 0x94, 0xfd, 0x14, 0x1c,
-                       0xc9, 0xf2, 0xdf, 0x53, 0xac, 0x67, 0x44, 0xd2,
-               },
-               hash: [32]byte{
-                       0xe3, 0x5d, 0xa5, 0x47, 0x95, 0xd8, 0x2f, 0x85,
-                       0x49, 0xc0, 0xe5, 0x80, 0xcb, 0xf2, 0xe3, 0x75,
-                       0x7a, 0xb5, 0xef, 0x8f, 0xed, 0x1b, 0xdb, 0xe4,
-                       0x39, 0x41, 0x6c, 0x7e, 0x6f, 0x8d, 0xf2, 0x27,
-               },
-       },
-       {
-               blockHeader: [32]byte{
-                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-               },
-               seed: [32]byte{
-                       0x48, 0xdd, 0xa5, 0xbb, 0xe9, 0x17, 0x1a, 0x66,
-                       0x56, 0x20, 0x6e, 0xc5, 0x6c, 0x59, 0x5c, 0x58,
-                       0x34, 0xb6, 0xcf, 0x38, 0xc5, 0xfe, 0x71, 0xbc,
-                       0xb4, 0x4f, 0xe4, 0x38, 0x33, 0xae, 0xe9, 0xdf,
-               },
-               hash: [32]byte{
-                       0x26, 0xdb, 0x94, 0xef, 0xa4, 0x22, 0xd7, 0x6c,
-                       0x40, 0x2a, 0x54, 0xee, 0xb6, 0x1d, 0xd5, 0xf5,
-                       0x32, 0x82, 0xcd, 0x3c, 0xe1, 0xa0, 0xac, 0x67,
-                       0x7e, 0x17, 0x70, 0x51, 0xed, 0xaa, 0x98, 0xc1,
-               },
-       },
-       {
-               blockHeader: [32]byte{
-                       0x8d, 0x96, 0x9e, 0xef, 0x6e, 0xca, 0xd3, 0xc2,
-                       0x9a, 0x3a, 0x62, 0x92, 0x80, 0xe6, 0x86, 0xcf,
-                       0x0c, 0x3f, 0x5d, 0x5a, 0x86, 0xaf, 0xf3, 0xca,
-                       0x12, 0x02, 0x0c, 0x92, 0x3a, 0xdc, 0x6c, 0x92,
-               },
-               seed: [32]byte{
-                       0x0e, 0x3b, 0x78, 0xd8, 0x38, 0x08, 0x44, 0xb0,
-                       0xf6, 0x97, 0xbb, 0x91, 0x2d, 0xa7, 0xf4, 0xd2,
-                       0x10, 0x38, 0x2c, 0x67, 0x14, 0x19, 0x4f, 0xd1,
-                       0x60, 0x39, 0xef, 0x2a, 0xcd, 0x92, 0x4d, 0xcf,
-               },
-               hash: [32]byte{
-                       0xfe, 0xce, 0xc3, 0x36, 0x69, 0x73, 0x75, 0x92,
-                       0xf7, 0x75, 0x4b, 0x21, 0x5b, 0x20, 0xba, 0xce,
-                       0xfb, 0xa6, 0x4d, 0x2e, 0x4c, 0xa1, 0x65, 0x6f,
-                       0x85, 0xea, 0x1d, 0x3d, 0xbe, 0x16, 0x28, 0x39,
-               },
-       },
-       {
-               blockHeader: [32]byte{
-                       0x2f, 0x01, 0x43, 0x11, 0xe0, 0x92, 0x6f, 0xa8,
-                       0xb3, 0xd6, 0xe6, 0xde, 0x20, 0x51, 0xbf, 0x69,
-                       0x33, 0x21, 0x23, 0xba, 0xad, 0xfe, 0x52, 0x2b,
-                       0x62, 0xf4, 0x64, 0x56, 0x55, 0x85, 0x9e, 0x7a,
-               },
-               seed: [32]byte{
-                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-               },
-               hash: [32]byte{
-                       0xc1, 0xc3, 0xcf, 0x4c, 0x76, 0x96, 0x8e, 0x29,
-                       0x67, 0xf0, 0x05, 0x3c, 0x76, 0xf2, 0x08, 0x4c,
-                       0xc0, 0x1e, 0xd0, 0xfe, 0x97, 0x66, 0x42, 0x8d,
-                       0xb9, 0x9c, 0x45, 0xbe, 0xdf, 0x0c, 0xdb, 0xe2,
-               },
-       },
-       {
-               blockHeader: [32]byte{
-                       0xe0, 0xe3, 0xc4, 0x31, 0x78, 0xa1, 0x26, 0xd0,
-                       0x48, 0x71, 0xb9, 0xc5, 0xd0, 0xc6, 0x42, 0xe5,
-                       0xe0, 0x8b, 0x96, 0x79, 0xa5, 0xf6, 0x6b, 0x82,
-                       0x1b, 0xd9, 0xa0, 0x30, 0xef, 0xf0, 0x2c, 0xe7,
-               },
-               seed: [32]byte{
-                       0x6a, 0xb2, 0x1e, 0x13, 0x01, 0xf5, 0x75, 0x2c,
-                       0x2f, 0xca, 0x1b, 0x55, 0x98, 0xf4, 0x9d, 0x37,
-                       0x69, 0x48, 0x2e, 0x07, 0x3c, 0x1f, 0x26, 0xe3,
-                       0xb8, 0x36, 0x5f, 0x40, 0x55, 0x53, 0xea, 0x31,
-               },
-               hash: [32]byte{
-                       0xab, 0xbc, 0x2c, 0xb3, 0x96, 0x38, 0xf6, 0x84,
-                       0x23, 0x5f, 0xbc, 0x1b, 0x3f, 0xf1, 0x07, 0x94,
-                       0x59, 0x48, 0xc5, 0x81, 0xb6, 0x92, 0x9b, 0xae,
-                       0x2c, 0xd6, 0x81, 0x88, 0x9f, 0xf2, 0xd8, 0x24,
-               },
-       },
-       {
-               blockHeader: [32]byte{
-                       0x88, 0x6a, 0x8e, 0x85, 0xb2, 0x75, 0xe7, 0xd6,
-                       0x5b, 0x56, 0x9b, 0xa5, 0x10, 0x87, 0x5c, 0x0e,
-                       0x63, 0xde, 0xce, 0x1a, 0x94, 0x56, 0x99, 0x14,
-                       0xd7, 0x62, 0x4c, 0x0d, 0xac, 0x80, 0x02, 0xf9,
-               },
-               seed: [32]byte{
-                       0x9e, 0x62, 0x91, 0x97, 0x0c, 0xb4, 0x4d, 0xd9,
-                       0x40, 0x08, 0xc7, 0x9b, 0xca, 0xf9, 0xd8, 0x6f,
-                       0x18, 0xb4, 0xb4, 0x9b, 0xa5, 0xb2, 0xa0, 0x47,
-                       0x81, 0xdb, 0x71, 0x99, 0xed, 0x3b, 0x9e, 0x4e,
-               },
-               hash: [32]byte{
-                       0x6a, 0x1f, 0x27, 0x6c, 0xac, 0x6f, 0x9e, 0x8b,
-                       0x42, 0x6e, 0xab, 0x46, 0xb5, 0x33, 0xf8, 0x2e,
-                       0x82, 0xa1, 0x48, 0xc0, 0x3f, 0xb0, 0xa8, 0x69,
-                       0x34, 0xa8, 0xe5, 0x48, 0x3b, 0x39, 0xda, 0x5e,
-               },
-       },
-       {
-               blockHeader: [32]byte{
-                       0x2f, 0x86, 0xfe, 0x50, 0x8f, 0xaa, 0x7d, 0x68,
-                       0x69, 0x2a, 0x20, 0x89, 0x27, 0x1b, 0x69, 0x01,
-                       0x38, 0x5d, 0x90, 0x58, 0xcd, 0x47, 0xe8, 0x4f,
-                       0xb4, 0x02, 0xb5, 0x08, 0x5f, 0x9a, 0x83, 0x60,
-               },
-               seed: [32]byte{
-                       0x9e, 0x62, 0x91, 0x97, 0x0c, 0xb4, 0x4d, 0xd9,
-                       0x40, 0x08, 0xc7, 0x9b, 0xca, 0xf9, 0xd8, 0x6f,
-                       0x18, 0xb4, 0xb4, 0x9b, 0xa5, 0xb2, 0xa0, 0x47,
-                       0x81, 0xdb, 0x71, 0x99, 0xed, 0x3b, 0x9e, 0x4e,
-               },
-               hash: [32]byte{
-                       0x81, 0x75, 0x6f, 0xda, 0xb3, 0x9a, 0x17, 0x16,
-                       0x3b, 0x0c, 0xe5, 0x82, 0xee, 0x4e, 0xe2, 0x56,
-                       0xfb, 0x4d, 0x1e, 0x15, 0x6c, 0x69, 0x2b, 0x99,
-                       0x7d, 0x60, 0x8a, 0x42, 0xec, 0xb3, 0x8d, 0x47,
-               },
-       },
-       {
-               blockHeader: [32]byte{
-                       0xfe, 0x97, 0x91, 0xd7, 0x1b, 0x67, 0xee, 0x62,
-                       0x51, 0x5e, 0x08, 0x72, 0x3c, 0x06, 0x1b, 0x5c,
-                       0xcb, 0x95, 0x2a, 0x80, 0xd8, 0x04, 0x41, 0x7c,
-                       0x8a, 0xee, 0xdf, 0x7f, 0x63, 0x3c, 0x52, 0x4a,
-               },
-               seed: [32]byte{
-                       0x9e, 0x62, 0x91, 0x97, 0x0c, 0xb4, 0x4d, 0xd9,
-                       0x40, 0x08, 0xc7, 0x9b, 0xca, 0xf9, 0xd8, 0x6f,
-                       0x18, 0xb4, 0xb4, 0x9b, 0xa5, 0xb2, 0xa0, 0x47,
-                       0x81, 0xdb, 0x71, 0x99, 0xed, 0x3b, 0x9e, 0x4e,
-               },
-               hash: [32]byte{
-                       0xfa, 0xd5, 0x19, 0x5a, 0x0c, 0x8e, 0x3b, 0x59,
-                       0x0b, 0x86, 0xa3, 0xc0, 0xa9, 0x5e, 0x75, 0x29,
-                       0x56, 0x58, 0x88, 0x50, 0x8a, 0xec, 0xca, 0x96,
-                       0xe9, 0xae, 0xda, 0x63, 0x30, 0x02, 0xf4, 0x09,
-               },
-       },
-       {
-               blockHeader: [32]byte{
-                       0xef, 0x30, 0x67, 0x87, 0xa0, 0x87, 0xc1, 0x18,
-                       0xfc, 0xb6, 0xd3, 0x51, 0xf0, 0x19, 0x9d, 0xca,
-                       0x98, 0x05, 0x58, 0x98, 0xe2, 0x08, 0xfb, 0xf1,
-                       0xa9, 0x34, 0xc9, 0xd7, 0x0b, 0x58, 0xee, 0x4b,
-               },
-               seed: [32]byte{
-                       0x9e, 0x62, 0x91, 0x97, 0x0c, 0xb4, 0x4d, 0xd9,
-                       0x40, 0x08, 0xc7, 0x9b, 0xca, 0xf9, 0xd8, 0x6f,
-                       0x18, 0xb4, 0xb4, 0x9b, 0xa5, 0xb2, 0xa0, 0x47,
-                       0x81, 0xdb, 0x71, 0x99, 0xed, 0x3b, 0x9e, 0x4e,
-               },
-               hash: [32]byte{
-                       0xff, 0xb0, 0xca, 0xda, 0xb3, 0x14, 0x67, 0x3d,
-                       0x22, 0x8f, 0x8f, 0xe7, 0x4d, 0x84, 0xa4, 0x65,
-                       0x2e, 0x01, 0xc8, 0x2c, 0x26, 0x41, 0xd3, 0xe2,
-                       0xfa, 0x91, 0x48, 0xaf, 0xea, 0xb0, 0xd6, 0x06,
-               },
-       },
-       {
-               blockHeader: [32]byte{
-                       0x10, 0xd2, 0xf1, 0xb2, 0xf4, 0x8e, 0x38, 0x9c,
-                       0x97, 0xdd, 0xe1, 0xb1, 0xa6, 0x3b, 0xcd, 0x74,
-                       0x3c, 0x23, 0x40, 0x93, 0x5d, 0x71, 0xe2, 0xc3,
-                       0x58, 0xba, 0x10, 0xe5, 0x84, 0x69, 0x61, 0x43,
-               },
-               seed: [32]byte{
-                       0x9e, 0x62, 0x91, 0x97, 0x0c, 0xb4, 0x4d, 0xd9,
-                       0x40, 0x08, 0xc7, 0x9b, 0xca, 0xf9, 0xd8, 0x6f,
-                       0x18, 0xb4, 0xb4, 0x9b, 0xa5, 0xb2, 0xa0, 0x47,
-                       0x81, 0xdb, 0x71, 0x99, 0xed, 0x3b, 0x9e, 0x4e,
-               },
-               hash: [32]byte{
-                       0x6a, 0xdd, 0x5a, 0xf1, 0x82, 0x53, 0xd0, 0x72,
-                       0x2c, 0x54, 0x2f, 0x7e, 0x71, 0xf4, 0x7d, 0x9c,
-                       0xb1, 0xa4, 0xd7, 0xfb, 0x7c, 0x7d, 0x26, 0xae,
-                       0xfe, 0x3c, 0x83, 0x1c, 0xb8, 0x54, 0xf1, 0x7e,
-               },
-       },
-}
-
-// Tests that tensority hash result is correct.
-func TestLegacyAlgorithm(t *testing.T) {
-       startT := time.Now()
-       for i, tt := range tests {
-               sT := time.Now()
-               bhhash := bc.NewHash(tt.blockHeader)
-               sdhash := bc.NewHash(tt.seed)
-               result := LegacyAlgorithm(&bhhash, &sdhash).Bytes()
-               var resArr [32]byte
-               copy(resArr[:], result)
-               eT := time.Now()
-
-               if !reflect.DeepEqual(resArr, tt.hash) {
-                       t.Errorf("Test case %d:\n", i+1)
-                       t.Errorf("Gets\t%x\n", resArr)
-                       t.Errorf("Expects\t%x\n", tt.hash)
-                       t.Errorf("FAIL\n\n")
-               } else {
-                       t.Logf("Test case %d:\n", i+1)
-                       t.Log("Total verification time:", eT.Sub(sT))
-                       t.Log("PASS\n")
-               }
-       }
-       endT := time.Now()
-       t.Log("Avg time:", time.Duration(int(endT.Sub(startT))/len(tests)))
-}
-
-func BenchmarkLegacyAlgorithm(b *testing.B) {
-       bhhash := bc.NewHash(tests[0].blockHeader)
-       sdhash := bc.NewHash(tests[0].seed)
-       b.ResetTimer()
-       for i := 0; i < b.N; i++ {
-               LegacyAlgorithm(&bhhash, &sdhash)
-       }
-}
-
-func BenchmarkLegacyAlgorithmParallel(b *testing.B) {
-       bhhash := bc.NewHash(tests[0].blockHeader)
-       sdhash := bc.NewHash(tests[0].seed)
-       b.SetParallelism(runtime.NumCPU())
-       b.RunParallel(func(pb *testing.PB) {
-               for pb.Next() {
-                       LegacyAlgorithm(&bhhash, &sdhash)
-               }
-       })
-}
diff --git a/mining/tensority/go_algorithm/matrix.go b/mining/tensority/go_algorithm/matrix.go
deleted file mode 100644 (file)
index 10c22c4..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-package go_algorithm
-
-import (
-       "reflect"
-       "runtime"
-       "sync"
-       "unsafe"
-
-       "gonum.org/v1/gonum/mat"
-
-       "github.com/bytom/bytom/crypto/sha3pool"
-       "github.com/bytom/bytom/protocol/bc"
-)
-
-const (
-       matSize = 1 << 8 // Size of matrix
-       matNum  = 1 << 8 // Number of matrix
-)
-
-func mulMatrix(headerhash []byte, cache []uint32) []uint8 {
-       ui32data := make([]uint32, matNum*matSize*matSize/4)
-       for i := 0; i < 128; i++ {
-               start := i * 1024 * 32
-               for j := 0; j < 512; j++ {
-                       copy(ui32data[start+j*32:start+j*32+32], cache[start+j*64:start+j*64+32])
-                       copy(ui32data[start+512*32+j*32:start+512*32+j*32+32], cache[start+j*64+32:start+j*64+64])
-               }
-       }
-
-       // Convert our destination slice to a int8 buffer
-       header := *(*reflect.SliceHeader)(unsafe.Pointer(&ui32data))
-       header.Len *= 4
-       header.Cap *= 4
-       i8data := *(*[]int8)(unsafe.Pointer(&header))
-
-       f64data := make([]float64, matNum*matSize*matSize)
-       for i := 0; i < matNum*matSize*matSize; i++ {
-               f64data[i] = float64(i8data[i])
-       }
-
-       dataIdentity := make([]float64, matSize*matSize)
-       for i := 0; i < 256; i++ {
-               dataIdentity[i*257] = float64(1)
-       }
-
-       var tmp [matSize][matSize]float64
-       var maArr [4][matSize][matSize]float64
-
-       runtime.GOMAXPROCS(4)
-       var wg sync.WaitGroup
-       wg.Add(4)
-
-       for k := 0; k < 4; k++ {
-               go func(i int) {
-                       defer wg.Done()
-
-                       ma := mat.NewDense(matSize, matSize, dataIdentity)
-                       mc := mat.NewDense(matSize, matSize, make([]float64, matSize*matSize))
-
-                       var sequence [32]byte
-                       sha3pool.Sum256(sequence[:], headerhash[i*8:(i+1)*8])
-
-                       for j := 0; j < 2; j++ {
-                               for k := 0; k < 32; k++ {
-                                       index := int(sequence[k])
-                                       mb := mat.NewDense(matSize, matSize, f64data[index*matSize*matSize:(index+1)*matSize*matSize])
-                                       mc.Mul(ma, mb.T())
-
-                                       for row := 0; row < matSize; row++ {
-                                               for col := 0; col < matSize; col++ {
-                                                       i32v := int32(mc.At(row, col))
-                                                       i8v := int8((i32v & 0xff) +
-                                                               ((i32v >> 8) & 0xff))
-                                                       mc.Set(row, col, float64(i8v))
-                                               }
-                                       }
-                                       ma = mc
-                               }
-                       }
-
-                       for row := 0; row < matSize; row++ {
-                               for col := 0; col < matSize; col++ {
-                                       maArr[i][row][col] = ma.At(row, col)
-                               }
-                       }
-               }(k)
-       }
-       wg.Wait()
-
-       for i := 0; i < 4; i++ {
-               for row := 0; row < matSize; row++ {
-                       for col := 0; col < matSize; col++ {
-                               i32vtmp := int32(tmp[row][col])
-                               i32vma := int32(maArr[i][row][col])
-                               i8v := int8(int32(i32vtmp+i32vma) & 0xff)
-                               tmp[row][col] = float64(i8v)
-                       }
-               }
-       }
-
-       result := make([]uint8, 0)
-       for i := 0; i < matSize; i++ {
-               for j := 0; j < matSize; j++ {
-                       result = append(result, uint8(tmp[i][j]))
-               }
-       }
-       return result
-}
-
-// hashMatrix hash result of mulMatrix
-func hashMatrix(result []uint8) *bc.Hash {
-       var mat8 [matSize][matSize]uint8
-       for i := 0; i < matSize; i++ {
-               for j := 0; j < matSize; j++ {
-                       mat8[i][j] = result[i*matSize+j]
-               }
-       }
-
-       var mat32 [matSize][matSize / 4]uint32
-
-       for i := 0; i < matSize; i++ {
-               for j := 0; j < matSize/4; j++ {
-                       mat32[i][j] = ((uint32(mat8[i][j+192])) << 24) |
-                               ((uint32(mat8[i][j+128])) << 16) |
-                               ((uint32(mat8[i][j+64])) << 8) |
-                               ((uint32(mat8[i][j])) << 0)
-               }
-       }
-
-       for k := matSize; k > 1; k = k / 2 {
-               for j := 0; j < k/2; j++ {
-                       for i := 0; i < matSize/4; i++ {
-                               mat32[j][i] = fnv(mat32[j][i], mat32[j+k/2][i])
-                       }
-               }
-       }
-
-       ui32data := make([]uint32, 0)
-       for i := 0; i < matSize/4; i++ {
-               ui32data = append(ui32data, mat32[0][i])
-       }
-
-       // Convert our destination slice to a byte buffer
-       header := *(*reflect.SliceHeader)(unsafe.Pointer(&ui32data))
-       header.Len *= 4
-       header.Cap *= 4
-       dataBytes := *(*[]byte)(unsafe.Pointer(&header))
-
-       var h [32]byte
-       sha3pool.Sum256(h[:], dataBytes)
-       bcHash := bc.NewHash(h)
-       return &bcHash
-}
-
-func fnv(a, b uint32) uint32 {
-       return a*0x01000193 ^ b
-}
diff --git a/mining/tensority/go_algorithm/seed.go b/mining/tensority/go_algorithm/seed.go
deleted file mode 100644 (file)
index 9e5b63e..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-package go_algorithm
-
-import (
-       "encoding/binary"
-       "unsafe"
-
-       "github.com/bytom/bytom/crypto/scrypt"
-       "github.com/bytom/bytom/crypto/sha3pool"
-       "github.com/bytom/bytom/protocol/bc"
-)
-
-func calcSeed(blockHashs []*bc.Hash) []byte {
-       data := []byte{}
-       for _, blockHash := range blockHashs {
-               data = append(data, blockHash.Bytes()...)
-       }
-       var s [32]byte
-       sha3pool.Sum256(s[:], data)
-
-       return s[:]
-}
-
-func extendBytes(seed []byte, round int) []byte {
-       extSeed := make([]byte, len(seed)*(round+1))
-       copy(extSeed, seed)
-
-       for i := 0; i < round; i++ {
-               var h [32]byte
-               sha3pool.Sum256(h[:], extSeed[i*32:(i+1)*32])
-               copy(extSeed[(i+1)*32:(i+2)*32], h[:])
-       }
-
-       return extSeed
-}
-
-func calcSeedCache(seed []byte) (cache []uint32) {
-       extSeed := extendBytes(seed, 3)
-       v := make([]uint32, 32*1024)
-
-       // Swap the byte order on big endian systems
-       if !isLittleEndian() {
-               swap(extSeed)
-       }
-
-       for i := 0; i < 128; i++ {
-               scrypt.Smix(extSeed, v)
-               cache = append(cache, v...)
-       }
-
-       return cache
-}
-
-// isLittleEndian returns whether the local system is running in little or big
-// endian byte order.
-func isLittleEndian() bool {
-       n := uint32(0x01020304)
-       return *(*byte)(unsafe.Pointer(&n)) == 0x04
-}
-
-// swap changes the byte order of the buffer assuming a uint32 representation.
-func swap(buffer []byte) {
-       for i := 0; i < len(buffer); i += 4 {
-               binary.BigEndian.PutUint32(buffer[i:], binary.LittleEndian.Uint32(buffer[i:]))
-       }
-}
diff --git a/netsync/block_fetcher_test.go b/netsync/block_fetcher_test.go
deleted file mode 100644 (file)
index 679c76c..0000000
+++ /dev/null
@@ -1,153 +0,0 @@
-package netsync
-
-import (
-       "testing"
-       "time"
-
-       "github.com/bytom/bytom/protocol/bc"
-       "github.com/bytom/bytom/protocol/bc/types"
-)
-
-type chain struct {
-       blocks []uint64
-}
-
-func newChain() *chain {
-       blocks := make([]uint64, 1, 1)
-       blocks[0] = 99
-       return &chain{
-               blocks: blocks,
-       }
-}
-
-func (c *chain) BestBlockHeader() *types.BlockHeader {
-       return nil
-}
-func (c *chain) CalcNextSeed(*bc.Hash) (*bc.Hash, error) {
-       return nil, nil
-}
-func (c *chain) GetHeaderByHeight(uint64) (*types.BlockHeader, error) {
-       return nil, nil
-}
-func (c *chain) GetTransactionStatus(*bc.Hash) (*bc.TransactionStatus, error) {
-       return nil, nil
-}
-func (c *chain) InMainChain(bc.Hash) bool {
-       return true
-}
-func (c *chain) ValidateTx(*types.Tx) (bool, error) {
-       return true, nil
-}
-func (c *chain) GetBlockByHeight(uint64) (*types.Block, error) {
-       return nil, nil
-}
-
-func (c *chain) BestBlockHeight() uint64 {
-       return c.blocks[len(c.blocks)-1]
-}
-
-func (c *chain) GetBlockByHash(*bc.Hash) (*types.Block, error) {
-       return nil, nil
-}
-
-func (c *chain) GetHeaderByHash(*bc.Hash) (*types.BlockHeader, error) {
-       return nil, nil
-}
-
-func (c *chain) ProcessBlock(block *types.Block) (bool, error) {
-       c.blocks = append(c.blocks, block.Height)
-       return false, nil
-}
-
-func (c *chain) ProcessBlockSignature(signature, pubkey []byte, blockHeight uint64, blockHash *bc.Hash) error {
-       return nil
-}
-
-func TestBlockFetcher(t *testing.T) {
-       peers := newPeerSet(NewPeerSet())
-       testCase := []struct {
-               blockMsg *blockMsg
-               height   uint64
-       }{
-               {
-                       blockMsg: &blockMsg{
-                               block: &types.Block{
-                                       BlockHeader: types.BlockHeader{
-                                               Height: 100,
-                                       },
-                               },
-                       },
-                       height: 100,
-               },
-               {
-                       blockMsg: &blockMsg{
-                               block: &types.Block{
-                                       BlockHeader: types.BlockHeader{
-                                               Height: 101,
-                                       },
-                               },
-                       },
-                       height: 101,
-               },
-               {
-                       blockMsg: &blockMsg{
-                               block: &types.Block{
-                                       BlockHeader: types.BlockHeader{
-                                               Height: 105,
-                                       },
-                               },
-                       },
-                       height: 101,
-               },
-               {
-                       blockMsg: &blockMsg{
-                               block: &types.Block{
-                                       BlockHeader: types.BlockHeader{
-                                               Height: 200,
-                                       },
-                               },
-                       },
-                       height: 101,
-               },
-               {
-                       blockMsg: &blockMsg{
-                               block: &types.Block{
-                                       BlockHeader: types.BlockHeader{
-                                               Height: 104,
-                                       },
-                               },
-                       },
-                       height: 101,
-               },
-               {
-                       blockMsg: &blockMsg{
-                               block: &types.Block{
-                                       BlockHeader: types.BlockHeader{
-                                               Height: 103,
-                                       },
-                               },
-                       },
-                       height: 101,
-               },
-               {
-                       blockMsg: &blockMsg{
-                               block: &types.Block{
-                                       BlockHeader: types.BlockHeader{
-                                               Height: 102,
-                                       },
-                               },
-                       },
-                       height: 105,
-               },
-       }
-       fetcher := newBlockFetcher(newChain(), peers)
-
-       for i, c := range testCase {
-               fetcher.processNewBlock(c.blockMsg)
-               time.Sleep(10 * time.Millisecond)
-               chainHeight := fetcher.chain.BestBlockHeight()
-               if chainHeight != c.height {
-                       t.Fatalf("test block fetcher error. index %d expected chain height %d but got %d", i, chainHeight, c.height)
-               }
-       }
-}
diff --git a/netsync/block_keeper.go b/netsync/block_keeper.go
deleted file mode 100644 (file)
index 05b9aec..0000000
+++ /dev/null
@@ -1,425 +0,0 @@
-package netsync
-
-import (
-       "container/list"
-       "time"
-
-       log "github.com/sirupsen/logrus"
-
-       "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/errors"
-       "github.com/bytom/bytom/mining/tensority"
-       "github.com/bytom/bytom/p2p/security"
-       "github.com/bytom/bytom/protocol/bc"
-       "github.com/bytom/bytom/protocol/bc/types"
-)
-
-const (
-       syncCycle            = 5 * time.Second
-       blockProcessChSize   = 1024
-       blocksProcessChSize  = 128
-       headersProcessChSize = 1024
-)
-
-var (
-       maxBlockPerMsg        = uint64(128)
-       maxBlockHeadersPerMsg = uint64(2048)
-       syncTimeout           = 30 * time.Second
-
-       errAppendHeaders  = errors.New("fail to append list due to order dismatch")
-       errRequestTimeout = errors.New("request timeout")
-       errPeerDropped    = errors.New("Peer dropped")
-       errPeerMisbehave  = errors.New("peer is misbehave")
-       ErrPeerMisbehave  = errors.New("peer is misbehave")
-)
-
-type blockMsg struct {
-       block  *types.Block
-       peerID string
-}
-
-type blocksMsg struct {
-       blocks []*types.Block
-       peerID string
-}
-
-type headersMsg struct {
-       headers []*types.BlockHeader
-       peerID  string
-}
-
-type blockKeeper struct {
-       chain Chain
-       peers *peerSet
-
-       syncPeer         *peer
-       blockProcessCh   chan *blockMsg
-       blocksProcessCh  chan *blocksMsg
-       headersProcessCh chan *headersMsg
-
-       headerList *list.List
-}
-
-func newBlockKeeper(chain Chain, peers *peerSet) *blockKeeper {
-       bk := &blockKeeper{
-               chain:            chain,
-               peers:            peers,
-               blockProcessCh:   make(chan *blockMsg, blockProcessChSize),
-               blocksProcessCh:  make(chan *blocksMsg, blocksProcessChSize),
-               headersProcessCh: make(chan *headersMsg, headersProcessChSize),
-               headerList:       list.New(),
-       }
-       bk.resetHeaderState()
-       go bk.syncWorker()
-       return bk
-}
-
-func (bk *blockKeeper) appendHeaderList(headers []*types.BlockHeader) error {
-       for _, header := range headers {
-               prevHeader := bk.headerList.Back().Value.(*types.BlockHeader)
-               if prevHeader.Hash() != header.PreviousBlockHash {
-                       return errAppendHeaders
-               }
-               bk.headerList.PushBack(header)
-       }
-       return nil
-}
-
-func (bk *blockKeeper) blockLocator() []*bc.Hash {
-       header := bk.chain.BestBlockHeader()
-       locator := []*bc.Hash{}
-
-       step := uint64(1)
-       for header != nil {
-               headerHash := header.Hash()
-               locator = append(locator, &headerHash)
-               if header.Height == 0 {
-                       break
-               }
-
-               var err error
-               if header.Height < step {
-                       header, err = bk.chain.GetHeaderByHeight(0)
-               } else {
-                       header, err = bk.chain.GetHeaderByHeight(header.Height - step)
-               }
-               if err != nil {
-                       log.WithFields(log.Fields{"module": logModule, "err": err}).Error("blockKeeper fail on get blockLocator")
-                       break
-               }
-
-               if len(locator) >= 9 {
-                       step *= 2
-               }
-       }
-       return locator
-}
-
-func (bk *blockKeeper) fastBlockSync(checkPoint *consensus.Checkpoint) error {
-       bk.resetHeaderState()
-       lastHeader := bk.headerList.Back().Value.(*types.BlockHeader)
-       for ; lastHeader.Hash() != checkPoint.Hash; lastHeader = bk.headerList.Back().Value.(*types.BlockHeader) {
-               if lastHeader.Height >= checkPoint.Height {
-                       return errors.Wrap(errPeerMisbehave, "peer is not in the checkpoint branch")
-               }
-
-               lastHash := lastHeader.Hash()
-               headers, err := bk.requireHeaders([]*bc.Hash{&lastHash}, &checkPoint.Hash)
-               if err != nil {
-                       return err
-               }
-
-               if len(headers) == 0 {
-                       return errors.Wrap(errPeerMisbehave, "requireHeaders return empty list")
-               }
-
-               if err := bk.appendHeaderList(headers); err != nil {
-                       return err
-               }
-       }
-
-       fastHeader := bk.headerList.Front()
-       for bk.chain.BestBlockHeight() < checkPoint.Height {
-               locator := bk.blockLocator()
-               blocks, err := bk.requireBlocks(locator, &checkPoint.Hash)
-               if err != nil {
-                       return err
-               }
-
-               if len(blocks) == 0 {
-                       return errors.Wrap(errPeerMisbehave, "requireBlocks return empty list")
-               }
-
-               for _, block := range blocks {
-                       if fastHeader = fastHeader.Next(); fastHeader == nil {
-                               return errors.New("get block than is higher than checkpoint")
-                       }
-
-                       blockHash := block.Hash()
-                       if blockHash != fastHeader.Value.(*types.BlockHeader).Hash() {
-                               return errPeerMisbehave
-                       }
-
-                       seed, err := bk.chain.CalcNextSeed(&block.PreviousBlockHash)
-                       if err != nil {
-                               return errors.Wrap(err, "fail on fastBlockSync calculate seed")
-                       }
-
-                       tensority.AIHash.AddCache(&blockHash, seed, &bc.Hash{})
-                       _, err = bk.chain.ProcessBlock(block)
-                       tensority.AIHash.RemoveCache(&blockHash, seed)
-                       if err != nil {
-                               return errors.Wrap(err, "fail on fastBlockSync process block")
-                       }
-               }
-       }
-       return nil
-}
-
-func (bk *blockKeeper) locateBlocks(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.Block, error) {
-       headers, err := bk.locateHeaders(locator, stopHash)
-       if err != nil {
-               return nil, err
-       }
-
-       blocks := []*types.Block{}
-       for i, header := range headers {
-               if uint64(i) >= maxBlockPerMsg {
-                       break
-               }
-
-               headerHash := header.Hash()
-               block, err := bk.chain.GetBlockByHash(&headerHash)
-               if err != nil {
-                       return nil, err
-               }
-
-               blocks = append(blocks, block)
-       }
-       return blocks, nil
-}
-
-func (bk *blockKeeper) locateHeaders(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.BlockHeader, error) {
-       stopHeader, err := bk.chain.GetHeaderByHash(stopHash)
-       if err != nil {
-               return nil, err
-       }
-
-       startHeader, err := bk.chain.GetHeaderByHeight(0)
-       if err != nil {
-               return nil, err
-       }
-
-       for _, hash := range locator {
-               header, err := bk.chain.GetHeaderByHash(hash)
-               if err == nil && bk.chain.InMainChain(header.Hash()) {
-                       startHeader = header
-                       break
-               }
-       }
-
-       totalHeaders := stopHeader.Height - startHeader.Height
-       if totalHeaders > maxBlockHeadersPerMsg {
-               totalHeaders = maxBlockHeadersPerMsg
-       }
-
-       headers := []*types.BlockHeader{}
-       for i := uint64(1); i <= totalHeaders; i++ {
-               header, err := bk.chain.GetHeaderByHeight(startHeader.Height + i)
-               if err != nil {
-                       return nil, err
-               }
-
-               headers = append(headers, header)
-       }
-       return headers, nil
-}
-
-func (bk *blockKeeper) nextCheckpoint() *consensus.Checkpoint {
-       height := bk.chain.BestBlockHeader().Height
-       checkpoints := consensus.ActiveNetParams.Checkpoints
-       if len(checkpoints) == 0 || height >= checkpoints[len(checkpoints)-1].Height {
-               return nil
-       }
-
-       nextCheckpoint := &checkpoints[len(checkpoints)-1]
-       for i := len(checkpoints) - 2; i >= 0; i-- {
-               if height >= checkpoints[i].Height {
-                       break
-               }
-               nextCheckpoint = &checkpoints[i]
-       }
-       return nextCheckpoint
-}
-
-func (bk *blockKeeper) processBlock(peerID string, block *types.Block) {
-       bk.blockProcessCh <- &blockMsg{block: block, peerID: peerID}
-}
-
-func (bk *blockKeeper) processBlocks(peerID string, blocks []*types.Block) {
-       bk.blocksProcessCh <- &blocksMsg{blocks: blocks, peerID: peerID}
-}
-
-func (bk *blockKeeper) processHeaders(peerID string, headers []*types.BlockHeader) {
-       bk.headersProcessCh <- &headersMsg{headers: headers, peerID: peerID}
-}
-
-func (bk *blockKeeper) regularBlockSync(wantHeight uint64) error {
-       i := bk.chain.BestBlockHeight() + 1
-       for i <= wantHeight {
-               block, err := bk.requireBlock(i)
-               if err != nil {
-                       return err
-               }
-
-               isOrphan, err := bk.chain.ProcessBlock(block)
-               if err != nil {
-                       return err
-               }
-
-               if isOrphan {
-                       i--
-                       continue
-               }
-               i = bk.chain.BestBlockHeight() + 1
-       }
-       return nil
-}
-
-func (bk *blockKeeper) requireBlock(height uint64) (*types.Block, error) {
-       if ok := bk.syncPeer.getBlockByHeight(height); !ok {
-               return nil, errPeerDropped
-       }
-
-       timeout := time.NewTimer(syncTimeout)
-       defer timeout.Stop()
-
-       for {
-               select {
-               case msg := <-bk.blockProcessCh:
-                       if msg.peerID != bk.syncPeer.ID() {
-                               continue
-                       }
-                       if msg.block.Height != height {
-                               continue
-                       }
-                       return msg.block, nil
-               case <-timeout.C:
-                       return nil, errors.Wrap(errRequestTimeout, "requireBlock")
-               }
-       }
-}
-
-func (bk *blockKeeper) requireBlocks(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.Block, error) {
-       if ok := bk.syncPeer.getBlocks(locator, stopHash); !ok {
-               return nil, errPeerDropped
-       }
-
-       timeout := time.NewTimer(syncTimeout)
-       defer timeout.Stop()
-
-       for {
-               select {
-               case msg := <-bk.blocksProcessCh:
-                       if msg.peerID != bk.syncPeer.ID() {
-                               continue
-                       }
-                       return msg.blocks, nil
-               case <-timeout.C:
-                       return nil, errors.Wrap(errRequestTimeout, "requireBlocks")
-               }
-       }
-}
-
-func (bk *blockKeeper) requireHeaders(locator []*bc.Hash, stopHash *bc.Hash) ([]*types.BlockHeader, error) {
-       if ok := bk.syncPeer.getHeaders(locator, stopHash); !ok {
-               return nil, errPeerDropped
-       }
-
-       timeout := time.NewTimer(syncTimeout)
-       defer timeout.Stop()
-
-       for {
-               select {
-               case msg := <-bk.headersProcessCh:
-                       if msg.peerID != bk.syncPeer.ID() {
-                               continue
-                       }
-                       return msg.headers, nil
-               case <-timeout.C:
-                       return nil, errors.Wrap(errRequestTimeout, "requireHeaders")
-               }
-       }
-}
-
-// resetHeaderState sets the headers-first mode state to values appropriate for
-// syncing from a new peer.
-func (bk *blockKeeper) resetHeaderState() {
-       header := bk.chain.BestBlockHeader()
-       bk.headerList.Init()
-       if bk.nextCheckpoint() != nil {
-               bk.headerList.PushBack(header)
-       }
-}
-
-func (bk *blockKeeper) startSync() bool {
-       checkPoint := bk.nextCheckpoint()
-       peer := bk.peers.bestPeer(consensus.SFFastSync | consensus.SFFullNode)
-       if peer != nil && checkPoint != nil && peer.Height() >= checkPoint.Height {
-               bk.syncPeer = peer
-               if err := bk.fastBlockSync(checkPoint); err != nil {
-                       log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on fastBlockSync")
-                       bk.peers.ProcessIllegal(peer.ID(), security.LevelMsgIllegal, err.Error())
-                       return false
-               }
-               return true
-       }
-
-       blockHeight := bk.chain.BestBlockHeight()
-       peer = bk.peers.bestPeer(consensus.SFFullNode)
-       if peer != nil && peer.Height() > blockHeight {
-               bk.syncPeer = peer
-               targetHeight := blockHeight + maxBlockPerMsg
-               if targetHeight > peer.Height() {
-                       targetHeight = peer.Height()
-               }
-
-               if err := bk.regularBlockSync(targetHeight); err != nil {
-                       log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on regularBlockSync")
-                       bk.peers.ProcessIllegal(peer.ID(), security.LevelMsgIllegal, err.Error())
-                       return false
-               }
-               return true
-       }
-       return false
-}
-
-func (bk *blockKeeper) syncWorker() {
-       genesisBlock, err := bk.chain.GetBlockByHeight(0)
-       if err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleStatusRequestMsg get genesis")
-               return
-       }
-       syncTicker := time.NewTicker(syncCycle)
-       defer syncTicker.Stop()
-
-       for {
-               <-syncTicker.C
-               if update := bk.startSync(); !update {
-                       continue
-               }
-
-               block, err := bk.chain.GetBlockByHeight(bk.chain.BestBlockHeight())
-               if err != nil {
-                       log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker get best block")
-               }
-
-               if err := bk.peers.broadcastMinedBlock(block); err != nil {
-                       log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker broadcast new block")
-               }
-
-               if err = bk.peers.broadcastNewStatus(block, genesisBlock); err != nil {
-                       log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker broadcast new status")
-               }
-       }
-}
diff --git a/netsync/block_keeper_test.go b/netsync/block_keeper_test.go
deleted file mode 100644 (file)
index 9eff7f5..0000000
+++ /dev/null
@@ -1,640 +0,0 @@
-package netsync
-
-import (
-       "container/list"
-       "encoding/hex"
-       "encoding/json"
-       "testing"
-       "time"
-
-       "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/errors"
-       "github.com/bytom/bytom/protocol/bc"
-       "github.com/bytom/bytom/protocol/bc/types"
-       "github.com/bytom/bytom/test/mock"
-       "github.com/bytom/bytom/testutil"
-)
-
-func TestAppendHeaderList(t *testing.T) {
-       blocks := mockBlocks(nil, 7)
-       cases := []struct {
-               originalHeaders []*types.BlockHeader
-               inputHeaders    []*types.BlockHeader
-               wantHeaders     []*types.BlockHeader
-               err             error
-       }{
-               {
-                       originalHeaders: []*types.BlockHeader{&blocks[0].BlockHeader},
-                       inputHeaders:    []*types.BlockHeader{&blocks[1].BlockHeader, &blocks[2].BlockHeader},
-                       wantHeaders:     []*types.BlockHeader{&blocks[0].BlockHeader, &blocks[1].BlockHeader, &blocks[2].BlockHeader},
-                       err:             nil,
-               },
-               {
-                       originalHeaders: []*types.BlockHeader{&blocks[5].BlockHeader},
-                       inputHeaders:    []*types.BlockHeader{&blocks[6].BlockHeader},
-                       wantHeaders:     []*types.BlockHeader{&blocks[5].BlockHeader, &blocks[6].BlockHeader},
-                       err:             nil,
-               },
-               {
-                       originalHeaders: []*types.BlockHeader{&blocks[5].BlockHeader},
-                       inputHeaders:    []*types.BlockHeader{&blocks[7].BlockHeader},
-                       wantHeaders:     []*types.BlockHeader{&blocks[5].BlockHeader},
-                       err:             errAppendHeaders,
-               },
-               {
-                       originalHeaders: []*types.BlockHeader{&blocks[5].BlockHeader},
-                       inputHeaders:    []*types.BlockHeader{&blocks[7].BlockHeader, &blocks[6].BlockHeader},
-                       wantHeaders:     []*types.BlockHeader{&blocks[5].BlockHeader},
-                       err:             errAppendHeaders,
-               },
-               {
-                       originalHeaders: []*types.BlockHeader{&blocks[2].BlockHeader},
-                       inputHeaders:    []*types.BlockHeader{&blocks[3].BlockHeader, &blocks[4].BlockHeader, &blocks[6].BlockHeader},
-                       wantHeaders:     []*types.BlockHeader{&blocks[2].BlockHeader, &blocks[3].BlockHeader, &blocks[4].BlockHeader},
-                       err:             errAppendHeaders,
-               },
-       }
-
-       for i, c := range cases {
-               bk := &blockKeeper{headerList: list.New()}
-               for _, header := range c.originalHeaders {
-                       bk.headerList.PushBack(header)
-               }
-
-               if err := bk.appendHeaderList(c.inputHeaders); err != c.err {
-                       t.Errorf("case %d: got error %v want error %v", i, err, c.err)
-               }
-
-               gotHeaders := []*types.BlockHeader{}
-               for e := bk.headerList.Front(); e != nil; e = e.Next() {
-                       gotHeaders = append(gotHeaders, e.Value.(*types.BlockHeader))
-               }
-
-               if !testutil.DeepEqual(gotHeaders, c.wantHeaders) {
-                       t.Errorf("case %d: got %v want %v", i, gotHeaders, c.wantHeaders)
-               }
-       }
-}
-
-func TestBlockLocator(t *testing.T) {
-       blocks := mockBlocks(nil, 500)
-       cases := []struct {
-               bestHeight uint64
-               wantHeight []uint64
-       }{
-               {
-                       bestHeight: 0,
-                       wantHeight: []uint64{0},
-               },
-               {
-                       bestHeight: 1,
-                       wantHeight: []uint64{1, 0},
-               },
-               {
-                       bestHeight: 7,
-                       wantHeight: []uint64{7, 6, 5, 4, 3, 2, 1, 0},
-               },
-               {
-                       bestHeight: 10,
-                       wantHeight: []uint64{10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
-               },
-               {
-                       bestHeight: 100,
-                       wantHeight: []uint64{100, 99, 98, 97, 96, 95, 94, 93, 92, 91, 89, 85, 77, 61, 29, 0},
-               },
-               {
-                       bestHeight: 500,
-                       wantHeight: []uint64{500, 499, 498, 497, 496, 495, 494, 493, 492, 491, 489, 485, 477, 461, 429, 365, 237, 0},
-               },
-       }
-
-       for i, c := range cases {
-               mockChain := mock.NewChain()
-               bk := &blockKeeper{chain: mockChain}
-               mockChain.SetBestBlockHeader(&blocks[c.bestHeight].BlockHeader)
-               for i := uint64(0); i <= c.bestHeight; i++ {
-                       mockChain.SetBlockByHeight(i, blocks[i])
-               }
-
-               want := []*bc.Hash{}
-               for _, i := range c.wantHeight {
-                       hash := blocks[i].Hash()
-                       want = append(want, &hash)
-               }
-
-               if got := bk.blockLocator(); !testutil.DeepEqual(got, want) {
-                       t.Errorf("case %d: got %v want %v", i, got, want)
-               }
-       }
-}
-
-func TestFastBlockSync(t *testing.T) {
-       maxBlockPerMsg = 5
-       maxBlockHeadersPerMsg = 10
-       baseChain := mockBlocks(nil, 300)
-
-       cases := []struct {
-               syncTimeout time.Duration
-               aBlocks     []*types.Block
-               bBlocks     []*types.Block
-               checkPoint  *consensus.Checkpoint
-               want        []*types.Block
-               err         error
-       }{
-               {
-                       syncTimeout: 30 * time.Second,
-                       aBlocks:     baseChain[:100],
-                       bBlocks:     baseChain[:301],
-                       checkPoint: &consensus.Checkpoint{
-                               Height: baseChain[250].Height,
-                               Hash:   baseChain[250].Hash(),
-                       },
-                       want: baseChain[:251],
-                       err:  nil,
-               },
-               {
-                       syncTimeout: 30 * time.Second,
-                       aBlocks:     baseChain[:100],
-                       bBlocks:     baseChain[:301],
-                       checkPoint: &consensus.Checkpoint{
-                               Height: baseChain[100].Height,
-                               Hash:   baseChain[100].Hash(),
-                       },
-                       want: baseChain[:101],
-                       err:  nil,
-               },
-               {
-                       syncTimeout: 1 * time.Millisecond,
-                       aBlocks:     baseChain[:100],
-                       bBlocks:     baseChain[:100],
-                       checkPoint: &consensus.Checkpoint{
-                               Height: baseChain[200].Height,
-                               Hash:   baseChain[200].Hash(),
-                       },
-                       want: baseChain[:100],
-                       err:  errRequestTimeout,
-               },
-       }
-
-       for i, c := range cases {
-               syncTimeout = c.syncTimeout
-               a := mockSync(c.aBlocks)
-               b := mockSync(c.bBlocks)
-               netWork := NewNetWork()
-               netWork.Register(a, "192.168.0.1", "test node A", consensus.SFFullNode)
-               netWork.Register(b, "192.168.0.2", "test node B", consensus.SFFullNode)
-               if B2A, A2B, err := netWork.HandsShake(a, b); err != nil {
-                       t.Errorf("fail on peer hands shake %v", err)
-               } else {
-                       go B2A.postMan()
-                       go A2B.postMan()
-               }
-
-               a.blockKeeper.syncPeer = a.peers.getPeer("test node B")
-               if err := a.blockKeeper.fastBlockSync(c.checkPoint); errors.Root(err) != c.err {
-                       t.Errorf("case %d: got %v want %v", i, err, c.err)
-               }
-
-               got := []*types.Block{}
-               for i := uint64(0); i <= a.chain.BestBlockHeight(); i++ {
-                       block, err := a.chain.GetBlockByHeight(i)
-                       if err != nil {
-                               t.Errorf("case %d got err %v", i, err)
-                       }
-                       got = append(got, block)
-               }
-
-               if !testutil.DeepEqual(got, c.want) {
-                       t.Errorf("case %d: got %v want %v", i, got, c.want)
-               }
-       }
-}
-
-func TestLocateBlocks(t *testing.T) {
-       maxBlockPerMsg = 5
-       blocks := mockBlocks(nil, 100)
-       cases := []struct {
-               locator    []uint64
-               stopHash   bc.Hash
-               wantHeight []uint64
-       }{
-               {
-                       locator:    []uint64{20},
-                       stopHash:   blocks[100].Hash(),
-                       wantHeight: []uint64{21, 22, 23, 24, 25},
-               },
-       }
-
-       mockChain := mock.NewChain()
-       bk := &blockKeeper{chain: mockChain}
-       for _, block := range blocks {
-               mockChain.SetBlockByHeight(block.Height, block)
-       }
-
-       for i, c := range cases {
-               locator := []*bc.Hash{}
-               for _, i := range c.locator {
-                       hash := blocks[i].Hash()
-                       locator = append(locator, &hash)
-               }
-
-               want := []*types.Block{}
-               for _, i := range c.wantHeight {
-                       want = append(want, blocks[i])
-               }
-
-               got, _ := bk.locateBlocks(locator, &c.stopHash)
-               if !testutil.DeepEqual(got, want) {
-                       t.Errorf("case %d: got %v want %v", i, got, want)
-               }
-       }
-}
-
-func TestLocateHeaders(t *testing.T) {
-       maxBlockHeadersPerMsg = 10
-       blocks := mockBlocks(nil, 150)
-       cases := []struct {
-               chainHeight uint64
-               locator     []uint64
-               stopHash    bc.Hash
-               wantHeight  []uint64
-               err         bool
-       }{
-               {
-                       chainHeight: 100,
-                       locator:     []uint64{},
-                       stopHash:    blocks[100].Hash(),
-                       wantHeight:  []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
-                       err:         false,
-               },
-               {
-                       chainHeight: 100,
-                       locator:     []uint64{20},
-                       stopHash:    blocks[100].Hash(),
-                       wantHeight:  []uint64{21, 22, 23, 24, 25, 26, 27, 28, 29, 30},
-                       err:         false,
-               },
-               {
-                       chainHeight: 100,
-                       locator:     []uint64{20},
-                       stopHash:    blocks[24].Hash(),
-                       wantHeight:  []uint64{21, 22, 23, 24},
-                       err:         false,
-               },
-               {
-                       chainHeight: 100,
-                       locator:     []uint64{20},
-                       stopHash:    blocks[20].Hash(),
-                       wantHeight:  []uint64{},
-                       err:         false,
-               },
-               {
-                       chainHeight: 100,
-                       locator:     []uint64{20},
-                       stopHash:    bc.Hash{},
-                       wantHeight:  []uint64{},
-                       err:         true,
-               },
-               {
-                       chainHeight: 100,
-                       locator:     []uint64{120, 70},
-                       stopHash:    blocks[78].Hash(),
-                       wantHeight:  []uint64{71, 72, 73, 74, 75, 76, 77, 78},
-                       err:         false,
-               },
-       }
-
-       for i, c := range cases {
-               mockChain := mock.NewChain()
-               bk := &blockKeeper{chain: mockChain}
-               for i := uint64(0); i <= c.chainHeight; i++ {
-                       mockChain.SetBlockByHeight(i, blocks[i])
-               }
-
-               locator := []*bc.Hash{}
-               for _, i := range c.locator {
-                       hash := blocks[i].Hash()
-                       locator = append(locator, &hash)
-               }
-
-               want := []*types.BlockHeader{}
-               for _, i := range c.wantHeight {
-                       want = append(want, &blocks[i].BlockHeader)
-               }
-
-               got, err := bk.locateHeaders(locator, &c.stopHash)
-               if err != nil != c.err {
-                       t.Errorf("case %d: got %v want err = %v", i, err, c.err)
-               }
-               if !testutil.DeepEqual(got, want) {
-                       t.Errorf("case %d: got %v want %v", i, got, want)
-               }
-       }
-}
-
-func TestNextCheckpoint(t *testing.T) {
-       cases := []struct {
-               checkPoints []consensus.Checkpoint
-               bestHeight  uint64
-               want        *consensus.Checkpoint
-       }{
-               {
-                       checkPoints: []consensus.Checkpoint{},
-                       bestHeight:  5000,
-                       want:        nil,
-               },
-               {
-                       checkPoints: []consensus.Checkpoint{
-                               {10000, bc.Hash{V0: 1}},
-                       },
-                       bestHeight: 5000,
-                       want:       &consensus.Checkpoint{10000, bc.Hash{V0: 1}},
-               },
-               {
-                       checkPoints: []consensus.Checkpoint{
-                               {10000, bc.Hash{V0: 1}},
-                               {20000, bc.Hash{V0: 2}},
-                               {30000, bc.Hash{V0: 3}},
-                       },
-                       bestHeight: 15000,
-                       want:       &consensus.Checkpoint{20000, bc.Hash{V0: 2}},
-               },
-               {
-                       checkPoints: []consensus.Checkpoint{
-                               {10000, bc.Hash{V0: 1}},
-                               {20000, bc.Hash{V0: 2}},
-                               {30000, bc.Hash{V0: 3}},
-                       },
-                       bestHeight: 10000,
-                       want:       &consensus.Checkpoint{20000, bc.Hash{V0: 2}},
-               },
-               {
-                       checkPoints: []consensus.Checkpoint{
-                               {10000, bc.Hash{V0: 1}},
-                               {20000, bc.Hash{V0: 2}},
-                               {30000, bc.Hash{V0: 3}},
-                       },
-                       bestHeight: 35000,
-                       want:       nil,
-               },
-       }
-
-       mockChain := mock.NewChain()
-       for i, c := range cases {
-               consensus.ActiveNetParams.Checkpoints = c.checkPoints
-               mockChain.SetBestBlockHeader(&types.BlockHeader{Height: c.bestHeight})
-               bk := &blockKeeper{chain: mockChain}
-
-               if got := bk.nextCheckpoint(); !testutil.DeepEqual(got, c.want) {
-                       t.Errorf("case %d: got %v want %v", i, got, c.want)
-               }
-       }
-}
-
-func TestRegularBlockSync(t *testing.T) {
-       baseChain := mockBlocks(nil, 50)
-       chainX := append(baseChain, mockBlocks(baseChain[50], 60)...)
-       chainY := append(baseChain, mockBlocks(baseChain[50], 70)...)
-       cases := []struct {
-               syncTimeout time.Duration
-               aBlocks     []*types.Block
-               bBlocks     []*types.Block
-               syncHeight  uint64
-               want        []*types.Block
-               err         error
-       }{
-               {
-                       syncTimeout: 30 * time.Second,
-                       aBlocks:     baseChain[:20],
-                       bBlocks:     baseChain[:50],
-                       syncHeight:  45,
-                       want:        baseChain[:46],
-                       err:         nil,
-               },
-               {
-                       syncTimeout: 30 * time.Second,
-                       aBlocks:     chainX,
-                       bBlocks:     chainY,
-                       syncHeight:  70,
-                       want:        chainY,
-                       err:         nil,
-               },
-               {
-                       syncTimeout: 30 * time.Second,
-                       aBlocks:     chainX[:52],
-                       bBlocks:     chainY[:53],
-                       syncHeight:  52,
-                       want:        chainY[:53],
-                       err:         nil,
-               },
-               {
-                       syncTimeout: 1 * time.Millisecond,
-                       aBlocks:     baseChain,
-                       bBlocks:     baseChain,
-                       syncHeight:  52,
-                       want:        baseChain,
-                       err:         errRequestTimeout,
-               },
-       }
-
-       for i, c := range cases {
-               syncTimeout = c.syncTimeout
-               a := mockSync(c.aBlocks)
-               b := mockSync(c.bBlocks)
-               netWork := NewNetWork()
-               netWork.Register(a, "192.168.0.1", "test node A", consensus.SFFullNode)
-               netWork.Register(b, "192.168.0.2", "test node B", consensus.SFFullNode)
-               if B2A, A2B, err := netWork.HandsShake(a, b); err != nil {
-                       t.Errorf("fail on peer hands shake %v", err)
-               } else {
-                       go B2A.postMan()
-                       go A2B.postMan()
-               }
-
-               a.blockKeeper.syncPeer = a.peers.getPeer("test node B")
-               if err := a.blockKeeper.regularBlockSync(c.syncHeight); errors.Root(err) != c.err {
-                       t.Errorf("case %d: got %v want %v", i, err, c.err)
-               }
-
-               got := []*types.Block{}
-               for i := uint64(0); i <= a.chain.BestBlockHeight(); i++ {
-                       block, err := a.chain.GetBlockByHeight(i)
-                       if err != nil {
-                               t.Errorf("case %d got err %v", i, err)
-                       }
-                       got = append(got, block)
-               }
-
-               if !testutil.DeepEqual(got, c.want) {
-                       t.Errorf("case %d: got %v want %v", i, got, c.want)
-               }
-       }
-}
-
-func TestRequireBlock(t *testing.T) {
-       blocks := mockBlocks(nil, 5)
-       a := mockSync(blocks[:1])
-       b := mockSync(blocks[:5])
-       netWork := NewNetWork()
-       netWork.Register(a, "192.168.0.1", "test node A", consensus.SFFullNode)
-       netWork.Register(b, "192.168.0.2", "test node B", consensus.SFFullNode)
-       if B2A, A2B, err := netWork.HandsShake(a, b); err != nil {
-               t.Errorf("fail on peer hands shake %v", err)
-       } else {
-               go B2A.postMan()
-               go A2B.postMan()
-       }
-
-       a.blockKeeper.syncPeer = a.peers.getPeer("test node B")
-       b.blockKeeper.syncPeer = b.peers.getPeer("test node A")
-       cases := []struct {
-               syncTimeout   time.Duration
-               testNode      *SyncManager
-               requireHeight uint64
-               want          *types.Block
-               err           error
-       }{
-               {
-                       syncTimeout:   30 * time.Second,
-                       testNode:      a,
-                       requireHeight: 4,
-                       want:          blocks[4],
-                       err:           nil,
-               },
-               {
-                       syncTimeout:   1 * time.Millisecond,
-                       testNode:      b,
-                       requireHeight: 4,
-                       want:          nil,
-                       err:           errRequestTimeout,
-               },
-       }
-
-       for i, c := range cases {
-               syncTimeout = c.syncTimeout
-               got, err := c.testNode.blockKeeper.requireBlock(c.requireHeight)
-               if !testutil.DeepEqual(got, c.want) {
-                       t.Errorf("case %d: got %v want %v", i, got, c.want)
-               }
-               if errors.Root(err) != c.err {
-                       t.Errorf("case %d: got %v want %v", i, err, c.err)
-               }
-       }
-}
-
-func TestSendMerkleBlock(t *testing.T) {
-       cases := []struct {
-               txCount        int
-               relatedTxIndex []int
-       }{
-               {
-                       txCount:        10,
-                       relatedTxIndex: []int{0, 2, 5},
-               },
-               {
-                       txCount:        0,
-                       relatedTxIndex: []int{},
-               },
-               {
-                       txCount:        10,
-                       relatedTxIndex: []int{},
-               },
-               {
-                       txCount:        5,
-                       relatedTxIndex: []int{0, 1, 2, 3, 4},
-               },
-               {
-                       txCount:        20,
-                       relatedTxIndex: []int{1, 6, 3, 9, 10, 19},
-               },
-       }
-
-       for _, c := range cases {
-               blocks := mockBlocks(nil, 2)
-               targetBlock := blocks[1]
-               txs, bcTxs := mockTxs(c.txCount)
-               var err error
-
-               targetBlock.Transactions = txs
-               if targetBlock.TransactionsMerkleRoot, err = types.TxMerkleRoot(bcTxs); err != nil {
-                       t.Fatal(err)
-               }
-
-               spvNode := mockSync(blocks)
-               blockHash := targetBlock.Hash()
-               var statusResult *bc.TransactionStatus
-               if statusResult, err = spvNode.chain.GetTransactionStatus(&blockHash); err != nil {
-                       t.Fatal(err)
-               }
-               
-               if targetBlock.TransactionStatusHash, err = types.TxStatusMerkleRoot(statusResult.VerifyStatus); err != nil {
-                       t.Fatal(err)
-               }
-               
-               fullNode := mockSync(blocks)
-               netWork := NewNetWork()
-               netWork.Register(spvNode, "192.168.0.1", "spv_node", consensus.SFFastSync)
-               netWork.Register(fullNode, "192.168.0.2", "full_node", consensus.DefaultServices)
-
-               var F2S *P2PPeer
-               if F2S, _, err = netWork.HandsShake(spvNode, fullNode); err != nil {
-                       t.Errorf("fail on peer hands shake %v", err)
-               }
-
-               completed := make(chan error)
-               go func() {
-                       msgBytes := <-F2S.msgCh
-                       _, msg, _ := DecodeMessage(msgBytes)
-                       switch m := msg.(type) {
-                       case *MerkleBlockMessage:
-                               var relatedTxIDs []*bc.Hash
-                               for _, rawTx := range m.RawTxDatas {
-                                       tx := &types.Tx{}
-                                       if err := tx.UnmarshalText(rawTx); err != nil {
-                                               completed <- err
-                                       }
-
-                                       relatedTxIDs = append(relatedTxIDs, &tx.ID)
-                               }
-                               var txHashes []*bc.Hash
-                               for _, hashByte := range m.TxHashes {
-                                       hash := bc.NewHash(hashByte)
-                                       txHashes = append(txHashes, &hash)
-                               }
-                               if ok := types.ValidateTxMerkleTreeProof(txHashes, m.Flags, relatedTxIDs, targetBlock.TransactionsMerkleRoot); !ok {
-                                       completed <- errors.New("validate tx fail")
-                               }
-
-                               var statusHashes []*bc.Hash
-                               for _, statusByte := range m.StatusHashes {
-                                       hash := bc.NewHash(statusByte)
-                                       statusHashes = append(statusHashes, &hash)
-                               }
-                               var relatedStatuses []*bc.TxVerifyResult
-                               for _, statusByte := range m.RawTxStatuses {
-                                       status := &bc.TxVerifyResult{}
-                                       err := json.Unmarshal(statusByte, status)
-                                       if err != nil {
-                                               completed <- err
-                                       }
-                                       relatedStatuses = append(relatedStatuses, status)
-                               }
-                               if ok := types.ValidateStatusMerkleTreeProof(statusHashes, m.Flags, relatedStatuses, targetBlock.TransactionStatusHash); !ok {
-                                       completed <- errors.New("validate status fail")
-                               }
-
-                               completed <- nil
-                       }
-               }()
-
-               spvPeer := fullNode.peers.getPeer("spv_node")
-               for i := 0; i < len(c.relatedTxIndex); i++ {
-                       spvPeer.filterAdds.Add(hex.EncodeToString(txs[c.relatedTxIndex[i]].Outputs[0].ControlProgram))
-               }
-               msg := &GetMerkleBlockMessage{RawHash: targetBlock.Hash().Byte32()}
-               fullNode.handleGetMerkleBlockMsg(spvPeer, msg)
-               if err := <-completed; err != nil {
-                       t.Fatal(err)
-               }
-       }
-}
diff --git a/netsync/chainmgr/block_keeper.go b/netsync/chainmgr/block_keeper.go
new file mode 100644 (file)
index 0000000..70a785a
--- /dev/null
@@ -0,0 +1,267 @@
+package chainmgr
+
+import (
+       "time"
+
+       log "github.com/sirupsen/logrus"
+
+       "github.com/bytom/bytom/consensus"
+       dbm "github.com/bytom/bytom/database/leveldb"
+       "github.com/bytom/bytom/errors"
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/p2p/security"
+       "github.com/bytom/bytom/protocol"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
+const (
+       syncCycle = 5 * time.Second
+
+       noNeedSync = iota
+       fastSyncType
+       regularSyncType
+)
+
+var (
+       maxNumOfBlocksPerMsg      = uint64(64)
+       maxNumOfHeadersPerMsg     = uint64(1000)
+       maxNumOfBlocksRegularSync = uint64(128)
+)
+
+// Fetcher is the interface for fetch struct
+type Fetcher interface {
+       processBlock(peerID string, block *types.Block)
+       processBlocks(peerID string, blocks []*types.Block)
+       processHeaders(peerID string, headers []*types.BlockHeader)
+       requireBlock(peerID string, height uint64) (*types.Block, error)
+}
+
+type blockMsg struct {
+       block  *types.Block
+       peerID string
+}
+
+type blocksMsg struct {
+       blocks []*types.Block
+       peerID string
+}
+
+type headersMsg struct {
+       headers []*types.BlockHeader
+       peerID  string
+}
+
+type blockKeeper struct {
+       chain      Chain
+       fastSync   *fastSync
+       msgFetcher Fetcher
+       peers      *peers.PeerSet
+       syncPeer   *peers.Peer
+
+       quit chan struct{}
+}
+
+func newBlockKeeper(chain Chain, peers *peers.PeerSet, fastSyncDB dbm.DB) *blockKeeper {
+       storage := newStorage(fastSyncDB)
+       msgFetcher := newMsgFetcher(storage, peers)
+       return &blockKeeper{
+               chain:      chain,
+               fastSync:   newFastSync(chain, msgFetcher, storage, peers),
+               msgFetcher: msgFetcher,
+               peers:      peers,
+               quit:       make(chan struct{}),
+       }
+}
+
+func (bk *blockKeeper) locateBlocks(locator []*bc.Hash, stopHash *bc.Hash, isTimeout func() bool) ([]*types.Block, error) {
+       headers, err := bk.locateHeaders(locator, stopHash, 0, maxNumOfBlocksPerMsg)
+       if err != nil {
+               return nil, err
+       }
+
+       blocks := []*types.Block{}
+       for _, header := range headers {
+               headerHash := header.Hash()
+               block, err := bk.chain.GetBlockByHash(&headerHash)
+               if err != nil {
+                       return nil, err
+               }
+
+               blocks = append(blocks, block)
+               if isTimeout() {
+                       break
+               }
+       }
+       return blocks, nil
+}
+
+func (bk *blockKeeper) locateHeaders(locator []*bc.Hash, stopHash *bc.Hash, skip uint64, maxNum uint64) ([]*types.BlockHeader, error) {
+       startHeader, err := bk.chain.GetHeaderByHeight(0)
+       if err != nil {
+               return nil, err
+       }
+
+       for _, hash := range locator {
+               header, err := bk.chain.GetHeaderByHash(hash)
+               if err == nil && bk.chain.InMainChain(header.Hash()) {
+                       startHeader = header
+                       break
+               }
+       }
+
+       headers := make([]*types.BlockHeader, 0)
+       stopHeader, err := bk.chain.GetHeaderByHash(stopHash)
+       if err != nil {
+               return headers, err
+       }
+
+       if !bk.chain.InMainChain(*stopHash) || stopHeader.Height < startHeader.Height {
+               return headers, nil
+       }
+
+       headers = append(headers, startHeader)
+       if stopHeader.Height == startHeader.Height {
+               return headers, nil
+       }
+
+       for num, index := uint64(0), startHeader.Height; num < maxNum-1; num++ {
+               index += skip + 1
+               if index >= stopHeader.Height {
+                       headers = append(headers, stopHeader)
+                       break
+               }
+
+               header, err := bk.chain.GetHeaderByHeight(index)
+               if err != nil {
+                       return nil, err
+               }
+
+               headers = append(headers, header)
+       }
+
+       return headers, nil
+}
+
+func (bk *blockKeeper) processBlock(peerID string, block *types.Block) {
+       bk.msgFetcher.processBlock(peerID, block)
+}
+
+func (bk *blockKeeper) processBlocks(peerID string, blocks []*types.Block) {
+       bk.msgFetcher.processBlocks(peerID, blocks)
+}
+
+func (bk *blockKeeper) processHeaders(peerID string, headers []*types.BlockHeader) {
+       bk.msgFetcher.processHeaders(peerID, headers)
+}
+
+func (bk *blockKeeper) regularBlockSync() error {
+       peerHeight := bk.syncPeer.Height()
+       bestHeight := bk.chain.BestBlockHeight()
+       targetHeight := bestHeight + maxNumOfBlocksRegularSync
+       if targetHeight > peerHeight {
+               targetHeight = peerHeight
+       }
+
+       for i := bestHeight + 1; i <= targetHeight; {
+               block, err := bk.msgFetcher.requireBlock(bk.syncPeer.ID(), i)
+               if err != nil {
+                       bk.peers.ProcessIllegal(bk.syncPeer.ID(), security.LevelConnException, err.Error())
+                       return err
+               }
+
+               isOrphan, err := bk.chain.ProcessBlock(block)
+               if err != nil {
+                       if errors.Root(err) != protocol.ErrDoubleSignBlock {
+                               bk.peers.ProcessIllegal(bk.syncPeer.ID(), security.LevelMsgIllegal, err.Error())
+                       }
+                       return err
+               }
+
+               if isOrphan {
+                       i--
+                       continue
+               }
+
+               //This code is used to prevent the sync peer from returning a dust block which will not change the node's chain status
+               if bestHeight = bk.chain.BestBlockHeight(); i == bestHeight+1 {
+                       log.WithFields(log.Fields{"module": logModule, "height": i}).Warn("stop regular sync due to loop sync same height")
+                       return nil
+               }
+
+               i = bestHeight + 1
+       }
+       log.WithFields(log.Fields{"module": logModule, "height": bk.chain.BestBlockHeight()}).Info("regular sync success")
+       return nil
+}
+
+func (bk *blockKeeper) start() {
+       go bk.syncWorker()
+}
+
+func (bk *blockKeeper) checkSyncType() int {
+       bestHeight := bk.chain.BestBlockHeight()
+       peer := bk.peers.BestIrreversiblePeer(consensus.SFFullNode | consensus.SFFastSync)
+       if peer != nil {
+               if peerIrreversibleHeight := peer.IrreversibleHeight(); peerIrreversibleHeight >= bestHeight+minGapStartFastSync {
+                       bk.fastSync.setSyncPeer(peer)
+                       return fastSyncType
+               }
+       }
+
+       peer = bk.peers.BestPeer(consensus.SFFullNode)
+       if peer == nil {
+               log.WithFields(log.Fields{"module": logModule}).Debug("can't find sync peer")
+               return noNeedSync
+       }
+
+       if peer.Height() > bestHeight {
+               bk.syncPeer = peer
+               return regularSyncType
+       }
+
+       return noNeedSync
+}
+
+func (bk *blockKeeper) startSync() bool {
+       switch bk.checkSyncType() {
+       case fastSyncType:
+               if err := bk.fastSync.process(); err != nil {
+                       log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("failed on fast sync")
+                       return false
+               }
+       case regularSyncType:
+               if err := bk.regularBlockSync(); err != nil {
+                       log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on regularBlockSync")
+                       return false
+               }
+       default:
+               return false
+       }
+
+       return true
+}
+
+func (bk *blockKeeper) stop() {
+       close(bk.quit)
+}
+
+func (bk *blockKeeper) syncWorker() {
+       syncTicker := time.NewTicker(syncCycle)
+       defer syncTicker.Stop()
+
+       for {
+               select {
+               case <-syncTicker.C:
+                       if update := bk.startSync(); !update {
+                               continue
+                       }
+
+                       if err := bk.peers.BroadcastNewStatus(bk.chain.BestBlockHeader(), bk.chain.LastIrreversibleHeader()); err != nil {
+                               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on syncWorker broadcast new status")
+                       }
+               case <-bk.quit:
+                       return
+               }
+       }
+}
diff --git a/netsync/chainmgr/block_keeper_test.go b/netsync/chainmgr/block_keeper_test.go
new file mode 100644 (file)
index 0000000..b4b7a20
--- /dev/null
@@ -0,0 +1,571 @@
+package chainmgr
+
+import (
+       "encoding/json"
+       "io/ioutil"
+       "os"
+       "testing"
+       "time"
+
+       "github.com/bytom/bytom/consensus"
+       dbm "github.com/bytom/bytom/database/leveldb"
+       "github.com/bytom/bytom/errors"
+       msgs "github.com/bytom/bytom/netsync/messages"
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/protocol"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+       "github.com/bytom/bytom/test/mock"
+       "github.com/bytom/bytom/testutil"
+)
+
+func TestCheckSyncType(t *testing.T) {
+       tmp, err := ioutil.TempDir(".", "")
+       if err != nil {
+               t.Fatalf("failed to create temporary data folder: %v", err)
+       }
+       fastSyncDB := dbm.NewDB("testdb", "leveldb", tmp)
+       defer func() {
+               fastSyncDB.Close()
+               os.RemoveAll(tmp)
+       }()
+
+       blocks := mockBlocks(nil, 50)
+       chain := mock.NewChain()
+       chain.SetBestBlockHeader(&blocks[len(blocks)-1].BlockHeader)
+       for _, block := range blocks {
+               chain.SetBlockByHeight(block.Height, block)
+       }
+
+       type syncPeer struct {
+               peer               *P2PPeer
+               bestHeight         uint64
+               irreversibleHeight uint64
+       }
+
+       cases := []struct {
+               peers    []*syncPeer
+               syncType int
+       }{
+               {
+                       peers:    []*syncPeer{},
+                       syncType: noNeedSync,
+               },
+               {
+                       peers: []*syncPeer{
+                               {peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 1000, irreversibleHeight: 500},
+                               {peer: &P2PPeer{id: "peer2", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 50, irreversibleHeight: 50},
+                       },
+                       syncType: fastSyncType,
+               },
+               {
+                       peers: []*syncPeer{
+                               {peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 1000, irreversibleHeight: 100},
+                               {peer: &P2PPeer{id: "peer2", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 500, irreversibleHeight: 50},
+                       },
+                       syncType: regularSyncType,
+               },
+               {
+                       peers: []*syncPeer{
+                               {peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 51, irreversibleHeight: 50},
+                       },
+                       syncType: regularSyncType,
+               },
+               {
+                       peers: []*syncPeer{
+                               {peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 30, irreversibleHeight: 30},
+                       },
+                       syncType: noNeedSync,
+               },
+               {
+                       peers: []*syncPeer{
+                               {peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode}, bestHeight: 1000, irreversibleHeight: 1000},
+                       },
+                       syncType: regularSyncType,
+               },
+               {
+                       peers: []*syncPeer{
+                               {peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 1000, irreversibleHeight: 50},
+                               {peer: &P2PPeer{id: "peer2", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 800, irreversibleHeight: 800},
+                       },
+                       syncType: fastSyncType,
+               },
+       }
+
+       for i, c := range cases {
+               peers := peers.NewPeerSet(NewPeerSet())
+               blockKeeper := newBlockKeeper(chain, peers, fastSyncDB)
+               for _, syncPeer := range c.peers {
+                       blockKeeper.peers.AddPeer(syncPeer.peer)
+                       blockKeeper.peers.SetStatus(syncPeer.peer.id, syncPeer.bestHeight, nil)
+                       blockKeeper.peers.SetIrreversibleStatus(syncPeer.peer.id, syncPeer.irreversibleHeight, nil)
+               }
+               gotType := blockKeeper.checkSyncType()
+               if c.syncType != gotType {
+                       t.Errorf("case %d: got %d want %d", i, gotType, c.syncType)
+               }
+       }
+}
+
+func TestRegularBlockSync(t *testing.T) {
+       baseChain := mockBlocks(nil, 50)
+       chainX := append(baseChain, mockBlocks(baseChain[50], 60)...)
+       chainY := append(baseChain, mockBlocks(baseChain[50], 70)...)
+       chainZ := append(baseChain, mockBlocks(baseChain[50], 200)...)
+       chainE := append(baseChain, mockErrorBlocks(baseChain[50], 200, 60)...)
+
+       cases := []struct {
+               syncTimeout time.Duration
+               aBlocks     []*types.Block
+               bBlocks     []*types.Block
+               want        []*types.Block
+               err         error
+       }{
+               {
+                       syncTimeout: 30 * time.Second,
+                       aBlocks:     baseChain[:20],
+                       bBlocks:     baseChain[:50],
+                       want:        baseChain[:50],
+                       err:         nil,
+               },
+               {
+                       syncTimeout: 30 * time.Second,
+                       aBlocks:     chainX,
+                       bBlocks:     chainY,
+                       want:        chainY,
+                       err:         nil,
+               },
+               {
+                       syncTimeout: 30 * time.Second,
+                       aBlocks:     chainX[:52],
+                       bBlocks:     chainY[:53],
+                       want:        chainY[:53],
+                       err:         nil,
+               },
+               {
+                       syncTimeout: 30 * time.Second,
+                       aBlocks:     chainX[:52],
+                       bBlocks:     chainZ,
+                       want:        chainZ[:180],
+                       err:         nil,
+               },
+               {
+                       syncTimeout: 0 * time.Second,
+                       aBlocks:     chainX[:52],
+                       bBlocks:     chainZ,
+                       want:        chainX[:52],
+                       err:         errRequestTimeout,
+               },
+               {
+                       syncTimeout: 30 * time.Second,
+                       aBlocks:     chainX[:52],
+                       bBlocks:     chainE,
+                       want:        chainE[:60],
+                       err:         protocol.ErrBadStateRoot,
+               },
+       }
+       tmp, err := ioutil.TempDir(".", "")
+       if err != nil {
+               t.Fatalf("failed to create temporary data folder: %v", err)
+       }
+       testDBA := dbm.NewDB("testdba", "leveldb", tmp)
+       testDBB := dbm.NewDB("testdbb", "leveldb", tmp)
+       defer func() {
+               testDBA.Close()
+               testDBB.Close()
+               os.RemoveAll(tmp)
+       }()
+
+       for i, c := range cases {
+               a := mockSync(c.aBlocks, nil, testDBA)
+               b := mockSync(c.bBlocks, nil, testDBB)
+               netWork := NewNetWork()
+               netWork.Register(a, "192.168.0.1", "test node A", consensus.SFFullNode)
+               netWork.Register(b, "192.168.0.2", "test node B", consensus.SFFullNode)
+               if B2A, A2B, err := netWork.HandsShake(a, b); err != nil {
+                       t.Errorf("fail on peer hands shake %v", err)
+               } else {
+                       go B2A.postMan()
+                       go A2B.postMan()
+               }
+
+               requireBlockTimeout = c.syncTimeout
+               a.blockKeeper.syncPeer = a.peers.GetPeer("test node B")
+               if err := a.blockKeeper.regularBlockSync(); errors.Root(err) != c.err {
+                       t.Errorf("case %d: got %v want %v", i, err, c.err)
+               }
+
+               got := []*types.Block{}
+               for i := uint64(0); i <= a.chain.BestBlockHeight(); i++ {
+                       block, err := a.chain.GetBlockByHeight(i)
+                       if err != nil {
+                               t.Errorf("case %d got err %v", i, err)
+                       }
+                       got = append(got, block)
+               }
+
+               if !testutil.DeepEqual(got, c.want) {
+                       t.Errorf("case %d: got %v want %v", i, got, c.want)
+               }
+       }
+}
+
+func TestRequireBlock(t *testing.T) {
+       tmp, err := ioutil.TempDir(".", "")
+       if err != nil {
+               t.Fatalf("failed to create temporary data folder: %v", err)
+       }
+       testDBA := dbm.NewDB("testdba", "leveldb", tmp)
+       testDBB := dbm.NewDB("testdbb", "leveldb", tmp)
+       defer func() {
+               testDBB.Close()
+               testDBA.Close()
+               os.RemoveAll(tmp)
+       }()
+
+       blocks := mockBlocks(nil, 5)
+       a := mockSync(blocks[:1], nil, testDBA)
+       b := mockSync(blocks[:5], nil, testDBB)
+       netWork := NewNetWork()
+       netWork.Register(a, "192.168.0.1", "test node A", consensus.SFFullNode)
+       netWork.Register(b, "192.168.0.2", "test node B", consensus.SFFullNode)
+       if B2A, A2B, err := netWork.HandsShake(a, b); err != nil {
+               t.Errorf("fail on peer hands shake %v", err)
+       } else {
+               go B2A.postMan()
+               go A2B.postMan()
+       }
+
+       a.blockKeeper.syncPeer = a.peers.GetPeer("test node B")
+       b.blockKeeper.syncPeer = b.peers.GetPeer("test node A")
+       cases := []struct {
+               syncTimeout   time.Duration
+               testNode      *Manager
+               requireHeight uint64
+               want          *types.Block
+               err           error
+       }{
+               {
+                       syncTimeout:   30 * time.Second,
+                       testNode:      a,
+                       requireHeight: 4,
+                       want:          blocks[4],
+                       err:           nil,
+               },
+               {
+                       syncTimeout:   1 * time.Millisecond,
+                       testNode:      b,
+                       requireHeight: 4,
+                       want:          nil,
+                       err:           errRequestTimeout,
+               },
+       }
+
+       defer func() {
+               requireBlockTimeout = 20 * time.Second
+       }()
+
+       for i, c := range cases {
+               requireBlockTimeout = c.syncTimeout
+               got, err := c.testNode.blockKeeper.msgFetcher.requireBlock(c.testNode.blockKeeper.syncPeer.ID(), c.requireHeight)
+               if !testutil.DeepEqual(got, c.want) {
+                       t.Errorf("case %d: got %v want %v", i, got, c.want)
+               }
+               if errors.Root(err) != c.err {
+                       t.Errorf("case %d: got %v want %v", i, err, c.err)
+               }
+       }
+}
+
+// TestSendMerkleBlock checks that a full node responds to a
+// GetMerkleBlockMessage with transaction and status merkle proofs that the
+// SPV side can validate against the target block's merkle roots.
+func TestSendMerkleBlock(t *testing.T) {
+       tmp, err := ioutil.TempDir(".", "")
+       if err != nil {
+               t.Fatalf("failed to create temporary data folder: %v", err)
+       }
+
+       testDBA := dbm.NewDB("testdba", "leveldb", tmp)
+       testDBB := dbm.NewDB("testdbb", "leveldb", tmp)
+       defer func() {
+               testDBA.Close()
+               testDBB.Close()
+               os.RemoveAll(tmp)
+       }()
+
+       cases := []struct {
+               txCount        int
+               relatedTxIndex []int
+       }{
+               {
+                       txCount:        10,
+                       relatedTxIndex: []int{0, 2, 5},
+               },
+               {
+                       txCount:        0,
+                       relatedTxIndex: []int{},
+               },
+               {
+                       txCount:        10,
+                       relatedTxIndex: []int{},
+               },
+               {
+                       txCount:        5,
+                       relatedTxIndex: []int{0, 1, 2, 3, 4},
+               },
+               {
+                       txCount:        20,
+                       relatedTxIndex: []int{1, 6, 3, 9, 10, 19},
+               },
+       }
+
+       for _, c := range cases {
+               blocks := mockBlocks(nil, 2)
+               targetBlock := blocks[1]
+               txs, bcTxs := mockTxs(c.txCount)
+               var err error
+
+               // Rebuild the target block's merkle roots over the mocked txs
+               // so the proofs returned by the full node can be validated.
+               targetBlock.Transactions = txs
+               if targetBlock.TransactionsMerkleRoot, err = types.TxMerkleRoot(bcTxs); err != nil {
+                       t.Fatal(err)
+               }
+
+               spvNode := mockSync(blocks, nil, testDBA)
+               blockHash := targetBlock.Hash()
+               var statusResult *bc.TransactionStatus
+               if statusResult, err = spvNode.chain.GetTransactionStatus(&blockHash); err != nil {
+                       t.Fatal(err)
+               }
+
+               if targetBlock.TransactionStatusHash, err = types.TxStatusMerkleRoot(statusResult.VerifyStatus); err != nil {
+                       t.Fatal(err)
+               }
+
+               fullNode := mockSync(blocks, nil, testDBB)
+               netWork := NewNetWork()
+               netWork.Register(spvNode, "192.168.0.1", "spv_node", consensus.SFFastSync)
+               netWork.Register(fullNode, "192.168.0.2", "full_node", consensus.DefaultServices)
+
+               var F2S *P2PPeer
+               if F2S, _, err = netWork.HandsShake(spvNode, fullNode); err != nil {
+                       t.Errorf("fail on peer hands shake %v", err)
+               }
+
+               // SPV side: decode the merkle block reply and validate both the
+               // transaction proof and the status proof, reporting the outcome
+               // (nil on success) through the completed channel.
+               completed := make(chan error)
+               go func() {
+                       msgBytes := <-F2S.msgCh
+                       _, msg, _ := decodeMessage(msgBytes)
+                       switch m := msg.(type) {
+                       case *msgs.MerkleBlockMessage:
+                               var relatedTxIDs []*bc.Hash
+                               for _, rawTx := range m.RawTxDatas {
+                                       tx := &types.Tx{}
+                                       if err := tx.UnmarshalText(rawTx); err != nil {
+                                               completed <- err
+                                       }
+
+                                       relatedTxIDs = append(relatedTxIDs, &tx.ID)
+                               }
+                               var txHashes []*bc.Hash
+                               for _, hashByte := range m.TxHashes {
+                                       hash := bc.NewHash(hashByte)
+                                       txHashes = append(txHashes, &hash)
+                               }
+                               if ok := types.ValidateTxMerkleTreeProof(txHashes, m.Flags, relatedTxIDs, targetBlock.TransactionsMerkleRoot); !ok {
+                                       completed <- errors.New("validate tx fail")
+                               }
+
+                               var statusHashes []*bc.Hash
+                               for _, statusByte := range m.StatusHashes {
+                                       hash := bc.NewHash(statusByte)
+                                       statusHashes = append(statusHashes, &hash)
+                               }
+                               var relatedStatuses []*bc.TxVerifyResult
+                               for _, statusByte := range m.RawTxStatuses {
+                                       status := &bc.TxVerifyResult{}
+                                       err := json.Unmarshal(statusByte, status)
+                                       if err != nil {
+                                               completed <- err
+                                       }
+                                       relatedStatuses = append(relatedStatuses, status)
+                               }
+                               if ok := types.ValidateStatusMerkleTreeProof(statusHashes, m.Flags, relatedStatuses, targetBlock.TransactionStatusHash); !ok {
+                                       completed <- errors.New("validate status fail")
+                               }
+
+                               completed <- nil
+                       }
+               }()
+
+               // Register the related tx control programs as the SPV peer's
+               // address filter, then ask the full node for the merkle block.
+               spvPeer := fullNode.peers.GetPeer("spv_node")
+               for i := 0; i < len(c.relatedTxIndex); i++ {
+                       spvPeer.AddFilterAddress(txs[c.relatedTxIndex[i]].Outputs[0].ControlProgram)
+               }
+               msg := &msgs.GetMerkleBlockMessage{RawHash: targetBlock.Hash().Byte32()}
+               fullNode.handleGetMerkleBlockMsg(spvPeer, msg)
+               if err := <-completed; err != nil {
+                       t.Fatal(err)
+               }
+       }
+}
+
+func TestLocateBlocks(t *testing.T) {
+       maxNumOfBlocksPerMsg = 5
+       blocks := mockBlocks(nil, 100)
+       cases := []struct {
+               locator    []uint64
+               stopHash   bc.Hash
+               wantHeight []uint64
+               wantErr    error
+       }{
+               {
+                       locator:    []uint64{20},
+                       stopHash:   blocks[100].Hash(),
+                       wantHeight: []uint64{20, 21, 22, 23, 24},
+                       wantErr:    nil,
+               },
+               {
+                       locator:    []uint64{20},
+                       stopHash:   bc.NewHash([32]byte{0x01, 0x02}),
+                       wantHeight: []uint64{},
+                       wantErr:    mock.ErrFoundHeaderByHash,
+               },
+       }
+
+       mockChain := mock.NewChain()
+       bk := &blockKeeper{chain: mockChain}
+       for _, block := range blocks {
+               mockChain.SetBlockByHeight(block.Height, block)
+       }
+
+       for i, c := range cases {
+               locator := []*bc.Hash{}
+               for _, i := range c.locator {
+                       hash := blocks[i].Hash()
+                       locator = append(locator, &hash)
+               }
+
+               want := []*types.Block{}
+               for _, i := range c.wantHeight {
+                       want = append(want, blocks[i])
+               }
+
+               mockTimeout := func() bool { return false }
+               got, err := bk.locateBlocks(locator, &c.stopHash, mockTimeout)
+               if err != c.wantErr {
+                       t.Errorf("case %d: got %v want err = %v", i, err, c.wantErr)
+               }
+
+               if !testutil.DeepEqual(got, want) {
+                       t.Errorf("case %d: got %v want %v", i, got, want)
+               }
+       }
+}
+
+func TestLocateHeaders(t *testing.T) {
+       defer func() {
+               maxNumOfHeadersPerMsg = 1000
+       }()
+       maxNumOfHeadersPerMsg = 10
+       blocks := mockBlocks(nil, 150)
+       blocksHash := []bc.Hash{}
+       for _, block := range blocks {
+               blocksHash = append(blocksHash, block.Hash())
+       }
+
+       cases := []struct {
+               chainHeight uint64
+               locator     []uint64
+               stopHash    *bc.Hash
+               skip        uint64
+               wantHeight  []uint64
+               err         error
+       }{
+               {
+                       chainHeight: 100,
+                       locator:     []uint64{90},
+                       stopHash:    &blocksHash[100],
+                       skip:        0,
+                       wantHeight:  []uint64{90, 91, 92, 93, 94, 95, 96, 97, 98, 99},
+                       err:         nil,
+               },
+               {
+                       chainHeight: 100,
+                       locator:     []uint64{20},
+                       stopHash:    &blocksHash[24],
+                       skip:        0,
+                       wantHeight:  []uint64{20, 21, 22, 23, 24},
+                       err:         nil,
+               },
+               {
+                       chainHeight: 100,
+                       locator:     []uint64{20},
+                       stopHash:    &blocksHash[20],
+                       wantHeight:  []uint64{20},
+                       err:         nil,
+               },
+               {
+                       chainHeight: 100,
+                       locator:     []uint64{20},
+                       stopHash:    &blocksHash[120],
+                       wantHeight:  []uint64{},
+                       err:         mock.ErrFoundHeaderByHash,
+               },
+               {
+                       chainHeight: 100,
+                       locator:     []uint64{120, 70},
+                       stopHash:    &blocksHash[78],
+                       wantHeight:  []uint64{70, 71, 72, 73, 74, 75, 76, 77, 78},
+                       err:         nil,
+               },
+               {
+                       chainHeight: 100,
+                       locator:     []uint64{15},
+                       stopHash:    &blocksHash[10],
+                       skip:        10,
+                       wantHeight:  []uint64{},
+                       err:         nil,
+               },
+               {
+                       chainHeight: 100,
+                       locator:     []uint64{15},
+                       stopHash:    &blocksHash[80],
+                       skip:        10,
+                       wantHeight:  []uint64{15, 26, 37, 48, 59, 70, 80},
+                       err:         nil,
+               },
+               {
+                       chainHeight: 100,
+                       locator:     []uint64{0},
+                       stopHash:    &blocksHash[100],
+                       skip:        9,
+                       wantHeight:  []uint64{0, 10, 20, 30, 40, 50, 60, 70, 80, 90},
+                       err:         nil,
+               },
+       }
+
+       for i, c := range cases {
+               mockChain := mock.NewChain()
+               bk := &blockKeeper{chain: mockChain}
+               for i := uint64(0); i <= c.chainHeight; i++ {
+                       mockChain.SetBlockByHeight(i, blocks[i])
+               }
+
+               locator := []*bc.Hash{}
+               for _, i := range c.locator {
+                       hash := blocks[i].Hash()
+                       locator = append(locator, &hash)
+               }
+
+               want := []*types.BlockHeader{}
+               for _, i := range c.wantHeight {
+                       want = append(want, &blocks[i].BlockHeader)
+               }
+
+               got, err := bk.locateHeaders(locator, c.stopHash, c.skip, maxNumOfHeadersPerMsg)
+               if err != c.err {
+                       t.Errorf("case %d: got %v want err = %v", i, err, c.err)
+               }
+               if !testutil.DeepEqual(got, want) {
+                       t.Errorf("case %d: got %v want %v", i, got, want)
+               }
+       }
+}
diff --git a/netsync/chainmgr/block_process.go b/netsync/chainmgr/block_process.go
new file mode 100644 (file)
index 0000000..ee80ded
--- /dev/null
@@ -0,0 +1,69 @@
+package chainmgr
+
+import (
+       "sync"
+
+       log "github.com/sirupsen/logrus"
+
+       "github.com/bytom/bytom/errors"
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/p2p/security"
+       "github.com/bytom/bytom/protocol"
+)
+
+// errOrphanBlock is returned when fast sync receives a block whose parent is
+// not on the chain; fast sync inserts blocks in order, so an orphan points at
+// a misbehaving peer.
+var errOrphanBlock = errors.New("fast sync inserting orphan block")
+
+// blockProcessor consumes downloaded blocks from storage and inserts them
+// into the chain, reporting peers that supplied invalid data.
+type blockProcessor struct {
+       chain   Chain
+       storage *storage
+       peers   *peers.PeerSet
+}
+
+func newBlockProcessor(chain Chain, storage *storage, peers *peers.PeerSet) *blockProcessor {
+       return &blockProcessor{
+               chain:   chain,
+               peers:   peers,
+               storage: storage,
+       }
+}
+
+// insert applies one downloaded block to the chain. An orphan result, or any
+// validation error other than a double-sign conflict, is treated as evidence
+// of a misbehaving peer and reported to the peer set.
+func (bp *blockProcessor) insert(blockStorage *blockStorage) error {
+       isOrphan, err := bp.chain.ProcessBlock(blockStorage.block)
+       if isOrphan {
+               // Fast sync fetches blocks in order, so an orphan means the
+               // sending peer handed us a block off the requested chain.
+               // Note the orphan check deliberately takes precedence over err.
+               bp.peers.ProcessIllegal(blockStorage.peerID, security.LevelMsgIllegal, errOrphanBlock.Error())
+               return errOrphanBlock
+       }
+
+       if err != nil && errors.Root(err) != protocol.ErrDoubleSignBlock {
+               bp.peers.ProcessIllegal(blockStorage.peerID, security.LevelMsgIllegal, err.Error())
+       }
+       return err
+}
+
+// process drains downloaded blocks from storage starting at syncHeight,
+// inserting each into the chain and deleting it from storage. When storage
+// has no block at the next height it waits on downloadNotifyCh; it returns
+// when that channel is closed or a block fails to insert. ProcessStop is
+// closed on exit so the download side can observe termination.
+func (bp *blockProcessor) process(downloadNotifyCh chan struct{}, ProcessStop chan struct{}, syncHeight uint64, wg *sync.WaitGroup) {
+       defer func() {
+               close(ProcessStop)
+               wg.Done()
+       }()
+
+       for {
+               for {
+                       block, err := bp.storage.readBlock(syncHeight)
+                       if err != nil {
+                               // No block at this height yet; wait for more downloads.
+                               break
+                       }
+
+                       if err := bp.insert(block); err != nil {
+                               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("failed on process block")
+                               return
+                       }
+
+                       bp.storage.deleteBlock(syncHeight)
+                       syncHeight++
+               }
+
+               // A receive wakes us for newly stored blocks; a closed channel
+               // means the download stage has finished.
+               if _, ok := <-downloadNotifyCh; !ok {
+                       return
+               }
+       }
+}
diff --git a/netsync/chainmgr/block_process_test.go b/netsync/chainmgr/block_process_test.go
new file mode 100644 (file)
index 0000000..ac390dd
--- /dev/null
@@ -0,0 +1,74 @@
+package chainmgr
+
+import (
+       "io/ioutil"
+       "os"
+       "sync"
+       "testing"
+       "time"
+
+       dbm "github.com/bytom/bytom/database/leveldb"
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/protocol/bc/types"
+       "github.com/bytom/bytom/test/mock"
+)
+
+// TestBlockProcess drives blockProcessor.process over blocks that were
+// pre-written to storage and checks the chain stops at the expected height
+// for a normal run, a start height beyond the best header, and a corrupt
+// block mid-stream.
+func TestBlockProcess(t *testing.T) {
+       tmp, err := ioutil.TempDir(".", "")
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer os.RemoveAll(tmp)
+
+       testDB := dbm.NewDB("testdb", "leveldb", tmp)
+       defer testDB.Close()
+
+       cases := []struct {
+               blocks      []*types.Block
+               startHeight uint64
+               stopHeight  uint64
+       }{
+               {
+                       blocks:      mockBlocks(nil, 200),
+                       startHeight: 100,
+                       stopHeight:  200,
+               },
+               {
+                       blocks:      mockBlocks(nil, 200),
+                       startHeight: 110,
+                       stopHeight:  100,
+               },
+               {
+                       blocks:      mockErrorBlocks(nil, 200, 150),
+                       startHeight: 100,
+                       stopHeight:  149,
+               },
+       }
+       s := newStorage(testDB)
+       mockChain := mock.NewChain()
+       for i, c := range cases {
+               // Preload the first half of the case's chain as the local best.
+               for i := 0; i <= len(c.blocks)/2; i++ {
+                       mockChain.SetBlockByHeight(uint64(i), c.blocks[i])
+                       mockChain.SetBestBlockHeader(&c.blocks[i].BlockHeader)
+               }
+
+               if err := s.writeBlocks("testPeer", c.blocks); err != nil {
+                       t.Fatal(err)
+               }
+
+               bp := newBlockProcessor(mockChain, s, peers.NewPeerSet(nil))
+               downloadNotifyCh := make(chan struct{}, 1)
+               ProcessStopCh := make(chan struct{})
+               var wg sync.WaitGroup
+               // Closing the notify channel after a delay lets process drain
+               // storage and then exit its outer loop.
+               go func() {
+                       time.Sleep(1 * time.Second)
+                       close(downloadNotifyCh)
+               }()
+               wg.Add(1)
+
+               bp.process(downloadNotifyCh, ProcessStopCh, c.startHeight, &wg)
+               if bp.chain.BestBlockHeight() != c.stopHeight {
+                       t.Fatalf("TestBlockProcess index: %d fail: got %d want %d", i, bp.chain.BestBlockHeight(), c.stopHeight)
+               }
+       }
+}
diff --git a/netsync/chainmgr/fast_sync.go b/netsync/chainmgr/fast_sync.go
new file mode 100644 (file)
index 0000000..62d44f2
--- /dev/null
@@ -0,0 +1,165 @@
+package chainmgr
+
+import (
+       "sync"
+
+       log "github.com/sirupsen/logrus"
+
+       "github.com/bytom/bytom/errors"
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/p2p/security"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
+var (
+       // minSizeOfSyncSkeleton is the minimum skeleton length needed to form
+       // at least one download task (a start and a stop header).
+       minSizeOfSyncSkeleton  = 2
+       // maxSizeOfSyncSkeleton caps how many skeleton headers one sync round uses.
+       maxSizeOfSyncSkeleton  = 11
+       // numOfBlocksSkeletonGap is the height gap between adjacent skeleton headers.
+       numOfBlocksSkeletonGap = maxNumOfBlocksPerMsg
+       // maxNumOfBlocksPerSync bounds the total blocks downloaded per round.
+       maxNumOfBlocksPerSync  = numOfBlocksSkeletonGap * uint64(maxSizeOfSyncSkeleton-1)
+       // fastSyncPivotGap is how far below the peer's best height the sync
+       // stops (see findSyncRange).
+       fastSyncPivotGap       = uint64(64)
+       // minGapStartFastSync — presumably the minimum height deficit before
+       // fast sync is chosen over regular sync; confirm against callers.
+       minGapStartFastSync    = uint64(128)
+
+       errNoSyncPeer      = errors.New("can't find sync peer")
+       errSkeletonSize    = errors.New("fast sync skeleton size wrong")
+       errNoMainSkeleton  = errors.New("No main skeleton found")
+       errNoSkeletonFound = errors.New("No skeleton found")
+)
+
+// fastSync coordinates skeleton-based parallel block download: it asks peers
+// for a header skeleton, splits it into block-fetch tasks, and pipes the
+// downloaded blocks through a blockProcessor into the chain.
+type fastSync struct {
+       chain          Chain
+       msgFetcher     MsgFetcher
+       blockProcessor *blockProcessor
+       peers          *peers.PeerSet
+       mainSyncPeer   *peers.Peer // peer whose skeleton is authoritative for this round
+}
+
+func newFastSync(chain Chain, msgFetcher MsgFetcher, storage *storage, peers *peers.PeerSet) *fastSync {
+       return &fastSync{
+               chain:          chain,
+               msgFetcher:     msgFetcher,
+               blockProcessor: newBlockProcessor(chain, storage, peers),
+               peers:          peers,
+       }
+}
+
+// blockLocator returns header hashes walking back from the best header: the
+// first 9 entries step back one height at a time, after which the step
+// doubles each iteration until the genesis header (height 0) is included.
+func (fs *fastSync) blockLocator() []*bc.Hash {
+       locator := []*bc.Hash{}
+       step := uint64(1)
+
+       for cursor := fs.chain.BestBlockHeader(); cursor != nil; {
+               hash := cursor.Hash()
+               locator = append(locator, &hash)
+               if cursor.Height == 0 {
+                       break
+               }
+
+               // Clamp the next height at genesis instead of underflowing.
+               next := uint64(0)
+               if cursor.Height >= step {
+                       next = cursor.Height - step
+               }
+
+               var err error
+               if cursor, err = fs.chain.GetHeaderByHeight(next); err != nil {
+                       log.WithFields(log.Fields{"module": logModule, "err": err}).Error("blockKeeper fail on get blockLocator")
+                       break
+               }
+
+               if len(locator) >= 9 {
+                       step *= 2
+               }
+       }
+       return locator
+}
+
+// createFetchBlocksTasks get the skeleton and assign tasks according to the skeleton.
+func (fs *fastSync) createFetchBlocksTasks(stopBlock *types.Block) ([]*fetchBlocksWork, error) {
+       // Find peers that meet the height requirements.
+       peers := fs.peers.GetPeersByHeight(stopBlock.Height + fastSyncPivotGap)
+       if len(peers) == 0 {
+               return nil, errNoSyncPeer
+       }
+
+       // parallel fetch the skeleton from peers.
+       stopHash := stopBlock.Hash()
+       skeletonMap := fs.msgFetcher.parallelFetchHeaders(peers, fs.blockLocator(), &stopHash, numOfBlocksSkeletonGap-1)
+       if len(skeletonMap) == 0 {
+               return nil, errNoSkeletonFound
+       }
+
+       mainSkeleton, ok := skeletonMap[fs.mainSyncPeer.ID()]
+       if !ok {
+               return nil, errNoMainSkeleton
+       }
+
+       if len(mainSkeleton) < minSizeOfSyncSkeleton {
+               fs.peers.ProcessIllegal(fs.mainSyncPeer.ID(), security.LevelMsgIllegal, errSkeletonSize.Error())
+               return nil, errSkeletonSize
+       }
+
+       // collect peers that match the skeleton of the primary sync peer
+       fs.msgFetcher.addSyncPeer(fs.mainSyncPeer.ID())
+       delete(skeletonMap, fs.mainSyncPeer.ID())
+       for peerID, skeleton := range skeletonMap {
+               if len(skeleton) != len(mainSkeleton) {
+                       log.WithFields(log.Fields{"module": logModule, "main skeleton": len(mainSkeleton), "got skeleton": len(skeleton)}).Warn("different skeleton length")
+                       continue
+               }
+
+               for i, header := range skeleton {
+                       if header.Hash() != mainSkeleton[i].Hash() {
+                               log.WithFields(log.Fields{"module": logModule, "header index": i, "main skeleton": mainSkeleton[i].Hash(), "got skeleton": header.Hash()}).Warn("different skeleton hash")
+                               continue
+                       }
+               }
+               fs.msgFetcher.addSyncPeer(peerID)
+       }
+
+       blockFetchTasks := make([]*fetchBlocksWork, 0)
+       // create download task
+       for i := 0; i < len(mainSkeleton)-1 && i < maxSizeOfSyncSkeleton-1; i++ {
+               blockFetchTasks = append(blockFetchTasks, &fetchBlocksWork{startHeader: mainSkeleton[i], stopHeader: mainSkeleton[i+1]})
+       }
+
+       return blockFetchTasks, nil
+}
+
+// process runs one round of fast sync: pick the sync range, build download
+// tasks from the skeleton, then run the downloader and the block processor
+// concurrently until both finish.
+func (fs *fastSync) process() error {
+       stopBlock, err := fs.findSyncRange()
+       if err != nil {
+               return err
+       }
+
+       tasks, err := fs.createFetchBlocksTasks(stopBlock)
+       if err != nil {
+               return err
+       }
+
+       // downloadNotifyCh wakes the processor when new blocks reach storage;
+       // processStopCh is closed when the processor exits (see
+       // blockProcessor.process) so the downloader can stop early.
+       downloadNotifyCh := make(chan struct{}, 1)
+       processStopCh := make(chan struct{})
+       var wg sync.WaitGroup
+       wg.Add(2)
+       go fs.msgFetcher.parallelFetchBlocks(tasks, downloadNotifyCh, processStopCh, &wg)
+       go fs.blockProcessor.process(downloadNotifyCh, processStopCh, tasks[0].startHeader.Height, &wg)
+       wg.Wait()
+       fs.msgFetcher.resetParameter()
+       log.WithFields(log.Fields{"module": logModule, "height": fs.chain.BestBlockHeight()}).Info("fast sync complete")
+       return nil
+}
+
+// findSyncRange determines the stop block of this sync round: the main sync
+// peer's height minus fastSyncPivotGap, capped so no more than
+// maxNumOfBlocksPerSync blocks above the local best height are requested.
+func (fs *fastSync) findSyncRange() (*types.Block, error) {
+       bestHeight := fs.chain.BestBlockHeight()
+       // NOTE(review): this uint64 subtraction underflows if the peer's
+       // height is below bestHeight+fastSyncPivotGap — callers appear to gate
+       // fast sync on a minimum gap; confirm before relying on it.
+       length := fs.mainSyncPeer.Height() - fastSyncPivotGap - bestHeight
+       if length > maxNumOfBlocksPerSync {
+               length = maxNumOfBlocksPerSync
+       }
+
+       return fs.msgFetcher.requireBlock(fs.mainSyncPeer.ID(), bestHeight+length)
+}
+
+// setSyncPeer records the peer whose skeleton is treated as authoritative
+// during fast sync.
+func (fs *fastSync) setSyncPeer(peer *peers.Peer) {
+       fs.mainSyncPeer = peer
+}
diff --git a/netsync/chainmgr/fast_sync_test.go b/netsync/chainmgr/fast_sync_test.go
new file mode 100644 (file)
index 0000000..a8a4a34
--- /dev/null
@@ -0,0 +1,367 @@
+package chainmgr
+
+import (
+       "io/ioutil"
+       "os"
+       "reflect"
+       "sync"
+       "testing"
+       "time"
+
+       "github.com/bytom/bytom/consensus"
+       dbm "github.com/bytom/bytom/database/leveldb"
+       "github.com/bytom/bytom/errors"
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+       "github.com/bytom/bytom/test/mock"
+       "github.com/bytom/bytom/testutil"
+)
+
+// TestBlockLocator checks the locator shape produced by fastSync.blockLocator:
+// the latest heights step back by one for the first 9 entries, then the step
+// doubles until genesis is reached.
+func TestBlockLocator(t *testing.T) {
+       blocks := mockBlocks(nil, 500)
+       cases := []struct {
+               bestHeight uint64
+               wantHeight []uint64
+       }{
+               {
+                       bestHeight: 0,
+                       wantHeight: []uint64{0},
+               },
+               {
+                       bestHeight: 1,
+                       wantHeight: []uint64{1, 0},
+               },
+               {
+                       bestHeight: 7,
+                       wantHeight: []uint64{7, 6, 5, 4, 3, 2, 1, 0},
+               },
+               {
+                       bestHeight: 10,
+                       wantHeight: []uint64{10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
+               },
+               {
+                       bestHeight: 100,
+                       wantHeight: []uint64{100, 99, 98, 97, 96, 95, 94, 93, 92, 91, 89, 85, 77, 61, 29, 0},
+               },
+               {
+                       bestHeight: 500,
+                       wantHeight: []uint64{500, 499, 498, 497, 496, 495, 494, 493, 492, 491, 489, 485, 477, 461, 429, 365, 237, 0},
+               },
+       }
+
+       for i, c := range cases {
+               mockChain := mock.NewChain()
+               fs := &fastSync{chain: mockChain}
+               mockChain.SetBestBlockHeader(&blocks[c.bestHeight].BlockHeader)
+               for i := uint64(0); i <= c.bestHeight; i++ {
+                       mockChain.SetBlockByHeight(i, blocks[i])
+               }
+
+               want := []*bc.Hash{}
+               for _, i := range c.wantHeight {
+                       hash := blocks[i].Hash()
+                       want = append(want, &hash)
+               }
+
+               if got := fs.blockLocator(); !testutil.DeepEqual(got, want) {
+                       t.Errorf("case %d: got %v want %v", i, got, want)
+               }
+       }
+}
+
+// TestFastBlockSync runs a full fast-sync round between two in-memory nodes
+// over the mock network, with the sync parameters shrunk so the scenarios
+// (per-round cap, short chains, skeleton timeout, forked chain) stay small.
+func TestFastBlockSync(t *testing.T) {
+       tmp, err := ioutil.TempDir(".", "")
+       if err != nil {
+               t.Fatalf("failed to create temporary data folder: %v", err)
+       }
+       testDBA := dbm.NewDB("testdba", "leveldb", tmp)
+       testDBB := dbm.NewDB("testdbb", "leveldb", tmp)
+       defer func() {
+               testDBA.Close()
+               testDBB.Close()
+               os.RemoveAll(tmp)
+       }()
+
+       // Shrink the sync parameters for the test; restored in the defer below.
+       maxSizeOfSyncSkeleton = 11
+       numOfBlocksSkeletonGap = 10
+       maxNumOfBlocksPerSync = numOfBlocksSkeletonGap * uint64(maxSizeOfSyncSkeleton-1)
+       fastSyncPivotGap = uint64(5)
+       minGapStartFastSync = uint64(6)
+
+       defer func() {
+               maxSizeOfSyncSkeleton = 11
+               numOfBlocksSkeletonGap = maxNumOfBlocksPerMsg
+               maxNumOfBlocksPerSync = numOfBlocksSkeletonGap * uint64(maxSizeOfSyncSkeleton-1)
+               fastSyncPivotGap = uint64(64)
+               minGapStartFastSync = uint64(128)
+               requireHeadersTimeout = 30 * time.Second
+       }()
+
+       // chainX forks from baseChain at height 30.
+       baseChain := mockBlocks(nil, 300)
+       chainX := []*types.Block{}
+       chainX = append(chainX, baseChain[:30]...)
+       chainX = append(chainX, mockBlocks(baseChain[30], 500)...)
+       cases := []struct {
+               syncTimeout time.Duration
+               aBlocks     []*types.Block
+               bBlocks     []*types.Block
+               want        []*types.Block
+               err         error
+       }{
+               {
+                       syncTimeout: 30 * time.Second,
+                       aBlocks:     baseChain[:50],
+                       bBlocks:     baseChain[:301],
+                       want:        baseChain[:150],
+                       err:         nil,
+               },
+               {
+                       syncTimeout: 30 * time.Second,
+                       aBlocks:     baseChain[:2],
+                       bBlocks:     baseChain[:300],
+                       want:        baseChain[:102],
+                       err:         nil,
+               },
+               {
+                       syncTimeout: 30 * time.Second,
+                       aBlocks:     baseChain[:2],
+                       bBlocks:     baseChain[:53],
+                       want:        baseChain[:48],
+                       err:         nil,
+               },
+               {
+                       syncTimeout: 30 * time.Second,
+                       aBlocks:     baseChain[:2],
+                       bBlocks:     baseChain[:53],
+                       want:        baseChain[:48],
+                       err:         nil,
+               },
+               {
+                       syncTimeout: 30 * time.Second,
+                       aBlocks:     baseChain[:2],
+                       bBlocks:     baseChain[:10],
+                       want:        baseChain[:5],
+                       err:         nil,
+               },
+               {
+                       syncTimeout: 0 * time.Second,
+                       aBlocks:     baseChain[:50],
+                       bBlocks:     baseChain[:301],
+                       want:        baseChain[:50],
+                       err:         errSkeletonSize,
+               },
+               {
+                       syncTimeout: 30 * time.Second,
+                       aBlocks:     chainX[:50],
+                       bBlocks:     baseChain[:301],
+                       want:        baseChain[:128],
+                       err:         nil,
+               },
+       }
+
+       for i, c := range cases {
+               a := mockSync(c.aBlocks, nil, testDBA)
+               b := mockSync(c.bBlocks, nil, testDBB)
+               netWork := NewNetWork()
+               netWork.Register(a, "192.168.0.1", "test node A", consensus.SFFullNode|consensus.SFFastSync)
+               netWork.Register(b, "192.168.0.2", "test node B", consensus.SFFullNode|consensus.SFFastSync)
+               if B2A, A2B, err := netWork.HandsShake(a, b); err != nil {
+                       t.Errorf("fail on peer hands shake %v", err)
+               } else {
+                       go B2A.postMan()
+                       go A2B.postMan()
+               }
+               a.blockKeeper.syncPeer = a.peers.GetPeer("test node B")
+               a.blockKeeper.fastSync.setSyncPeer(a.blockKeeper.syncPeer)
+
+               requireHeadersTimeout = c.syncTimeout
+               if err := a.blockKeeper.fastSync.process(); errors.Root(err) != c.err {
+                       t.Errorf("case %d: got %v want %v", i, err, c.err)
+               }
+
+               // Verify node A's chain now matches the expected prefix.
+               got := []*types.Block{}
+               for i := uint64(0); i <= a.chain.BestBlockHeight(); i++ {
+                       block, err := a.chain.GetBlockByHeight(i)
+                       if err != nil {
+                               t.Errorf("case %d got err %v", i, err)
+                       }
+                       got = append(got, block)
+               }
+               if !testutil.DeepEqual(got, c.want) {
+                       t.Errorf("case %d: got %v want %v", i, got, c.want)
+               }
+       }
+}
+
+// mockFetcher is a canned MsgFetcher implementation for skeleton tests; the
+// data returned by parallelFetchHeaders is selected by testType.
+type mockFetcher struct {
+       baseChain  []*types.Block
+       peerStatus map[string][]*types.Block // per-peer canned chains keyed by peer ID
+       peers      []string
+       testType   int
+}
+
+func (mf *mockFetcher) resetParameter() {
+       return
+}
+
+func (mf *mockFetcher) addSyncPeer(peerID string) {
+       return
+}
+
+// requireBlock satisfies MsgFetcher; this mock never supplies a stop block.
+func (mf *mockFetcher) requireBlock(peerID string, height uint64) (*types.Block, error) {
+       return nil, nil
+}
+
+func (mf *mockFetcher) parallelFetchBlocks(work []*fetchBlocksWork, downloadNotifyCh chan struct{}, ProcessStopCh chan struct{}, wg *sync.WaitGroup) {
+       return
+}
+
+func (mf *mockFetcher) parallelFetchHeaders(peers []*peers.Peer, locator []*bc.Hash, stopHash *bc.Hash, skip uint64) map[string][]*types.BlockHeader {
+       result := make(map[string][]*types.BlockHeader)
+       switch mf.testType {
+       case 1:
+               result["peer1"] = []*types.BlockHeader{&mf.peerStatus["peer1"][1000].BlockHeader, &mf.peerStatus["peer1"][1100].BlockHeader, &mf.peerStatus["peer1"][1200].BlockHeader,
+                       &mf.peerStatus["peer1"][1300].BlockHeader, &mf.peerStatus["peer1"][1400].BlockHeader, &mf.peerStatus["peer1"][1500].BlockHeader,
+                       &mf.peerStatus["peer1"][1600].BlockHeader, &mf.peerStatus["peer1"][1700].BlockHeader, &mf.peerStatus["peer1"][1800].BlockHeader,
+               }
+               result["peer2"] = []*types.BlockHeader{&mf.peerStatus["peer2"][1000].BlockHeader, &mf.peerStatus["peer2"][1100].BlockHeader, &mf.peerStatus["peer2"][1200].BlockHeader,
+                       &mf.peerStatus["peer2"][1300].BlockHeader, &mf.peerStatus["peer2"][1400].BlockHeader, &mf.peerStatus["peer2"][1500].BlockHeader,
+                       &mf.peerStatus["peer2"][1600].BlockHeader, &mf.peerStatus["peer2"][1700].BlockHeader, &mf.peerStatus["peer2"][1800].BlockHeader,
+               }
+
+       case 2:
+               result["peer1"] = []*types.BlockHeader{}
+       case 3:
+       case 4:
+               result["peer2"] = []*types.BlockHeader{&mf.peerStatus["peer2"][1000].BlockHeader, &mf.peerStatus["peer2"][1100].BlockHeader, &mf.peerStatus["peer2"][1200].BlockHeader,
+                       &mf.peerStatus["peer2"][1300].BlockHeader, &mf.peerStatus["peer2"][1400].BlockHeader, &mf.peerStatus["peer2"][1500].BlockHeader,
+                       &mf.peerStatus["peer2"][1600].BlockHeader, &mf.peerStatus["peer2"][1700].BlockHeader, &mf.peerStatus["peer2"][1800].BlockHeader,
+               }
+       case 5:
+               result["peer1"] = []*types.BlockHeader{&mf.peerStatus["peer1"][1000].BlockHeader, &mf.peerStatus["peer1"][1100].BlockHeader, &mf.peerStatus["peer1"][1200].BlockHeader,
+                       &mf.peerStatus["peer1"][1300].BlockHeader, &mf.peerStatus["peer1"][1400].BlockHeader, &mf.peerStatus["peer1"][1500].BlockHeader,
+                       &mf.peerStatus["peer1"][1600].BlockHeader, &mf.peerStatus["peer1"][1700].BlockHeader, &mf.peerStatus["peer1"][1800].BlockHeader,
+               }
+               result["peer2"] = []*types.BlockHeader{&mf.peerStatus["peer2"][1000].BlockHeader, &mf.peerStatus["peer2"][1100].BlockHeader, &mf.peerStatus["peer2"][1200].BlockHeader,
+                       &mf.peerStatus["peer2"][1300].BlockHeader, &mf.peerStatus["peer2"][1400].BlockHeader, &mf.peerStatus["peer2"][1500].BlockHeader,
+                       &mf.peerStatus["peer2"][1600].BlockHeader, &mf.peerStatus["peer2"][1700].BlockHeader,
+               }
+       }
+       return result
+}
+
// TestCreateFetchBlocksTasks exercises fastSync.createFetchBlocksTasks against
// a mocked skeleton fetcher. The mockFetcher's testType field selects which
// skeleton scenario the fetcher returns (see mock construction below):
// 1 = both peers return full skeletons, 2 = primary peer returns a too-short
// skeleton, 3 = no skeleton at all, 4 = no skeleton from the main sync peer,
// 5 = secondary skeleton shorter than the main one (still tolerated).
func TestCreateFetchBlocksTasks(t *testing.T) {
	// chainX and chainY share the first 1001 blocks (the common prefix);
	// chainX is the longer fork, so the two mocked peers diverge above it.
	baseChain := mockBlocks(nil, 1000)
	chainX := append(baseChain, mockBlocks(baseChain[1000], 2000)...)
	chainY := append(baseChain, mockBlocks(baseChain[1000], 1900)...)
	peerStatus := make(map[string][]*types.Block)
	peerStatus["peer1"] = chainX
	peerStatus["peer2"] = chainY
	// syncPeer bundles a fake peer with the heights it advertises.
	type syncPeer struct {
		peer               *P2PPeer
		bestHeight         uint64
		irreversibleHeight uint64
	}

	cases := []struct {
		peers        []*syncPeer
		mainSyncPeer string
		testType     int
		wantTasks    []*fetchBlocksWork
		wantErr      error
	}{
		// normal test
		{
			peers: []*syncPeer{
				{peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 1000, irreversibleHeight: 1000},
				{peer: &P2PPeer{id: "peer2", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 800, irreversibleHeight: 800},
			},
			mainSyncPeer: "peer1",
			testType:     1,
			wantTasks: []*fetchBlocksWork{
				{&chainX[1000].BlockHeader, &chainX[1100].BlockHeader}, {&chainX[1100].BlockHeader, &chainX[1200].BlockHeader},
				{&chainX[1200].BlockHeader, &chainX[1300].BlockHeader}, {&chainX[1300].BlockHeader, &chainX[1400].BlockHeader},
				{&chainX[1400].BlockHeader, &chainX[1500].BlockHeader}, {&chainX[1500].BlockHeader, &chainX[1600].BlockHeader},
				{&chainX[1600].BlockHeader, &chainX[1700].BlockHeader}, {&chainX[1700].BlockHeader, &chainX[1800].BlockHeader},
			},
			wantErr: nil,
		},
		// test no sync peer
		{
			peers:        []*syncPeer{},
			mainSyncPeer: "peer1",
			testType:     0,
			wantTasks:    nil,
			wantErr:      errNoSyncPeer,
		},
		// primary sync peer skeleton size error
		{
			peers: []*syncPeer{
				{peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 1000, irreversibleHeight: 1000},
				{peer: &P2PPeer{id: "peer2", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 800, irreversibleHeight: 800},
			},
			mainSyncPeer: "peer1",
			testType:     2,
			wantTasks:    nil,
			wantErr:      errSkeletonSize,
		},
		// no skeleton return
		{
			peers: []*syncPeer{
				{peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 1000, irreversibleHeight: 1000},
				{peer: &P2PPeer{id: "peer2", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 800, irreversibleHeight: 800},
			},
			mainSyncPeer: "peer1",
			testType:     3,
			wantTasks:    nil,
			wantErr:      errNoSkeletonFound,
		},
		// no main skeleton found
		{
			peers: []*syncPeer{
				{peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 1000, irreversibleHeight: 1000},
				{peer: &P2PPeer{id: "peer2", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 800, irreversibleHeight: 800},
			},
			mainSyncPeer: "peer1",
			testType:     4,
			wantTasks:    nil,
			wantErr:      errNoMainSkeleton,
		},
		// skeleton length mismatch
		{
			peers: []*syncPeer{
				{peer: &P2PPeer{id: "peer1", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 1000, irreversibleHeight: 1000},
				{peer: &P2PPeer{id: "peer2", flag: consensus.SFFullNode | consensus.SFFastSync}, bestHeight: 800, irreversibleHeight: 800},
			},
			mainSyncPeer: "peer1",
			testType:     5,
			wantTasks: []*fetchBlocksWork{
				{&chainX[1000].BlockHeader, &chainX[1100].BlockHeader}, {&chainX[1100].BlockHeader, &chainX[1200].BlockHeader},
				{&chainX[1200].BlockHeader, &chainX[1300].BlockHeader}, {&chainX[1300].BlockHeader, &chainX[1400].BlockHeader},
				{&chainX[1400].BlockHeader, &chainX[1500].BlockHeader}, {&chainX[1500].BlockHeader, &chainX[1600].BlockHeader},
				{&chainX[1600].BlockHeader, &chainX[1700].BlockHeader}, {&chainX[1700].BlockHeader, &chainX[1800].BlockHeader},
			},
			wantErr: nil,
		},
	}

	for i, c := range cases {
		// NOTE: this local variable shadows the imported peers package for the
		// rest of the loop body.
		peers := peers.NewPeerSet(NewPeerSet())
		for _, syncPeer := range c.peers {
			peers.AddPeer(syncPeer.peer)
			peers.SetStatus(syncPeer.peer.id, syncPeer.bestHeight, nil)
			peers.SetIrreversibleStatus(syncPeer.peer.id, syncPeer.irreversibleHeight, nil)
		}
		mockChain := mock.NewChain()
		fs := newFastSync(mockChain, &mockFetcher{baseChain: baseChain, peerStatus: peerStatus, testType: c.testType}, nil, peers)
		fs.mainSyncPeer = fs.peers.GetPeer(c.mainSyncPeer)
		// Tasks are created starting from a block on the common prefix.
		tasks, err := fs.createFetchBlocksTasks(baseChain[700])
		if err != c.wantErr {
			t.Errorf("case %d: got %v want %v", i, err, c.wantErr)
		}
		if !reflect.DeepEqual(tasks, c.wantTasks) {
			t.Errorf("case %d: got %v want %v", i, tasks, c.wantTasks)
		}
	}
}
diff --git a/netsync/chainmgr/handle.go b/netsync/chainmgr/handle.go
new file mode 100644 (file)
index 0000000..3cd4e97
--- /dev/null
@@ -0,0 +1,407 @@
+package chainmgr
+
+import (
+       "errors"
+       "reflect"
+       "time"
+
+       log "github.com/sirupsen/logrus"
+
+       cfg "github.com/bytom/bytom/config"
+       "github.com/bytom/bytom/consensus"
+       dbm "github.com/bytom/bytom/database/leveldb"
+       "github.com/bytom/bytom/event"
+       msgs "github.com/bytom/bytom/netsync/messages"
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/p2p"
+       "github.com/bytom/bytom/p2p/security"
+       core "github.com/bytom/bytom/protocol"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
const (
	// logModule tags every log entry emitted by the netsync chain manager.
	logModule = "netsync"
)
+
// Chain is the consumer-side interface the sync manager needs from the Bytom
// core blockchain: header/block lookup, tx validation and block processing.
type Chain interface {
	BestBlockHeader() *types.BlockHeader
	LastIrreversibleHeader() *types.BlockHeader
	BestBlockHeight() uint64
	GetBlockByHash(*bc.Hash) (*types.Block, error)
	GetBlockByHeight(uint64) (*types.Block, error)
	GetHeaderByHash(*bc.Hash) (*types.BlockHeader, error)
	GetHeaderByHeight(uint64) (*types.BlockHeader, error)
	GetTransactionStatus(*bc.Hash) (*bc.TransactionStatus, error)
	InMainChain(bc.Hash) bool
	ProcessBlock(*types.Block) (bool, error)
	ValidateTx(*types.Tx) (bool, error)
}
+
// Switch is the subset of the p2p network switch used by the sync manager:
// reactor registration, lifecycle control and peer access.
type Switch interface {
	AddReactor(name string, reactor p2p.Reactor) p2p.Reactor
	Start() (bool, error)
	Stop() bool
	IsListening() bool
	DialPeerWithAddress(addr *p2p.NetAddress) error
	Peers() *p2p.PeerSet
}
+
// Mempool is the interface for the Bytom mempool: listing pending
// transactions and filtering out dust.
type Mempool interface {
	GetTransactions() []*core.TxDesc
	IsDust(tx *types.Tx) bool
}
+
// Manager is responsible for the business-layer information synchronization:
// it routes p2p blockchain messages to the block keeper and relays
// transactions between the mempool and remote peers.
type Manager struct {
	sw          Switch
	chain       Chain
	mempool     Mempool
	blockKeeper *blockKeeper // drives regular and fast block sync
	peers       *peers.PeerSet

	txSyncCh chan *txSyncMsg // feeds the mempool-sync loop
	quit     chan struct{}   // closed by Stop to end the broadcast loops
	config   *cfg.Config

	eventDispatcher *event.Dispatcher
	txMsgSub        *event.Subscription // set by Start (core.TxMsgEvent)
}
+
// NewManager creates a chain sync manager and, unless the node runs in vault
// (offline) mode, registers the PROTOCOL reactor on the switch so incoming
// blockchain messages reach this manager. The returned error is currently
// always nil; the signature keeps room for future failure modes.
func NewManager(config *cfg.Config, sw Switch, chain Chain, mempool Mempool, dispatcher *event.Dispatcher, peers *peers.PeerSet, fastSyncDB dbm.DB) (*Manager, error) {
	manager := &Manager{
		sw:              sw,
		mempool:         mempool,
		chain:           chain,
		blockKeeper:     newBlockKeeper(chain, peers, fastSyncDB),
		peers:           peers,
		txSyncCh:        make(chan *txSyncMsg),
		quit:            make(chan struct{}),
		config:          config,
		eventDispatcher: dispatcher,
	}

	if !config.VaultMode {
		protocolReactor := NewProtocolReactor(manager)
		manager.sw.AddReactor("PROTOCOL", protocolReactor)
	}
	return manager, nil
}
+
// AddPeer registers a network-layer peer with the logic-layer peer set.
func (m *Manager) AddPeer(peer peers.BasePeer) {
	m.peers.AddPeer(peer)
}
+
+//IsCaughtUp check wheather the peer finish the sync
+func (m *Manager) IsCaughtUp() bool {
+       peer := m.peers.BestPeer(consensus.SFFullNode)
+       return peer == nil || peer.Height() <= m.chain.BestBlockHeight()
+}
+
+func (m *Manager) handleBlockMsg(peer *peers.Peer, msg *msgs.BlockMessage) {
+       block, err := msg.GetBlock()
+       if err != nil {
+               return
+       }
+
+       m.blockKeeper.processBlock(peer.ID(), block)
+}
+
// handleBlocksMsg decodes a batch-of-blocks message and hands the blocks to
// the block keeper; a decode failure is logged and the message dropped.
func (m *Manager) handleBlocksMsg(peer *peers.Peer, msg *msgs.BlocksMessage) {
	blocks, err := msg.GetBlocks()
	if err != nil {
		log.WithFields(log.Fields{"module": logModule, "err": err}).Debug("fail on handleBlocksMsg GetBlocks")
		return
	}

	m.blockKeeper.processBlocks(peer.ID(), blocks)
}
+
// handleFilterAddMsg adds one address to the peer's SPV bloom-style filter.
func (m *Manager) handleFilterAddMsg(peer *peers.Peer, msg *msgs.FilterAddMessage) {
	peer.AddFilterAddress(msg.Address)
}

// handleFilterClearMsg removes every address from the peer's filter.
func (m *Manager) handleFilterClearMsg(peer *peers.Peer) {
	peer.FilterClear()
}

// handleFilterLoadMsg replaces/extends the peer's filter with a batch of addresses.
func (m *Manager) handleFilterLoadMsg(peer *peers.Peer, msg *msgs.FilterLoadMessage) {
	peer.AddFilterAddresses(msg.Addresses)
}
+
+func (m *Manager) handleGetBlockMsg(peer *peers.Peer, msg *msgs.GetBlockMessage) {
+       var block *types.Block
+       var err error
+       if msg.Height != 0 {
+               block, err = m.chain.GetBlockByHeight(msg.Height)
+       } else {
+               block, err = m.chain.GetBlockByHash(msg.GetHash())
+       }
+       if err != nil {
+               log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on handleGetBlockMsg get block from chain")
+               return
+       }
+
+       ok, err := peer.SendBlock(block)
+       if !ok {
+               m.peers.RemovePeer(peer.ID())
+       }
+       if err != nil {
+               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetBlockMsg sentBlock")
+       }
+}
+
// handleGetBlocksMsg serves a run of blocks selected by the requester's block
// locator and stop hash. The response is bounded twice: locating may spend at
// most a tenth of the requester-side blocks timeout, and the serialized
// payload is capped at half the maximum blockchain response size.
func (m *Manager) handleGetBlocksMsg(peer *peers.Peer, msg *msgs.GetBlocksMessage) {
	// Budget 1/10 of requireBlocksTimeout for the lookup so the reply can
	// still reach the peer before it gives up waiting.
	endTime := time.Now().Add(requireBlocksTimeout / 10)
	isTimeout := func() bool {
		return time.Now().After(endTime)
	}

	blocks, err := m.blockKeeper.locateBlocks(msg.GetBlockLocator(), msg.GetStopHash(), isTimeout)
	if err != nil || len(blocks) == 0 {
		log.WithFields(log.Fields{
			"module": logModule,
			"err":    err,
			"size":   len(blocks),
		}).Error("fail on handleGetBlocksMsg locateBlocks")
		return
	}

	// Trim the batch so the serialized message stays under half the maximum
	// response size; the requester will ask again for the remainder.
	totalSize := 0
	sendBlocks := []*types.Block{}
	for _, block := range blocks {
		rawData, err := block.MarshalText()
		if err != nil {
			log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetBlocksMsg marshal block")
			return
		}

		if totalSize+len(rawData) > msgs.MaxBlockchainResponseSize/2 {
			break
		}
		totalSize += len(rawData)
		sendBlocks = append(sendBlocks, block)
	}

	ok, err := peer.SendBlocks(sendBlocks)
	if !ok {
		m.peers.RemovePeer(peer.ID())
	}
	if err != nil {
		log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetBlocksMsg sentBlock")
	}
}
+
// handleGetHeadersMsg serves up to maxNumOfHeadersPerMsg block headers located
// by the requester's locator/stop-hash/skip parameters. A failed send removes
// the peer.
func (m *Manager) handleGetHeadersMsg(peer *peers.Peer, msg *msgs.GetHeadersMessage) {
	headers, err := m.blockKeeper.locateHeaders(msg.GetBlockLocator(), msg.GetStopHash(), msg.GetSkip(), maxNumOfHeadersPerMsg)
	if err != nil || len(headers) == 0 {
		log.WithFields(log.Fields{"module": logModule, "err": err}).Debug("fail on handleGetHeadersMsg locateHeaders")
		return
	}

	ok, err := peer.SendHeaders(headers)
	if !ok {
		m.peers.RemovePeer(peer.ID())
	}
	if err != nil {
		log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetHeadersMsg sentBlock")
	}
}
+
// handleGetMerkleBlockMsg serves a merkle (SPV) block, looked up by height
// when msg.Height is non-zero and by hash otherwise, together with the
// transaction-status record keyed by the block hash.
func (m *Manager) handleGetMerkleBlockMsg(peer *peers.Peer, msg *msgs.GetMerkleBlockMessage) {
	var err error
	var block *types.Block
	if msg.Height != 0 {
		block, err = m.chain.GetBlockByHeight(msg.Height)
	} else {
		block, err = m.chain.GetBlockByHash(msg.GetHash())
	}
	if err != nil {
		log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on handleGetMerkleBlockMsg get block from chain")
		return
	}

	// Transaction statuses are stored per block, keyed by the block hash.
	blockHash := block.Hash()
	txStatus, err := m.chain.GetTransactionStatus(&blockHash)
	if err != nil {
		log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on handleGetMerkleBlockMsg get transaction status")
		return
	}

	ok, err := peer.SendMerkleBlock(block, txStatus)
	if err != nil {
		log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetMerkleBlockMsg sentMerkleBlock")
		return
	}

	if !ok {
		m.peers.RemovePeer(peer.ID())
	}
}
+
// handleHeadersMsg decodes a headers message and hands the headers to the
// block keeper; a decode failure is logged and the message dropped.
func (m *Manager) handleHeadersMsg(peer *peers.Peer, msg *msgs.HeadersMessage) {
	headers, err := msg.GetHeaders()
	if err != nil {
		log.WithFields(log.Fields{"module": logModule, "err": err}).Debug("fail on handleHeadersMsg GetHeaders")
		return
	}

	m.blockKeeper.processHeaders(peer.ID(), headers)
}
+
// handleStatusMsg records the remote peer's advertised best and irreversible
// chain status; unknown peers are ignored.
func (m *Manager) handleStatusMsg(basePeer peers.BasePeer, msg *msgs.StatusMessage) {
	if peer := m.peers.GetPeer(basePeer.ID()); peer != nil {
		peer.SetBestStatus(msg.BestHeight, msg.GetBestHash())
		peer.SetIrreversibleStatus(msg.IrreversibleHeight, msg.GetIrreversibleHash())
	}
}
+
// handleTransactionMsg validates a single relayed transaction. An undecodable
// message is treated as a connection-level offense; a dust tx is logged and
// dropped; a validation failure (other than dust or orphan) is treated as a
// message-level offense.
func (m *Manager) handleTransactionMsg(peer *peers.Peer, msg *msgs.TransactionMessage) {
	tx, err := msg.GetTransaction()
	if err != nil {
		m.peers.ProcessIllegal(peer.ID(), security.LevelConnException, "fail on get tx from message")
		return
	}

	if m.mempool.IsDust(tx) {
		log.WithFields(log.Fields{"tx_hash": tx.ID.String(), "peer": peer.Addr()}).Warn("receive dust tx msg")
		return
	}

	// Mark before validating so the tx is not relayed back to this peer.
	// Orphan txs and dust-classified txs are not punished.
	m.peers.MarkTx(peer.ID(), tx.ID)
	if isOrphan, err := m.chain.ValidateTx(tx); err != nil && err != core.ErrDustTx && !isOrphan {
		m.peers.ProcessIllegal(peer.ID(), security.LevelMsgIllegal, "fail on validate tx transaction")
	}
}
+
// handleTransactionsMsg validates a batch of relayed transactions, punishing
// the peer for undecodable messages, oversized batches, dust entries and
// non-orphan validation failures (which also abort the batch).
func (m *Manager) handleTransactionsMsg(peer *peers.Peer, msg *msgs.TransactionsMessage) {
	txs, err := msg.GetTransactions()
	if err != nil {
		m.peers.ProcessIllegal(peer.ID(), security.LevelConnException, "fail on get txs from message")
		return
	}

	if len(txs) > msgs.TxsMsgMaxTxNum {
		m.peers.ProcessIllegal(peer.ID(), security.LevelMsgIllegal, "exceeded the maximum tx number limit")
		return
	}

	for _, tx := range txs {
		// NOTE: unlike the single-tx handler, dust here is punished rather
		// than merely logged — batches are expected to be pre-filtered.
		if m.mempool.IsDust(tx) {
			m.peers.ProcessIllegal(peer.ID(), security.LevelMsgIllegal, "receive dust txs msg")
			continue
		}

		m.peers.MarkTx(peer.ID(), tx.ID)
		if isOrphan, err := m.chain.ValidateTx(tx); err != nil && !isOrphan {
			m.peers.ProcessIllegal(peer.ID(), security.LevelMsgIllegal, "fail on validate tx transaction")
			return
		}
	}
}
+
// processMsg is the single entry point for blockchain messages arriving from
// the p2p layer: it resolves the logic-layer peer and dispatches on the
// concrete message type. Messages from unknown peers are dropped.
func (m *Manager) processMsg(basePeer peers.BasePeer, msgType byte, msg msgs.BlockchainMessage) {
	peer := m.peers.GetPeer(basePeer.ID())
	if peer == nil {
		return
	}

	log.WithFields(log.Fields{
		"module":  logModule,
		"peer":    basePeer.Addr(),
		"type":    reflect.TypeOf(msg),
		"message": msg.String(),
	}).Debug("receive message from peer")

	switch msg := msg.(type) {
	case *msgs.GetBlockMessage:
		m.handleGetBlockMsg(peer, msg)

	case *msgs.BlockMessage:
		m.handleBlockMsg(peer, msg)

	case *msgs.StatusMessage:
		// Status needs the base peer: the handler re-resolves it itself.
		m.handleStatusMsg(basePeer, msg)

	case *msgs.TransactionMessage:
		m.handleTransactionMsg(peer, msg)

	case *msgs.TransactionsMessage:
		m.handleTransactionsMsg(peer, msg)

	case *msgs.GetHeadersMessage:
		m.handleGetHeadersMsg(peer, msg)

	case *msgs.HeadersMessage:
		m.handleHeadersMsg(peer, msg)

	case *msgs.GetBlocksMessage:
		m.handleGetBlocksMsg(peer, msg)

	case *msgs.BlocksMessage:
		m.handleBlocksMsg(peer, msg)

	case *msgs.FilterLoadMessage:
		m.handleFilterLoadMsg(peer, msg)

	case *msgs.FilterAddMessage:
		m.handleFilterAddMsg(peer, msg)

	case *msgs.FilterClearMessage:
		m.handleFilterClearMsg(peer)

	case *msgs.GetMerkleBlockMessage:
		m.handleGetMerkleBlockMsg(peer, msg)

	default:
		log.WithFields(log.Fields{
			"module":       logModule,
			"peer":         basePeer.Addr(),
			"message_type": reflect.TypeOf(msg),
		}).Error("unhandled message type")
	}
}
+
// RemovePeer removes a peer from the logic-layer peer set.
func (m *Manager) RemovePeer(peerID string) {
	m.peers.RemovePeer(peerID)
}
+
+// SendStatus sent the current self status to remote peer
+func (m *Manager) SendStatus(peer peers.BasePeer) error {
+       p := m.peers.GetPeer(peer.ID())
+       if p == nil {
+               return errors.New("invalid peer")
+       }
+
+       if err := p.SendStatus(m.chain.BestBlockHeader(), m.chain.LastIrreversibleHeader()); err != nil {
+               m.peers.RemovePeer(p.ID())
+               return err
+       }
+       return nil
+}
+
// Start subscribes to mempool tx events, starts the block keeper and launches
// the tx broadcast / mempool sync loops.
func (m *Manager) Start() error {
	var err error
	m.txMsgSub, err = m.eventDispatcher.Subscribe(core.TxMsgEvent{})
	if err != nil {
		return err
	}
	m.blockKeeper.start()
	go m.broadcastTxsLoop()
	go m.syncMempoolLoop()

	return nil
}

// Stop stops the block keeper and signals the broadcast/sync loops to exit by
// closing quit. NOTE(review): txMsgSub is not explicitly unsubscribed here —
// presumably the loops handle that; verify against broadcastTxsLoop.
func (m *Manager) Stop() {
	m.blockKeeper.stop()
	close(m.quit)
}
diff --git a/netsync/chainmgr/msg_fetcher.go b/netsync/chainmgr/msg_fetcher.go
new file mode 100644 (file)
index 0000000..7370961
--- /dev/null
@@ -0,0 +1,359 @@
+package chainmgr
+
+import (
+       "sync"
+       "time"
+
+       log "github.com/sirupsen/logrus"
+
+       "github.com/bytom/bytom/errors"
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/p2p/security"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
const (
	maxNumOfParallelFetchBlocks = 7    // concurrent block-download workers
	blockProcessChSize          = 1024 // buffer of single-block delivery channel
	blocksProcessChSize         = 128  // buffer of block-batch delivery channel
	headersProcessChSize        = 1024 // buffer of headers delivery channel
	maxNumOfFastSyncPeers       = 128  // cap on peers fed into the download pipeline
)

var (
	// Timeouts for the three request kinds and the sync-peer health check;
	// vars (not consts) so tests can shorten them.
	requireBlockTimeout      = 20 * time.Second
	requireHeadersTimeout    = 30 * time.Second
	requireBlocksTimeout     = 90 * time.Second
	checkSyncPeerNumInterval = 5 * time.Second

	errRequestBlocksTimeout = errors.New("request blocks timeout")
	errRequestTimeout       = errors.New("request timeout")
	errPeerDropped          = errors.New("Peer dropped")
	errSendMsg              = errors.New("send message error")
)
+
// MsgFetcher is the interface the fast-sync logic uses to download blocks and
// headers from peers.
type MsgFetcher interface {
	resetParameter()
	addSyncPeer(peerID string)
	requireBlock(peerID string, height uint64) (*types.Block, error)
	parallelFetchBlocks(work []*fetchBlocksWork, downloadNotifyCh chan struct{}, ProcessStopCh chan struct{}, wg *sync.WaitGroup)
	parallelFetchHeaders(peers []*peers.Peer, locator []*bc.Hash, stopHash *bc.Hash, skip uint64) map[string][]*types.BlockHeader
}

// fetchBlocksWork describes one download task: the half-open header range
// [startHeader, stopHeader] to fetch.
type fetchBlocksWork struct {
	startHeader, stopHeader *types.BlockHeader
}

// fetchBlocksResult reports the outcome of one completed fetchBlocksWork.
type fetchBlocksResult struct {
	startHeight, stopHeight uint64
	err                     error
}
+
// msgFetcher implements MsgFetcher: it fans block/header responses arriving
// from the p2p layer out to the goroutines that requested them.
type msgFetcher struct {
	storage          *storage       // on-disk staging for downloaded blocks
	syncPeers        *fastSyncPeers // idle/busy bookkeeping of fast-sync peers
	peers            *peers.PeerSet
	blockProcessCh   chan *blockMsg
	blocksProcessCh  chan *blocksMsg
	headersProcessCh chan *headersMsg
	// blocksMsgChanMap routes a peer's blocks response to its pending
	// requireBlocks call; guarded by mux.
	blocksMsgChanMap map[string]chan []*types.Block
	mux              sync.RWMutex
}

// newMsgFetcher returns a msgFetcher wired to the given storage and peer set.
func newMsgFetcher(storage *storage, peers *peers.PeerSet) *msgFetcher {
	return &msgFetcher{
		storage:          storage,
		syncPeers:        newFastSyncPeers(),
		peers:            peers,
		blockProcessCh:   make(chan *blockMsg, blockProcessChSize),
		blocksProcessCh:  make(chan *blocksMsg, blocksProcessChSize),
		headersProcessCh: make(chan *headersMsg, headersProcessChSize),
		blocksMsgChanMap: make(map[string]chan []*types.Block),
	}
}
+
// addSyncPeer registers a peer as available (idle) for fast sync.
func (mf *msgFetcher) addSyncPeer(peerID string) {
	mf.syncPeers.add(peerID)
}
+
+func (mf *msgFetcher) collectResultLoop(peerCh chan string, quit chan struct{}, resultCh chan *fetchBlocksResult, workerCloseCh chan struct{}, workSize int) {
+       defer close(workerCloseCh)
+       ticker := time.NewTicker(checkSyncPeerNumInterval)
+       defer ticker.Stop()
+
+       //collect fetch results
+       for resultCount := 0; resultCount < workSize && mf.syncPeers.size() > 0; {
+               select {
+               case result := <-resultCh:
+                       resultCount++
+                       if result.err != nil {
+                               log.WithFields(log.Fields{"module": logModule, "startHeight": result.startHeight, "stopHeight": result.stopHeight, "err": result.err}).Error("failed on fetch blocks")
+                               return
+                       }
+
+                       peer, err := mf.syncPeers.selectIdlePeer()
+                       if err != nil {
+                               log.WithFields(log.Fields{"module": logModule, "err": result.err}).Warn("failed on find fast sync peer")
+                               break
+                       }
+                       peerCh <- peer
+               case <-ticker.C:
+                       if mf.syncPeers.size() == 0 {
+                               log.WithFields(log.Fields{"module": logModule}).Warn("num of fast sync peer is 0")
+                               return
+                       }
+               case _, ok := <-quit:
+                       if !ok {
+                               return
+                       }
+               }
+       }
+}
+
// fetchBlocks downloads and verifies the block range described by work from
// one peer, always returning that peer to the idle pool. A failed request or
// a verification failure drops the peer from the fast-sync set and reports a
// connection-level offense.
func (mf *msgFetcher) fetchBlocks(work *fetchBlocksWork, peerID string) ([]*types.Block, error) {
	defer mf.syncPeers.setIdle(peerID)
	startHash := work.startHeader.Hash()
	stopHash := work.stopHeader.Hash()
	blocks, err := mf.requireBlocks(peerID, []*bc.Hash{&startHash}, &stopHash)
	if err != nil {
		mf.syncPeers.delete(peerID)
		mf.peers.ProcessIllegal(peerID, security.LevelConnException, err.Error())
		return nil, err
	}

	if err := mf.verifyBlocksMsg(blocks, work.startHeader, work.stopHeader); err != nil {
		mf.syncPeers.delete(peerID)
		mf.peers.ProcessIllegal(peerID, security.LevelConnException, err.Error())
		return nil, err
	}

	return blocks, nil
}
+
// fetchBlocksProcess drives one fetchBlocksWork to completion: it waits for
// an idle peer on peerCh, downloads blocks in chunks, persists them to
// storage and signals the downstream processor. A peer failure just waits for
// the next peer; a storage failure aborts; closeCh aborts cooperatively.
func (mf *msgFetcher) fetchBlocksProcess(work *fetchBlocksWork, peerCh chan string, downloadNotifyCh chan struct{}, closeCh chan struct{}) error {
	for {
		select {
		case peerID := <-peerCh:
			for {
				blocks, err := mf.fetchBlocks(work, peerID)
				if err != nil {
					log.WithFields(log.Fields{"module": logModule, "startHeight": work.startHeader.Height, "stopHeight": work.stopHeader.Height, "error": err}).Info("failed on fetch blocks")
					// Try again with the next idle peer from peerCh.
					break
				}

				if err := mf.storage.writeBlocks(peerID, blocks); err != nil {
					log.WithFields(log.Fields{"module": logModule, "error": err}).Info("write block error")
					return err
				}

				// Non-blocking nudge to the block process pool: a pending
				// notification already covers this write.
				select {
				case downloadNotifyCh <- struct{}{}:
				default:
				}

				// work completed: the next task's range starts at stopHeader,
				// so reaching stopHeight-1 finishes this chunk.
				if blocks[len(blocks)-1].Height >= work.stopHeader.Height-1 {
					return nil
				}

				// Unfinished work: resume from the last block received.
				work.startHeader = &blocks[len(blocks)-1].BlockHeader
			}
		case <-closeCh:
			return nil
		}
	}
}
+
// fetchBlocksWorker pulls tasks off workCh, runs each to completion via
// fetchBlocksProcess and reports the outcome on resultCh, exiting when
// closeCh is closed.
func (mf *msgFetcher) fetchBlocksWorker(workCh chan *fetchBlocksWork, peerCh chan string, resultCh chan *fetchBlocksResult, closeCh chan struct{}, downloadNotifyCh chan struct{}, wg *sync.WaitGroup) {
	for {
		select {
		case work := <-workCh:
			err := mf.fetchBlocksProcess(work, peerCh, downloadNotifyCh, closeCh)
			resultCh <- &fetchBlocksResult{startHeight: work.startHeader.Height, stopHeight: work.stopHeader.Height, err: err}
		case <-closeCh:
			wg.Done()
			return
		}
	}
}
+
// parallelFetchBlocks downloads all the given block ranges concurrently:
// it seeds a pool of workers with tasks and idle peers, lets
// collectResultLoop recycle peers as tasks finish, and blocks until every
// worker has exited. Closing downloadNotifyCh tells the downstream block
// processor that no more downloads will arrive; wg is the caller's barrier.
func (mf *msgFetcher) parallelFetchBlocks(works []*fetchBlocksWork, downloadNotifyCh chan struct{}, ProcessStopCh chan struct{}, wg *sync.WaitGroup) {
	workSize := len(works)
	// Buffered to full size so seeding below never blocks.
	workCh := make(chan *fetchBlocksWork, workSize)
	peerCh := make(chan string, maxNumOfFastSyncPeers)
	resultCh := make(chan *fetchBlocksResult, workSize)
	closeCh := make(chan struct{})

	for _, work := range works {
		workCh <- work
	}
	syncPeers := mf.syncPeers.selectIdlePeers()
	for i := 0; i < len(syncPeers) && i < maxNumOfFastSyncPeers; i++ {
		peerCh <- syncPeers[i]
	}

	var workWg sync.WaitGroup
	for i := 0; i <= maxNumOfParallelFetchBlocks && i < workSize; i++ {
		workWg.Add(1)
		go mf.fetchBlocksWorker(workCh, peerCh, resultCh, closeCh, downloadNotifyCh, &workWg)
	}

	go mf.collectResultLoop(peerCh, ProcessStopCh, resultCh, closeCh, workSize)

	// collectResultLoop closes closeCh when done, which releases the workers;
	// only after they have all exited is it safe to close the channels.
	workWg.Wait()
	close(resultCh)
	close(peerCh)
	close(workCh)
	close(downloadNotifyCh)
	wg.Done()
}
+
+func (mf *msgFetcher) parallelFetchHeaders(peers []*peers.Peer, locator []*bc.Hash, stopHash *bc.Hash, skip uint64) map[string][]*types.BlockHeader {
+       result := make(map[string][]*types.BlockHeader)
+       response := make(map[string]bool)
+       for _, peer := range peers {
+               if ok := peer.GetHeaders(locator, stopHash, skip); !ok {
+                       continue
+               }
+               result[peer.ID()] = nil
+       }
+
+       timeout := time.NewTimer(requireHeadersTimeout)
+       defer timeout.Stop()
+       for {
+               select {
+               case msg := <-mf.headersProcessCh:
+                       if _, ok := result[msg.peerID]; ok {
+                               result[msg.peerID] = append(result[msg.peerID], msg.headers[:]...)
+                               response[msg.peerID] = true
+                               if len(response) == len(result) {
+                                       return result
+                               }
+                       }
+               case <-timeout.C:
+                       log.WithFields(log.Fields{"module": logModule, "err": errRequestTimeout}).Warn("failed on parallel fetch headers")
+                       return result
+               }
+       }
+}
+
// processBlock delivers a single-block response to the pending requireBlock call.
func (mf *msgFetcher) processBlock(peerID string, block *types.Block) {
	mf.blockProcessCh <- &blockMsg{block: block, peerID: peerID}
}

// processBlocks delivers a blocks response: it is always forwarded to the
// general blocksProcessCh, and additionally routed to the requireBlocks call
// registered for this peer. A response with no pending request is treated as
// a message-level offense.
func (mf *msgFetcher) processBlocks(peerID string, blocks []*types.Block) {
	mf.blocksProcessCh <- &blocksMsg{blocks: blocks, peerID: peerID}
	mf.mux.RLock()
	blocksMsgChan, ok := mf.blocksMsgChanMap[peerID]
	mf.mux.RUnlock()
	if !ok {
		mf.peers.ProcessIllegal(peerID, security.LevelMsgIllegal, "msg from unsolicited peer")
		return
	}

	blocksMsgChan <- blocks
}

// processHeaders delivers a headers response to parallelFetchHeaders.
func (mf *msgFetcher) processHeaders(peerID string, headers []*types.BlockHeader) {
	mf.headersProcessCh <- &headersMsg{headers: headers, peerID: peerID}
}
+
// requireBlock synchronously requests the block at the given height from one
// peer and waits up to requireBlockTimeout for the matching response.
// Responses from other peers or at other heights are silently discarded.
func (mf *msgFetcher) requireBlock(peerID string, height uint64) (*types.Block, error) {
	peer := mf.peers.GetPeer(peerID)
	if peer == nil {
		return nil, errPeerDropped
	}

	if ok := peer.GetBlockByHeight(height); !ok {
		return nil, errSendMsg
	}

	timeout := time.NewTimer(requireBlockTimeout)
	defer timeout.Stop()

	for {
		select {
		case msg := <-mf.blockProcessCh:
			// Drain until the response matches both requester and height.
			if msg.peerID != peerID {
				continue
			}
			if msg.block.Height != height {
				continue
			}
			return msg.block, nil
		case <-timeout.C:
			return nil, errors.Wrap(errRequestTimeout, "requireBlock")
		}
	}
}
+
+func (mf *msgFetcher) requireBlocks(peerID string, locator []*bc.Hash, stopHash *bc.Hash) ([]*types.Block, error) {
+       peer := mf.peers.GetPeer(peerID)
+       if peer == nil {
+               mf.syncPeers.delete(peerID)
+               return nil, errPeerDropped
+       }
+
+       receiveCh := make(chan []*types.Block, 1)
+       mf.mux.Lock()
+       mf.blocksMsgChanMap[peerID] = receiveCh
+       mf.mux.Unlock()
+
+       if ok := peer.GetBlocks(locator, stopHash); !ok {
+               return nil, errSendMsg
+       }
+
+       timeout := time.NewTimer(requireBlocksTimeout)
+       defer timeout.Stop()
+       select {
+       case blocks := <-receiveCh:
+               return blocks, nil
+       case <-timeout.C:
+               return nil, errRequestBlocksTimeout
+       }
+}
+
// resetParameter returns the fetcher to a clean state between sync rounds:
// fresh routing map and peer set, reset storage, and drained delivery
// channels so stale responses cannot leak into the next round.
func (mf *msgFetcher) resetParameter() {
	mf.blocksMsgChanMap = make(map[string]chan []*types.Block)
	mf.syncPeers = newFastSyncPeers()
	mf.storage.resetParameter()
	//empty chan
	for {
		select {
		case <-mf.blocksProcessCh:
		case <-mf.headersProcessCh:
		default:
			return
		}
	}
}
+
+func (mf *msgFetcher) verifyBlocksMsg(blocks []*types.Block, startHeader, stopHeader *types.BlockHeader) error {
+       // null blocks
+       if len(blocks) == 0 {
+               return errors.New("null blocks msg")
+       }
+
+       // blocks more than request
+       if uint64(len(blocks)) > stopHeader.Height-startHeader.Height+1 {
+               return errors.New("exceed length blocks msg")
+       }
+
+       // verify start block
+       if blocks[0].Hash() != startHeader.Hash() {
+               return errors.New("get mismatch blocks msg")
+       }
+
+       // verify blocks continuity
+       for i := 0; i < len(blocks)-1; i++ {
+               if blocks[i].Hash() != blocks[i+1].PreviousBlockHash {
+                       return errors.New("get discontinuous blocks msg")
+               }
+       }
+
+       return nil
+}
diff --git a/netsync/chainmgr/peers.go b/netsync/chainmgr/peers.go
new file mode 100644 (file)
index 0000000..4dea4fe
--- /dev/null
@@ -0,0 +1,88 @@
+package chainmgr
+
+import (
+       "errors"
+       "sync"
+)
+
+var errNoValidFastSyncPeer = errors.New("no valid fast sync peer")
+
+// fastSyncPeers is a concurrency-safe registry of peers usable for fast
+// sync. The map value records whether a peer is busy: true while it is
+// checked out serving a request, false when it is idle.
+type fastSyncPeers struct {
+	peers map[string]bool
+	mtx   sync.RWMutex
+}
+
+// newFastSyncPeers returns an empty peer registry.
+func newFastSyncPeers() *fastSyncPeers {
+	return &fastSyncPeers{
+		peers: make(map[string]bool),
+	}
+}
+
+// add registers peerID as idle. It is a no-op when the peer is already
+// known, preserving the peer's current busy state.
+func (fs *fastSyncPeers) add(peerID string) {
+	fs.mtx.Lock()
+	defer fs.mtx.Unlock()
+
+	if _, ok := fs.peers[peerID]; ok {
+		return
+	}
+
+	fs.peers[peerID] = false
+}
+
+// delete removes peerID from the registry; unknown IDs are ignored.
+func (fs *fastSyncPeers) delete(peerID string) {
+	fs.mtx.Lock()
+	defer fs.mtx.Unlock()
+
+	delete(fs.peers, peerID)
+}
+
+// selectIdlePeers returns every currently idle peer and marks each one
+// busy so it will not be handed out again until setIdle is called.
+func (fs *fastSyncPeers) selectIdlePeers() []string {
+	fs.mtx.Lock()
+	defer fs.mtx.Unlock()
+
+	peers := make([]string, 0)
+	for peerID, isBusy := range fs.peers {
+		if isBusy {
+			continue
+		}
+
+		fs.peers[peerID] = true
+		peers = append(peers, peerID)
+	}
+
+	return peers
+}
+
+// selectIdlePeer returns a single idle peer (marked busy), or
+// errNoValidFastSyncPeer when every known peer is busy. Go map
+// iteration order is random, so the choice is effectively arbitrary.
+func (fs *fastSyncPeers) selectIdlePeer() (string, error) {
+	fs.mtx.Lock()
+	defer fs.mtx.Unlock()
+
+	for peerID, isBusy := range fs.peers {
+		if isBusy {
+			continue
+		}
+
+		fs.peers[peerID] = true
+		return peerID, nil
+	}
+
+	return "", errNoValidFastSyncPeer
+}
+
+// setIdle marks a known peer as idle again; unknown IDs are ignored.
+func (fs *fastSyncPeers) setIdle(peerID string) {
+	fs.mtx.Lock()
+	defer fs.mtx.Unlock()
+
+	if _, ok := fs.peers[peerID]; !ok {
+		return
+	}
+
+	fs.peers[peerID] = false
+}
+
+// size reports the number of known peers, busy or idle.
+func (fs *fastSyncPeers) size() int {
+	fs.mtx.RLock()
+	defer fs.mtx.RUnlock()
+
+	return len(fs.peers)
+}
diff --git a/netsync/chainmgr/peers_test.go b/netsync/chainmgr/peers_test.go
new file mode 100644 (file)
index 0000000..cc159fa
--- /dev/null
@@ -0,0 +1,116 @@
+package chainmgr
+
+import (
+       "testing"
+)
+
+func TestAddDel(t *testing.T) {
+       syncPeers := newFastSyncPeers()
+       peers := make(map[string]bool)
+       peers["Peer1"] = true
+       peers["Peer2"] = true
+       for k := range peers {
+               syncPeers.add(k)
+               syncPeers.add(k)
+       }
+       if syncPeers.size() != len(peers) {
+               t.Errorf("add peer test err: got %d\nwant %d", syncPeers.size(), len(peers))
+       }
+
+       syncPeers.delete("Peer1")
+       if syncPeers.size() != 1 {
+               t.Errorf("add peer test err: got %d\nwant %d", syncPeers.size(), 1)
+       }
+
+       syncPeers.delete("Peer1")
+       if syncPeers.size() != 1 {
+               t.Errorf("add peer test err: got %d\nwant %d", syncPeers.size(), 1)
+       }
+}
+
+func TestIdlePeers(t *testing.T) {
+       syncPeers := newFastSyncPeers()
+       peers := make(map[string]bool)
+       peers["Peer1"] = true
+       peers["Peer2"] = true
+       for k := range peers {
+               syncPeers.add(k)
+               syncPeers.add(k)
+       }
+
+       idlePeers := syncPeers.selectIdlePeers()
+       if len(idlePeers) != len(peers) {
+               t.Errorf("selcet idle peers test err: got %d\nwant %d", len(idlePeers), len(peers))
+       }
+
+       for _, peer := range idlePeers {
+               if ok := peers[peer]; !ok {
+                       t.Errorf("selcet idle peers test err: want peers %v got %v", peers, idlePeers)
+               }
+       }
+
+       idlePeers = syncPeers.selectIdlePeers()
+       if len(idlePeers) != 0 {
+               t.Errorf("selcet idle peers test err: got %d\nwant %d", len(idlePeers), 0)
+       }
+
+}
+
+func TestIdlePeer(t *testing.T) {
+       syncPeers := newFastSyncPeers()
+       peers := make(map[string]bool)
+       peers["Peer1"] = true
+       peers["Peer2"] = true
+       for k := range peers {
+               syncPeers.add(k)
+               syncPeers.add(k)
+       }
+       idlePeer, err := syncPeers.selectIdlePeer()
+       if err != nil {
+               t.Errorf("selcet idle peers test err: got %v\nwant %v", err, nil)
+       }
+
+       if ok := peers[idlePeer]; !ok {
+               t.Error("selcet idle peers test err.")
+       }
+       idlePeer, err = syncPeers.selectIdlePeer()
+       if err != nil {
+               t.Errorf("selcet idle peers test err: got %v\nwant %v", err, nil)
+       }
+
+       if ok := peers[idlePeer]; !ok {
+               t.Error("selcet idle peers test err.")
+       }
+       idlePeer, err = syncPeers.selectIdlePeer()
+       if err != errNoValidFastSyncPeer {
+               t.Errorf("selcet idle peers test err: got %v\nwant %v", err, errNoValidFastSyncPeer)
+       }
+}
+
+func TestSetIdle(t *testing.T) {
+       syncPeers := newFastSyncPeers()
+       peers := make(map[string]bool)
+       peers["Peer2"] = true
+       for k := range peers {
+               syncPeers.add(k)
+       }
+       if syncPeers.size() != len(peers) {
+               t.Errorf("add peer test err: got %d\nwant %d", syncPeers.size(), len(peers))
+       }
+       idlePeers := syncPeers.selectIdlePeers()
+       if len(idlePeers) != len(peers) {
+               t.Errorf("selcet idle peers test err: got %d\nwant %d", len(idlePeers), len(peers))
+       }
+
+       syncPeers.setIdle("Peer1")
+       idlePeers = syncPeers.selectIdlePeers()
+       if len(idlePeers) != 0 {
+               t.Errorf("selcet idle peers test err: got %d\nwant %d", len(idlePeers), 0)
+       }
+
+       syncPeers.setIdle("Peer2")
+       idlePeers = syncPeers.selectIdlePeers()
+       if len(idlePeers) != len(peers) {
+               t.Errorf("selcet idle peers test err: got %d\nwant %d", len(idlePeers), len(peers))
+       }
+}
similarity index 57%
rename from netsync/protocol_reactor.go
rename to netsync/chainmgr/protocol_reactor.go
index 3e9768d..bd4d31b 100644 (file)
@@ -1,38 +1,28 @@
-package netsync
+package chainmgr
 
 import (
-       "time"
+       "bytes"
 
        log "github.com/sirupsen/logrus"
+       "github.com/tendermint/go-wire"
 
        "github.com/bytom/bytom/errors"
+       msgs "github.com/bytom/bytom/netsync/messages"
        "github.com/bytom/bytom/p2p"
        "github.com/bytom/bytom/p2p/connection"
 )
 
-const (
-       handshakeTimeout    = 10 * time.Second
-       handshakeCheckPerid = 500 * time.Millisecond
-)
-
-var (
-       errProtocolHandshakeTimeout = errors.New("Protocol handshake timeout")
-       errStatusRequest            = errors.New("Status request error")
-)
-
 //ProtocolReactor handles new coming protocol message.
 type ProtocolReactor struct {
        p2p.BaseReactor
 
-       sm    *SyncManager
-       peers *peerSet
+       manager *Manager
 }
 
 // NewProtocolReactor returns the reactor of whole blockchain.
-func NewProtocolReactor(sm *SyncManager, peers *peerSet) *ProtocolReactor {
+func NewProtocolReactor(manager *Manager) *ProtocolReactor {
        pr := &ProtocolReactor{
-               sm:    sm,
-               peers: peers,
+               manager: manager,
        }
        pr.BaseReactor = *p2p.NewBaseReactor("ProtocolReactor", pr)
        return pr
@@ -42,7 +32,7 @@ func NewProtocolReactor(sm *SyncManager, peers *peerSet) *ProtocolReactor {
 func (pr *ProtocolReactor) GetChannels() []*connection.ChannelDescriptor {
        return []*connection.ChannelDescriptor{
                {
-                       ID:                BlockchainChannel,
+                       ID:                msgs.BlockchainChannel,
                        Priority:          5,
                        SendQueueCapacity: 100,
                },
@@ -62,40 +52,39 @@ func (pr *ProtocolReactor) OnStop() {
 
 // AddPeer implements Reactor by sending our state to peer.
 func (pr *ProtocolReactor) AddPeer(peer *p2p.Peer) error {
-       if ok := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{&StatusRequestMessage{}}); !ok {
-               return errStatusRequest
+       pr.manager.AddPeer(peer)
+       if err := pr.manager.SendStatus(peer); err != nil {
+               return err
        }
 
-       checkTicker := time.NewTicker(handshakeCheckPerid)
-       defer checkTicker.Stop()
-       timeout := time.NewTimer(handshakeTimeout)
-       defer timeout.Stop()
-       for {
-               select {
-               case <-checkTicker.C:
-                       if exist := pr.peers.getPeer(peer.Key); exist != nil {
-                               pr.sm.syncTransactions(peer.Key)
-                               return nil
-                       }
-
-               case <-timeout.C:
-                       return errProtocolHandshakeTimeout
-               }
-       }
+       pr.manager.syncMempool(peer.Key)
+       return nil
 }
 
 // RemovePeer implements Reactor by removing peer from the pool.
 func (pr *ProtocolReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
-       pr.peers.removePeer(peer.Key)
+       pr.manager.RemovePeer(peer.Key)
+}
+
+//decodeMessage decode msg
+func decodeMessage(bz []byte) (msgType byte, msg msgs.BlockchainMessage, err error) {
+       msgType = bz[0]
+       n := int(0)
+       r := bytes.NewReader(bz)
+       msg = wire.ReadBinary(struct{ msgs.BlockchainMessage }{}, r, msgs.MaxBlockchainResponseSize, &n, &err).(struct{ msgs.BlockchainMessage }).BlockchainMessage
+       if err != nil && n != len(bz) {
+               err = errors.New("DecodeMessage() had bytes left over")
+       }
+       return
 }
 
 // Receive implements Reactor by handling 4 types of messages (look below).
 func (pr *ProtocolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
-       msgType, msg, err := DecodeMessage(msgBytes)
+       msgType, msg, err := decodeMessage(msgBytes)
        if err != nil {
                log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on reactor decoding message")
                return
        }
 
-       pr.sm.processMsg(src, msgType, msg)
+       pr.manager.processMsg(src, msgType, msg)
 }
diff --git a/netsync/chainmgr/storage.go b/netsync/chainmgr/storage.go
new file mode 100644 (file)
index 0000000..22cf900
--- /dev/null
@@ -0,0 +1,162 @@
+package chainmgr
+
+import (
+       "encoding/binary"
+       "sync"
+
+       dbm "github.com/bytom/bytom/database/leveldb"
+       "github.com/bytom/bytom/errors"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
+var (
+	// RAM budget for staged blocks; 800MB (a var, not a const, so tests can lower it)
+	maxByteOfStorageRAM = 800 * 1024 * 1024
+	errStorageFindBlock = errors.New("can't find block from storage")
+	errDBFindBlock      = errors.New("can't find block from DB")
+)
+
+// LocalStore is the interface for persistent storage of staged blocks.
+type LocalStore interface {
+	writeBlock(block *types.Block) error
+	readBlock(height uint64) (*types.Block, error)
+	clearData()
+}
+
+// blockStorage is one staged block plus bookkeeping: the peer it came
+// from, its serialized size, and whether it is held in RAM (isRAM) or
+// was spilled to the persistent LocalStore.
+type blockStorage struct {
+	block  *types.Block
+	peerID string
+	size   int
+	isRAM  bool
+}
+
+// storage is a hybrid RAM/disk staging area for fast-sync blocks.
+// actualUsage tracks the serialized bytes currently held in RAM so that
+// writes can spill to localStore once maxByteOfStorageRAM is reached.
+type storage struct {
+	actualUsage int
+	blocks      map[uint64]*blockStorage
+	localStore  LocalStore
+	mux         sync.RWMutex
+}
+
+// newStorage builds a storage backed by db, first clearing any stale
+// data a previous run may have left in the database.
+func newStorage(db dbm.DB) *storage {
+	DBStorage := newDBStore(db)
+	DBStorage.clearData()
+	return &storage{
+		blocks:     make(map[uint64]*blockStorage),
+		localStore: DBStorage,
+	}
+}
+
+func (s *storage) writeBlocks(peerID string, blocks []*types.Block) error {
+       s.mux.Lock()
+       defer s.mux.Unlock()
+
+       for _, block := range blocks {
+               binaryBlock, err := block.MarshalText()
+               if err != nil {
+                       return errors.Wrap(err, "Marshal block header")
+               }
+
+               if len(binaryBlock)+s.actualUsage < maxByteOfStorageRAM {
+                       s.blocks[block.Height] = &blockStorage{block: block, peerID: peerID, size: len(binaryBlock), isRAM: true}
+                       s.actualUsage += len(binaryBlock)
+                       continue
+               }
+
+               if err := s.localStore.writeBlock(block); err != nil {
+                       return err
+               }
+
+               s.blocks[block.Height] = &blockStorage{peerID: peerID, isRAM: false}
+       }
+
+       return nil
+}
+
+func (s *storage) readBlock(height uint64) (*blockStorage, error) {
+       s.mux.RLock()
+       defer s.mux.RUnlock()
+
+       blockStore, ok := s.blocks[height]
+       if !ok {
+               return nil, errStorageFindBlock
+       }
+
+       if blockStore.isRAM {
+               return blockStore, nil
+       }
+
+       block, err := s.localStore.readBlock(height)
+       if err != nil {
+               return nil, err
+       }
+
+       blockStore.block = block
+       return blockStore, nil
+}
+
+// deleteBlock delete blocks in memory
+func (s *storage) deleteBlock(height uint64) {
+       s.mux.RLock()
+       defer s.mux.RUnlock()
+
+       blockStore, ok := s.blocks[height]
+       if !ok {
+               return
+       }
+
+       if blockStore.isRAM {
+               s.actualUsage -= blockStore.size
+               delete(s.blocks, height)
+       }
+}
+
+// resetParameter discards all staged blocks, both the RAM cache (map
+// and usage counter) and everything spilled to the persistent store.
+func (s *storage) resetParameter() {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+
+	s.blocks = make(map[uint64]*blockStorage)
+	s.actualUsage = 0
+	s.localStore.clearData()
+}
+
+// levelDBStorage implements LocalStore on top of a leveldb database,
+// keyed by the 8-byte big-endian block height.
+type levelDBStorage struct {
+	db dbm.DB
+}
+
+// newDBStore wraps db as a LocalStore.
+func newDBStore(db dbm.DB) *levelDBStorage {
+	return &levelDBStorage{
+		db: db,
+	}
+}
+
+// clearData deletes every key in the database. Deleting while the
+// iterator is open assumes the iterator works on a snapshot —
+// NOTE(review): confirm dbm.DB's iterator guarantees this.
+func (ls *levelDBStorage) clearData() {
+	iter := ls.db.Iterator()
+	defer iter.Release()
+
+	for iter.Next() {
+		ls.db.Delete(iter.Key())
+	}
+}
+
+// writeBlock persists the block under its big-endian height key.
+func (ls *levelDBStorage) writeBlock(block *types.Block) error {
+	binaryBlock, err := block.MarshalText()
+	if err != nil {
+		return err
+	}
+
+	key := make([]byte, 8)
+	binary.BigEndian.PutUint64(key, block.Height)
+	ls.db.Set(key, binaryBlock)
+	return nil
+}
+
+// readBlock loads the block stored at height, or errDBFindBlock when
+// no entry exists for that key.
+func (ls *levelDBStorage) readBlock(height uint64) (*types.Block, error) {
+	key := make([]byte, 8)
+	binary.BigEndian.PutUint64(key, height)
+	binaryBlock := ls.db.Get(key)
+	if binaryBlock == nil {
+		return nil, errDBFindBlock
+	}
+
+	block := &types.Block{}
+	return block, block.UnmarshalText(binaryBlock)
+}
diff --git a/netsync/chainmgr/storage_test.go b/netsync/chainmgr/storage_test.go
new file mode 100644 (file)
index 0000000..ba88c05
--- /dev/null
@@ -0,0 +1,133 @@
+package chainmgr
+
+import (
+       "io/ioutil"
+       "os"
+       "testing"
+
+       "github.com/davecgh/go-spew/spew"
+
+       dbm "github.com/bytom/bytom/database/leveldb"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
+// TestReadWriteBlocks round-trips blocks through storage under two RAM
+// budgets: a large one where every block stays in RAM and a 1-byte one
+// that forces every block to spill to leveldb.
+func TestReadWriteBlocks(t *testing.T) {
+	tmp, err := ioutil.TempDir(".", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	testDB := dbm.NewDB("testdb", "leveldb", tmp)
+	defer testDB.Close()
+
+	s := newStorage(testDB)
+
+	cases := []struct {
+		storageRAMLimit int
+		blocks          []*types.Block
+		peerID          string
+		isRAM           bool
+	}{
+		{
+			storageRAMLimit: 800 * 1024 * 1024,
+			blocks:          mockBlocks(nil, 500),
+			peerID:          "testPeer",
+			isRAM:           true,
+		},
+		{
+			storageRAMLimit: 1,
+			blocks:          mockBlocks(nil, 500),
+			peerID:          "testPeer",
+			isRAM:           false,
+		},
+	}
+
+	for index, c := range cases {
+		// override the package-level RAM cap for this case
+		maxByteOfStorageRAM = c.storageRAMLimit
+		s.writeBlocks(c.peerID, c.blocks)
+
+		// mockBlocks(nil, 500) yields heights 0..500 inclusive
+		for i := 0; i < len(c.blocks); i++ {
+			blockStorage, err := s.readBlock(uint64(i))
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			if blockStorage.isRAM != c.isRAM {
+				t.Fatalf("case %d: TestReadWriteBlocks block %d isRAM: got %t want %t", index, i, blockStorage.isRAM, c.isRAM)
+			}
+
+			if blockStorage.block.Hash() != c.blocks[i].Hash() {
+				t.Fatalf("case %d: TestReadWriteBlocks block %d: got %s want %s", index, i, spew.Sdump(blockStorage.block), spew.Sdump(c.blocks[i]))
+			}
+		}
+	}
+}
+
+func TestDeleteBlock(t *testing.T) {
+       tmp, err := ioutil.TempDir(".", "")
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer os.RemoveAll(tmp)
+
+       testDB := dbm.NewDB("testdb", "leveldb", tmp)
+       defer testDB.Close()
+
+       maxByteOfStorageRAM = 1024
+       blocks := mockBlocks(nil, 500)
+       s := newStorage(testDB)
+       for i, block := range blocks {
+               if err := s.writeBlocks("testPeer", []*types.Block{block}); err != nil {
+                       t.Fatal(err)
+               }
+
+               blockStorage, err := s.readBlock(block.Height)
+               if err != nil {
+                       t.Fatal(err)
+               }
+
+               if !blockStorage.isRAM {
+                       t.Fatalf("TestReadWriteBlocks block %d isRAM: got %t want %t", i, blockStorage.isRAM, true)
+               }
+
+               s.deleteBlock(block.Height)
+       }
+
+}
+
+// TestLevelDBStorageReadWrite round-trips each mock block through the
+// leveldb-backed LocalStore, then verifies clearData makes the block
+// unreadable again.
+func TestLevelDBStorageReadWrite(t *testing.T) {
+	tmp, err := ioutil.TempDir(".", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	testDB := dbm.NewDB("testdb", "leveldb", tmp)
+	defer testDB.Close()
+
+	blocks := mockBlocks(nil, 16)
+	s := newDBStore(testDB)
+
+	for i, block := range blocks {
+		err := s.writeBlock(block)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		gotBlock, err := s.readBlock(block.Height)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if gotBlock.Hash() != block.Hash() {
+			t.Fatalf("TestLevelDBStorageReadWrite block %d: got %s want %s", i, spew.Sdump(gotBlock), spew.Sdump(block))
+		}
+
+		// after clearData the same height must no longer be readable
+		s.clearData()
+		_, err = s.readBlock(block.Height)
+		if err == nil {
+			t.Fatalf("TestLevelDBStorageReadWrite clear data err block %d", i)
+		}
+	}
+}
similarity index 58%
rename from netsync/tool_test.go
rename to netsync/chainmgr/tool_test.go
index 6154bcb..0cda456 100644 (file)
@@ -1,4 +1,4 @@
-package netsync
+package chainmgr
 
 import (
        "errors"
@@ -6,10 +6,13 @@ import (
        "net"
        "time"
 
-       wire "github.com/tendermint/go-wire"
+       dbm "github.com/bytom/bytom/database/leveldb"
+       "github.com/tendermint/go-wire"
        "github.com/tendermint/tmlibs/flowrate"
 
        "github.com/bytom/bytom/consensus"
+       "github.com/bytom/bytom/event"
+       "github.com/bytom/bytom/netsync/peers"
        "github.com/bytom/bytom/protocol/bc"
        "github.com/bytom/bytom/protocol/bc/types"
        "github.com/bytom/bytom/test/mock"
@@ -21,7 +24,7 @@ type P2PPeer struct {
        flag consensus.ServiceFlag
 
        srcPeer    *P2PPeer
-       remoteNode *SyncManager
+       remoteNode *Manager
        msgCh      chan []byte
        async      bool
 }
@@ -48,6 +51,10 @@ func (p *P2PPeer) IsLAN() bool {
        return false
 }
 
+func (p *P2PPeer) Moniker() string {
+       return ""
+}
+
 func (p *P2PPeer) RemoteAddrHost() string {
        return ""
 }
@@ -56,7 +63,7 @@ func (p *P2PPeer) ServiceFlag() consensus.ServiceFlag {
        return p.flag
 }
 
-func (p *P2PPeer) SetConnection(srcPeer *P2PPeer, node *SyncManager) {
+func (p *P2PPeer) SetConnection(srcPeer *P2PPeer, node *Manager) {
        p.srcPeer = srcPeer
        p.remoteNode = node
 }
@@ -70,7 +77,7 @@ func (p *P2PPeer) TrySend(b byte, msg interface{}) bool {
        if p.async {
                p.msgCh <- msgBytes
        } else {
-               msgType, msg, _ := DecodeMessage(msgBytes)
+               msgType, msg, _ := decodeMessage(msgBytes)
                p.remoteNode.processMsg(p.srcPeer, msgType, msg)
        }
        return true
@@ -82,7 +89,8 @@ func (p *P2PPeer) setAsync(b bool) {
 
 func (p *P2PPeer) postMan() {
        for msgBytes := range p.msgCh {
-               msgType, msg, _ := DecodeMessage(msgBytes)
+               msgType, msg, _ := decodeMessage(msgBytes)
+               time.Sleep(10 * time.Millisecond)
                p.remoteNode.processMsg(p.srcPeer, msgType, msg)
        }
 }
@@ -100,19 +108,19 @@ func (ps *PeerSet) IsBanned(ip string, level byte, reason string) bool {
 func (ps *PeerSet) StopPeerGracefully(string) {}
 
 type NetWork struct {
-       nodes map[*SyncManager]P2PPeer
+       nodes map[*Manager]P2PPeer
 }
 
 func NewNetWork() *NetWork {
-       return &NetWork{map[*SyncManager]P2PPeer{}}
+       return &NetWork{map[*Manager]P2PPeer{}}
 }
 
-func (nw *NetWork) Register(node *SyncManager, addr, id string, flag consensus.ServiceFlag) {
+func (nw *NetWork) Register(node *Manager, addr, id string, flag consensus.ServiceFlag) {
        peer := NewP2PPeer(addr, id, flag)
        nw.nodes[node] = *peer
 }
 
-func (nw *NetWork) HandsShake(nodeA, nodeB *SyncManager) (*P2PPeer, *P2PPeer, error) {
+func (nw *NetWork) HandsShake(nodeA, nodeB *Manager) (*P2PPeer, *P2PPeer, error) {
        B2A, ok := nw.nodes[nodeA]
        if !ok {
                return nil, nil, errors.New("can't find nodeA's p2p peer on network")
@@ -125,9 +133,10 @@ func (nw *NetWork) HandsShake(nodeA, nodeB *SyncManager) (*P2PPeer, *P2PPeer, er
        A2B.SetConnection(&B2A, nodeB)
        B2A.SetConnection(&A2B, nodeA)
 
-       nodeA.handleStatusRequestMsg(&A2B)
-       nodeB.handleStatusRequestMsg(&B2A)
-
+       nodeA.AddPeer(&A2B)
+       nodeB.AddPeer(&B2A)
+       nodeA.SendStatus(B2A.srcPeer)
+       nodeB.SendStatus(A2B.srcPeer)
        A2B.setAsync(true)
        B2A.setAsync(true)
        return &B2A, &A2B, nil
@@ -137,7 +146,31 @@ func mockBlocks(startBlock *types.Block, height uint64) []*types.Block {
        blocks := []*types.Block{}
        indexBlock := &types.Block{}
        if startBlock == nil {
-               indexBlock = &types.Block{BlockHeader: types.BlockHeader{Nonce: uint64(rand.Uint32())}}
+               indexBlock = &types.Block{BlockHeader: types.BlockHeader{Version: uint64(rand.Uint32())}}
+               blocks = append(blocks, indexBlock)
+       } else {
+               indexBlock = startBlock
+       }
+
+       for indexBlock.Height < height {
+               block := &types.Block{
+                       BlockHeader: types.BlockHeader{
+                               Height:            indexBlock.Height + 1,
+                               PreviousBlockHash: indexBlock.Hash(),
+                               Version:           uint64(rand.Uint32()),
+                       },
+               }
+               blocks = append(blocks, block)
+               indexBlock = block
+       }
+       return blocks
+}
+
+func mockErrorBlocks(startBlock *types.Block, height uint64, errBlockHeight uint64) []*types.Block {
+       blocks := []*types.Block{}
+       indexBlock := &types.Block{}
+       if startBlock == nil {
+               indexBlock = &types.Block{BlockHeader: types.BlockHeader{Version: uint64(rand.Uint32())}}
                blocks = append(blocks, indexBlock)
        } else {
                indexBlock = startBlock
@@ -148,43 +181,45 @@ func mockBlocks(startBlock *types.Block, height uint64) []*types.Block {
                        BlockHeader: types.BlockHeader{
                                Height:            indexBlock.Height + 1,
                                PreviousBlockHash: indexBlock.Hash(),
-                               Nonce:             uint64(rand.Uint32()),
+                               Version:           uint64(rand.Uint32()),
                        },
                }
+               if block.Height == errBlockHeight {
+                       block.TransactionsMerkleRoot = bc.NewHash([32]byte{0x1})
+               }
                blocks = append(blocks, block)
                indexBlock = block
        }
        return blocks
 }
 
-func mockSync(blocks []*types.Block) *SyncManager {
+func mockSync(blocks []*types.Block, mempool *mock.Mempool, fastSyncDB dbm.DB) *Manager {
        chain := mock.NewChain()
-       peers := newPeerSet(NewPeerSet())
+       peers := peers.NewPeerSet(NewPeerSet())
        chain.SetBestBlockHeader(&blocks[len(blocks)-1].BlockHeader)
        for _, block := range blocks {
                chain.SetBlockByHeight(block.Height, block)
        }
 
-       genesis, _ := chain.GetHeaderByHeight(0)
-       return &SyncManager{
-               genesisHash: genesis.Hash(),
-               chain:       chain,
-               blockKeeper: newBlockKeeper(chain, peers),
-               peers:       peers,
+       return &Manager{
+               chain:           chain,
+               blockKeeper:     newBlockKeeper(chain, peers, fastSyncDB),
+               peers:           peers,
+               mempool:         mempool,
+               txSyncCh:        make(chan *txSyncMsg),
+               eventDispatcher: event.NewDispatcher(),
        }
 }
 
 func mockTxs(txCount int) ([]*types.Tx, []*bc.Tx) {
        var txs []*types.Tx
        var bcTxs []*bc.Tx
-       for i := 0; i < txCount; i++ {
-               trueProg := mockControlProgram(60)
-               assetID := bc.ComputeAssetID(trueProg, 1, &bc.EmptyStringHash)
-               now := []byte(time.Now().String())
-               issuanceInp := types.NewIssuanceInput(now, 1, trueProg, nil, nil)
+       trueProg := mockControlProgram(60)
+       assetID := bc.AssetID{V0: 9999}
+       for i := uint64(0); i < uint64(txCount); i++ {
                tx := types.NewTx(types.TxData{
                        Version: 1,
-                       Inputs:  []*types.TxInput{issuanceInp},
+                       Inputs:  []*types.TxInput{types.NewSpendInput(nil, bc.Hash{V0: i + 1}, assetID, i, i, trueProg)},
                        Outputs: []*types.TxOutput{types.NewTxOutput(assetID, 1, trueProg)},
                })
                txs = append(txs, tx)
similarity index 82%
rename from netsync/tx_keeper.go
rename to netsync/chainmgr/tx_keeper.go
index b8194aa..d348674 100644 (file)
@@ -1,4 +1,4 @@
-package netsync
+package chainmgr
 
 import (
        "math/rand"
@@ -21,8 +21,8 @@ type txSyncMsg struct {
        txs    []*types.Tx
 }
 
-func (sm *SyncManager) syncTransactions(peerID string) {
-       pending := sm.txPool.GetTransactions()
+func (m *Manager) syncMempool(peerID string) {
+       pending := m.mempool.GetTransactions()
        if len(pending) == 0 {
                return
        }
@@ -31,13 +31,13 @@ func (sm *SyncManager) syncTransactions(peerID string) {
        for i, batch := range pending {
                txs[i] = batch.Tx
        }
-       sm.txSyncCh <- &txSyncMsg{peerID, txs}
+       m.txSyncCh <- &txSyncMsg{peerID, txs}
 }
 
-func (sm *SyncManager) txBroadcastLoop() {
+func (m *Manager) broadcastTxsLoop() {
        for {
                select {
-               case obj, ok := <-sm.txMsgSub.Chan():
+               case obj, ok := <-m.txMsgSub.Chan():
                        if !ok {
                                log.WithFields(log.Fields{"module": logModule}).Warning("mempool tx msg subscription channel closed")
                                return
@@ -50,29 +50,29 @@ func (sm *SyncManager) txBroadcastLoop() {
                        }
 
                        if ev.TxMsg.MsgType == core.MsgNewTx {
-                               if err := sm.peers.broadcastTx(ev.TxMsg.Tx); err != nil {
+                               if err := m.peers.BroadcastTx(ev.TxMsg.Tx); err != nil {
                                        log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on broadcast new tx.")
                                        continue
                                }
                        }
-               case <-sm.quitSync:
+               case <-m.quit:
                        return
                }
        }
 }
 
-// txSyncLoop takes care of the initial transaction sync for each new
+// syncMempoolLoop takes care of the initial transaction sync for each new
 // connection. When a new peer appears, we relay all currently pending
 // transactions. In order to minimise egress bandwidth usage, we send
 // the transactions in small packs to one peer at a time.
-func (sm *SyncManager) txSyncLoop() {
+func (m *Manager) syncMempoolLoop() {
        pending := make(map[string]*txSyncMsg)
        sending := false            // whether a send is active
        done := make(chan error, 1) // result of the send
 
        // send starts a sending a pack of transactions from the sync.
        send := func(msg *txSyncMsg) {
-               peer := sm.peers.getPeer(msg.peerID)
+               peer := m.peers.GetPeer(msg.peerID)
                if peer == nil {
                        delete(pending, msg.peerID)
                        return
@@ -100,9 +100,9 @@ func (sm *SyncManager) txSyncLoop() {
                }).Debug("txSyncLoop sending transactions")
                sending = true
                go func() {
-                       ok, err := peer.sendTransactions(sendTxs)
-                       if !ok {
-                               sm.peers.removePeer(msg.peerID)
+                       err := peer.SendTransactions(sendTxs)
+                       if err != nil {
+                               m.peers.RemovePeer(msg.peerID)
                        }
                        done <- err
                }()
@@ -125,12 +125,11 @@ func (sm *SyncManager) txSyncLoop() {
 
        for {
                select {
-               case msg := <-sm.txSyncCh:
+               case msg := <-m.txSyncCh:
                        pending[msg.peerID] = msg
                        if !sending {
                                send(msg)
                        }
-
                case err := <-done:
                        sending = false
                        if err != nil {
@@ -140,6 +139,8 @@ func (sm *SyncManager) txSyncLoop() {
                        if s := pick(); s != nil {
                                send(s)
                        }
+               case <-m.quit:
+                       return
                }
        }
 }
diff --git a/netsync/chainmgr/tx_keeper_test.go b/netsync/chainmgr/tx_keeper_test.go
new file mode 100644 (file)
index 0000000..9f0586b
--- /dev/null
@@ -0,0 +1,188 @@
+package chainmgr
+
+import (
+       "io/ioutil"
+       "os"
+       "reflect"
+       "testing"
+       "time"
+
+       "github.com/davecgh/go-spew/spew"
+
+       "github.com/bytom/bytom/consensus"
+       dbm "github.com/bytom/bytom/database/leveldb"
+       "github.com/bytom/bytom/protocol"
+       core "github.com/bytom/bytom/protocol"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+       "github.com/bytom/bytom/test/mock"
+)
+
+const txsNumber = 2000
+
+type mempool struct {
+}
+
+func (m *mempool) GetTransactions() []*core.TxDesc {
+       txs := []*core.TxDesc{}
+       for i := 0; i < txsNumber; i++ {
+               txInput := types.NewSpendInput(nil, bc.NewHash([32]byte{0x01}), *consensus.BTMAssetID, uint64(i), 1, []byte{0x51})
+               txInput.CommitmentSuffix = []byte{0, 1, 2}
+               txInput.WitnessSuffix = []byte{0, 1, 2}
+
+               tx := &types.Tx{
+
+                       TxData: types.TxData{
+                               //SerializedSize: uint64(i * 10),
+                               Inputs: []*types.TxInput{
+                                       txInput,
+                               },
+                               Outputs: []*types.TxOutput{
+                                       types.NewTxOutput(*consensus.BTMAssetID, uint64(i), []byte{0x6a}),
+                               },
+                               SerializedSize: 1000,
+                       },
+                       Tx: &bc.Tx{
+                               ID: bc.Hash{V0: uint64(i), V1: uint64(i), V2: uint64(i), V3: uint64(i)},
+                       },
+               }
+               txs = append(txs, &core.TxDesc{Tx: tx})
+       }
+       return txs
+}
+
+func (m *mempool) IsDust(tx *types.Tx) bool {
+       return false
+}
+
+func TestSyncMempool(t *testing.T) {
+       tmpDir, err := ioutil.TempDir(".", "")
+       if err != nil {
+               t.Fatalf("failed to create temporary data folder: %v", err)
+       }
+       defer os.RemoveAll(tmpDir)
+       testDBA := dbm.NewDB("testdba", "leveldb", tmpDir)
+       testDBB := dbm.NewDB("testdbb", "leveldb", tmpDir)
+
+       blocks := mockBlocks(nil, 5)
+       a := mockSync(blocks, &mock.Mempool{}, testDBA)
+       b := mockSync(blocks, &mock.Mempool{}, testDBB)
+       a.mempool = &mempool{}
+       netWork := NewNetWork()
+       netWork.Register(a, "192.168.0.1", "test node A", consensus.SFFullNode)
+       netWork.Register(b, "192.168.0.2", "test node B", consensus.SFFullNode)
+       if B2A, A2B, err := netWork.HandsShake(a, b); err != nil {
+               t.Errorf("fail on peer hands shake %v", err)
+       } else {
+               go B2A.postMan()
+               go A2B.postMan()
+       }
+
+       go a.syncMempoolLoop()
+       a.syncMempool("test node B")
+       wantTxs := a.mempool.GetTransactions()
+
+       timeout := time.NewTimer(2 * time.Second)
+       defer timeout.Stop()
+       ticker := time.NewTicker(500 * time.Millisecond)
+       defer ticker.Stop()
+
+       gotTxs := []*protocol.TxDesc{}
+       for {
+               select {
+               case <-ticker.C:
+                       gotTxs = b.mempool.GetTransactions()
+                       if len(gotTxs) >= txsNumber {
+                               goto out
+                       }
+               case <-timeout.C:
+                       t.Fatalf("mempool sync timeout")
+               }
+       }
+
+out:
+       if len(gotTxs) != txsNumber {
+               t.Fatalf("mempool sync txs num err. got:%d want:%d", len(gotTxs), txsNumber)
+       }
+
+       for i, gotTx := range gotTxs {
+               index := gotTx.Tx.Inputs[0].Amount()
+               if !reflect.DeepEqual(gotTx.Tx.Inputs[0].Amount(), wantTxs[index].Tx.Inputs[0].Amount()) {
+                       t.Fatalf("mempool tx err. index:%d\n,gotTx:%s\n,wantTx:%s", i, spew.Sdump(gotTx.Tx.Inputs), spew.Sdump(wantTxs[0].Tx.Inputs))
+               }
+
+               if !reflect.DeepEqual(gotTx.Tx.Outputs[0].AssetAmount, wantTxs[index].Tx.Outputs[0].AssetAmount) {
+                       t.Fatalf("mempool tx err. index:%d\n,gotTx:%s\n,wantTx:%s", i, spew.Sdump(gotTx.Tx.Outputs), spew.Sdump(wantTxs[0].Tx.Outputs))
+               }
+       }
+}
+
+func TestBroadcastTxsLoop(t *testing.T) {
+       tmpDir, err := ioutil.TempDir(".", "")
+       if err != nil {
+               t.Fatalf("failed to create temporary data folder: %v", err)
+       }
+       defer os.RemoveAll(tmpDir)
+       testDBA := dbm.NewDB("testdba", "leveldb", tmpDir)
+       testDBB := dbm.NewDB("testdbb", "leveldb", tmpDir)
+
+       blocks := mockBlocks(nil, 5)
+       a := mockSync(blocks, &mock.Mempool{}, testDBA)
+       b := mockSync(blocks, &mock.Mempool{}, testDBB)
+       a.mempool = &mempool{}
+       netWork := NewNetWork()
+       netWork.Register(a, "192.168.0.1", "test node A", consensus.SFFullNode)
+       netWork.Register(b, "192.168.0.2", "test node B", consensus.SFFullNode)
+       if B2A, A2B, err := netWork.HandsShake(a, b); err != nil {
+               t.Errorf("fail on peer hands shake %v", err)
+       } else {
+               go B2A.postMan()
+               go A2B.postMan()
+       }
+       a.txMsgSub, err = a.eventDispatcher.Subscribe(core.TxMsgEvent{})
+       if err != nil {
+               t.Fatal("txMsgSub subscribe err", err)
+       }
+       go a.broadcastTxsLoop()
+       wantTxs := a.mempool.GetTransactions()
+       txsNum := 50
+       for i, txD := range wantTxs {
+               if i >= txsNum {
+                       break
+               }
+               a.eventDispatcher.Post(core.TxMsgEvent{TxMsg: &core.TxPoolMsg{TxDesc: txD, MsgType: core.MsgNewTx}})
+       }
+       timeout := time.NewTimer(2 * time.Second)
+       defer timeout.Stop()
+       ticker := time.NewTicker(500 * time.Millisecond)
+       defer ticker.Stop()
+
+       gotTxs := []*protocol.TxDesc{}
+       for {
+               select {
+               case <-ticker.C:
+                       gotTxs = b.mempool.GetTransactions()
+                       if len(gotTxs) >= txsNum {
+                               goto out
+                       }
+               case <-timeout.C:
+                       t.Fatalf("mempool sync timeout")
+               }
+       }
+
+out:
+       if len(gotTxs) != txsNum {
+               t.Fatalf("mempool sync txs num err. got:%d want:%d", len(gotTxs), txsNum)
+       }
+
+       for i, gotTx := range gotTxs {
+               index := gotTx.Tx.Inputs[0].Amount()
+               if !reflect.DeepEqual(gotTx.Tx.Inputs[0].Amount(), wantTxs[index].Tx.Inputs[0].Amount()) {
+                       t.Fatalf("mempool tx err. index:%d\n,gotTx:%s\n,wantTx:%s", i, spew.Sdump(gotTx.Tx.Inputs), spew.Sdump(wantTxs[0].Tx.Inputs))
+               }
+
+               if !reflect.DeepEqual(gotTx.Tx.Outputs[0].AssetAmount, wantTxs[index].Tx.Outputs[0].AssetAmount) {
+                       t.Fatalf("mempool tx err. index:%d\n,gotTx:%s\n,wantTx:%s", i, spew.Sdump(gotTx.Tx.Outputs), spew.Sdump(wantTxs[0].Tx.Outputs))
+               }
+       }
+}
similarity index 54%
rename from netsync/block_fetcher.go
rename to netsync/consensusmgr/block_fetcher.go
index 51f312e..ee14b83 100644 (file)
@@ -1,4 +1,4 @@
-package netsync
+package consensusmgr
 
 import (
        log "github.com/sirupsen/logrus"
@@ -10,35 +10,35 @@ import (
 
 const (
        maxBlockDistance = 64
-       maxMsgSetSize    = 128
        newBlockChSize   = 64
+       msgLimit         = 128 // peer message number limit
 )
 
 // blockFetcher is responsible for accumulating block announcements from various peers
 // and scheduling them for retrieval.
 type blockFetcher struct {
        chain Chain
-       peers *peerSet
+       peers Peers
 
        newBlockCh chan *blockMsg
-       queue      *prque.Prque
-       msgSet     map[bc.Hash]*blockMsg
+       queue      *prque.Prque          // block import priority queue
+       msgSet     map[bc.Hash]*blockMsg // already queued blocks
+       msgCounter map[string]int        // per peer msg counter to prevent DOS
 }
 
-//NewBlockFetcher creates a block fetcher to retrieve blocks of the new mined.
-func newBlockFetcher(chain Chain, peers *peerSet) *blockFetcher {
-       f := &blockFetcher{
+// newBlockFetcher creates a block fetcher to retrieve newly proposed blocks.
+func newBlockFetcher(chain Chain, peers Peers) *blockFetcher {
+       return &blockFetcher{
                chain:      chain,
                peers:      peers,
                newBlockCh: make(chan *blockMsg, newBlockChSize),
                queue:      prque.New(),
                msgSet:     make(map[bc.Hash]*blockMsg),
+               msgCounter: make(map[string]int),
        }
-       go f.blockProcessor()
-       return f
 }
 
-func (f *blockFetcher) blockProcessor() {
+func (f *blockFetcher) blockProcessorLoop() {
        for {
                for !f.queue.Empty() {
                        msg := f.queue.PopItem().(*blockMsg)
@@ -49,14 +49,25 @@ func (f *blockFetcher) blockProcessor() {
 
                        f.insert(msg)
                        delete(f.msgSet, msg.block.Hash())
+                       f.msgCounter[msg.peerID]--
+                       if f.msgCounter[msg.peerID] <= 0 {
+                               delete(f.msgCounter, msg.peerID)
+                       }
                }
-               f.add(<-f.newBlockCh)
+               f.add(<-f.newBlockCh, msgLimit)
        }
 }
 
-func (f *blockFetcher) add(msg *blockMsg) {
+func (f *blockFetcher) add(msg *blockMsg, limit int) {
+       // prevent DOS
+       count := f.msgCounter[msg.peerID] + 1
+       if count > limit {
+               log.WithFields(log.Fields{"module": logModule, "peer": msg.peerID, "limit": limit}).Warn("The number of peer messages exceeds the limit")
+               return
+       }
+
        bestHeight := f.chain.BestBlockHeight()
-       if len(f.msgSet) > maxMsgSetSize || bestHeight > msg.block.Height || msg.block.Height-bestHeight > maxBlockDistance {
+       if bestHeight > msg.block.Height || msg.block.Height-bestHeight > maxBlockDistance {
                return
        }
 
@@ -64,22 +75,22 @@ func (f *blockFetcher) add(msg *blockMsg) {
        if _, ok := f.msgSet[blockHash]; !ok {
                f.msgSet[blockHash] = msg
                f.queue.Push(msg, -float32(msg.block.Height))
+               f.msgCounter[msg.peerID] = count
                log.WithFields(log.Fields{
                        "module":       logModule,
                        "block height": msg.block.Height,
                        "block hash":   blockHash.String(),
-               }).Debug("blockFetcher receive mine block")
+               }).Debug("blockFetcher receive propose block")
        }
 }
 
 func (f *blockFetcher) insert(msg *blockMsg) {
        isOrphan, err := f.chain.ProcessBlock(msg.block)
        if err != nil {
-               peer := f.peers.getPeer(msg.peerID)
+               peer := f.peers.GetPeer(msg.peerID)
                if peer == nil {
                        return
                }
-
                f.peers.ProcessIllegal(msg.peerID, security.LevelMsgIllegal, err.Error())
                return
        }
@@ -88,8 +99,14 @@ func (f *blockFetcher) insert(msg *blockMsg) {
                return
        }
 
-       if err := f.peers.broadcastMinedBlock(msg.block); err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("blockFetcher fail on broadcast new block")
+       proposeMsg, err := NewBlockProposeMsg(msg.block)
+       if err != nil {
+               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("failed on create BlockProposeMsg")
+               return
+       }
+
+       if err := f.peers.BroadcastMsg(NewBroadcastMsg(proposeMsg, consensusChannel)); err != nil {
+               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("failed on broadcast proposed block")
                return
        }
 }
diff --git a/netsync/consensusmgr/block_fetcher_test.go b/netsync/consensusmgr/block_fetcher_test.go
new file mode 100644 (file)
index 0000000..f39e737
--- /dev/null
@@ -0,0 +1,295 @@
+package consensusmgr
+
+import (
+       "testing"
+       "time"
+
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
+type peerMgr struct {
+}
+
+func (pm *peerMgr) IsBanned(ip string, level byte, reason string) bool {
+       return false
+}
+
+func (pm *peerMgr) StopPeerGracefully(string) {
+       return
+}
+
+type chain struct {
+       blocks []uint64
+}
+
+func newChain() *chain {
+       blocks := make([]uint64, 1, 1)
+       blocks[0] = 99
+       return &chain{
+               blocks: blocks,
+       }
+}
+
+func (c *chain) BestBlockHeight() uint64 {
+       return c.blocks[len(c.blocks)-1]
+}
+
+func (c *chain) GetHeaderByHash(*bc.Hash) (*types.BlockHeader, error) {
+       return nil, nil
+}
+
+func (c *chain) ProcessBlock(block *types.Block) (bool, error) {
+       c.blocks = append(c.blocks, block.Height)
+       return false, nil
+}
+
+func (c *chain) ProcessBlockSignature(signature, pubkey []byte, blockHash *bc.Hash) error {
+       return nil
+}
+
+func TestBlockFetcher(t *testing.T) {
+       peers := peers.NewPeerSet(&peerMgr{})
+       testCase := []struct {
+               blockMsg *blockMsg
+               height   uint64
+       }{
+               {
+                       blockMsg: &blockMsg{
+                               block: &types.Block{
+                                       BlockHeader: types.BlockHeader{
+                                               Height: 100,
+                                       },
+                               },
+                       },
+                       height: 100,
+               },
+               {
+                       blockMsg: &blockMsg{
+                               block: &types.Block{
+                                       BlockHeader: types.BlockHeader{
+                                               Height: 101,
+                                       },
+                               },
+                       },
+                       height: 101,
+               },
+               {
+                       blockMsg: &blockMsg{
+                               block: &types.Block{
+                                       BlockHeader: types.BlockHeader{
+                                               Height: 105,
+                                       },
+                               },
+                       },
+                       height: 101,
+               },
+               {
+                       blockMsg: &blockMsg{
+                               block: &types.Block{
+                                       BlockHeader: types.BlockHeader{
+                                               Height: 200,
+                                       },
+                               },
+                       },
+                       height: 101,
+               },
+               {
+                       blockMsg: &blockMsg{
+                               block: &types.Block{
+                                       BlockHeader: types.BlockHeader{
+                                               Height: 104,
+                                       },
+                               },
+                       },
+                       height: 101,
+               },
+               {
+                       blockMsg: &blockMsg{
+                               block: &types.Block{
+                                       BlockHeader: types.BlockHeader{
+                                               Height: 103,
+                                       },
+                               },
+                       },
+                       height: 101,
+               },
+               {
+                       blockMsg: &blockMsg{
+                               block: &types.Block{
+                                       BlockHeader: types.BlockHeader{
+                                               Height: 102,
+                                       },
+                               },
+                       },
+                       height: 105,
+               },
+       }
+       fetcher := newBlockFetcher(newChain(), peers)
+       go fetcher.blockProcessorLoop()
+       for i, c := range testCase {
+               fetcher.processNewBlock(c.blockMsg)
+               time.Sleep(10 * time.Millisecond)
+               chainHeight := fetcher.chain.BestBlockHeight()
+               if chainHeight != c.height {
+                       t.Fatalf("test block fetcher error. index %d expected chain height %d but got %d", i, c.height, chainHeight)
+               }
+       }
+}
+
+func TestAddBlockMsg(t *testing.T) {
+       peers := peers.NewPeerSet(&peerMgr{})
+       testPeer := "peer1"
+       testCase := []struct {
+               blocksMsg  []*blockMsg
+               limit      int
+               queueSize  int
+               msgSetSize int
+               msgCounter int
+       }{
+               //normal test
+               {
+                       blocksMsg: []*blockMsg{
+                               {
+                                       block: &types.Block{
+                                               BlockHeader: types.BlockHeader{
+                                                       Height: 100,
+                                               },
+                                       },
+                                       peerID: testPeer,
+                               },
+                               {
+                                       block: &types.Block{
+                                               BlockHeader: types.BlockHeader{
+                                                       Height: 101,
+                                               },
+                                       },
+                                       peerID: testPeer,
+                               },
+                               {
+                                       block: &types.Block{
+                                               BlockHeader: types.BlockHeader{
+                                                       Height: 102,
+                                               },
+                                       },
+                                       peerID: testPeer,
+                               },
+                       },
+                       limit:      5,
+                       queueSize:  3,
+                       msgSetSize: 3,
+                       msgCounter: 3,
+               },
+               // test DOS
+               {
+                       blocksMsg: []*blockMsg{
+                               {
+                                       block: &types.Block{
+                                               BlockHeader: types.BlockHeader{
+                                                       Version: 1,
+                                                       Height:  100,
+                                               },
+                                       },
+                                       peerID: testPeer,
+                               },
+                               {
+                                       block: &types.Block{
+                                               BlockHeader: types.BlockHeader{
+                                                       Version: 2,
+                                                       Height:  100,
+                                               },
+                                       },
+                                       peerID: testPeer,
+                               },
+                               {
+                                       block: &types.Block{
+                                               BlockHeader: types.BlockHeader{
+                                                       Version: 3,
+                                                       Height:  100,
+                                               },
+                                       },
+                                       peerID: testPeer,
+                               },
+                               {
+                                       block: &types.Block{
+                                               BlockHeader: types.BlockHeader{
+                                                       Version: 4,
+                                                       Height:  100,
+                                               },
+                                       },
+                                       peerID: testPeer,
+                               },
+                       },
+                       limit:      3,
+                       queueSize:  3,
+                       msgSetSize: 3,
+                       msgCounter: 3,
+               },
+
+               // test msg height does not meet the requirements
+               {
+                       blocksMsg: []*blockMsg{
+                               {
+                                       block: &types.Block{
+                                               BlockHeader: types.BlockHeader{
+                                                       Version: 1,
+                                                       Height:  98,
+                                               },
+                                       },
+                                       peerID: testPeer,
+                               },
+                               {
+                                       block: &types.Block{
+                                               BlockHeader: types.BlockHeader{
+                                                       Version: 2,
+                                                       Height:  97,
+                                               },
+                                       },
+                                       peerID: testPeer,
+                               },
+                               {
+                                       block: &types.Block{
+                                               BlockHeader: types.BlockHeader{
+                                                       Version: 3,
+                                                       Height:  164,
+                                               },
+                                       },
+                                       peerID: testPeer,
+                               },
+                               {
+                                       block: &types.Block{
+                                               BlockHeader: types.BlockHeader{
+                                                       Version: 4,
+                                                       Height:  165,
+                                               },
+                                       },
+                                       peerID: testPeer,
+                               },
+                       },
+                       limit:      5,
+                       queueSize:  0,
+                       msgSetSize: 0,
+                       msgCounter: 0,
+               },
+       }
+
+       for i, c := range testCase {
+               fetcher := newBlockFetcher(newChain(), peers)
+               for _, msg := range c.blocksMsg {
+                       fetcher.add(msg, c.limit)
+               }
+
+               if fetcher.queue.Size() != c.queueSize {
+                       t.Fatalf("index: %d queue size err got %d: want %d", i, fetcher.queue.Size(), c.queueSize)
+               }
+
+               if len(fetcher.msgSet) != c.msgSetSize {
+                       t.Fatalf("index: %d msg set size err got %d: want %d", i, len(fetcher.msgSet), c.msgSetSize)
+               }
+
+               if fetcher.msgCounter[testPeer] != c.msgCounter {
+                       t.Fatalf("index: %d peer msg counter err got %d: want %d", i, fetcher.msgCounter[testPeer], c.msgCounter)
+               }
+       }
+}
diff --git a/netsync/consensusmgr/broadcast_msg.go b/netsync/consensusmgr/broadcast_msg.go
new file mode 100644 (file)
index 0000000..57e49eb
--- /dev/null
@@ -0,0 +1,45 @@
+package consensusmgr
+
+import (
+       "github.com/bytom/bytom/netsync/peers"
+)
+
+// BroadcastMsg the message that can be broadcast
+// by peer set.
+type BroadcastMsg struct {
+       msg       ConsensusMessage
+       transChan byte
+}
+
+// NewBroadcastMsg create concrete broadcast message, implement peers.BroadcastMsg interface.
+func NewBroadcastMsg(msg ConsensusMessage, transChan byte) *BroadcastMsg {
+       return &BroadcastMsg{
+               msg:       msg,
+               transChan: transChan,
+       }
+}
+
+// GetChan get message transfer channel.
+func (b *BroadcastMsg) GetChan() byte {
+       return b.transChan
+}
+
+// GetMsg get ConsensusMessage.
+func (b *BroadcastMsg) GetMsg() interface{} {
+       return struct{ ConsensusMessage }{b.msg}
+}
+
+// MsgString get a string representation of the message.
+func (b *BroadcastMsg) MsgString() string {
+       return b.msg.String()
+}
+
+// MarkSendRecord mark send message record to prevent messages from being sent repeatedly.
+func (b *BroadcastMsg) MarkSendRecord(ps *peers.PeerSet, peers []string) {
+       b.msg.BroadcastMarkSendRecord(ps, peers)
+}
+
+// FilterTargetPeers filter target peers to filter the nodes that need to send messages.
+func (b *BroadcastMsg) FilterTargetPeers(ps *peers.PeerSet) []string {
+       return b.msg.BroadcastFilterTargetPeers(ps)
+}
diff --git a/netsync/consensusmgr/consensus_msg.go b/netsync/consensusmgr/consensus_msg.go
new file mode 100644 (file)
index 0000000..c9a889c
--- /dev/null
@@ -0,0 +1,130 @@
+package consensusmgr
+
+import (
+       "bytes"
+       "encoding/hex"
+       "errors"
+       "fmt"
+
+       "github.com/tendermint/go-wire"
+
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
+const (
+       blockSignatureByte = byte(0x10)
+       blockProposeByte   = byte(0x11)
+)
+
+//ConsensusMessage is a generic message for consensus reactor.
+type ConsensusMessage interface {
+       String() string
+       BroadcastMarkSendRecord(ps *peers.PeerSet, peers []string)
+       BroadcastFilterTargetPeers(ps *peers.PeerSet) []string
+}
+
+var _ = wire.RegisterInterface(
+       struct{ ConsensusMessage }{},
+       wire.ConcreteType{O: &BlockSignatureMsg{}, Byte: blockSignatureByte},
+       wire.ConcreteType{O: &BlockProposeMsg{}, Byte: blockProposeByte},
+)
+
+// decodeMessage decodes a ConsensusMessage from its wire-encoded bytes.
+func decodeMessage(bz []byte) (msgType byte, msg ConsensusMessage, err error) {
+       msgType = bz[0]
+       n := int(0)
+       r := bytes.NewReader(bz)
+       msg = wire.ReadBinary(struct{ ConsensusMessage }{}, r, maxBlockchainResponseSize, &n, &err).(struct{ ConsensusMessage }).ConsensusMessage
+       if err != nil && n != len(bz) {
+               err = errors.New("DecodeMessage() had bytes left over")
+       }
+       return
+}
+
+// BlockSignatureMsg block signature message transferred between nodes.
+type BlockSignatureMsg struct {
+       BlockHash [32]byte
+       Signature []byte
+       PubKey    []byte
+}
+
+// NewBlockSignatureMsg creates a new block signature message.
+func NewBlockSignatureMsg(blockHash bc.Hash, signature, pubKey []byte) ConsensusMessage {
+       hash := blockHash.Byte32()
+       return &BlockSignatureMsg{BlockHash: hash, Signature: signature, PubKey: pubKey}
+}
+
+func (bs *BlockSignatureMsg) String() string {
+       return fmt.Sprintf("{block_hash: %s,signature:%s,pubkey:%s}", hex.EncodeToString(bs.BlockHash[:]), hex.EncodeToString(bs.Signature), hex.EncodeToString(bs.PubKey[:]))
+}
+
+// BroadcastMarkSendRecord mark send message record to prevent messages from being sent repeatedly.
+func (bs *BlockSignatureMsg) BroadcastMarkSendRecord(ps *peers.PeerSet, peers []string) {
+       for _, peer := range peers {
+               ps.MarkBlockSignature(peer, bs.Signature)
+       }
+}
+
+// BroadcastFilterTargetPeers filter target peers to filter the nodes that need to send messages.
+func (bs *BlockSignatureMsg) BroadcastFilterTargetPeers(ps *peers.PeerSet) []string {
+       return ps.PeersWithoutSignature(bs.Signature)
+}
+
+// BlockProposeMsg block propose message transferred between nodes.
+type BlockProposeMsg struct {
+       RawBlock []byte
+}
+
+// NewBlockProposeMsg creates a new block propose message.
+func NewBlockProposeMsg(block *types.Block) (ConsensusMessage, error) {
+       rawBlock, err := block.MarshalText()
+       if err != nil {
+               return nil, err
+       }
+       return &BlockProposeMsg{RawBlock: rawBlock}, nil
+}
+
+// GetProposeBlock decodes and returns the proposed block carried by the message.
+func (bp *BlockProposeMsg) GetProposeBlock() (*types.Block, error) {
+       block := &types.Block{}
+       if err := block.UnmarshalText(bp.RawBlock); err != nil {
+               return nil, err
+       }
+       return block, nil
+}
+
+func (bp *BlockProposeMsg) String() string {
+       block, err := bp.GetProposeBlock()
+       if err != nil {
+               return "{err: wrong message}"
+       }
+       blockHash := block.Hash()
+       return fmt.Sprintf("{block_height: %d, block_hash: %s}", block.Height, blockHash.String())
+}
+
+// BroadcastMarkSendRecord mark send message record to prevent messages from being sent repeatedly.
+func (bp *BlockProposeMsg) BroadcastMarkSendRecord(ps *peers.PeerSet, peers []string) {
+       block, err := bp.GetProposeBlock()
+       if err != nil {
+               return
+       }
+
+       hash := block.Hash()
+       height := block.Height
+       for _, peer := range peers {
+               ps.MarkBlock(peer, &hash)
+               ps.MarkStatus(peer, height)
+       }
+}
+
+// BroadcastFilterTargetPeers filter target peers to filter the nodes that need to send messages.
+func (bp *BlockProposeMsg) BroadcastFilterTargetPeers(ps *peers.PeerSet) []string {
+       block, err := bp.GetProposeBlock()
+       if err != nil {
+               return nil
+       }
+
+       return ps.PeersWithoutBlock(block.Hash())
+}
diff --git a/netsync/consensusmgr/consensus_msg_test.go b/netsync/consensusmgr/consensus_msg_test.go
new file mode 100644 (file)
index 0000000..afa8526
--- /dev/null
@@ -0,0 +1,153 @@
+package consensusmgr
+
+import (
+       "reflect"
+       "testing"
+
+       "github.com/davecgh/go-spew/spew"
+       "github.com/tendermint/go-wire"
+
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
+var _ = wire.RegisterInterface(
+       struct{ ConsensusMessage }{},
+       wire.ConcreteType{O: &BlockSignatureMsg{}, Byte: blockSignatureByte},
+       wire.ConcreteType{O: &BlockProposeMsg{}, Byte: blockProposeByte},
+)
+
+func TestDecodeMessage(t *testing.T) {
+       testCases := []struct {
+               msg     ConsensusMessage
+               msgType byte
+       }{
+               {
+                       msg: &BlockSignatureMsg{
+                               BlockHash: [32]byte{0x01},
+                               Signature: []byte{0x00},
+                               PubKey:    []byte{0x01},
+                       },
+                       msgType: blockSignatureByte,
+               },
+               {
+                       msg: &BlockProposeMsg{
+                               RawBlock: []byte{0x01, 0x02},
+                       },
+                       msgType: blockProposeByte,
+               },
+       }
+       for i, c := range testCases {
+               binMsg := wire.BinaryBytes(struct{ ConsensusMessage }{c.msg})
+               gotMsgType, gotMsg, err := decodeMessage(binMsg)
+               if err != nil {
+                       t.Fatalf("index:%d decode Message err %s", i, err)
+               }
+               if gotMsgType != c.msgType {
+                       t.Fatalf("index:%d decode Message type err. got:%d want:%d", i, gotMsgType, c.msg)
+               }
+               if !reflect.DeepEqual(gotMsg, c.msg) {
+                       t.Fatalf("index:%d decode Message err. got:%s\n want:%s", i, spew.Sdump(gotMsg), spew.Sdump(c.msg))
+               }
+       }
+}
+
+func TestBlockSignBroadcastMsg(t *testing.T) {
+       blockSignMsg := &BlockSignatureMsg{
+               BlockHash: [32]byte{0x01},
+               Signature: []byte{0x00},
+               PubKey:    []byte{0x01},
+       }
+       signatureBroadcastMsg := NewBroadcastMsg(NewBlockSignatureMsg(bc.NewHash(blockSignMsg.BlockHash), blockSignMsg.Signature, blockSignMsg.PubKey), consensusChannel)
+
+       binMsg := wire.BinaryBytes(signatureBroadcastMsg.GetMsg())
+       gotMsgType, gotMsg, err := decodeMessage(binMsg)
+       if err != nil {
+               t.Fatalf("decode Message err %s", err)
+       }
+       if gotMsgType != blockSignatureByte {
+               t.Fatalf("decode Message type err. got:%d want:%d", gotMsgType, blockSignatureByte)
+       }
+       if !reflect.DeepEqual(gotMsg, blockSignMsg) {
+               t.Fatalf("decode Message err. got:%s\n want:%s", spew.Sdump(gotMsg), spew.Sdump(blockSignMsg))
+       }
+}
+
+func TestBlockProposeBroadcastMsg(t *testing.T) {
+       blockProposeMsg, _ := NewBlockProposeMsg(testBlock)
+
+       proposeBroadcastMsg := NewBroadcastMsg(blockProposeMsg, consensusChannel)
+
+       binMsg := wire.BinaryBytes(proposeBroadcastMsg.GetMsg())
+       gotMsgType, gotMsg, err := decodeMessage(binMsg)
+       if err != nil {
+               t.Fatalf("decode Message err %s", err)
+       }
+       if gotMsgType != blockProposeByte {
+               t.Fatalf("decode Message type err. got:%d want:%d", gotMsgType, blockProposeByte)
+       }
+       if !reflect.DeepEqual(gotMsg, blockProposeMsg) {
+               t.Fatalf("decode Message err. got:%s\n want:%s", spew.Sdump(gotMsg), spew.Sdump(blockProposeMsg))
+       }
+}
+
+var testBlock = &types.Block{
+       BlockHeader: types.BlockHeader{
+               Version:   1,
+               Height:    0,
+               Timestamp: 1528945000,
+               BlockCommitment: types.BlockCommitment{
+                       TransactionsMerkleRoot: bc.Hash{V0: uint64(0x11)},
+                       TransactionStatusHash:  bc.Hash{V0: uint64(0x55)},
+               },
+       },
+}
+
+func TestBlockProposeMsg(t *testing.T) {
+       blockMsg, err := NewBlockProposeMsg(testBlock)
+       if err != nil {
+               t.Fatalf("create new mine block msg err:%s", err)
+       }
+
+       gotBlock, err := blockMsg.(*BlockProposeMsg).GetProposeBlock()
+       if err != nil {
+               t.Fatalf("got block err:%s", err)
+       }
+
+       if !reflect.DeepEqual(gotBlock.BlockHeader, testBlock.BlockHeader) {
+               t.Errorf("block msg test err: got %s\nwant %s", spew.Sdump(gotBlock.BlockHeader), spew.Sdump(testBlock.BlockHeader))
+       }
+
+       wantString := "{block_height: 0, block_hash: f59514e2541488a38bc2667940bc2c24027e4a3a371d884b55570d036997bb57}"
+       if blockMsg.String() != wantString {
+               t.Errorf("block msg test err. got:%s want:%s", blockMsg.String(), wantString)
+       }
+
+       blockMsg.(*BlockProposeMsg).RawBlock[1] = blockMsg.(*BlockProposeMsg).RawBlock[1] + 0x1
+       _, err = blockMsg.(*BlockProposeMsg).GetProposeBlock()
+       if err == nil {
+               t.Fatalf("get mine block err")
+       }
+
+       wantString = "{err: wrong message}"
+       if blockMsg.String() != wantString {
+               t.Errorf("block msg test err. got:%s want:%s", blockMsg.String(), wantString)
+       }
+}
+
+func TestBlockSignatureMsg(t *testing.T) {
+       msg := &BlockSignatureMsg{
+               BlockHash: [32]byte{0x01},
+               Signature: []byte{0x00},
+               PubKey:    []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+       }
+       gotMsg := NewBlockSignatureMsg(bc.NewHash(msg.BlockHash), msg.Signature, msg.PubKey)
+
+       if !reflect.DeepEqual(gotMsg, msg) {
+               t.Fatalf("test block signature message err. got:%s\n want:%s", spew.Sdump(gotMsg), spew.Sdump(msg))
+       }
+       wantString := "{block_hash: 0100000000000000000000000000000000000000000000000000000000000000,signature:00,pubkey:01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000}"
+       if gotMsg.String() != wantString {
+               t.Fatalf("test block signature message err. got string:%s\n want string:%s", gotMsg.String(), wantString)
+       }
+}
diff --git a/netsync/consensusmgr/handle.go b/netsync/consensusmgr/handle.go
new file mode 100644 (file)
index 0000000..fc85386
--- /dev/null
@@ -0,0 +1,206 @@
+package consensusmgr
+
+import (
+       "reflect"
+
+       "github.com/sirupsen/logrus"
+
+       "github.com/bytom/bytom/errors"
+       "github.com/bytom/bytom/event"
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/p2p"
+       "github.com/bytom/bytom/p2p/security"
+       "github.com/bytom/bytom/protocol"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
+// Switch is the interface for p2p switch.
+type Switch interface {
+       AddReactor(name string, reactor p2p.Reactor) p2p.Reactor
+}
+
+// Chain is the interface for Bytom core.
+type Chain interface {
+       BestBlockHeight() uint64
+       GetHeaderByHash(*bc.Hash) (*types.BlockHeader, error)
+       ProcessBlock(*types.Block) (bool, error)
+       ProcessBlockSignature(signature, pubkey []byte, blockHash *bc.Hash) error
+}
+
+// Peers is the interface for the peer-set operations the consensus
+// manager needs: peer lifecycle, broadcast, and per-peer bookkeeping of
+// seen blocks, signatures and sync status.
+type Peers interface {
+       AddPeer(peer peers.BasePeer)
+       BroadcastMsg(bm peers.BroadcastMsg) error
+       GetPeer(id string) *peers.Peer
+       MarkBlock(peerID string, hash *bc.Hash)
+       MarkBlockSignature(peerID string, signature []byte)
+       ProcessIllegal(peerID string, level byte, reason string)
+       RemovePeer(peerID string)
+       SetStatus(peerID string, height uint64, hash *bc.Hash)
+}
+
+type blockMsg struct {
+       block  *types.Block
+       peerID string
+}
+
+// Manager is the consensus message network synchronization manager.
+type Manager struct {
+       sw              Switch
+       chain           Chain
+       peers           Peers
+       blockFetcher    *blockFetcher
+       eventDispatcher *event.Dispatcher
+
+       quit chan struct{}
+}
+
+// NewManager create new manager.
+func NewManager(sw Switch, chain Chain, peers Peers, dispatcher *event.Dispatcher) *Manager {
+       manager := &Manager{
+               sw:              sw,
+               chain:           chain,
+               peers:           peers,
+               blockFetcher:    newBlockFetcher(chain, peers),
+               eventDispatcher: dispatcher,
+               quit:            make(chan struct{}),
+       }
+       protocolReactor := NewConsensusReactor(manager)
+       manager.sw.AddReactor("CONSENSUS", protocolReactor)
+       return manager
+}
+
+func (m *Manager) addPeer(peer peers.BasePeer) {
+       m.peers.AddPeer(peer)
+}
+
+// processMsg dispatches a decoded consensus message from the given peer
+// to its type-specific handler. Messages from unknown peers are dropped;
+// unrecognized message types are logged and ignored.
+func (m *Manager) processMsg(peerID string, msgType byte, msg ConsensusMessage) {
+       peer := m.peers.GetPeer(peerID)
+       if peer == nil {
+               return
+       }
+
+       logrus.WithFields(logrus.Fields{"module": logModule, "peer": peer.Addr(), "type": reflect.TypeOf(msg), "message": msg.String()}).Debug("receive message from peer")
+
+       switch msg := msg.(type) {
+       case *BlockProposeMsg:
+               m.handleBlockProposeMsg(peerID, msg)
+
+       case *BlockSignatureMsg:
+               m.handleBlockSignatureMsg(peerID, msg)
+
+       default:
+               logrus.WithFields(logrus.Fields{"module": logModule, "peer": peerID, "message_type": reflect.TypeOf(msg)}).Error("unhandled message type")
+       }
+}
+
+// handleBlockProposeMsg decodes a proposed block, marks it as known for
+// the sending peer, queues it with the block fetcher for processing, and
+// updates the peer's reported status to the block's height/hash.
+func (m *Manager) handleBlockProposeMsg(peerID string, msg *BlockProposeMsg) {
+       block, err := msg.GetProposeBlock()
+       if err != nil {
+               logrus.WithFields(logrus.Fields{"module": logModule, "err": err}).Warning("failed on get propose block")
+               return
+       }
+
+       hash := block.Hash()
+       m.peers.MarkBlock(peerID, &hash)
+       m.blockFetcher.processNewBlock(&blockMsg{peerID: peerID, block: block})
+       m.peers.SetStatus(peerID, block.Height, &hash)
+}
+
+// handleBlockSignatureMsg records the signature as seen by the peer and
+// forwards it to the chain. A rejected signature flags the peer as
+// illegal, except for ErrDoubleSignBlock which is tolerated here
+// (presumably punished elsewhere — NOTE(review): confirm against
+// protocol.ProcessBlockSignature semantics).
+func (m *Manager) handleBlockSignatureMsg(peerID string, msg *BlockSignatureMsg) {
+       m.peers.MarkBlockSignature(peerID, msg.Signature)
+       blockHash := bc.NewHash(msg.BlockHash)
+       if err := m.chain.ProcessBlockSignature(msg.Signature, msg.PubKey, &blockHash); err != nil {
+               if errors.Root(err) != protocol.ErrDoubleSignBlock {
+                       m.peers.ProcessIllegal(peerID, security.LevelMsgIllegal, err.Error())
+               }
+       }
+}
+
+// blockProposeMsgBroadcastLoop subscribes to NewProposedBlockEvent and
+// broadcasts each locally proposed block to peers until m.quit closes or
+// the subscription channel is closed.
+func (m *Manager) blockProposeMsgBroadcastLoop() {
+       blockProposeMsgSub, err := m.eventDispatcher.Subscribe(event.NewProposedBlockEvent{})
+       if err != nil {
+               logrus.WithFields(logrus.Fields{"module": logModule, "err": err}).Error("failed on subscribe NewBlockProposeEvent")
+               return
+       }
+       defer blockProposeMsgSub.Unsubscribe()
+
+       for {
+               select {
+               case obj, ok := <-blockProposeMsgSub.Chan():
+                       if !ok {
+                               logrus.WithFields(logrus.Fields{"module": logModule}).Warning("blockProposeMsgSub channel closed")
+                               return
+                       }
+
+                       ev, ok := obj.Data.(event.NewProposedBlockEvent)
+                       if !ok {
+                               logrus.WithFields(logrus.Fields{"module": logModule}).Error("event type error")
+                               continue
+                       }
+                       proposeMsg, err := NewBlockProposeMsg(&ev.Block)
+                       if err != nil {
+                               // NOTE(review): this `return` terminates the whole broadcast
+                               // loop on a single marshal failure, while a broadcast failure
+                               // below only skips the event — confirm whether `continue` was
+                               // intended here.
+                               logrus.WithFields(logrus.Fields{"module": logModule, "err": err}).Error("failed on create BlockProposeMsg")
+                               return
+                       }
+
+                       if err := m.peers.BroadcastMsg(NewBroadcastMsg(proposeMsg, consensusChannel)); err != nil {
+                               logrus.WithFields(logrus.Fields{"module": logModule, "err": err}).Error("failed on broadcast BlockProposeBroadcastMsg")
+                               continue
+                       }
+
+               case <-m.quit:
+                       return
+               }
+       }
+}
+
+func (m *Manager) blockSignatureMsgBroadcastLoop() {
+       blockSignatureMsgSub, err := m.eventDispatcher.Subscribe(event.BlockSignatureEvent{})
+       if err != nil {
+               logrus.WithFields(logrus.Fields{"module": logModule, "err": err}).Error("failed on subscribe BlockSignatureEvent")
+               return
+       }
+       defer blockSignatureMsgSub.Unsubscribe()
+       for {
+               select {
+               case obj, ok := <-blockSignatureMsgSub.Chan():
+                       if !ok {
+                               logrus.WithFields(logrus.Fields{"module": logModule}).Warning("blockProposeMsgSub channel closed")
+                               return
+                       }
+
+                       ev, ok := obj.Data.(event.BlockSignatureEvent)
+                       if !ok {
+                               logrus.WithFields(logrus.Fields{"module": logModule}).Error("event type error")
+                               continue
+                       }
+
+                       blockSignatureMsg := NewBroadcastMsg(NewBlockSignatureMsg(ev.BlockHash, ev.Signature, ev.XPub), consensusChannel)
+                       if err := m.peers.BroadcastMsg(blockSignatureMsg); err != nil {
+                               logrus.WithFields(logrus.Fields{"module": logModule, "err": err}).Error("failed on broadcast BlockSignBroadcastMsg.")
+                               continue
+                       }
+
+               case <-m.quit:
+                       return
+               }
+       }
+}
+
+func (m *Manager) removePeer(peerID string) {
+       m.peers.RemovePeer(peerID)
+}
+
+//Start consensus manager service.
+func (m *Manager) Start() error {
+       go m.blockFetcher.blockProcessorLoop()
+       go m.blockProposeMsgBroadcastLoop()
+       go m.blockSignatureMsgBroadcastLoop()
+       return nil
+}
+
+//Stop consensus manager service.
+func (m *Manager) Stop() {
+       close(m.quit)
+}
diff --git a/netsync/consensusmgr/handle_test.go b/netsync/consensusmgr/handle_test.go
new file mode 100644 (file)
index 0000000..08ed650
--- /dev/null
@@ -0,0 +1,235 @@
+package consensusmgr
+
+import (
+       "math/rand"
+       "net"
+       "reflect"
+       "testing"
+       "time"
+
+       "github.com/tendermint/tmlibs/flowrate"
+
+       "github.com/bytom/bytom/consensus"
+       "github.com/bytom/bytom/event"
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/p2p"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
+type p2peer struct {
+}
+
+func (p *p2peer) Addr() net.Addr {
+       return nil
+}
+
+func (p *p2peer) ID() string {
+       return ""
+}
+
+func (p *p2peer) Moniker() string {
+       return ""
+}
+
+func (p *p2peer) RemoteAddrHost() string {
+       return ""
+}
+func (p *p2peer) ServiceFlag() consensus.ServiceFlag {
+       return 0
+}
+func (p *p2peer) TrafficStatus() (*flowrate.Status, *flowrate.Status) {
+       return nil, nil
+}
+func (p *p2peer) TrySend(byte, interface{}) bool {
+       return true
+}
+func (p *p2peer) IsLAN() bool {
+       return false
+}
+
+func mockBlocks(startBlock *types.Block, height uint64) []*types.Block {
+       blocks := []*types.Block{}
+       indexBlock := &types.Block{}
+       if startBlock == nil {
+               indexBlock = &types.Block{BlockHeader: types.BlockHeader{Version: uint64(rand.Uint32())}}
+               blocks = append(blocks, indexBlock)
+       } else {
+               indexBlock = startBlock
+       }
+
+       for indexBlock.Height < height {
+               block := &types.Block{
+                       BlockHeader: types.BlockHeader{
+                               Height:            indexBlock.Height + 1,
+                               PreviousBlockHash: indexBlock.Hash(),
+                               Version:           uint64(rand.Uint32()),
+                       },
+               }
+               blocks = append(blocks, block)
+               indexBlock = block
+       }
+       return blocks
+}
+
+type mockSW struct {
+}
+
+func (s *mockSW) AddReactor(name string, reactor p2p.Reactor) p2p.Reactor {
+       return nil
+}
+
+type mockChain struct {
+}
+
+func (c *mockChain) BestBlockHeight() uint64 {
+       return 0
+}
+
+func (c *mockChain) GetHeaderByHash(*bc.Hash) (*types.BlockHeader, error) {
+       return nil, nil
+}
+
+func (c *mockChain) ProcessBlock(*types.Block) (bool, error) {
+       return false, nil
+}
+
+func (c *mockChain) ProcessBlockSignature(signature, pubkey []byte, blockHash *bc.Hash) error {
+       return nil
+}
+
+// mockPeers is a test double for the Peers interface. It records
+// interactions into caller-owned variables so tests can assert on
+// broadcast counts, marked blocks, peer status and collected signatures.
+// Pointer fields left nil must not be exercised by the test.
+type mockPeers struct {
+       msgCount       *int
+       knownBlock     *bc.Hash
+       blockHeight    *uint64
+       knownSignature *[]byte
+}
+
+// newMockPeers wires the mock to the caller-owned observation variables.
+func newMockPeers(msgCount *int, knownBlock *bc.Hash, blockHeight *uint64, signature *[]byte) *mockPeers {
+       return &mockPeers{
+               msgCount:       msgCount,
+               knownBlock:     knownBlock,
+               blockHeight:    blockHeight,
+               knownSignature: signature,
+       }
+}
+
+func (ps *mockPeers) AddPeer(peer peers.BasePeer) {
+
+}
+
+// BroadcastMsg counts every broadcast request.
+func (ps *mockPeers) BroadcastMsg(bm peers.BroadcastMsg) error {
+       *ps.msgCount++
+       return nil
+}
+func (ps *mockPeers) GetPeer(id string) *peers.Peer {
+       return &peers.Peer{BasePeer: &p2peer{}}
+}
+func (ps *mockPeers) MarkBlock(peerID string, hash *bc.Hash) {
+       *ps.knownBlock = *hash
+}
+
+func (ps *mockPeers) MarkBlockSignature(peerID string, signature []byte) {
+       *ps.knownSignature = append(*ps.knownSignature, signature...)
+}
+
+func (ps *mockPeers) ProcessIllegal(peerID string, level byte, reason string) {
+
+}
+
+// CONSISTENCY FIX: receiver renamed p -> ps to match every other method
+// on this type (Go receiver names must be consistent per type).
+func (ps *mockPeers) RemovePeer(peerID string) {
+
+}
+func (ps *mockPeers) SetStatus(peerID string, height uint64, hash *bc.Hash) {
+       *ps.blockHeight = height
+}
+
+func TestBlockProposeMsgBroadcastLoop(t *testing.T) {
+       dispatcher := event.NewDispatcher()
+       msgCount := 0
+       blockHeight := 100
+       mgr := NewManager(&mockSW{}, &mockChain{}, newMockPeers(&msgCount, nil, nil, nil), dispatcher)
+       blocks := mockBlocks(nil, uint64(blockHeight))
+
+       mgr.Start()
+       defer mgr.Stop()
+       time.Sleep(10 * time.Millisecond)
+       for _, block := range blocks {
+               mgr.eventDispatcher.Post(event.NewProposedBlockEvent{Block: *block})
+       }
+       time.Sleep(10 * time.Millisecond)
+       if msgCount != blockHeight+1 {
+               t.Fatalf("broad propose block msg err. got:%d\n want:%d", msgCount, blockHeight+1)
+       }
+}
+
+func TestBlockSignatureMsgBroadcastLoop(t *testing.T) {
+       dispatcher := event.NewDispatcher()
+       msgCount := 0
+       blockHeight := 100
+       mgr := NewManager(&mockSW{}, &mockChain{}, newMockPeers(&msgCount, nil, nil, nil), dispatcher)
+       blocks := mockBlocks(nil, uint64(blockHeight))
+
+       mgr.Start()
+       defer mgr.Stop()
+       time.Sleep(10 * time.Millisecond)
+       for _, block := range blocks {
+               mgr.eventDispatcher.Post(event.BlockSignatureEvent{BlockHash: block.Hash(), Signature: []byte{0x1, 0x2}, XPub: []byte{0x011, 0x022}})
+       }
+       time.Sleep(10 * time.Millisecond)
+       if msgCount != blockHeight+1 {
+               t.Fatalf("broad propose block msg err. got:%d\n want:%d", msgCount, blockHeight+1)
+       }
+}
+
+func TestProcessBlockProposeMsg(t *testing.T) {
+       dispatcher := event.NewDispatcher()
+       msgCount := 0
+       var knownBlock bc.Hash
+       blockHeight := uint64(0)
+       peerID := "Peer1"
+       mgr := NewManager(&mockSW{}, &mockChain{}, newMockPeers(&msgCount, &knownBlock, &blockHeight, nil), dispatcher)
+       block := &types.Block{
+               BlockHeader: types.BlockHeader{
+                       Height:            100,
+                       PreviousBlockHash: bc.NewHash([32]byte{0x1}),
+                       Version:           uint64(rand.Uint32()),
+               },
+       }
+       msg, err := NewBlockProposeMsg(block)
+       if err != nil {
+               t.Fatal("create new block propose msg err", err)
+       }
+
+       mgr.processMsg(peerID, 0, msg)
+       if knownBlock != block.Hash() {
+               t.Fatalf("mark propose block msg err. got:%d\n want:%d", knownBlock, block.Hash())
+       }
+
+       if blockHeight != block.Height {
+               t.Fatalf("set peer status err. got:%d\n want:%d", blockHeight, block.Height)
+       }
+}
+
+func TestProcessBlockSignatureMsg(t *testing.T) {
+       dispatcher := event.NewDispatcher()
+       msgCount := 0
+       knownSignature := []byte{}
+       peerID := "Peer1"
+       mgr := NewManager(&mockSW{}, &mockChain{}, newMockPeers(&msgCount, nil, nil, &knownSignature), dispatcher)
+       block := &types.Block{
+               BlockHeader: types.BlockHeader{
+                       Height:            100,
+                       PreviousBlockHash: bc.NewHash([32]byte{0x1}),
+                       Version:           uint64(rand.Uint32()),
+               },
+       }
+
+       signature := []byte{0x01, 0x02}
+       msg := NewBlockSignatureMsg(block.Hash(), signature, []byte{0x03, 0x04})
+
+       mgr.processMsg(peerID, 0, msg)
+
+       if !reflect.DeepEqual(knownSignature, signature) {
+               t.Fatalf("set peer status err. got:%d\n want:%d", knownSignature, signature)
+       }
+}
diff --git a/netsync/consensusmgr/reactor.go b/netsync/consensusmgr/reactor.go
new file mode 100644 (file)
index 0000000..556798b
--- /dev/null
@@ -0,0 +1,72 @@
+package consensusmgr
+
+import (
+       "github.com/sirupsen/logrus"
+
+       "github.com/bytom/bytom/p2p"
+       "github.com/bytom/bytom/p2p/connection"
+)
+
+const (
+       logModule                 = "consensus"
+       consensusChannel          = byte(0x50)
+       maxBlockchainResponseSize = 22020096 + 2
+)
+
+// ConsensusReactor handles new coming consensus message.
+type ConsensusReactor struct {
+       p2p.BaseReactor
+       manager *Manager
+}
+
+// NewConsensusReactor create consensus reactor.
+func NewConsensusReactor(manager *Manager) *ConsensusReactor {
+       cr := &ConsensusReactor{
+               manager: manager,
+       }
+       cr.BaseReactor = *p2p.NewBaseReactor("ConsensusReactor", cr)
+       return cr
+}
+
+// GetChannels implements Reactor
+func (cr *ConsensusReactor) GetChannels() []*connection.ChannelDescriptor {
+       return []*connection.ChannelDescriptor{
+               {
+                       ID:                consensusChannel,
+                       Priority:          10,
+                       SendQueueCapacity: 100,
+               },
+       }
+}
+
+// OnStart implements BaseService
+func (cr *ConsensusReactor) OnStart() error {
+       return cr.BaseReactor.OnStart()
+}
+
+// OnStop implements BaseService
+func (cr *ConsensusReactor) OnStop() {
+       cr.BaseReactor.OnStop()
+}
+
+// AddPeer implements Reactor by sending our state to peer.
+func (cr *ConsensusReactor) AddPeer(peer *p2p.Peer) error {
+       cr.manager.addPeer(peer)
+       return nil
+}
+
+// RemovePeer implements Reactor by removing peer from the pool.
+func (cr *ConsensusReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
+       cr.manager.removePeer(peer.Key)
+}
+
+// Receive implements Reactor by handling messages.
+// Undecodable payloads are logged and dropped; valid messages are handed
+// to the manager keyed by the sending peer's ID.
+func (cr *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
+       msgType, msg, err := decodeMessage(msgBytes)
+       if err != nil {
+               logrus.WithFields(logrus.Fields{"module": logModule, "err": err}).Error("fail on reactor decoding message")
+               return
+       }
+
+       cr.manager.processMsg(src.ID(), msgType, msg)
+}
diff --git a/netsync/handle.go b/netsync/handle.go
deleted file mode 100644 (file)
index 9a63dd5..0000000
+++ /dev/null
@@ -1,493 +0,0 @@
-package netsync
-
-import (
-       "errors"
-       "reflect"
-
-       log "github.com/sirupsen/logrus"
-
-       cfg "github.com/bytom/bytom/config"
-       "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/event"
-       "github.com/bytom/bytom/p2p"
-       "github.com/bytom/bytom/p2p/security"
-       core "github.com/bytom/bytom/protocol"
-       "github.com/bytom/bytom/protocol/bc"
-       "github.com/bytom/bytom/protocol/bc/types"
-       "github.com/tendermint/go-crypto"
-)
-
-const (
-       logModule             = "netsync"
-       maxTxChanSize         = 10000
-       maxFilterAddressSize  = 50
-       maxFilterAddressCount = 1000
-)
-
-var (
-       errVaultModeDialPeer = errors.New("can't dial peer in vault mode")
-)
-
-// Chain is the interface for Bytom core
-type Chain interface {
-       BestBlockHeader() *types.BlockHeader
-       BestBlockHeight() uint64
-       CalcNextSeed(*bc.Hash) (*bc.Hash, error)
-       GetBlockByHash(*bc.Hash) (*types.Block, error)
-       GetBlockByHeight(uint64) (*types.Block, error)
-       GetHeaderByHash(*bc.Hash) (*types.BlockHeader, error)
-       GetHeaderByHeight(uint64) (*types.BlockHeader, error)
-       GetTransactionStatus(*bc.Hash) (*bc.TransactionStatus, error)
-       InMainChain(bc.Hash) bool
-       ProcessBlock(*types.Block) (bool, error)
-       ValidateTx(*types.Tx) (bool, error)
-}
-
-type Switch interface {
-       AddReactor(name string, reactor p2p.Reactor) p2p.Reactor
-       StopPeerGracefully(string)
-       NodeInfo() *p2p.NodeInfo
-       Start() (bool, error)
-       Stop() bool
-       IsListening() bool
-       DialPeerWithAddress(addr *p2p.NetAddress) error
-       Peers() *p2p.PeerSet
-       IsBanned(peerID string, level byte, reason string) bool
-}
-
-//SyncManager Sync Manager is responsible for the business layer information synchronization
-type SyncManager struct {
-       sw           Switch
-       genesisHash  bc.Hash
-       chain        Chain
-       txPool       *core.TxPool
-       blockFetcher *blockFetcher
-       blockKeeper  *blockKeeper
-       peers        *peerSet
-
-       txSyncCh chan *txSyncMsg
-       quitSync chan struct{}
-       config   *cfg.Config
-
-       eventDispatcher *event.Dispatcher
-       minedBlockSub   *event.Subscription
-       txMsgSub        *event.Subscription
-}
-
-// CreateSyncManager create sync manager and set switch.
-func NewSyncManager(config *cfg.Config, chain Chain, txPool *core.TxPool, dispatcher *event.Dispatcher) (*SyncManager, error) {
-       sw, err := p2p.NewSwitch(config)
-       if err != nil {
-               return nil, err
-       }
-
-       return newSyncManager(config, sw, chain, txPool, dispatcher)
-}
-
-//NewSyncManager create a sync manager
-func newSyncManager(config *cfg.Config, sw Switch, chain Chain, txPool *core.TxPool, dispatcher *event.Dispatcher) (*SyncManager, error) {
-       genesisHeader, err := chain.GetHeaderByHeight(0)
-       if err != nil {
-               return nil, err
-       }
-       peers := newPeerSet(sw)
-       manager := &SyncManager{
-               sw:              sw,
-               genesisHash:     genesisHeader.Hash(),
-               txPool:          txPool,
-               chain:           chain,
-               blockFetcher:    newBlockFetcher(chain, peers),
-               blockKeeper:     newBlockKeeper(chain, peers),
-               peers:           peers,
-               txSyncCh:        make(chan *txSyncMsg),
-               quitSync:        make(chan struct{}),
-               config:          config,
-               eventDispatcher: dispatcher,
-       }
-
-       if !config.VaultMode {
-               protocolReactor := NewProtocolReactor(manager, peers)
-               manager.sw.AddReactor("PROTOCOL", protocolReactor)
-       }
-       return manager, nil
-}
-
-//BestPeer return the highest p2p peerInfo
-func (sm *SyncManager) BestPeer() *PeerInfo {
-       bestPeer := sm.peers.bestPeer(consensus.SFFullNode)
-       if bestPeer != nil {
-               return bestPeer.getPeerInfo()
-       }
-       return nil
-}
-
-func (sm *SyncManager) DialPeerWithAddress(addr *p2p.NetAddress) error {
-       if sm.config.VaultMode {
-               return errVaultModeDialPeer
-       }
-
-       return sm.sw.DialPeerWithAddress(addr)
-}
-
-func (sm *SyncManager) GetNetwork() string {
-       return sm.config.ChainID
-}
-
-//GetPeerInfos return peer info of all peers
-func (sm *SyncManager) GetPeerInfos() []*PeerInfo {
-       return sm.peers.getPeerInfos()
-}
-
-//IsCaughtUp check wheather the peer finish the sync
-func (sm *SyncManager) IsCaughtUp() bool {
-       peer := sm.peers.bestPeer(consensus.SFFullNode)
-       return peer == nil || peer.Height() <= sm.chain.BestBlockHeight()
-}
-
-//StopPeer try to stop peer by given ID
-func (sm *SyncManager) StopPeer(peerID string) error {
-       if peer := sm.peers.getPeer(peerID); peer == nil {
-               return errors.New("peerId not exist")
-       }
-       sm.peers.removePeer(peerID)
-       return nil
-}
-
-func (sm *SyncManager) handleBlockMsg(peer *peer, msg *BlockMessage) {
-       block, err := msg.GetBlock()
-       if err != nil {
-               return
-       }
-       sm.blockKeeper.processBlock(peer.ID(), block)
-}
-
-func (sm *SyncManager) handleBlocksMsg(peer *peer, msg *BlocksMessage) {
-       blocks, err := msg.GetBlocks()
-       if err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Debug("fail on handleBlocksMsg GetBlocks")
-               return
-       }
-
-       sm.blockKeeper.processBlocks(peer.ID(), blocks)
-}
-
-func (sm *SyncManager) handleFilterAddMsg(peer *peer, msg *FilterAddMessage) {
-       peer.addFilterAddress(msg.Address)
-}
-
-func (sm *SyncManager) handleFilterClearMsg(peer *peer) {
-       peer.filterAdds.Clear()
-}
-
-func (sm *SyncManager) handleFilterLoadMsg(peer *peer, msg *FilterLoadMessage) {
-       peer.addFilterAddresses(msg.Addresses)
-}
-
-func (sm *SyncManager) handleGetBlockMsg(peer *peer, msg *GetBlockMessage) {
-       var block *types.Block
-       var err error
-       if msg.Height != 0 {
-               block, err = sm.chain.GetBlockByHeight(msg.Height)
-       } else {
-               block, err = sm.chain.GetBlockByHash(msg.GetHash())
-       }
-       if err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on handleGetBlockMsg get block from chain")
-               return
-       }
-
-       ok, err := peer.sendBlock(block)
-       if !ok {
-               sm.peers.removePeer(peer.ID())
-       }
-       if err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetBlockMsg sentBlock")
-       }
-}
-
-func (sm *SyncManager) handleGetBlocksMsg(peer *peer, msg *GetBlocksMessage) {
-       blocks, err := sm.blockKeeper.locateBlocks(msg.GetBlockLocator(), msg.GetStopHash())
-       if err != nil || len(blocks) == 0 {
-               return
-       }
-
-       totalSize := 0
-       sendBlocks := []*types.Block{}
-       for _, block := range blocks {
-               rawData, err := block.MarshalText()
-               if err != nil {
-                       log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetBlocksMsg marshal block")
-                       continue
-               }
-
-               if totalSize+len(rawData) > maxBlockchainResponseSize/2 {
-                       break
-               }
-               totalSize += len(rawData)
-               sendBlocks = append(sendBlocks, block)
-       }
-
-       ok, err := peer.sendBlocks(sendBlocks)
-       if !ok {
-               sm.peers.removePeer(peer.ID())
-       }
-       if err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetBlocksMsg sentBlock")
-       }
-}
-
-func (sm *SyncManager) handleGetHeadersMsg(peer *peer, msg *GetHeadersMessage) {
-       headers, err := sm.blockKeeper.locateHeaders(msg.GetBlockLocator(), msg.GetStopHash())
-       if err != nil || len(headers) == 0 {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Debug("fail on handleGetHeadersMsg locateHeaders")
-               return
-       }
-
-       ok, err := peer.sendHeaders(headers)
-       if !ok {
-               sm.peers.removePeer(peer.ID())
-       }
-       if err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetHeadersMsg sentBlock")
-       }
-}
-
-func (sm *SyncManager) handleGetMerkleBlockMsg(peer *peer, msg *GetMerkleBlockMessage) {
-       var err error
-       var block *types.Block
-       if msg.Height != 0 {
-               block, err = sm.chain.GetBlockByHeight(msg.Height)
-       } else {
-               block, err = sm.chain.GetBlockByHash(msg.GetHash())
-       }
-       if err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on handleGetMerkleBlockMsg get block from chain")
-               return
-       }
-
-       blockHash := block.Hash()
-       txStatus, err := sm.chain.GetTransactionStatus(&blockHash)
-       if err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on handleGetMerkleBlockMsg get transaction status")
-               return
-       }
-
-       ok, err := peer.sendMerkleBlock(block, txStatus)
-       if err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetMerkleBlockMsg sentMerkleBlock")
-               return
-       }
-
-       if !ok {
-               sm.peers.removePeer(peer.ID())
-       }
-}
-
-func (sm *SyncManager) handleHeadersMsg(peer *peer, msg *HeadersMessage) {
-       headers, err := msg.GetHeaders()
-       if err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Debug("fail on handleHeadersMsg GetHeaders")
-               return
-       }
-
-       sm.blockKeeper.processHeaders(peer.ID(), headers)
-}
-
-func (sm *SyncManager) handleMineBlockMsg(peer *peer, msg *MineBlockMessage) {
-       block, err := msg.GetMineBlock()
-       if err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on handleMineBlockMsg GetMineBlock")
-               return
-       }
-
-       hash := block.Hash()
-       peer.markBlock(&hash)
-       sm.blockFetcher.processNewBlock(&blockMsg{peerID: peer.ID(), block: block})
-       peer.setStatus(block.Height, &hash)
-}
-
-func (sm *SyncManager) handleStatusRequestMsg(peer BasePeer) {
-       bestHeader := sm.chain.BestBlockHeader()
-       genesisBlock, err := sm.chain.GetBlockByHeight(0)
-       if err != nil {
-               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleStatusRequestMsg get genesis")
-       }
-
-       genesisHash := genesisBlock.Hash()
-       msg := NewStatusResponseMessage(bestHeader, &genesisHash)
-       if ok := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}); !ok {
-               sm.peers.removePeer(peer.ID())
-       }
-}
-
-func (sm *SyncManager) handleStatusResponseMsg(basePeer BasePeer, msg *StatusResponseMessage) {
-       if peer := sm.peers.getPeer(basePeer.ID()); peer != nil {
-               peer.setStatus(msg.Height, msg.GetHash())
-               return
-       }
-
-       if genesisHash := msg.GetGenesisHash(); sm.genesisHash != *genesisHash {
-               log.WithFields(log.Fields{"module": logModule, "remote genesis": genesisHash.String(), "local genesis": sm.genesisHash.String()}).Warn("fail hand shake due to differnt genesis")
-               return
-       }
-
-       sm.peers.addPeer(basePeer, msg.Height, msg.GetHash())
-}
-
-func (sm *SyncManager) handleTransactionMsg(peer *peer, msg *TransactionMessage) {
-       tx, err := msg.GetTransaction()
-       if err != nil {
-               sm.peers.ProcessIllegal(peer.ID(), security.LevelConnException, "fail on get txs from message")
-               return
-       }
-
-       if isOrphan, err := sm.chain.ValidateTx(tx); err != nil && err != core.ErrDustTx && !isOrphan {
-               sm.peers.ProcessIllegal(peer.ID(), security.LevelMsgIllegal, "fail on validate tx transaction")
-       }
-}
-
-func (sm *SyncManager) IsListening() bool {
-       if sm.config.VaultMode {
-               return false
-       }
-       return sm.sw.IsListening()
-}
-
-func (sm *SyncManager) NodeInfo() *p2p.NodeInfo {
-       if sm.config.VaultMode {
-               return p2p.NewNodeInfo(sm.config, crypto.PubKeyEd25519{}, "")
-       }
-       return sm.sw.NodeInfo()
-}
-
-func (sm *SyncManager) PeerCount() int {
-       if sm.config.VaultMode {
-               return 0
-       }
-       return len(sm.sw.Peers().List())
-}
-
-func (sm *SyncManager) processMsg(basePeer BasePeer, msgType byte, msg BlockchainMessage) {
-       peer := sm.peers.getPeer(basePeer.ID())
-       if peer == nil && msgType != StatusResponseByte && msgType != StatusRequestByte {
-               return
-       }
-
-       log.WithFields(log.Fields{
-               "module":  logModule,
-               "peer":    basePeer.Addr(),
-               "type":    reflect.TypeOf(msg),
-               "message": msg.String(),
-       }).Info("receive message from peer")
-
-       switch msg := msg.(type) {
-       case *GetBlockMessage:
-               sm.handleGetBlockMsg(peer, msg)
-
-       case *BlockMessage:
-               sm.handleBlockMsg(peer, msg)
-
-       case *StatusRequestMessage:
-               sm.handleStatusRequestMsg(basePeer)
-
-       case *StatusResponseMessage:
-               sm.handleStatusResponseMsg(basePeer, msg)
-
-       case *TransactionMessage:
-               sm.handleTransactionMsg(peer, msg)
-
-       case *MineBlockMessage:
-               sm.handleMineBlockMsg(peer, msg)
-
-       case *GetHeadersMessage:
-               sm.handleGetHeadersMsg(peer, msg)
-
-       case *HeadersMessage:
-               sm.handleHeadersMsg(peer, msg)
-
-       case *GetBlocksMessage:
-               sm.handleGetBlocksMsg(peer, msg)
-
-       case *BlocksMessage:
-               sm.handleBlocksMsg(peer, msg)
-
-       case *FilterLoadMessage:
-               sm.handleFilterLoadMsg(peer, msg)
-
-       case *FilterAddMessage:
-               sm.handleFilterAddMsg(peer, msg)
-
-       case *FilterClearMessage:
-               sm.handleFilterClearMsg(peer)
-
-       case *GetMerkleBlockMessage:
-               sm.handleGetMerkleBlockMsg(peer, msg)
-
-       default:
-               log.WithFields(log.Fields{
-                       "module":       logModule,
-                       "peer":         basePeer.Addr(),
-                       "message_type": reflect.TypeOf(msg),
-               }).Error("unhandled message type")
-       }
-}
-
-func (sm *SyncManager) Start() error {
-       var err error
-       if _, err = sm.sw.Start(); err != nil {
-               log.Error("switch start err")
-               return err
-       }
-
-       sm.minedBlockSub, err = sm.eventDispatcher.Subscribe(event.NewMinedBlockEvent{})
-       if err != nil {
-               return err
-       }
-
-       sm.txMsgSub, err = sm.eventDispatcher.Subscribe(core.TxMsgEvent{})
-       if err != nil {
-               return err
-       }
-
-       // broadcast transactions
-       go sm.txBroadcastLoop()
-       go sm.minedBroadcastLoop()
-       go sm.txSyncLoop()
-
-       return nil
-}
-
-//Stop stop sync manager
-func (sm *SyncManager) Stop() {
-       close(sm.quitSync)
-       sm.minedBlockSub.Unsubscribe()
-       if !sm.config.VaultMode {
-               sm.sw.Stop()
-       }
-}
-
-func (sm *SyncManager) minedBroadcastLoop() {
-       for {
-               select {
-               case obj, ok := <-sm.minedBlockSub.Chan():
-                       if !ok {
-                               log.WithFields(log.Fields{"module": logModule}).Warning("mined block subscription channel closed")
-                               return
-                       }
-
-                       ev, ok := obj.Data.(event.NewMinedBlockEvent)
-                       if !ok {
-                               log.WithFields(log.Fields{"module": logModule}).Error("event type error")
-                               continue
-                       }
-
-                       if err := sm.peers.broadcastMinedBlock(&ev.Block); err != nil {
-                               log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on broadcast mine block")
-                               continue
-                       }
-
-               case <-sm.quitSync:
-                       return
-               }
-       }
-}
similarity index 81%
rename from netsync/message.go
rename to netsync/messages/chain_msg.go
index da8d0b0..274d82c 100644 (file)
@@ -1,10 +1,8 @@
-package netsync
+package messages
 
 import (
-       "bytes"
        "encoding/hex"
        "encoding/json"
-       "errors"
        "fmt"
 
        "github.com/tendermint/go-wire"
@@ -23,9 +21,9 @@ const (
        HeadersResponseByte = byte(0x13)
        BlocksRequestByte   = byte(0x14)
        BlocksResponseByte  = byte(0x15)
-       StatusRequestByte   = byte(0x20)
-       StatusResponseByte  = byte(0x21)
+       StatusByte          = byte(0x21)
        NewTransactionByte  = byte(0x30)
+       NewTransactionsByte = byte(0x31)
        NewMineBlockByte    = byte(0x40)
        FilterLoadByte      = byte(0x50)
        FilterAddByte       = byte(0x51)
@@ -33,7 +31,8 @@ const (
        MerkleRequestByte   = byte(0x60)
        MerkleResponseByte  = byte(0x61)
 
-       maxBlockchainResponseSize = 22020096 + 2
+       MaxBlockchainResponseSize = 22020096 + 2
+       TxsMsgMaxTxNum            = 1024
 )
 
 //BlockchainMessage is a generic message for this reactor.
@@ -49,9 +48,9 @@ var _ = wire.RegisterInterface(
        wire.ConcreteType{&HeadersMessage{}, HeadersResponseByte},
        wire.ConcreteType{&GetBlocksMessage{}, BlocksRequestByte},
        wire.ConcreteType{&BlocksMessage{}, BlocksResponseByte},
-       wire.ConcreteType{&StatusRequestMessage{}, StatusRequestByte},
-       wire.ConcreteType{&StatusResponseMessage{}, StatusResponseByte},
+       wire.ConcreteType{&StatusMessage{}, StatusByte},
        wire.ConcreteType{&TransactionMessage{}, NewTransactionByte},
+       wire.ConcreteType{&TransactionsMessage{}, NewTransactionsByte},
        wire.ConcreteType{&MineBlockMessage{}, NewMineBlockByte},
        wire.ConcreteType{&FilterLoadMessage{}, FilterLoadByte},
        wire.ConcreteType{&FilterAddMessage{}, FilterAddByte},
@@ -60,18 +59,6 @@ var _ = wire.RegisterInterface(
        wire.ConcreteType{&MerkleBlockMessage{}, MerkleResponseByte},
 )
 
-//DecodeMessage decode msg
-func DecodeMessage(bz []byte) (msgType byte, msg BlockchainMessage, err error) {
-       msgType = bz[0]
-       n := int(0)
-       r := bytes.NewReader(bz)
-       msg = wire.ReadBinary(struct{ BlockchainMessage }{}, r, maxBlockchainResponseSize, &n, &err).(struct{ BlockchainMessage }).BlockchainMessage
-       if err != nil && n != len(bz) {
-               err = errors.New("DecodeMessage() had bytes left over")
-       }
-       return
-}
-
 //GetBlockMessage request blocks from remote peers by height/hash
 type GetBlockMessage struct {
        Height  uint64
@@ -130,12 +117,14 @@ func (m *BlockMessage) String() string {
 type GetHeadersMessage struct {
        RawBlockLocator [][32]byte
        RawStopHash     [32]byte
+       Skip            uint64
 }
 
 //NewGetHeadersMessage return a new GetHeadersMessage
-func NewGetHeadersMessage(blockLocator []*bc.Hash, stopHash *bc.Hash) *GetHeadersMessage {
+func NewGetHeadersMessage(blockLocator []*bc.Hash, stopHash *bc.Hash, skip uint64) *GetHeadersMessage {
        msg := &GetHeadersMessage{
                RawStopHash: stopHash.Byte32(),
+               Skip:        skip,
        }
        for _, hash := range blockLocator {
                msg.RawBlockLocator = append(msg.RawBlockLocator, hash.Byte32())
@@ -154,7 +143,8 @@ func (m *GetHeadersMessage) GetBlockLocator() []*bc.Hash {
 }
 
 func (m *GetHeadersMessage) String() string {
-       return fmt.Sprintf("{stop_hash: %s}", hex.EncodeToString(m.RawStopHash[:]))
+       stopHash := bc.NewHash(m.RawStopHash)
+       return fmt.Sprintf("{skip:%d,stopHash:%s}", m.Skip, stopHash.String())
 }
 
 //GetStopHash return the stop hash of the msg
@@ -163,6 +153,10 @@ func (m *GetHeadersMessage) GetStopHash() *bc.Hash {
        return &hash
 }
 
+func (m *GetHeadersMessage) GetSkip() uint64 {
+       return m.Skip
+}
+
 //HeadersMessage is one of the bytom msg type
 type HeadersMessage struct {
        RawHeaders [][]byte
@@ -274,43 +268,37 @@ func (m *BlocksMessage) String() string {
        return fmt.Sprintf("{blocks_length: %d}", len(m.RawBlocks))
 }
 
-//StatusRequestMessage status request msg
-type StatusRequestMessage struct{}
-
-func (m *StatusRequestMessage) String() string {
-       return "{}"
-}
-
 //StatusResponseMessage get status response msg
-type StatusResponseMessage struct {
-       Height      uint64
-       RawHash     [32]byte
-       GenesisHash [32]byte
+type StatusMessage struct {
+       BestHeight         uint64
+       BestHash           [32]byte
+       IrreversibleHeight uint64
+       IrreversibleHash   [32]byte
 }
 
 //NewStatusResponseMessage construct get status response msg
-func NewStatusResponseMessage(blockHeader *types.BlockHeader, hash *bc.Hash) *StatusResponseMessage {
-       return &StatusResponseMessage{
-               Height:      blockHeader.Height,
-               RawHash:     blockHeader.Hash().Byte32(),
-               GenesisHash: hash.Byte32(),
+func NewStatusMessage(bestHeader, irreversibleHeader *types.BlockHeader) *StatusMessage {
+       return &StatusMessage{
+               BestHeight:         bestHeader.Height,
+               BestHash:           bestHeader.Hash().Byte32(),
+               IrreversibleHeight: irreversibleHeader.Height,
+               IrreversibleHash:   irreversibleHeader.Hash().Byte32(),
        }
 }
 
 //GetHash get hash from msg
-func (m *StatusResponseMessage) GetHash() *bc.Hash {
-       hash := bc.NewHash(m.RawHash)
+func (m *StatusMessage) GetBestHash() *bc.Hash {
+       hash := bc.NewHash(m.BestHash)
        return &hash
 }
 
-//GetGenesisHash get hash from msg
-func (m *StatusResponseMessage) GetGenesisHash() *bc.Hash {
-       hash := bc.NewHash(m.GenesisHash)
+func (m *StatusMessage) GetIrreversibleHash() *bc.Hash {
+       hash := bc.NewHash(m.IrreversibleHash)
        return &hash
 }
 
-func (m *StatusResponseMessage) String() string {
-       return fmt.Sprintf("{height: %d, hash: %s}", m.Height, hex.EncodeToString(m.RawHash[:]))
+func (m *StatusMessage) String() string {
+       return fmt.Sprintf("{best hash: %s, irreversible hash: %s}", hex.EncodeToString(m.BestHash[:]), hex.EncodeToString(m.IrreversibleHash[:]))
 }
 
 //TransactionMessage notify new tx msg
@@ -344,6 +332,43 @@ func (m *TransactionMessage) String() string {
        return fmt.Sprintf("{tx_size: %d, tx_hash: %s}", len(m.RawTx), tx.ID.String())
 }
 
+//TransactionsMessage notify new txs msg
+type TransactionsMessage struct {
+       RawTxs [][]byte
+}
+
+//NewTransactionsMessage construct notify new txs msg
+func NewTransactionsMessage(txs []*types.Tx) (*TransactionsMessage, error) {
+       rawTxs := make([][]byte, 0, len(txs))
+       for _, tx := range txs {
+               rawTx, err := tx.TxData.MarshalText()
+               if err != nil {
+                       return nil, err
+               }
+
+               rawTxs = append(rawTxs, rawTx)
+       }
+       return &TransactionsMessage{RawTxs: rawTxs}, nil
+}
+
+//GetTransactions get txs from msg
+func (m *TransactionsMessage) GetTransactions() ([]*types.Tx, error) {
+       txs := make([]*types.Tx, 0, len(m.RawTxs))
+       for _, rawTx := range m.RawTxs {
+               tx := &types.Tx{}
+               if err := tx.UnmarshalText(rawTx); err != nil {
+                       return nil, err
+               }
+
+               txs = append(txs, tx)
+       }
+       return txs, nil
+}
+
+func (m *TransactionsMessage) String() string {
+       return fmt.Sprintf("{tx_num: %d}", len(m.RawTxs))
+}
+
 //MineBlockMessage new mined block msg
 type MineBlockMessage struct {
        RawBlock []byte
@@ -430,7 +455,7 @@ type MerkleBlockMessage struct {
        Flags          []byte
 }
 
-func (m *MerkleBlockMessage) setRawBlockHeader(bh types.BlockHeader) error {
+func (m *MerkleBlockMessage) SetRawBlockHeader(bh types.BlockHeader) error {
        rawHeader, err := bh.MarshalText()
        if err != nil {
                return err
@@ -440,7 +465,7 @@ func (m *MerkleBlockMessage) setRawBlockHeader(bh types.BlockHeader) error {
        return nil
 }
 
-func (m *MerkleBlockMessage) setTxInfo(txHashes []*bc.Hash, txFlags []uint8, relatedTxs []*types.Tx) error {
+func (m *MerkleBlockMessage) SetTxInfo(txHashes []*bc.Hash, txFlags []uint8, relatedTxs []*types.Tx) error {
        for _, txHash := range txHashes {
                m.TxHashes = append(m.TxHashes, txHash.Byte32())
        }
@@ -456,7 +481,7 @@ func (m *MerkleBlockMessage) setTxInfo(txHashes []*bc.Hash, txFlags []uint8, rel
        return nil
 }
 
-func (m *MerkleBlockMessage) setStatusInfo(statusHashes []*bc.Hash, relatedStatuses []*bc.TxVerifyResult) error {
+func (m *MerkleBlockMessage) SetStatusInfo(statusHashes []*bc.Hash, relatedStatuses []*bc.TxVerifyResult) error {
        for _, statusHash := range statusHashes {
                m.StatusHashes = append(m.StatusHashes, statusHash.Byte32())
        }
diff --git a/netsync/messages/chain_msg_test.go b/netsync/messages/chain_msg_test.go
new file mode 100644 (file)
index 0000000..e4788d1
--- /dev/null
@@ -0,0 +1,252 @@
+package messages
+
+import (
+       "reflect"
+       "testing"
+
+       "github.com/davecgh/go-spew/spew"
+
+       "github.com/bytom/bytom/consensus"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
+var txs = []*types.Tx{
+       types.NewTx(types.TxData{
+               SerializedSize: uint64(52),
+               Inputs:         []*types.TxInput{types.NewCoinbaseInput([]byte{0x01})},
+               Outputs:        []*types.TxOutput{types.NewTxOutput(*consensus.BTMAssetID, 5000, nil)},
+       }),
+       types.NewTx(types.TxData{
+               SerializedSize: uint64(53),
+               Inputs:         []*types.TxInput{types.NewCoinbaseInput([]byte{0x01, 0x02})},
+               Outputs:        []*types.TxOutput{types.NewTxOutput(*consensus.BTMAssetID, 5000, nil)},
+       }),
+       types.NewTx(types.TxData{
+               SerializedSize: uint64(54),
+               Inputs:         []*types.TxInput{types.NewCoinbaseInput([]byte{0x01, 0x02, 0x03})},
+               Outputs:        []*types.TxOutput{types.NewTxOutput(*consensus.BTMAssetID, 5000, nil)},
+       }),
+       types.NewTx(types.TxData{
+               SerializedSize: uint64(54),
+               Inputs:         []*types.TxInput{types.NewCoinbaseInput([]byte{0x01, 0x02, 0x03})},
+               Outputs:        []*types.TxOutput{types.NewTxOutput(*consensus.BTMAssetID, 2000, nil)},
+       }),
+       types.NewTx(types.TxData{
+               SerializedSize: uint64(54),
+               Inputs:         []*types.TxInput{types.NewCoinbaseInput([]byte{0x01, 0x02, 0x03})},
+               Outputs:        []*types.TxOutput{types.NewTxOutput(*consensus.BTMAssetID, 10000, nil)},
+       }),
+}
+
+func TestTransactionMessage(t *testing.T) {
+       for _, tx := range txs {
+               txMsg, err := NewTransactionMessage(tx)
+               if err != nil {
+                       t.Fatalf("create tx msg err:%s", err)
+               }
+
+               gotTx, err := txMsg.GetTransaction()
+               if err != nil {
+                       t.Fatalf("get txs from txsMsg err:%s", err)
+               }
+               if !reflect.DeepEqual(*tx.Tx, *gotTx.Tx) {
+                       t.Errorf("txs msg test err: got %s\nwant %s", spew.Sdump(tx.Tx), spew.Sdump(gotTx.Tx))
+               }
+       }
+}
+
+func TestTransactionsMessage(t *testing.T) {
+       txsMsg, err := NewTransactionsMessage(txs)
+       if err != nil {
+               t.Fatalf("create txs msg err:%s", err)
+       }
+
+       gotTxs, err := txsMsg.GetTransactions()
+       if err != nil {
+               t.Fatalf("get txs from txsMsg err:%s", err)
+       }
+
+       if len(gotTxs) != len(txs) {
+               t.Fatal("txs msg test err: number of txs not match ")
+       }
+
+       for i, tx := range txs {
+               if !reflect.DeepEqual(tx.Tx, gotTxs[i].Tx) {
+                       t.Errorf("txs msg test err: got %s\nwant %s", spew.Sdump(tx.Tx), spew.Sdump(gotTxs[i].Tx))
+               }
+       }
+}
+
+var testBlock = &types.Block{
+       BlockHeader: types.BlockHeader{
+               Version:   1,
+               Height:    0,
+               Timestamp: 1528945000000,
+               BlockCommitment: types.BlockCommitment{
+                       TransactionsMerkleRoot: bc.Hash{V0: uint64(0x11)},
+                       TransactionStatusHash:  bc.Hash{V0: uint64(0x55)},
+               },
+       },
+}
+
+func TestBlockMessage(t *testing.T) {
+       blockMsg, err := NewBlockMessage(testBlock)
+       if err != nil {
+               t.Fatalf("create new block msg err:%s", err)
+       }
+
+       gotBlock, err := blockMsg.GetBlock()
+       if err != nil {
+               t.Fatalf("got block err:%s", err)
+       }
+
+       if !reflect.DeepEqual(gotBlock.BlockHeader, testBlock.BlockHeader) {
+               t.Errorf("block msg test err: got %s\nwant %s", spew.Sdump(gotBlock.BlockHeader), spew.Sdump(testBlock.BlockHeader))
+       }
+
+       blockMsg.RawBlock[1] = blockMsg.RawBlock[1] + 0x1
+       _, err = blockMsg.GetBlock()
+       if err == nil {
+               t.Fatalf("get mine block err")
+       }
+}
+
+var testHeaders = []*types.BlockHeader{
+       {
+               Version:   1,
+               Height:    0,
+               Timestamp: 1528945000000,
+               BlockCommitment: types.BlockCommitment{
+                       TransactionsMerkleRoot: bc.Hash{V0: uint64(0x11)},
+                       TransactionStatusHash:  bc.Hash{V0: uint64(0x55)},
+               },
+       },
+       {
+               Version:   1,
+               Height:    1,
+               Timestamp: 1528945000000,
+               BlockCommitment: types.BlockCommitment{
+                       TransactionsMerkleRoot: bc.Hash{V0: uint64(0x11)},
+                       TransactionStatusHash:  bc.Hash{V0: uint64(0x55)},
+               },
+       },
+       {
+               Version:   1,
+               Height:    3,
+               Timestamp: 1528945000000,
+               BlockCommitment: types.BlockCommitment{
+                       TransactionsMerkleRoot: bc.Hash{V0: uint64(0x11)},
+                       TransactionStatusHash:  bc.Hash{V0: uint64(0x55)},
+               },
+       },
+}
+
+func TestHeadersMessage(t *testing.T) {
+       headersMsg, err := NewHeadersMessage(testHeaders)
+       if err != nil {
+               t.Fatalf("create headers msg err:%s", err)
+       }
+
+       gotHeaders, err := headersMsg.GetHeaders()
+       if err != nil {
+               t.Fatalf("got headers err:%s", err)
+       }
+
+       if !reflect.DeepEqual(gotHeaders, testHeaders) {
+               t.Errorf("headers msg test err: got %s\nwant %s", spew.Sdump(gotHeaders), spew.Sdump(testHeaders))
+       }
+}
+
+func TestGetBlockMessage(t *testing.T) {
+       getBlockMsg := GetBlockMessage{RawHash: [32]byte{0x01}}
+       gotHash := getBlockMsg.GetHash()
+
+       if !reflect.DeepEqual(gotHash.Byte32(), getBlockMsg.RawHash) {
+               t.Errorf("get block msg test err: got %s\nwant %s", spew.Sdump(gotHash.Byte32()), spew.Sdump(getBlockMsg.RawHash))
+       }
+}
+
+type testGetHeadersMessage struct {
+       blockLocator []*bc.Hash
+       stopHash     *bc.Hash
+       skip         uint64
+}
+
+func TestGetHeadersMessage(t *testing.T) {
+       testMsg := testGetHeadersMessage{
+               blockLocator: []*bc.Hash{{V0: 0x01}, {V0: 0x02}, {V0: 0x03}},
+               stopHash:     &bc.Hash{V0: 0x01},
+               skip:         888,
+       }
+       getHeadersMsg := NewGetHeadersMessage(testMsg.blockLocator, testMsg.stopHash, testMsg.skip)
+       gotBlockLocator := getHeadersMsg.GetBlockLocator()
+       gotStopHash := getHeadersMsg.GetStopHash()
+       gotSkip := getHeadersMsg.GetSkip()
+
+       if !reflect.DeepEqual(testMsg.blockLocator, gotBlockLocator) {
+               t.Errorf("get headers msg test err: got %s\nwant %s", spew.Sdump(gotBlockLocator), spew.Sdump(testMsg.blockLocator))
+       }
+
+       if !reflect.DeepEqual(testMsg.stopHash, gotStopHash) {
+               t.Errorf("get headers msg test err: amount:got %d\nwant %d", gotStopHash, testMsg.stopHash)
+       }
+
+       if !reflect.DeepEqual(testMsg.skip, gotSkip) {
+               t.Errorf("get headers msg test err: skip:got %d\nwant %d", gotSkip, testMsg.skip)
+       }
+}
+
+var testBlocks = []*types.Block{
+       {
+               BlockHeader: types.BlockHeader{
+                       Version:   1,
+                       Height:    0,
+                       Timestamp: 1528945000000,
+                       BlockCommitment: types.BlockCommitment{
+                               TransactionsMerkleRoot: bc.Hash{V0: uint64(0x11)},
+                               TransactionStatusHash:  bc.Hash{V0: uint64(0x55)},
+                       },
+               },
+       },
+       {
+               BlockHeader: types.BlockHeader{
+                       Version:   1,
+                       Height:    0,
+                       Timestamp: 1528945000000,
+                       BlockCommitment: types.BlockCommitment{
+                               TransactionsMerkleRoot: bc.Hash{V0: uint64(0x11)},
+                               TransactionStatusHash:  bc.Hash{V0: uint64(0x55)},
+                       },
+               },
+       },
+}
+
+func TestBlocksMessage(t *testing.T) {
+       blocksMsg, err := NewBlocksMessage(testBlocks)
+       if err != nil {
+               t.Fatalf("create blocks msg err:%s", err)
+       }
+       gotBlocks, err := blocksMsg.GetBlocks()
+       if err != nil {
+               t.Fatalf("get blocks err:%s", err)
+       }
+
+       for _, gotBlock := range gotBlocks {
+               if !reflect.DeepEqual(gotBlock.BlockHeader, testBlock.BlockHeader) {
+                       t.Errorf("block msg test err: got %s\nwant %s", spew.Sdump(gotBlock.BlockHeader), spew.Sdump(testBlock.BlockHeader))
+               }
+       }
+}
+
+func TestStatusMessage(t *testing.T) {
+       statusResponseMsg := NewStatusMessage(&testBlock.BlockHeader, &testBlock.BlockHeader)
+       gotBestHash := statusResponseMsg.GetBestHash()
+       if !reflect.DeepEqual(*gotBestHash, testBlock.Hash()) {
+               t.Errorf("status response msg test err: got %s\nwant %s", spew.Sdump(*gotBestHash), spew.Sdump(testBlock.Hash()))
+       }
+       gotIrreversibleHash := statusResponseMsg.GetIrreversibleHash()
+       if !reflect.DeepEqual(*gotIrreversibleHash, testBlock.Hash()) {
+               t.Errorf("status response msg test err: got %s\nwant %s", spew.Sdump(*gotIrreversibleHash), spew.Sdump(testBlock.Hash()))
+       }
+}
diff --git a/netsync/peer.go b/netsync/peer.go
deleted file mode 100644 (file)
index 5be6921..0000000
+++ /dev/null
@@ -1,462 +0,0 @@
-package netsync
-
-import (
-       "encoding/hex"
-       "net"
-       "reflect"
-       "sync"
-
-       log "github.com/sirupsen/logrus"
-       "github.com/tendermint/tmlibs/flowrate"
-       "gopkg.in/fatih/set.v0"
-
-       "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/errors"
-       "github.com/bytom/bytom/protocol/bc"
-       "github.com/bytom/bytom/protocol/bc/types"
-)
-
-const (
-       maxKnownTxs    = 32768 // Maximum transactions hashes to keep in the known list (prevent DOS)
-       maxKnownBlocks = 1024  // Maximum block hashes to keep in the known list (prevent DOS)
-)
-
-//BasePeer is the interface for connection level peer
-type BasePeer interface {
-       Addr() net.Addr
-       ID() string
-       RemoteAddrHost() string
-       ServiceFlag() consensus.ServiceFlag
-       TrafficStatus() (*flowrate.Status, *flowrate.Status)
-       TrySend(byte, interface{}) bool
-       IsLAN() bool
-}
-
-//BasePeerSet is the intergace for connection level peer manager
-type BasePeerSet interface {
-       StopPeerGracefully(string)
-       IsBanned(ip string, level byte, reason string) bool
-}
-
-// PeerInfo indicate peer status snap
-type PeerInfo struct {
-       ID                  string `json:"peer_id"`
-       RemoteAddr          string `json:"remote_addr"`
-       Height              uint64 `json:"height"`
-       Ping                string `json:"ping"`
-       Duration            string `json:"duration"`
-       TotalSent           int64  `json:"total_sent"`
-       TotalReceived       int64  `json:"total_received"`
-       AverageSentRate     int64  `json:"average_sent_rate"`
-       AverageReceivedRate int64  `json:"average_received_rate"`
-       CurrentSentRate     int64  `json:"current_sent_rate"`
-       CurrentReceivedRate int64  `json:"current_received_rate"`
-}
-
-type peer struct {
-       BasePeer
-       mtx         sync.RWMutex
-       services    consensus.ServiceFlag
-       height      uint64
-       hash        *bc.Hash
-       knownTxs    *set.Set // Set of transaction hashes known to be known by this peer
-       knownBlocks *set.Set // Set of block hashes known to be known by this peer
-       filterAdds  *set.Set // Set of addresses that the spv node cares about.
-}
-
-func newPeer(height uint64, hash *bc.Hash, basePeer BasePeer) *peer {
-       return &peer{
-               BasePeer:    basePeer,
-               services:    basePeer.ServiceFlag(),
-               height:      height,
-               hash:        hash,
-               knownTxs:    set.New(),
-               knownBlocks: set.New(),
-               filterAdds:  set.New(),
-       }
-}
-
-func (p *peer) Height() uint64 {
-       p.mtx.RLock()
-       defer p.mtx.RUnlock()
-       return p.height
-}
-
-func (p *peer) addFilterAddress(address []byte) {
-       p.mtx.Lock()
-       defer p.mtx.Unlock()
-
-       if p.filterAdds.Size() >= maxFilterAddressCount {
-               log.WithField("module", logModule).Warn("the count of filter addresses is greater than limit")
-               return
-       }
-       if len(address) > maxFilterAddressSize {
-               log.WithField("module", logModule).Warn("the size of filter address is greater than limit")
-               return
-       }
-
-       p.filterAdds.Add(hex.EncodeToString(address))
-}
-
-func (p *peer) addFilterAddresses(addresses [][]byte) {
-       if !p.filterAdds.IsEmpty() {
-               p.filterAdds.Clear()
-       }
-       for _, address := range addresses {
-               p.addFilterAddress(address)
-       }
-}
-
-func (p *peer) getBlockByHeight(height uint64) bool {
-       msg := struct{ BlockchainMessage }{&GetBlockMessage{Height: height}}
-       return p.TrySend(BlockchainChannel, msg)
-}
-
-func (p *peer) getBlocks(locator []*bc.Hash, stopHash *bc.Hash) bool {
-       msg := struct{ BlockchainMessage }{NewGetBlocksMessage(locator, stopHash)}
-       return p.TrySend(BlockchainChannel, msg)
-}
-
-func (p *peer) getHeaders(locator []*bc.Hash, stopHash *bc.Hash) bool {
-       msg := struct{ BlockchainMessage }{NewGetHeadersMessage(locator, stopHash)}
-       return p.TrySend(BlockchainChannel, msg)
-}
-
-func (p *peer) getPeerInfo() *PeerInfo {
-       p.mtx.RLock()
-       defer p.mtx.RUnlock()
-
-       sentStatus, receivedStatus := p.TrafficStatus()
-       ping := sentStatus.Idle - receivedStatus.Idle
-       if receivedStatus.Idle > sentStatus.Idle {
-               ping = -ping
-       }
-
-       return &PeerInfo{
-               ID:                  p.ID(),
-               RemoteAddr:          p.Addr().String(),
-               Height:              p.height,
-               Ping:                ping.String(),
-               Duration:            sentStatus.Duration.String(),
-               TotalSent:           sentStatus.Bytes,
-               TotalReceived:       receivedStatus.Bytes,
-               AverageSentRate:     sentStatus.AvgRate,
-               AverageReceivedRate: receivedStatus.AvgRate,
-               CurrentSentRate:     sentStatus.CurRate,
-               CurrentReceivedRate: receivedStatus.CurRate,
-       }
-}
-
-func (p *peer) getRelatedTxAndStatus(txs []*types.Tx, txStatuses *bc.TransactionStatus) ([]*types.Tx, []*bc.TxVerifyResult) {
-       var relatedTxs []*types.Tx
-       var relatedStatuses []*bc.TxVerifyResult
-       for i, tx := range txs {
-               if p.isRelatedTx(tx) {
-                       relatedTxs = append(relatedTxs, tx)
-                       relatedStatuses = append(relatedStatuses, txStatuses.VerifyStatus[i])
-               }
-       }
-       return relatedTxs, relatedStatuses
-}
-
-func (p *peer) isRelatedTx(tx *types.Tx) bool {
-       for _, input := range tx.Inputs {
-               switch inp := input.TypedInput.(type) {
-               case *types.SpendInput:
-                       if p.filterAdds.Has(hex.EncodeToString(inp.ControlProgram)) {
-                               return true
-                       }
-               }
-       }
-       for _, output := range tx.Outputs {
-               if p.filterAdds.Has(hex.EncodeToString(output.ControlProgram)) {
-                       return true
-               }
-       }
-       return false
-}
-
-func (p *peer) isSPVNode() bool {
-       return !p.services.IsEnable(consensus.SFFullNode)
-}
-
-func (p *peer) markBlock(hash *bc.Hash) {
-       p.mtx.Lock()
-       defer p.mtx.Unlock()
-
-       for p.knownBlocks.Size() >= maxKnownBlocks {
-               p.knownBlocks.Pop()
-       }
-       p.knownBlocks.Add(hash.String())
-}
-
-func (p *peer) markTransaction(hash *bc.Hash) {
-       p.mtx.Lock()
-       defer p.mtx.Unlock()
-
-       for p.knownTxs.Size() >= maxKnownTxs {
-               p.knownTxs.Pop()
-       }
-       p.knownTxs.Add(hash.String())
-}
-
-func (p *peer) sendBlock(block *types.Block) (bool, error) {
-       msg, err := NewBlockMessage(block)
-       if err != nil {
-               return false, errors.Wrap(err, "fail on NewBlockMessage")
-       }
-
-       ok := p.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg})
-       if ok {
-               blcokHash := block.Hash()
-               p.knownBlocks.Add(blcokHash.String())
-       }
-       return ok, nil
-}
-
-func (p *peer) sendBlocks(blocks []*types.Block) (bool, error) {
-       msg, err := NewBlocksMessage(blocks)
-       if err != nil {
-               return false, errors.Wrap(err, "fail on NewBlocksMessage")
-       }
-
-       if ok := p.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}); !ok {
-               return ok, nil
-       }
-
-       for _, block := range blocks {
-               blcokHash := block.Hash()
-               p.knownBlocks.Add(blcokHash.String())
-       }
-       return true, nil
-}
-
-func (p *peer) sendHeaders(headers []*types.BlockHeader) (bool, error) {
-       msg, err := NewHeadersMessage(headers)
-       if err != nil {
-               return false, errors.New("fail on NewHeadersMessage")
-       }
-
-       ok := p.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg})
-       return ok, nil
-}
-
-func (p *peer) sendMerkleBlock(block *types.Block, txStatuses *bc.TransactionStatus) (bool, error) {
-       msg := NewMerkleBlockMessage()
-       if err := msg.setRawBlockHeader(block.BlockHeader); err != nil {
-               return false, err
-       }
-
-       relatedTxs, relatedStatuses := p.getRelatedTxAndStatus(block.Transactions, txStatuses)
-
-       txHashes, txFlags := types.GetTxMerkleTreeProof(block.Transactions, relatedTxs)
-       if err := msg.setTxInfo(txHashes, txFlags, relatedTxs); err != nil {
-               return false, nil
-       }
-
-       statusHashes := types.GetStatusMerkleTreeProof(txStatuses.VerifyStatus, txFlags)
-       if err := msg.setStatusInfo(statusHashes, relatedStatuses); err != nil {
-               return false, nil
-       }
-
-       ok := p.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg})
-       return ok, nil
-}
-
-func (p *peer) sendTransactions(txs []*types.Tx) (bool, error) {
-       for _, tx := range txs {
-               if p.isSPVNode() && !p.isRelatedTx(tx) {
-                       continue
-               }
-               msg, err := NewTransactionMessage(tx)
-               if err != nil {
-                       return false, errors.Wrap(err, "failed to tx msg")
-               }
-
-               if p.knownTxs.Has(tx.ID.String()) {
-                       continue
-               }
-               if ok := p.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}); !ok {
-                       return ok, nil
-               }
-               p.knownTxs.Add(tx.ID.String())
-       }
-       return true, nil
-}
-
-func (p *peer) setStatus(height uint64, hash *bc.Hash) {
-       p.mtx.Lock()
-       defer p.mtx.Unlock()
-       p.height = height
-       p.hash = hash
-}
-
-type peerSet struct {
-       BasePeerSet
-       mtx   sync.RWMutex
-       peers map[string]*peer
-}
-
-// newPeerSet creates a new peer set to track the active participants.
-func newPeerSet(basePeerSet BasePeerSet) *peerSet {
-       return &peerSet{
-               BasePeerSet: basePeerSet,
-               peers:       make(map[string]*peer),
-       }
-}
-
-func (ps *peerSet) ProcessIllegal(peerID string, level byte, reason string) {
-       ps.mtx.Lock()
-       peer := ps.peers[peerID]
-       ps.mtx.Unlock()
-
-       if peer == nil {
-               return
-       }
-       if banned := ps.IsBanned(peer.RemoteAddrHost(), level, reason); banned {
-               ps.removePeer(peerID)
-       }
-       return
-}
-
-func (ps *peerSet) addPeer(peer BasePeer, height uint64, hash *bc.Hash) {
-       ps.mtx.Lock()
-       defer ps.mtx.Unlock()
-
-       if _, ok := ps.peers[peer.ID()]; !ok {
-               ps.peers[peer.ID()] = newPeer(height, hash, peer)
-               return
-       }
-       log.WithField("module", logModule).Warning("add existing peer to blockKeeper")
-}
-
-func (ps *peerSet) bestPeer(flag consensus.ServiceFlag) *peer {
-       ps.mtx.RLock()
-       defer ps.mtx.RUnlock()
-
-       var bestPeer *peer
-       for _, p := range ps.peers {
-               if !p.services.IsEnable(flag) {
-                       continue
-               }
-               if bestPeer == nil || p.height > bestPeer.height || (p.height == bestPeer.height && p.IsLAN()) {
-                       bestPeer = p
-               }
-       }
-       return bestPeer
-}
-
-func (ps *peerSet) broadcastMinedBlock(block *types.Block) error {
-       msg, err := NewMinedBlockMessage(block)
-       if err != nil {
-               return errors.Wrap(err, "fail on broadcast mined block")
-       }
-
-       hash := block.Hash()
-       peers := ps.peersWithoutBlock(&hash)
-       for _, peer := range peers {
-               if peer.isSPVNode() {
-                       continue
-               }
-               if ok := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}); !ok {
-                       log.WithFields(log.Fields{"module": logModule, "peer": peer.Addr(), "type": reflect.TypeOf(msg), "message": msg.String()}).Warning("send message to peer error")
-                       ps.removePeer(peer.ID())
-                       continue
-               }
-               peer.markBlock(&hash)
-       }
-       return nil
-}
-
-func (ps *peerSet) broadcastNewStatus(bestBlock, genesisBlock *types.Block) error {
-       bestBlockHash := bestBlock.Hash()
-       peers := ps.peersWithoutBlock(&bestBlockHash)
-
-       genesisHash := genesisBlock.Hash()
-       msg := NewStatusResponseMessage(&bestBlock.BlockHeader, &genesisHash)
-       for _, peer := range peers {
-               if ok := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}); !ok {
-                       log.WithFields(log.Fields{"module": logModule, "peer": peer.Addr(), "type": reflect.TypeOf(msg), "message": msg.String()}).Warning("send message to peer error")
-                       ps.removePeer(peer.ID())
-                       continue
-               }
-       }
-       return nil
-}
-
-func (ps *peerSet) broadcastTx(tx *types.Tx) error {
-       msg, err := NewTransactionMessage(tx)
-       if err != nil {
-               return errors.Wrap(err, "fail on broadcast tx")
-       }
-
-       peers := ps.peersWithoutTx(&tx.ID)
-       for _, peer := range peers {
-               if peer.isSPVNode() && !peer.isRelatedTx(tx) {
-                       continue
-               }
-               if ok := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}); !ok {
-                       log.WithFields(log.Fields{
-                               "module":  logModule,
-                               "peer":    peer.Addr(),
-                               "type":    reflect.TypeOf(msg),
-                               "message": msg.String(),
-                       }).Warning("send message to peer error")
-                       ps.removePeer(peer.ID())
-                       continue
-               }
-               peer.markTransaction(&tx.ID)
-       }
-       return nil
-}
-
-// Peer retrieves the registered peer with the given id.
-func (ps *peerSet) getPeer(id string) *peer {
-       ps.mtx.RLock()
-       defer ps.mtx.RUnlock()
-       return ps.peers[id]
-}
-
-func (ps *peerSet) getPeerInfos() []*PeerInfo {
-       ps.mtx.RLock()
-       defer ps.mtx.RUnlock()
-
-       result := []*PeerInfo{}
-       for _, peer := range ps.peers {
-               result = append(result, peer.getPeerInfo())
-       }
-       return result
-}
-
-func (ps *peerSet) peersWithoutBlock(hash *bc.Hash) []*peer {
-       ps.mtx.RLock()
-       defer ps.mtx.RUnlock()
-
-       peers := []*peer{}
-       for _, peer := range ps.peers {
-               if !peer.knownBlocks.Has(hash.String()) {
-                       peers = append(peers, peer)
-               }
-       }
-       return peers
-}
-
-func (ps *peerSet) peersWithoutTx(hash *bc.Hash) []*peer {
-       ps.mtx.RLock()
-       defer ps.mtx.RUnlock()
-
-       peers := []*peer{}
-       for _, peer := range ps.peers {
-               if !peer.knownTxs.Has(hash.String()) {
-                       peers = append(peers, peer)
-               }
-       }
-       return peers
-}
-
-func (ps *peerSet) removePeer(peerID string) {
-       ps.mtx.Lock()
-       delete(ps.peers, peerID)
-       ps.mtx.Unlock()
-       ps.StopPeerGracefully(peerID)
-}
diff --git a/netsync/peers/peer.go b/netsync/peers/peer.go
new file mode 100644 (file)
index 0000000..cad9656
--- /dev/null
@@ -0,0 +1,665 @@
+package peers
+
+import (
+       "encoding/hex"
+       "net"
+       "reflect"
+       "sync"
+
+       log "github.com/sirupsen/logrus"
+       "github.com/tendermint/tmlibs/flowrate"
+       "gopkg.in/fatih/set.v0"
+
+       "github.com/bytom/bytom/consensus"
+       "github.com/bytom/bytom/errors"
+       msgs "github.com/bytom/bytom/netsync/messages"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
+const (
+       maxKnownTxs           = 32768 // Maximum transactions hashes to keep in the known list (prevent DOS)
+       maxKnownSignatures    = 1024  // Maximum block signatures to keep in the known list (prevent DOS)
+       maxKnownBlocks        = 1024  // Maximum block hashes to keep in the known list (prevent DOS)
+       maxFilterAddressSize  = 50
+       maxFilterAddressCount = 1000
+
+       logModule = "peers"
+)
+
+var (
+       errSendStatusMsg = errors.New("send status msg fail")
+       ErrPeerMisbehave = errors.New("peer is misbehave")
+       ErrNoValidPeer   = errors.New("Can't find valid fast sync peer")
+)
+
+//BasePeer is the interface for connection level peer
+type BasePeer interface {
+       Moniker() string
+       Addr() net.Addr
+       ID() string
+       RemoteAddrHost() string
+       ServiceFlag() consensus.ServiceFlag
+       TrafficStatus() (*flowrate.Status, *flowrate.Status)
+       TrySend(byte, interface{}) bool
+       IsLAN() bool
+}
+
+//BasePeerSet is the interface for connection level peer manager
+type BasePeerSet interface {
+       StopPeerGracefully(string)
+       IsBanned(ip string, level byte, reason string) bool
+}
+
+type BroadcastMsg interface {
+       FilterTargetPeers(ps *PeerSet) []string
+       MarkSendRecord(ps *PeerSet, peers []string)
+       GetChan() byte
+       GetMsg() interface{}
+       MsgString() string
+}
+
+// PeerInfo indicate peer status snap
+type PeerInfo struct {
+       ID                  string `json:"peer_id"`
+       Moniker             string `json:"moniker"`
+       RemoteAddr          string `json:"remote_addr"`
+       Height              uint64 `json:"height"`
+       Ping                string `json:"ping"`
+       Duration            string `json:"duration"`
+       TotalSent           int64  `json:"total_sent"`
+       TotalReceived       int64  `json:"total_received"`
+       AverageSentRate     int64  `json:"average_sent_rate"`
+       AverageReceivedRate int64  `json:"average_received_rate"`
+       CurrentSentRate     int64  `json:"current_sent_rate"`
+       CurrentReceivedRate int64  `json:"current_received_rate"`
+}
+
+type Peer struct {
+       BasePeer
+       mtx                sync.RWMutex
+       services           consensus.ServiceFlag
+       bestHeight         uint64
+       bestHash           *bc.Hash
+       irreversibleHeight uint64
+       irreversibleHash   *bc.Hash
+       knownTxs           *set.Set // Set of transaction hashes known to be known by this peer
+       knownBlocks        *set.Set // Set of block hashes known to be known by this peer
+       knownSignatures    *set.Set // Set of block signatures known to be known by this peer
+       knownStatus        uint64   // Height of the latest status known to be known by this peer
+       filterAdds         *set.Set // Set of addresses that the spv node cares about.
+}
+
+func newPeer(basePeer BasePeer) *Peer {
+       return &Peer{
+               BasePeer:        basePeer,
+               services:        basePeer.ServiceFlag(),
+               knownTxs:        set.New(),
+               knownBlocks:     set.New(),
+               knownSignatures: set.New(),
+               filterAdds:      set.New(),
+       }
+}
+
+func (p *Peer) Height() uint64 {
+       p.mtx.RLock()
+       defer p.mtx.RUnlock()
+
+       return p.bestHeight
+}
+
+func (p *Peer) IrreversibleHeight() uint64 {
+       p.mtx.RLock()
+       defer p.mtx.RUnlock()
+
+       return p.irreversibleHeight
+}
+
+func (p *Peer) AddFilterAddress(address []byte) {
+       p.mtx.Lock()
+       defer p.mtx.Unlock()
+
+       if p.filterAdds.Size() >= maxFilterAddressCount {
+               log.WithField("module", logModule).Warn("the count of filter addresses is greater than limit")
+               return
+       }
+       if len(address) > maxFilterAddressSize {
+               log.WithField("module", logModule).Warn("the size of filter address is greater than limit")
+               return
+       }
+
+       p.filterAdds.Add(hex.EncodeToString(address))
+}
+
+func (p *Peer) AddFilterAddresses(addresses [][]byte) {
+       if !p.filterAdds.IsEmpty() {
+               p.filterAdds.Clear()
+       }
+       for _, address := range addresses {
+               p.AddFilterAddress(address)
+       }
+}
+
+func (p *Peer) FilterClear() {
+       p.filterAdds.Clear()
+}
+
+func (p *Peer) GetBlockByHeight(height uint64) bool {
+       msg := struct{ msgs.BlockchainMessage }{&msgs.GetBlockMessage{Height: height}}
+       return p.TrySend(msgs.BlockchainChannel, msg)
+}
+
+func (p *Peer) GetBlocks(locator []*bc.Hash, stopHash *bc.Hash) bool {
+       msg := struct{ msgs.BlockchainMessage }{msgs.NewGetBlocksMessage(locator, stopHash)}
+       return p.TrySend(msgs.BlockchainChannel, msg)
+}
+
+func (p *Peer) GetHeaders(locator []*bc.Hash, stopHash *bc.Hash, skip uint64) bool {
+       msg := struct{ msgs.BlockchainMessage }{msgs.NewGetHeadersMessage(locator, stopHash, skip)}
+       return p.TrySend(msgs.BlockchainChannel, msg)
+}
+
+func (p *Peer) GetPeerInfo() *PeerInfo {
+       p.mtx.RLock()
+       defer p.mtx.RUnlock()
+
+       sentStatus, receivedStatus := p.TrafficStatus()
+       ping := sentStatus.Idle - receivedStatus.Idle
+       if receivedStatus.Idle > sentStatus.Idle {
+               ping = -ping
+       }
+
+       return &PeerInfo{
+               ID:                  p.ID(),
+               Moniker:             p.BasePeer.Moniker(),
+               RemoteAddr:          p.Addr().String(),
+               Height:              p.bestHeight,
+               Ping:                ping.String(),
+               Duration:            sentStatus.Duration.String(),
+               TotalSent:           sentStatus.Bytes,
+               TotalReceived:       receivedStatus.Bytes,
+               AverageSentRate:     sentStatus.AvgRate,
+               AverageReceivedRate: receivedStatus.AvgRate,
+               CurrentSentRate:     sentStatus.CurRate,
+               CurrentReceivedRate: receivedStatus.CurRate,
+       }
+}
+
+func (p *Peer) getRelatedTxAndStatus(txs []*types.Tx, txStatuses *bc.TransactionStatus) ([]*types.Tx, []*bc.TxVerifyResult) {
+       var relatedTxs []*types.Tx
+       var relatedStatuses []*bc.TxVerifyResult
+       for i, tx := range txs {
+               if p.isRelatedTx(tx) {
+                       relatedTxs = append(relatedTxs, tx)
+                       relatedStatuses = append(relatedStatuses, txStatuses.VerifyStatus[i])
+               }
+       }
+       return relatedTxs, relatedStatuses
+}
+
+func (p *Peer) isRelatedTx(tx *types.Tx) bool {
+       for _, input := range tx.Inputs {
+               switch inp := input.TypedInput.(type) {
+               case *types.SpendInput:
+                       if p.filterAdds.Has(hex.EncodeToString(inp.ControlProgram)) {
+                               return true
+                       }
+               }
+       }
+       for _, output := range tx.Outputs {
+               if p.filterAdds.Has(hex.EncodeToString(output.ControlProgram)) {
+                       return true
+               }
+       }
+       return false
+}
+
+func (p *Peer) isSPVNode() bool {
+       return !p.services.IsEnable(consensus.SFFullNode)
+}
+
+func (p *Peer) MarkBlock(hash *bc.Hash) {
+       p.mtx.Lock()
+       defer p.mtx.Unlock()
+
+       for p.knownBlocks.Size() >= maxKnownBlocks {
+               p.knownBlocks.Pop()
+       }
+       p.knownBlocks.Add(hash.String())
+}
+
+func (p *Peer) markNewStatus(height uint64) {
+       p.mtx.Lock()
+       defer p.mtx.Unlock()
+
+       p.knownStatus = height
+}
+
+func (p *Peer) markSign(signature []byte) {
+       p.mtx.Lock()
+       defer p.mtx.Unlock()
+
+       for p.knownSignatures.Size() >= maxKnownSignatures {
+               p.knownSignatures.Pop()
+       }
+       p.knownSignatures.Add(hex.EncodeToString(signature))
+}
+
+func (p *Peer) markTransaction(hash *bc.Hash) {
+       p.mtx.Lock()
+       defer p.mtx.Unlock()
+
+       for p.knownTxs.Size() >= maxKnownTxs {
+               p.knownTxs.Pop()
+       }
+       p.knownTxs.Add(hash.String())
+}
+
+func (p *Peer) SendBlock(block *types.Block) (bool, error) {
+       msg, err := msgs.NewBlockMessage(block)
+       if err != nil {
+               return false, errors.Wrap(err, "fail on NewBlockMessage")
+       }
+
+       ok := p.TrySend(msgs.BlockchainChannel, struct{ msgs.BlockchainMessage }{msg})
+       if ok {
+               blcokHash := block.Hash()
+               p.knownBlocks.Add(blcokHash.String())
+       }
+       return ok, nil
+}
+
+func (p *Peer) SendBlocks(blocks []*types.Block) (bool, error) {
+       msg, err := msgs.NewBlocksMessage(blocks)
+       if err != nil {
+               return false, errors.Wrap(err, "fail on NewBlocksMessage")
+       }
+
+       if ok := p.TrySend(msgs.BlockchainChannel, struct{ msgs.BlockchainMessage }{msg}); !ok {
+               return ok, nil
+       }
+
+       for _, block := range blocks {
+               blcokHash := block.Hash()
+               p.knownBlocks.Add(blcokHash.String())
+       }
+       return true, nil
+}
+
+func (p *Peer) SendHeaders(headers []*types.BlockHeader) (bool, error) {
+       msg, err := msgs.NewHeadersMessage(headers)
+       if err != nil {
+               return false, errors.New("fail on NewHeadersMessage")
+       }
+
+       ok := p.TrySend(msgs.BlockchainChannel, struct{ msgs.BlockchainMessage }{msg})
+       return ok, nil
+}
+
+func (p *Peer) SendMerkleBlock(block *types.Block, txStatuses *bc.TransactionStatus) (bool, error) {
+       msg := msgs.NewMerkleBlockMessage()
+       if err := msg.SetRawBlockHeader(block.BlockHeader); err != nil {
+               return false, err
+       }
+
+       relatedTxs, relatedStatuses := p.getRelatedTxAndStatus(block.Transactions, txStatuses)
+
+       txHashes, txFlags := types.GetTxMerkleTreeProof(block.Transactions, relatedTxs)
+       if err := msg.SetTxInfo(txHashes, txFlags, relatedTxs); err != nil {
+               return false, nil
+       }
+
+       statusHashes := types.GetStatusMerkleTreeProof(txStatuses.VerifyStatus, txFlags)
+       if err := msg.SetStatusInfo(statusHashes, relatedStatuses); err != nil {
+               return false, nil
+       }
+
+       ok := p.TrySend(msgs.BlockchainChannel, struct{ msgs.BlockchainMessage }{msg})
+       return ok, nil
+}
+
+func (p *Peer) SendTransactions(txs []*types.Tx) error {
+       validTxs := make([]*types.Tx, 0, len(txs))
+       for i, tx := range txs {
+               if p.isSPVNode() && !p.isRelatedTx(tx) || p.knownTxs.Has(tx.ID.String()) {
+                       continue
+               }
+
+               validTxs = append(validTxs, tx)
+               if len(validTxs) != msgs.TxsMsgMaxTxNum && i != len(txs)-1 {
+                       continue
+               }
+
+               msg, err := msgs.NewTransactionsMessage(validTxs)
+               if err != nil {
+                       return err
+               }
+
+               if ok := p.TrySend(msgs.BlockchainChannel, struct{ msgs.BlockchainMessage }{msg}); !ok {
+                       return errors.New("failed to send txs msg")
+               }
+
+               for _, validTx := range validTxs {
+                       p.knownTxs.Add(validTx.ID.String())
+               }
+
+               validTxs = make([]*types.Tx, 0, len(txs))
+       }
+
+       return nil
+}
+
+func (p *Peer) SendStatus(bestHeader, irreversibleHeader *types.BlockHeader) error {
+       msg := msgs.NewStatusMessage(bestHeader, irreversibleHeader)
+       if ok := p.TrySend(msgs.BlockchainChannel, struct{ msgs.BlockchainMessage }{msg}); !ok {
+               return errSendStatusMsg
+       }
+       p.markNewStatus(bestHeader.Height)
+       return nil
+}
+
+func (p *Peer) SetBestStatus(bestHeight uint64, bestHash *bc.Hash) {
+       p.mtx.Lock()
+       defer p.mtx.Unlock()
+
+       p.bestHeight = bestHeight
+       p.bestHash = bestHash
+}
+
+func (p *Peer) SetIrreversibleStatus(irreversibleHeight uint64, irreversibleHash *bc.Hash) {
+       p.mtx.Lock()
+       defer p.mtx.Unlock()
+
+       p.irreversibleHeight = irreversibleHeight
+       p.irreversibleHash = irreversibleHash
+}
+
+type PeerSet struct {
+       BasePeerSet
+       mtx   sync.RWMutex
+       peers map[string]*Peer
+}
+
+// NewPeerSet creates a new peer set to track the active participants.
+func NewPeerSet(basePeerSet BasePeerSet) *PeerSet {
+       return &PeerSet{
+               BasePeerSet: basePeerSet,
+               peers:       make(map[string]*Peer),
+       }
+}
+
+func (ps *PeerSet) ProcessIllegal(peerID string, level byte, reason string) {
+       ps.mtx.Lock()
+       peer := ps.peers[peerID]
+       ps.mtx.Unlock()
+
+       if peer == nil {
+               return
+       }
+
+       if banned := ps.IsBanned(peer.RemoteAddrHost(), level, reason); banned {
+               ps.RemovePeer(peerID)
+       }
+       return
+}
+
+func (ps *PeerSet) AddPeer(peer BasePeer) {
+       ps.mtx.Lock()
+       defer ps.mtx.Unlock()
+
+       if _, ok := ps.peers[peer.ID()]; !ok {
+               ps.peers[peer.ID()] = newPeer(peer)
+               return
+       }
+       log.WithField("module", logModule).Warning("add existing peer to blockKeeper")
+}
+
+func (ps *PeerSet) BestPeer(flag consensus.ServiceFlag) *Peer {
+       ps.mtx.RLock()
+       defer ps.mtx.RUnlock()
+
+       var bestPeer *Peer
+       for _, p := range ps.peers {
+               if !p.services.IsEnable(flag) {
+                       continue
+               }
+               if bestPeer == nil || p.bestHeight > bestPeer.bestHeight || (p.bestHeight == bestPeer.bestHeight && p.IsLAN()) {
+                       bestPeer = p
+               }
+       }
+       return bestPeer
+}
+
+func (ps *PeerSet) BestIrreversiblePeer(flag consensus.ServiceFlag) *Peer {
+       ps.mtx.RLock()
+       defer ps.mtx.RUnlock()
+
+       var bestPeer *Peer
+       for _, p := range ps.peers {
+               if !p.services.IsEnable(flag) {
+                       continue
+               }
+               if bestPeer == nil || p.irreversibleHeight > bestPeer.irreversibleHeight || (p.irreversibleHeight == bestPeer.irreversibleHeight && p.IsLAN()) {
+                       bestPeer = p
+               }
+       }
+       return bestPeer
+}
+
+//SendMsg sends a message to the target peer.
+func (ps *PeerSet) SendMsg(peerID string, msgChannel byte, msg interface{}) bool {
+       peer := ps.GetPeer(peerID)
+       if peer == nil {
+               return false
+       }
+
+       ok := peer.TrySend(msgChannel, msg)
+       if !ok {
+               ps.RemovePeer(peerID)
+       }
+       return ok
+}
+
+//BroadcastMsg broadcasts a message to the target peers
+// and marks the message send record.
+func (ps *PeerSet) BroadcastMsg(bm BroadcastMsg) error {
+       //filter target peers
+       peers := bm.FilterTargetPeers(ps)
+
+       //broadcast to target peers
+       peersSuccess := make([]string, 0)
+       for _, peer := range peers {
+               if ok := ps.SendMsg(peer, bm.GetChan(), bm.GetMsg()); !ok {
+                       log.WithFields(log.Fields{"module": logModule, "peer": peer, "type": reflect.TypeOf(bm.GetMsg()), "message": bm.MsgString()}).Warning("send message to peer error")
+                       continue
+               }
+               peersSuccess = append(peersSuccess, peer)
+       }
+
+       //mark the message send record
+       bm.MarkSendRecord(ps, peersSuccess)
+       return nil
+}
+
+func (ps *PeerSet) BroadcastNewStatus(bestHeader, irreversibleHeader *types.BlockHeader) error {
+       msg := msgs.NewStatusMessage(bestHeader, irreversibleHeader)
+       peers := ps.peersWithoutNewStatus(bestHeader.Height)
+       for _, peer := range peers {
+               if ok := peer.TrySend(msgs.BlockchainChannel, struct{ msgs.BlockchainMessage }{msg}); !ok {
+                       ps.RemovePeer(peer.ID())
+                       continue
+               }
+
+               peer.markNewStatus(bestHeader.Height)
+       }
+       return nil
+}
+
+func (ps *PeerSet) BroadcastTx(tx *types.Tx) error {
+       msg, err := msgs.NewTransactionMessage(tx)
+       if err != nil {
+               return errors.Wrap(err, "fail on broadcast tx")
+       }
+
+       peers := ps.peersWithoutTx(&tx.ID)
+       for _, peer := range peers {
+               if peer.isSPVNode() && !peer.isRelatedTx(tx) {
+                       continue
+               }
+               if ok := peer.TrySend(msgs.BlockchainChannel, struct{ msgs.BlockchainMessage }{msg}); !ok {
+                       log.WithFields(log.Fields{
+                               "module":  logModule,
+                               "peer":    peer.Addr(),
+                               "type":    reflect.TypeOf(msg),
+                               "message": msg.String(),
+                       }).Warning("send message to peer error")
+                       ps.RemovePeer(peer.ID())
+                       continue
+               }
+               peer.markTransaction(&tx.ID)
+       }
+       return nil
+}
+
+// Peer retrieves the registered peer with the given id.
+func (ps *PeerSet) GetPeer(id string) *Peer {
+       ps.mtx.RLock()
+       defer ps.mtx.RUnlock()
+       return ps.peers[id]
+}
+
+func (ps *PeerSet) GetPeersByHeight(height uint64) []*Peer {
+       ps.mtx.RLock()
+       defer ps.mtx.RUnlock()
+
+       peers := []*Peer{}
+       for _, peer := range ps.peers {
+               if peer.Height() >= height {
+                       peers = append(peers, peer)
+               }
+       }
+       return peers
+}
+
+func (ps *PeerSet) GetPeerInfos() []*PeerInfo {
+       ps.mtx.RLock()
+       defer ps.mtx.RUnlock()
+
+       result := []*PeerInfo{}
+       for _, peer := range ps.peers {
+               result = append(result, peer.GetPeerInfo())
+       }
+       return result
+}
+
+func (ps *PeerSet) MarkBlock(peerID string, hash *bc.Hash) {
+       peer := ps.GetPeer(peerID)
+       if peer == nil {
+               return
+       }
+       peer.MarkBlock(hash)
+}
+
+func (ps *PeerSet) MarkBlockSignature(peerID string, signature []byte) {
+       peer := ps.GetPeer(peerID)
+       if peer == nil {
+               return
+       }
+       peer.markSign(signature)
+}
+
+func (ps *PeerSet) MarkStatus(peerID string, height uint64) {
+       peer := ps.GetPeer(peerID)
+       if peer == nil {
+               return
+       }
+       peer.markNewStatus(height)
+}
+
+func (ps *PeerSet) MarkTx(peerID string, txHash bc.Hash) {
+       ps.mtx.Lock()
+       peer := ps.peers[peerID]
+       ps.mtx.Unlock()
+
+       if peer == nil {
+               return
+       }
+       peer.markTransaction(&txHash)
+}
+
+func (ps *PeerSet) PeersWithoutBlock(hash bc.Hash) []string {
+       ps.mtx.RLock()
+       defer ps.mtx.RUnlock()
+
+       var peers []string
+       for _, peer := range ps.peers {
+               if !peer.knownBlocks.Has(hash.String()) {
+                       peers = append(peers, peer.ID())
+               }
+       }
+       return peers
+}
+
+func (ps *PeerSet) PeersWithoutSignature(signature []byte) []string {
+       ps.mtx.RLock()
+       defer ps.mtx.RUnlock()
+
+       var peers []string
+       for _, peer := range ps.peers {
+               if !peer.knownSignatures.Has(hex.EncodeToString(signature)) {
+                       peers = append(peers, peer.ID())
+               }
+       }
+       return peers
+}
+
+func (ps *PeerSet) peersWithoutNewStatus(height uint64) []*Peer {
+       ps.mtx.RLock()
+       defer ps.mtx.RUnlock()
+
+       var peers []*Peer
+       for _, peer := range ps.peers {
+               if peer.knownStatus < height {
+                       peers = append(peers, peer)
+               }
+       }
+       return peers
+}
+
+func (ps *PeerSet) peersWithoutTx(hash *bc.Hash) []*Peer {
+       ps.mtx.RLock()
+       defer ps.mtx.RUnlock()
+
+       peers := []*Peer{}
+       for _, peer := range ps.peers {
+               if !peer.knownTxs.Has(hash.String()) {
+                       peers = append(peers, peer)
+               }
+       }
+       return peers
+}
+
+func (ps *PeerSet) RemovePeer(peerID string) {
+       ps.mtx.Lock()
+       delete(ps.peers, peerID)
+       ps.mtx.Unlock()
+       ps.StopPeerGracefully(peerID)
+}
+
+func (ps *PeerSet) SetStatus(peerID string, height uint64, hash *bc.Hash) {
+       peer := ps.GetPeer(peerID)
+       if peer == nil {
+               return
+       }
+
+       peer.SetBestStatus(height, hash)
+}
+
+func (ps *PeerSet) SetIrreversibleStatus(peerID string, height uint64, hash *bc.Hash) {
+       peer := ps.GetPeer(peerID)
+       if peer == nil {
+               return
+       }
+
+       peer.SetIrreversibleStatus(height, hash)
+}
diff --git a/netsync/peers/peer_test.go b/netsync/peers/peer_test.go
new file mode 100644 (file)
index 0000000..6305aec
--- /dev/null
@@ -0,0 +1,421 @@
+package peers
+
+import (
+       "net"
+       "reflect"
+       "testing"
+
+       "github.com/bytom/bytom/consensus"
+       "github.com/bytom/bytom/p2p/security"
+       "github.com/bytom/bytom/protocol/bc"
+       "github.com/bytom/bytom/protocol/bc/types"
+       "github.com/davecgh/go-spew/spew"
+       "github.com/tendermint/tmlibs/flowrate"
+)
+
var (
	// Fixed peer identifiers shared by the tests in this file.
	peer1ID = "PEER1"
	peer2ID = "PEER2"
	peer3ID = "PEER3"
	peer4ID = "PEER4"

	// Distinct block hashes used as best/irreversible status fixtures.
	block1000Hash = bc.NewHash([32]byte{0x01, 0x02})
	block2000Hash = bc.NewHash([32]byte{0x02, 0x03})
	block3000Hash = bc.NewHash([32]byte{0x03, 0x04})
)
+
+type basePeer struct {
+       id          string
+       serviceFlag consensus.ServiceFlag
+       isLan       bool
+}
+
+func (bp *basePeer) Addr() net.Addr {
+       return nil
+}
+
+func (bp *basePeer) ID() string {
+       return bp.id
+}
+
+func (bp *basePeer) Moniker() string {
+       return ""
+}
+
+func (bp *basePeer) RemoteAddrHost() string {
+       switch bp.ID() {
+       case peer1ID:
+               return peer1ID
+       case peer2ID:
+               return peer2ID
+       case peer3ID:
+               return peer3ID
+       case peer4ID:
+               return peer4ID
+       }
+       return ""
+}
+
+func (bp *basePeer) ServiceFlag() consensus.ServiceFlag {
+       return bp.serviceFlag
+}
+
+func (bp *basePeer) TrafficStatus() (*flowrate.Status, *flowrate.Status) {
+       return nil, nil
+}
+
+func (bp *basePeer) TrySend(byte, interface{}) bool {
+       return true
+}
+
+func (bp *basePeer) IsLAN() bool {
+       return bp.isLan
+}
+
+func TestSetPeerStatus(t *testing.T) {
+       peer := newPeer(&basePeer{})
+       height := uint64(100)
+       hash := bc.NewHash([32]byte{0x1, 0x2})
+       peer.SetBestStatus(height, &hash)
+       if peer.Height() != height {
+               t.Fatalf("test set best status err. got %d want %d", peer.Height(), height)
+       }
+}
+
+func TestSetIrreversibleStatus(t *testing.T) {
+       peer := newPeer(&basePeer{})
+       height := uint64(100)
+       hash := bc.NewHash([32]byte{0x1, 0x2})
+       peer.SetIrreversibleStatus(height, &hash)
+       if peer.IrreversibleHeight() != height {
+               t.Fatalf("test set Irreversible status err. got %d want %d", peer.Height(), height)
+       }
+}
+
+func TestAddFilterAddresses(t *testing.T) {
+       peer := newPeer(&basePeer{})
+       tx := types.NewTx(types.TxData{
+               Inputs: []*types.TxInput{
+                       types.NewSpendInput(nil, bc.Hash{}, bc.NewAssetID([32]byte{1}), 5, 1, []byte("spendProgram")),
+               },
+               Outputs: []*types.TxOutput{
+                       types.NewTxOutput(bc.NewAssetID([32]byte{3}), 8, []byte("outProgram")),
+               },
+       })
+
+       peer.AddFilterAddresses([][]byte{[]byte("spendProgram")})
+       if !peer.isRelatedTx(tx) {
+               t.Fatal("test filter addresses error.")
+       }
+
+       peer.AddFilterAddresses([][]byte{[]byte("testProgram")})
+       if peer.isRelatedTx(tx) {
+               t.Fatal("test filter addresses error.")
+       }
+}
+
+func TestFilterClear(t *testing.T) {
+       peer := newPeer(&basePeer{})
+       tx := types.NewTx(types.TxData{
+               Inputs: []*types.TxInput{
+                       types.NewSpendInput(nil, bc.Hash{}, bc.NewAssetID([32]byte{1}), 5, 1, []byte("spendProgram")),
+               },
+               Outputs: []*types.TxOutput{
+                       types.NewTxOutput(bc.NewAssetID([32]byte{3}), 8, []byte("outProgram")),
+               },
+       })
+
+       peer.AddFilterAddresses([][]byte{[]byte("spendProgram")})
+       if !peer.isRelatedTx(tx) {
+               t.Fatal("test filter addresses error.")
+       }
+
+       peer.FilterClear()
+       if peer.isRelatedTx(tx) {
+               t.Fatal("test filter addresses error.")
+       }
+}
+
// TestGetRelatedTxAndStatus checks that getRelatedTxAndStatus returns only
// the transactions matching the peer's address filter, together with their
// index-aligned verify-status entries, preserving input order.
func TestGetRelatedTxAndStatus(t *testing.T) {
	peer := newPeer(&basePeer{})
	// Three txs with distinct spend/output programs; the filter installed
	// below matches txs[0] (by input program) and txs[2] (by output
	// program) but not txs[1].
	txs := []*types.Tx{
		types.NewTx(types.TxData{
			Inputs: []*types.TxInput{
				types.NewSpendInput(nil, bc.Hash{}, bc.NewAssetID([32]byte{1}), 5, 1, []byte("spendProgram1")),
			},
			Outputs: []*types.TxOutput{
				types.NewTxOutput(bc.NewAssetID([32]byte{3}), 8, []byte("outProgram1")),
			},
		}),
		types.NewTx(types.TxData{
			Inputs: []*types.TxInput{
				types.NewSpendInput(nil, bc.Hash{}, bc.NewAssetID([32]byte{1}), 5, 1, []byte("spendProgram2")),
			},
			Outputs: []*types.TxOutput{
				types.NewTxOutput(bc.NewAssetID([32]byte{3}), 8, []byte("outProgram2")),
			},
		}),
		types.NewTx(types.TxData{
			Inputs: []*types.TxInput{
				types.NewSpendInput(nil, bc.Hash{}, bc.NewAssetID([32]byte{1}), 5, 1, []byte("spendProgram3")),
			},
			Outputs: []*types.TxOutput{
				types.NewTxOutput(bc.NewAssetID([32]byte{3}), 8, []byte("outProgram3")),
			},
		}),
	}
	// One verify result per tx, index-aligned with txs.
	txStatuses := &bc.TransactionStatus{
		VerifyStatus: []*bc.TxVerifyResult{{StatusFail: true}, {StatusFail: false}, {StatusFail: false}},
	}
	peer.AddFilterAddresses([][]byte{[]byte("spendProgram1"), []byte("outProgram3")})
	gotTxs, gotStatus := peer.getRelatedTxAndStatus(txs, txStatuses)
	if len(gotTxs) != 2 {
		t.Error("TestGetRelatedTxAndStatus txs size error")
	}

	if !reflect.DeepEqual(*gotTxs[0].Tx, *txs[0].Tx) {
		t.Errorf("txs msg test err: got %s\nwant %s", spew.Sdump(gotTxs[0].Tx), spew.Sdump(txs[0].Tx))
	}

	if !reflect.DeepEqual(*gotTxs[1].Tx, *txs[2].Tx) {
		t.Errorf("txs msg test err: got %s\nwant %s", spew.Sdump(gotTxs[1].Tx), spew.Sdump(txs[2].Tx))
	}

	// The returned statuses must correspond to txs[0] and txs[2] respectively.
	if gotStatus[0].StatusFail != true || gotStatus[1].StatusFail != false {
		t.Error("TestGetRelatedTxAndStatus txs status error")
	}
}
+
+type basePeerSet struct {
+}
+
+func (bp *basePeerSet) StopPeerGracefully(string) {
+
+}
+
+func (bp *basePeerSet) IsBanned(ip string, level byte, reason string) bool {
+       switch ip {
+       case peer1ID:
+               return true
+       case peer2ID:
+               return false
+       case peer3ID:
+               return true
+       case peer4ID:
+               return false
+       }
+       return false
+}
+
+func TestMarkBlock(t *testing.T) {
+       ps := NewPeerSet(&basePeerSet{})
+       ps.AddPeer(&basePeer{id: peer1ID})
+       ps.AddPeer(&basePeer{id: peer2ID})
+       ps.AddPeer(&basePeer{id: peer3ID})
+
+       blockHash := bc.NewHash([32]byte{0x01, 0x02})
+       ps.MarkBlock(peer1ID, &blockHash)
+       targetPeers := []string{peer2ID, peer3ID}
+
+       peers := ps.PeersWithoutBlock(blockHash)
+       if len(peers) != len(targetPeers) {
+               t.Fatalf("test mark block err. Number of target peers %d got %d", 1, len(peers))
+       }
+
+       for _, targetPeer := range targetPeers {
+               flag := false
+               for _, gotPeer := range peers {
+                       if gotPeer == targetPeer {
+                               flag = true
+                               break
+                       }
+               }
+               if !flag {
+                       t.Errorf("test mark block err. can't found target peer %s ", targetPeer)
+               }
+       }
+}
+
+func TestMarkStatus(t *testing.T) {
+       ps := NewPeerSet(&basePeerSet{})
+       ps.AddPeer(&basePeer{id: peer1ID})
+       ps.AddPeer(&basePeer{id: peer2ID})
+       ps.AddPeer(&basePeer{id: peer3ID})
+
+       height := uint64(1024)
+       ps.MarkStatus(peer1ID, height)
+       targetPeers := []string{peer2ID, peer3ID}
+
+       peers := ps.peersWithoutNewStatus(height)
+       if len(peers) != len(targetPeers) {
+               t.Fatalf("test mark status err. Number of target peers %d got %d", 1, len(peers))
+       }
+
+       for _, targetPeer := range targetPeers {
+               flag := false
+               for _, gotPeer := range peers {
+                       if gotPeer.ID() == targetPeer {
+                               flag = true
+                               break
+                       }
+               }
+               if !flag {
+                       t.Errorf("test mark status err. can't found target peer %s ", targetPeer)
+               }
+       }
+}
+
+func TestMarkBlockSignature(t *testing.T) {
+       ps := NewPeerSet(&basePeerSet{})
+       ps.AddPeer(&basePeer{id: peer1ID})
+       ps.AddPeer(&basePeer{id: peer2ID})
+       ps.AddPeer(&basePeer{id: peer3ID})
+
+       signature := []byte{0x01, 0x02}
+       ps.MarkBlockSignature(peer1ID, signature)
+       targetPeers := []string{peer2ID, peer3ID}
+
+       peers := ps.PeersWithoutSignature(signature)
+       if len(peers) != len(targetPeers) {
+               t.Fatalf("test mark block signature err. Number of target peers %d got %d", 1, len(peers))
+       }
+
+       for _, targetPeer := range targetPeers {
+               flag := false
+               for _, gotPeer := range peers {
+                       if gotPeer == targetPeer {
+                               flag = true
+                               break
+                       }
+               }
+               if !flag {
+                       t.Errorf("test mark block signature err. can't found target peer %s ", targetPeer)
+               }
+       }
+}
+
+func TestMarkTx(t *testing.T) {
+       ps := NewPeerSet(&basePeerSet{})
+       ps.AddPeer(&basePeer{id: peer1ID})
+       ps.AddPeer(&basePeer{id: peer2ID})
+       ps.AddPeer(&basePeer{id: peer3ID})
+
+       txHash := bc.NewHash([32]byte{0x01, 0x02})
+       ps.MarkTx(peer1ID, txHash)
+       peers := ps.peersWithoutTx(&txHash)
+       targetPeers := []string{peer2ID, peer3ID}
+       if len(peers) != len(targetPeers) {
+               t.Fatalf("test mark tx err. Number of target peers %d got %d", 1, len(peers))
+       }
+
+       for _, targetPeer := range targetPeers {
+               flag := false
+               for _, gotPeer := range peers {
+                       if gotPeer.ID() == targetPeer {
+                               flag = true
+                               break
+                       }
+               }
+               if !flag {
+                       t.Errorf("test mark tx err. can't found target peer %s ", targetPeer)
+               }
+       }
+}
+
+func TestSetStatus(t *testing.T) {
+       ps := NewPeerSet(&basePeerSet{})
+       ps.AddPeer(&basePeer{id: peer1ID, serviceFlag: consensus.SFFullNode})
+       ps.AddPeer(&basePeer{id: peer2ID, serviceFlag: consensus.SFFullNode})
+       ps.AddPeer(&basePeer{id: peer3ID, serviceFlag: consensus.SFFastSync})
+       ps.AddPeer(&basePeer{id: peer4ID, serviceFlag: consensus.SFFullNode, isLan: true})
+       ps.SetStatus(peer1ID, 1000, &block1000Hash)
+       ps.SetStatus(peer2ID, 2000, &block2000Hash)
+       ps.SetStatus(peer3ID, 3000, &block3000Hash)
+       ps.SetStatus(peer4ID, 2000, &block2000Hash)
+       targetPeer := peer4ID
+
+       peer := ps.BestPeer(consensus.SFFullNode)
+
+       if peer.ID() != targetPeer {
+               t.Fatalf("test set status err. Name of target peer %s got %s", peer4ID, peer.ID())
+       }
+}
+
+func TestIrreversibleStatus(t *testing.T) {
+       ps := NewPeerSet(&basePeerSet{})
+       ps.AddPeer(&basePeer{id: peer1ID, serviceFlag: consensus.SFFullNode})
+       ps.AddPeer(&basePeer{id: peer2ID, serviceFlag: consensus.SFFullNode})
+       ps.AddPeer(&basePeer{id: peer3ID, serviceFlag: consensus.SFFastSync})
+       ps.AddPeer(&basePeer{id: peer4ID, serviceFlag: consensus.SFFastSync, isLan: true})
+       ps.SetIrreversibleStatus(peer1ID, 1000, &block1000Hash)
+       ps.SetIrreversibleStatus(peer2ID, 2000, &block2000Hash)
+       ps.SetIrreversibleStatus(peer3ID, 3000, &block3000Hash)
+       ps.SetIrreversibleStatus(peer4ID, 3000, &block3000Hash)
+       targetPeer := peer4ID
+       peer := ps.BestIrreversiblePeer(consensus.SFFastSync)
+
+       if peer.ID() != targetPeer {
+               t.Fatalf("test set status err. Name of target peer %s got %s", peer4ID, peer.ID())
+       }
+}
+
+func TestGetPeersByHeight(t *testing.T) {
+       ps := NewPeerSet(&basePeerSet{})
+       ps.AddPeer(&basePeer{id: peer1ID, serviceFlag: consensus.SFFullNode})
+       ps.AddPeer(&basePeer{id: peer2ID, serviceFlag: consensus.SFFullNode})
+       ps.AddPeer(&basePeer{id: peer3ID, serviceFlag: consensus.SFFastSync})
+       ps.AddPeer(&basePeer{id: peer4ID, serviceFlag: consensus.SFFullNode, isLan: true})
+       ps.SetStatus(peer1ID, 1000, &block1000Hash)
+       ps.SetStatus(peer2ID, 2000, &block2000Hash)
+       ps.SetStatus(peer3ID, 3000, &block3000Hash)
+       ps.SetStatus(peer4ID, 2000, &block2000Hash)
+       peers := ps.GetPeersByHeight(2000)
+       targetPeers := []string{peer2ID, peer3ID, peer4ID}
+       if len(peers) != len(targetPeers) {
+               t.Fatalf("test get peers by height err. Number of target peers %d got %d", 3, len(peers))
+       }
+
+       for _, targetPeer := range targetPeers {
+               flag := false
+               for _, gotPeer := range peers {
+                       if gotPeer.ID() == targetPeer {
+                               flag = true
+                               break
+                       }
+               }
+               if !flag {
+                       t.Errorf("test get peers by height err. can't found target peer %s ", targetPeer)
+               }
+       }
+}
+
+func TestRemovePeer(t *testing.T) {
+       ps := NewPeerSet(&basePeerSet{})
+       ps.AddPeer(&basePeer{id: peer1ID})
+       ps.AddPeer(&basePeer{id: peer2ID})
+
+       ps.RemovePeer(peer1ID)
+       if peer := ps.GetPeer(peer1ID); peer != nil {
+               t.Fatalf("remove peer %s err", peer1ID)
+       }
+
+       if peer := ps.GetPeer(peer2ID); peer == nil {
+               t.Fatalf("Error remove peer %s err", peer2ID)
+       }
+}
+
+func TestProcessIllegal(t *testing.T) {
+       ps := NewPeerSet(&basePeerSet{})
+       ps.AddPeer(&basePeer{id: peer1ID})
+       ps.AddPeer(&basePeer{id: peer2ID})
+
+       ps.ProcessIllegal(peer1ID, security.LevelMsgIllegal, "test")
+       if peer := ps.GetPeer(peer1ID); peer != nil {
+               t.Fatalf("remove peer %s err", peer1ID)
+       }
+
+       ps.ProcessIllegal(peer2ID, security.LevelMsgIllegal, "test")
+       if peer := ps.GetPeer(peer2ID); peer == nil {
+               t.Fatalf("Error remove peer %s err", peer2ID)
+       }
+}
diff --git a/netsync/sync_manager.go b/netsync/sync_manager.go
new file mode 100644 (file)
index 0000000..66ee99a
--- /dev/null
@@ -0,0 +1,161 @@
+package netsync
+
+import (
+       "errors"
+
+       "github.com/sirupsen/logrus"
+
+       "github.com/bytom/bytom/config"
+       "github.com/bytom/bytom/consensus"
+       dbm "github.com/bytom/bytom/database/leveldb"
+       "github.com/bytom/bytom/event"
+       "github.com/bytom/bytom/netsync/chainmgr"
+       "github.com/bytom/bytom/netsync/consensusmgr"
+       "github.com/bytom/bytom/netsync/peers"
+       "github.com/bytom/bytom/p2p"
+       "github.com/bytom/bytom/protocol"
+)
+
const (
	// logModule tags all log entries emitted by the netsync package.
	logModule = "netsync"
)

var (
	// errVaultModeDialPeer is returned when a dial is attempted while the
	// node runs in vault mode, where networking is disabled.
	errVaultModeDialPeer = errors.New("can't dial peer in vault mode")
)
+
// ChainMgr is the interface for p2p chain message sync manager.
type ChainMgr interface {
	// Start launches the chain synchronization service.
	Start() error
	// IsCaughtUp reports whether chain sync has finished.
	IsCaughtUp() bool
	// Stop terminates the chain synchronization service.
	Stop()
}

// ConsensusMgr is the interface for consensus message sync manager.
type ConsensusMgr interface {
	// Start launches the consensus message service.
	Start() error
	// Stop terminates the consensus message service.
	Stop()
}

// Switch is the interface for p2p switch.
type Switch interface {
	// Start boots the switch; the bool mirrors the underlying BaseService API.
	Start() (bool, error)
	// Stop shuts the switch down, reporting whether it was running.
	Stop() bool
	// IsListening reports whether the switch accepts inbound connections.
	IsListening() bool
	// DialPeerWithAddress opens an outbound connection to addr.
	DialPeerWithAddress(addr *p2p.NetAddress) error
	// Peers returns the switch-level peer set.
	Peers() *p2p.PeerSet
}
+
// SyncManager is responsible for the business layer information
// synchronization: it owns the p2p switch plus the chain and consensus
// sync managers that share one peer set.
type SyncManager struct {
	config       *config.Config // node configuration (vault mode, chain ID, ...)
	sw           Switch         // underlying p2p switch
	chainMgr     ChainMgr       // block/tx synchronization
	consensusMgr ConsensusMgr   // consensus message synchronization
	peers        *peers.PeerSet // peer set shared by both managers
}
+
+// NewSyncManager create sync manager and set switch.
+func NewSyncManager(config *config.Config, chain *protocol.Chain, txPool *protocol.TxPool, dispatcher *event.Dispatcher, fastSyncDB dbm.DB) (*SyncManager, error) {
+       sw, err := p2p.NewSwitch(config)
+       if err != nil {
+               return nil, err
+       }
+       peers := peers.NewPeerSet(sw)
+
+       chainManger, err := chainmgr.NewManager(config, sw, chain, txPool, dispatcher, peers, fastSyncDB)
+       if err != nil {
+               return nil, err
+       }
+       consensusMgr := consensusmgr.NewManager(sw, chain, peers, dispatcher)
+       return &SyncManager{
+               config:       config,
+               sw:           sw,
+               chainMgr:     chainManger,
+               consensusMgr: consensusMgr,
+               peers:        peers,
+       }, nil
+}
+
+// Start message sync manager service.
+func (sm *SyncManager) Start() error {
+       if _, err := sm.sw.Start(); err != nil {
+               logrus.WithFields(logrus.Fields{"module": logModule, "err": err}).Error("failed start switch")
+               return err
+       }
+
+       if err := sm.chainMgr.Start(); err != nil {
+               return err
+       }
+
+       return sm.consensusMgr.Start()
+}
+
+// Stop message sync manager service.
+func (sm *SyncManager) Stop() {
+       sm.chainMgr.Stop()
+       sm.consensusMgr.Stop()
+       if !sm.config.VaultMode {
+               sm.sw.Stop()
+       }
+
+}
+
+// IsListening check if the bytomd service port is open?
+func (sm *SyncManager) IsListening() bool {
+       if sm.config.VaultMode {
+               return false
+       }
+       return sm.sw.IsListening()
+
+}
+
// IsCaughtUp reports whether the node has finished syncing with its peers
// (delegates to the chain manager).
func (sm *SyncManager) IsCaughtUp() bool {
	return sm.chainMgr.IsCaughtUp()
}

// PeerCount counts the number of connected peers; a vault-mode node has none.
func (sm *SyncManager) PeerCount() int {
	if sm.config.VaultMode {
		return 0
	}
	return len(sm.sw.Peers().List())
}

// GetNetwork returns the chain ID identifying the type of network.
func (sm *SyncManager) GetNetwork() string {
	return sm.config.ChainID
}
+
+// BestPeer fine the peer with the highest height from the connected peers.
+func (sm *SyncManager) BestPeer() *peers.PeerInfo {
+       bestPeer := sm.peers.BestPeer(consensus.SFFullNode)
+       if bestPeer != nil {
+               return bestPeer.GetPeerInfo()
+       }
+       return nil
+}
+
+// DialPeerWithAddress dial the peer and establish a connection.
+func (sm *SyncManager) DialPeerWithAddress(addr *p2p.NetAddress) error {
+       if sm.config.VaultMode {
+               return errVaultModeDialPeer
+       }
+
+       return sm.sw.DialPeerWithAddress(addr)
+}
+
+//GetPeerInfos return peer info of all connected peers.
+func (sm *SyncManager) GetPeerInfos() []*peers.PeerInfo {
+       return sm.peers.GetPeerInfos()
+}
+
+//StopPeer try to stop peer by given ID
+func (sm *SyncManager) StopPeer(peerID string) error {
+       if peer := sm.peers.GetPeer(peerID); peer == nil {
+               return errors.New("peerId not exist")
+       }
+       sm.peers.RemovePeer(peerID)
+       return nil
+}
index e64f863..c39df69 100644 (file)
@@ -26,12 +26,9 @@ import (
        "github.com/bytom/bytom/env"
        "github.com/bytom/bytom/event"
        bytomLog "github.com/bytom/bytom/log"
-       "github.com/bytom/bytom/mining/cpuminer"
-       "github.com/bytom/bytom/mining/miningpool"
-       "github.com/bytom/bytom/mining/tensority"
+
        "github.com/bytom/bytom/net/websocket"
        "github.com/bytom/bytom/netsync"
-       "github.com/bytom/bytom/p2p"
        "github.com/bytom/bytom/protocol"
        w "github.com/bytom/bytom/wallet"
 )
@@ -55,9 +52,6 @@ type Node struct {
        api             *api.API
        chain           *protocol.Chain
        txfeed          *txfeed.Tracker
-       cpuMiner        *cpuminer.CPUMiner
-       miningPool      *miningpool.MiningPool
-       miningEnable    bool
 }
 
 // NewNode create bytom node
@@ -124,7 +118,8 @@ func NewNode(config *cfg.Config) *Node {
                }
        }
 
-       syncManager, err := netsync.NewSyncManager(config, chain, txPool, dispatcher)
+       fastSyncDB := dbm.NewDB("fastsync", config.DBBackend, config.DBDir())
+       syncManager, err := netsync.NewSyncManager(config, chain, txPool, dispatcher, fastSyncDB)
        if err != nil {
                cmn.Exit(cmn.Fmt("Failed to create sync manager: %v", err))
        }
@@ -151,20 +146,12 @@ func NewNode(config *cfg.Config) *Node {
                wallet:          wallet,
                chain:           chain,
                txfeed:          txFeed,
-               miningEnable:    config.Mining,
 
                notificationMgr: notificationMgr,
        }
 
-       node.cpuMiner = cpuminer.NewCPUMiner(chain, accounts, txPool, dispatcher)
-       node.miningPool = miningpool.NewMiningPool(chain, accounts, txPool, dispatcher)
-
        node.BaseService = *cmn.NewBaseService(nil, "Node", node)
 
-       if config.Simd.Enable {
-               tensority.UseSIMD = true
-       }
-
        return node
 }
 
@@ -200,7 +187,7 @@ func launchWebBrowser(port string) {
 }
 
 func (n *Node) initAndstartAPIServer() {
-       n.api = api.NewAPI(n.syncManager, n.wallet, n.txfeed, n.cpuMiner, n.miningPool, n.chain, n.config, n.accessTokens, n.eventDispatcher, n.notificationMgr)
+       n.api = api.NewAPI(n.syncManager, n.wallet, n.txfeed, n.chain, n.config, n.accessTokens, n.eventDispatcher, n.notificationMgr)
 
        listenAddr := env.String("LISTEN", n.config.ApiAddress)
        env.Parse()
@@ -208,14 +195,6 @@ func (n *Node) initAndstartAPIServer() {
 }
 
 func (n *Node) OnStart() error {
-       if n.miningEnable {
-               if _, err := n.wallet.AccountMgr.GetMiningAddress(); err != nil {
-                       n.miningEnable = false
-                       log.Error(err)
-               } else {
-                       n.cpuMiner.Start()
-               }
-       }
        if !n.config.VaultMode {
                if err := n.syncManager.Start(); err != nil {
                        return err
@@ -242,9 +221,6 @@ func (n *Node) OnStop() {
        n.notificationMgr.Shutdown()
        n.notificationMgr.WaitForShutdown()
        n.BaseService.OnStop()
-       if n.miningEnable {
-               n.cpuMiner.Stop()
-       }
        if !n.config.VaultMode {
                n.syncManager.Stop()
        }
@@ -257,11 +233,3 @@ func (n *Node) RunForever() {
                n.Stop()
        })
 }
-
-func (n *Node) NodeInfo() *p2p.NodeInfo {
-       return n.syncManager.NodeInfo()
-}
-
-func (n *Node) MiningPool() *miningpool.MiningPool {
-       return n.miningPool
-}
index 5c0a07b..0d112c7 100644 (file)
@@ -59,6 +59,10 @@ type Peer struct {
        isLAN bool
 }
 
+func (p *Peer) Moniker() string {
+       return p.NodeInfo.Moniker
+}
+
 // OnStart implements BaseService.
 func (p *Peer) OnStart() error {
        p.BaseService.OnStart()
index 49a6a18..3cff8de 100644 (file)
@@ -19,7 +19,6 @@ type BlockHeader struct {
        PreviousBlockHash bc.Hash // The hash of the previous block.
        Timestamp         uint64  // The time of the block in seconds.
        Nonce             uint64  // Nonce used to generate the block.
-       Bits              uint64  // Difficulty target for the block.
        BlockCommitment
 }
 
@@ -89,9 +88,7 @@ func (bh *BlockHeader) readFrom(r *blockchain.Reader) (serflag uint8, err error)
        if bh.Nonce, err = blockchain.ReadVarint63(r); err != nil {
                return 0, err
        }
-       if bh.Bits, err = blockchain.ReadVarint63(r); err != nil {
-               return 0, err
-       }
+
        return
 }
 
@@ -124,8 +121,6 @@ func (bh *BlockHeader) writeTo(w io.Writer, serflags uint8) (err error) {
        if _, err = blockchain.WriteVarint63(w, bh.Nonce); err != nil {
                return err
        }
-       if _, err = blockchain.WriteVarint63(w, bh.Bits); err != nil {
-               return err
-       }
+
        return nil
 }
index 1d6c3a5..13a6757 100644 (file)
@@ -21,7 +21,6 @@ func TestBlockHeader(t *testing.T) {
                PreviousBlockHash: testutil.MustDecodeHash("c34048bd60c4c13144fd34f408627d1be68f6cb4fdd34e879d6d791060ea73a0"),
                Timestamp:         1522908275,
                Nonce:             34342,
-               Bits:              2305843009222082559,
                BlockCommitment: BlockCommitment{
                        TransactionStatusHash:  testutil.MustDecodeHash("b94301ea4e316bee00109f68d25beaca90aeff08e9bf439a37d91d7a3b5a1470"),
                        TransactionsMerkleRoot: testutil.MustDecodeHash("ad9ac003d08ff305181a345d64fe0b02311cc1a6ec04ab73f3318d90139bfe03"),
@@ -74,7 +73,6 @@ func TestMarshalBlockHeader(t *testing.T) {
                                PreviousBlockHash: testutil.MustDecodeHash("c34048bd60c4c13144fd34f408627d1be68f6cb4fdd34e879d6d791060ea73a0"),
                                Timestamp:         1528945000,
                                Nonce:             9253507043297,
-                               Bits:              2305843009214532812,
                                BlockCommitment: BlockCommitment{
                                        TransactionsMerkleRoot: testutil.MustDecodeHash("ad9ac003d08ff305181a345d64fe0b02311cc1a6ec04ab73f3318d90139bfe03"),
                                        TransactionStatusHash:  testutil.MustDecodeHash("b94301ea4e316bee00109f68d25beaca90aeff08e9bf439a37d91d7a3b5a1470"),
@@ -100,7 +98,6 @@ func TestMarshalBlockHeader(t *testing.T) {
                                PreviousBlockHash: testutil.MustDecodeHash("c34048bd60c4c13144fd34f408627d1be68f6cb4fdd34e879d6d791060ea73a0"),
                                Timestamp:         1528945000,
                                Nonce:             9253507043297,
-                               Bits:              2305843009222082559,
                                BlockCommitment: BlockCommitment{
                                        TransactionsMerkleRoot: testutil.MustDecodeHash("ad9ac003d08ff305181a345d64fe0b02311cc1a6ec04ab73f3318d90139bfe03"),
                                        TransactionStatusHash:  testutil.MustDecodeHash("b94301ea4e316bee00109f68d25beaca90aeff08e9bf439a37d91d7a3b5a1470"),
@@ -115,7 +112,6 @@ func TestMarshalBlockHeader(t *testing.T) {
                                PreviousBlockHash: testutil.MustDecodeHash("c34048bd60c4c13144fd34f408627d1be68f6cb4fdd34e879d6d791060ea73a0"),
                                Timestamp:         9223372036854775808, // Timestamp > MaxInt64(9223372036854775807)
                                Nonce:             9253507043297,
-                               Bits:              2305843009222082559,
                                BlockCommitment: BlockCommitment{
                                        TransactionsMerkleRoot: testutil.MustDecodeHash("ad9ac003d08ff305181a345d64fe0b02311cc1a6ec04ab73f3318d90139bfe03"),
                                        TransactionStatusHash:  testutil.MustDecodeHash("b94301ea4e316bee00109f68d25beaca90aeff08e9bf439a37d91d7a3b5a1470"),
@@ -130,7 +126,6 @@ func TestMarshalBlockHeader(t *testing.T) {
                                PreviousBlockHash: testutil.MustDecodeHash("c34048bd60c4c13144fd34f408627d1be68f6cb4fdd34e879d6d791060ea73a0"),
                                Timestamp:         1528945000,
                                Nonce:             9223372036854775808, // Nonce > MaxInt64(9223372036854775807)
-                               Bits:              2305843009222082559,
                                BlockCommitment: BlockCommitment{
                                        TransactionsMerkleRoot: testutil.MustDecodeHash("ad9ac003d08ff305181a345d64fe0b02311cc1a6ec04ab73f3318d90139bfe03"),
                                        TransactionStatusHash:  testutil.MustDecodeHash("b94301ea4e316bee00109f68d25beaca90aeff08e9bf439a37d91d7a3b5a1470"),
@@ -145,7 +140,6 @@ func TestMarshalBlockHeader(t *testing.T) {
                                PreviousBlockHash: testutil.MustDecodeHash("c34048bd60c4c13144fd34f408627d1be68f6cb4fdd34e879d6d791060ea73a0"),
                                Timestamp:         1528945000,
                                Nonce:             9253507043297,
-                               Bits:              9223372036854775808, // Bits > MaxInt64(9223372036854775807)
                                BlockCommitment: BlockCommitment{
                                        TransactionsMerkleRoot: testutil.MustDecodeHash("ad9ac003d08ff305181a345d64fe0b02311cc1a6ec04ab73f3318d90139bfe03"),
                                        TransactionStatusHash:  testutil.MustDecodeHash("b94301ea4e316bee00109f68d25beaca90aeff08e9bf439a37d91d7a3b5a1470"),
@@ -160,7 +154,6 @@ func TestMarshalBlockHeader(t *testing.T) {
                                PreviousBlockHash: testutil.MustDecodeHash("c34048bd60c4c13144fd34f408627d1be68f6cb4fdd34e879d6d791060ea73a0"),
                                Timestamp:         1528945000,
                                Nonce:             9253507043297,
-                               Bits:              2305843009214532812,
                        },
                        wantHex: strings.Join([]string{
                                "01",                 // serialization flags
@@ -225,7 +218,6 @@ func TestUnmarshalBlockHeader(t *testing.T) {
                                PreviousBlockHash: testutil.MustDecodeHash("c34048bd60c4c13144fd34f408627d1be68f6cb4fdd34e879d6d791060ea73a0"),
                                Timestamp:         1528945000,
                                Nonce:             9253507043297,
-                               Bits:              2305843009214532812,
                                BlockCommitment: BlockCommitment{
                                        TransactionsMerkleRoot: testutil.MustDecodeHash("ad9ac003d08ff305181a345d64fe0b02311cc1a6ec04ab73f3318d90139bfe03"),
                                        TransactionStatusHash:  testutil.MustDecodeHash("b94301ea4e316bee00109f68d25beaca90aeff08e9bf439a37d91d7a3b5a1470"),
@@ -251,7 +243,6 @@ func TestUnmarshalBlockHeader(t *testing.T) {
                                PreviousBlockHash: testutil.MustDecodeHash("c34048bd60c4c13144fd34f408627d1be68f6cb4fdd34e879d6d791060ea73a0"),
                                Timestamp:         1528945000,
                                Nonce:             9253507043297,
-                               Bits:              2305843009214532812,
                                BlockCommitment: BlockCommitment{
                                        TransactionsMerkleRoot: testutil.MustDecodeHash("ad9ac003d08ff305181a345d64fe0b02311cc1a6ec04ab73f3318d90139bfe03"),
                                        TransactionStatusHash:  testutil.MustDecodeHash("b94301ea4e316bee00109f68d25beaca90aeff08e9bf439a37d91d7a3b5a1470"),
@@ -352,7 +343,6 @@ func TestUnmarshalBlockHeader(t *testing.T) {
                                PreviousBlockHash: testutil.MustDecodeHash("c34048bd60c4c13144fd34f408627d1be68f6cb4fdd34e879d6d791060ea73a0"),
                                Timestamp:         1528945000,
                                Nonce:             9253507043297,
-                               Bits:              2305843009214532812,
                                BlockCommitment: BlockCommitment{
                                        TransactionsMerkleRoot: testutil.MustDecodeHash("ad9ac003d08ff305181a345d64fe0b02311cc1a6ec04ab73f3318d90139bfe03"),
                                        TransactionStatusHash:  testutil.MustDecodeHash("b94301ea4e316bee00109f68d25beaca90aeff08e9bf439a37d91d7a3b5a1470"),
index 0249994..c524997 100644 (file)
@@ -52,7 +52,6 @@ func TestBlock(t *testing.T) {
                                        PreviousBlockHash: testutil.MustDecodeHash("c34048bd60c4c13144fd34f408627d1be68f6cb4fdd34e879d6d791060ea73a0"),
                                        Timestamp:         1522908275,
                                        Nonce:             34342,
-                                       Bits:              2305843009222082559,
                                        BlockCommitment: BlockCommitment{
                                                TransactionStatusHash:  testutil.MustDecodeHash("b94301ea4e316bee00109f68d25beaca90aeff08e9bf439a37d91d7a3b5a1470"),
                                                TransactionsMerkleRoot: testutil.MustDecodeHash("ad9ac003d08ff305181a345d64fe0b02311cc1a6ec04ab73f3318d90139bfe03"),
@@ -150,7 +149,6 @@ func TestReadFrom(t *testing.T) {
                                        PreviousBlockHash: testutil.MustDecodeHash("3077f24528e94ecfc4491bb2e9ed6264a632a9a4b86b00c88093ca545d14a137"),
                                        Timestamp:         1553496788,
                                        Nonce:             23,
-                                       Bits:              2305843009213970283,
                                        BlockCommitment: BlockCommitment{
                                                TransactionsMerkleRoot: testutil.MustDecodeHash("35a2d11158f47a5c5267630b2b6cf9e9a5f79a598085a2572a68defeb8013ad2"),
                                                TransactionStatusHash:  testutil.MustDecodeHash("6978a65b4ee5b6f4914fe5c05000459a803ecf59132604e5d334d64249c5e50a"),
index 6488861..c415f66 100644 (file)
@@ -174,7 +174,7 @@ func mapTx(tx *TxData) (headerID bc.Hash, hdr *bc.TxHeader, entryMap map[bc.Hash
 }
 
 func mapBlockHeader(old *BlockHeader) (bc.Hash, *bc.BlockHeader) {
-       bh := bc.NewBlockHeader(old.Version, old.Height, &old.PreviousBlockHash, old.Timestamp, &old.TransactionsMerkleRoot, &old.TransactionStatusHash, old.Nonce, old.Bits)
+       bh := bc.NewBlockHeader(old.Version, old.Height, &old.PreviousBlockHash, old.Timestamp, &old.TransactionsMerkleRoot, &old.TransactionStatusHash, old.Nonce, 0)
        return bc.EntryID(bh), bh
 }
 
index 3cb6908..580a9cb 100644 (file)
@@ -280,7 +280,7 @@ func (c *Chain) processBlock(block *types.Block) (bool, error) {
                return false, c.connectBlock(bestBlock)
        }
 
-       if bestNode.Height > c.bestNode.Height && bestNode.WorkSum.Cmp(c.bestNode.WorkSum) >= 0 {
+       if bestNode.Height > c.bestNode.Height {
                log.WithFields(log.Fields{"module": logModule}).Debug("start to reorganize chain")
                return false, c.reorganizeChain(bestNode)
        }
diff --git a/protocol/consensus.go b/protocol/consensus.go
new file mode 100644 (file)
index 0000000..2cd26b0
--- /dev/null
@@ -0,0 +1,9 @@
+package protocol
+
+import "github.com/bytom/bytom/errors"
+
+var (
+       // ErrDoubleSignBlock represents a consensus node signing two different blocks at the same height
+       ErrDoubleSignBlock  = errors.New("the consensus is double sign in same height of different block")
+       errInvalidSignature = errors.New("the signature of block is invalid")
+)
index fab3ced..0022b83 100644 (file)
@@ -85,6 +85,18 @@ func (c *Chain) initChainStatus() error {
        return c.store.SaveChainStatus(node, utxoView)
 }
 
+// LastIrreversibleHeader returns the last irreversible block header of the blockchain
+func (c *Chain) LastIrreversibleHeader() *types.BlockHeader {
+       // TODO: LastIrreversibleHeader
+       return nil
+}
+
+// ProcessBlockSignature processes a signature for the block with the given hash
+func (c *Chain) ProcessBlockSignature(signature, pubkey []byte, blockHash *bc.Hash) error {
+       // TODO: ProcessBlockSignature
+       return nil
+}
+
 // BestBlockHeight returns the current height of the blockchain.
 func (c *Chain) BestBlockHeight() uint64 {
        c.cond.L.Lock()
@@ -119,15 +131,6 @@ func (c *Chain) CalcNextSeed(preBlock *bc.Hash) (*bc.Hash, error) {
        return node.CalcNextSeed(), nil
 }
 
-// CalcNextBits return the seed for the given block
-func (c *Chain) CalcNextBits(preBlock *bc.Hash) (uint64, error) {
-       node := c.index.GetNode(preBlock)
-       if node == nil {
-               return 0, errors.New("can't find preblock in the blockindex")
-       }
-       return node.CalcNextBits(), nil
-}
-
 func (c *Chain) GetBlockIndex() *state.BlockIndex {
        return c.index
 }
index 57746d0..e55e57b 100644 (file)
@@ -2,13 +2,11 @@ package state
 
 import (
        "errors"
-       "math/big"
        "sort"
        "sync"
 
        "github.com/bytom/bytom/common"
        "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/consensus/difficulty"
        "github.com/bytom/bytom/protocol/bc"
        "github.com/bytom/bytom/protocol/bc/types"
        "github.com/bytom/bytom/testutil"
@@ -21,16 +19,14 @@ const approxNodesPerDay = 24 * 24
 // BlockNode represents a block within the block chain and is primarily used to
 // aid in selecting the best chain to be the main chain.
 type BlockNode struct {
-       Parent  *BlockNode // parent is the parent block for this node.
-       Hash    bc.Hash    // hash of the block.
-       Seed    *bc.Hash   // seed hash of the block
-       WorkSum *big.Int   // total amount of work in the chain up to
+       Parent *BlockNode // parent is the parent block for this node.
+       Hash   bc.Hash    // hash of the block.
+       Seed   *bc.Hash   // seed hash of the block
 
        Version                uint64
        Height                 uint64
        Timestamp              uint64
        Nonce                  uint64
-       Bits                   uint64
        TransactionsMerkleRoot bc.Hash
        TransactionStatusHash  bc.Hash
 }
@@ -41,14 +37,12 @@ func NewBlockNode(bh *types.BlockHeader, parent *BlockNode) (*BlockNode, error)
        }
 
        node := &BlockNode{
-               Parent:    parent,
-               Hash:      bh.Hash(),
-               WorkSum:   difficulty.CalcWork(bh.Bits),
-               Version:   bh.Version,
-               Height:    bh.Height,
-               Timestamp: bh.Timestamp,
-               Nonce:     bh.Nonce,
-               Bits:      bh.Bits,
+               Parent:                 parent,
+               Hash:                   bh.Hash(),
+               Version:                bh.Version,
+               Height:                 bh.Height,
+               Timestamp:              bh.Timestamp,
+               Nonce:                  bh.Nonce,
                TransactionsMerkleRoot: bh.TransactionsMerkleRoot,
                TransactionStatusHash:  bh.TransactionStatusHash,
        }
@@ -57,7 +51,6 @@ func NewBlockNode(bh *types.BlockHeader, parent *BlockNode) (*BlockNode, error)
                node.Seed = consensus.InitialSeed
        } else {
                node.Seed = parent.CalcNextSeed()
-               node.WorkSum = node.WorkSum.Add(parent.WorkSum, node.WorkSum)
        }
        return node, nil
 }
@@ -74,7 +67,6 @@ func (node *BlockNode) BlockHeader() *types.BlockHeader {
                PreviousBlockHash: previousBlockHash,
                Timestamp:         node.Timestamp,
                Nonce:             node.Nonce,
-               Bits:              node.Bits,
                BlockCommitment: types.BlockCommitment{
                        TransactionsMerkleRoot: node.TransactionsMerkleRoot,
                        TransactionStatusHash:  node.TransactionStatusHash,
@@ -94,19 +86,6 @@ func (node *BlockNode) CalcPastMedianTime() uint64 {
        return timestamps[len(timestamps)/2]
 }
 
-// CalcNextBits calculate the bits for next block
-func (node *BlockNode) CalcNextBits() uint64 {
-       if node.Height%consensus.BlocksPerRetarget != 0 || node.Height == 0 {
-               return node.Bits
-       }
-
-       compareNode := node.Parent
-       for compareNode.Height%consensus.BlocksPerRetarget != 0 {
-               compareNode = compareNode.Parent
-       }
-       return difficulty.CalcNextRequiredDifficulty(node.BlockHeader(), compareNode.BlockHeader())
-}
-
 // CalcNextSeed calculate the seed for next block
 func (node *BlockNode) CalcNextSeed() *bc.Hash {
        if node.Height == 0 {
index 3b291ac..a859be6 100644 (file)
@@ -1,7 +1,6 @@
 package state
 
 import (
-       "math"
        "math/big"
        "reflect"
        "testing"
@@ -9,7 +8,6 @@ import (
        "github.com/davecgh/go-spew/spew"
 
        "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/consensus/difficulty"
        "github.com/bytom/bytom/protocol/bc"
        "github.com/bytom/bytom/protocol/bc/types"
        "github.com/bytom/bytom/testutil"
@@ -30,59 +28,38 @@ func TestNewBlockNode(t *testing.T) {
                        blockHeader: &types.BlockHeader{
                                Height:    uint64(0),
                                Timestamp: 0,
-                               Bits:      1000,
-                       },
-                       parentNode: &BlockNode{
-                               WorkSum: &big.Int{},
                        },
+                       parentNode: &BlockNode{},
                        wantBlockNode: &BlockNode{
-                               Bits:    1000,
-                               Hash:    testutil.MustDecodeHash("f1a5a6ddebad7285928a07ce1534104a8d1cd435fc80e90bb9f0034bbe5f8109"),
-                               Seed:    consensus.InitialSeed,
-                               WorkSum: new(big.Int).SetInt64(0),
-                               Parent: &BlockNode{
-                                       WorkSum: &big.Int{},
-                               },
+                               Hash:   testutil.MustDecodeHash("f1a5a6ddebad7285928a07ce1534104a8d1cd435fc80e90bb9f0034bbe5f8109"),
+                               Seed:   consensus.InitialSeed,
+                               Parent: &BlockNode{},
                        },
                },
                {
                        blockHeader: &types.BlockHeader{
                                Height:    uint64(100),
                                Timestamp: 0,
-                               Bits:      10000000000,
-                       },
-                       parentNode: &BlockNode{
-                               WorkSum: new(big.Int).SetInt64(100),
                        },
+                       parentNode: &BlockNode{},
                        wantBlockNode: &BlockNode{
-                               Bits:    10000000000,
-                               Hash:    testutil.MustDecodeHash("b14067726f09d74da89aeb97ca1b15a8b95760b47a0d71549b0aa5ab8c5e724f"),
-                               Seed:    consensus.InitialSeed,
-                               Height:  uint64(100),
-                               WorkSum: stringToBigInt("193956598387464313942329958138505708296934647681139973265423088790474254103", 10),
-                               Parent: &BlockNode{
-                                       WorkSum: new(big.Int).SetInt64(100),
-                               },
+                               Hash:   testutil.MustDecodeHash("b14067726f09d74da89aeb97ca1b15a8b95760b47a0d71549b0aa5ab8c5e724f"),
+                               Seed:   consensus.InitialSeed,
+                               Height: uint64(100),
+                               Parent: &BlockNode{},
                        },
                },
                {
                        blockHeader: &types.BlockHeader{
                                Height:    uint64(100),
                                Timestamp: 0,
-                               Bits:      10000000000,
-                       },
-                       parentNode: &BlockNode{
-                               WorkSum: new(big.Int).SetInt64(math.MaxInt64),
                        },
+                       parentNode: &BlockNode{},
                        wantBlockNode: &BlockNode{
-                               Bits:    10000000000,
-                               Hash:    testutil.MustDecodeHash("b14067726f09d74da89aeb97ca1b15a8b95760b47a0d71549b0aa5ab8c5e724f"),
-                               Seed:    consensus.InitialSeed,
-                               Height:  uint64(100),
-                               WorkSum: stringToBigInt("193956598387464313942329958138505708296934647681139973274646460827329029810", 10),
-                               Parent: &BlockNode{
-                                       WorkSum: new(big.Int).SetInt64(math.MaxInt64),
-                               },
+                               Hash:   testutil.MustDecodeHash("b14067726f09d74da89aeb97ca1b15a8b95760b47a0d71549b0aa5ab8c5e724f"),
+                               Seed:   consensus.InitialSeed,
+                               Height: uint64(100),
+                               Parent: &BlockNode{},
                        },
                },
        }
@@ -152,62 +129,6 @@ func TestCalcPastMedianTime(t *testing.T) {
        }
 }
 
-func TestCalcNextBits(t *testing.T) {
-       targetTimeSpan := uint64(consensus.BlocksPerRetarget * consensus.TargetSecondsPerBlock)
-       cases := []struct {
-               parentNode  *BlockNode
-               currentNode *BlockNode
-               bits        uint64
-       }{
-               {
-                       currentNode: &BlockNode{
-                               Height: 0,
-                               Bits:   1000,
-                       },
-                       bits: 1000,
-               },
-               {
-                       currentNode: &BlockNode{
-                               Height: consensus.BlocksPerRetarget - 1,
-                               Bits:   1000,
-                       },
-                       bits: 1000,
-               },
-               {
-                       parentNode: &BlockNode{
-                               Height:    0,
-                               Timestamp: 0,
-                       },
-                       currentNode: &BlockNode{
-                               Height:    consensus.BlocksPerRetarget,
-                               Bits:      difficulty.BigToCompact(big.NewInt(1000)),
-                               Timestamp: targetTimeSpan,
-                       },
-                       bits: difficulty.BigToCompact(big.NewInt(1000)),
-               },
-               {
-                       parentNode: &BlockNode{
-                               Height:    0,
-                               Timestamp: 0,
-                       },
-                       currentNode: &BlockNode{
-                               Height:    consensus.BlocksPerRetarget,
-                               Bits:      difficulty.BigToCompact(big.NewInt(1000)),
-                               Timestamp: targetTimeSpan * 2,
-                       },
-                       bits: difficulty.BigToCompact(big.NewInt(2000)),
-               },
-       }
-
-       for i, c := range cases {
-               c.currentNode.Parent = c.parentNode
-               bits := c.currentNode.CalcNextBits()
-               if bits != c.bits {
-                       t.Fatalf("calc next bit failed, index: %d, expected: %d, have: %d", i, c.bits, bits)
-               }
-       }
-}
-
 func TestCalcNextSeed(t *testing.T) {
        cases := []struct {
                node *BlockNode
index ef4711e..91be77c 100644 (file)
@@ -6,7 +6,6 @@ import (
        log "github.com/sirupsen/logrus"
 
        "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/consensus/difficulty"
        "github.com/bytom/bytom/errors"
        "github.com/bytom/bytom/protocol/bc"
        "github.com/bytom/bytom/protocol/bc/types"
@@ -22,7 +21,6 @@ var (
        errMismatchedMerkleRoot  = errors.New("mismatched merkle root")
        errMisorderedBlockHeight = errors.New("misordered block height")
        errOverBlockLimit        = errors.New("block's gas is over the limit")
-       errWorkProof             = errors.New("invalid difficulty proof of work")
        errVersionRegression     = errors.New("version regression")
 )
 
@@ -66,18 +64,13 @@ func ValidateBlockHeader(b *bc.Block, parent *state.BlockNode) error {
        if b.Height != parent.Height+1 {
                return errors.WithDetailf(errMisorderedBlockHeight, "previous block height %d, current block height %d", parent.Height, b.Height)
        }
-       if b.Bits != parent.CalcNextBits() {
-               return errBadBits
-       }
+
        if parent.Hash != *b.PreviousBlockId {
                return errors.WithDetailf(errMismatchedBlock, "previous block ID %x, current block wants %x", parent.Hash.Bytes(), b.PreviousBlockId.Bytes())
        }
        if err := checkBlockTime(b, parent); err != nil {
                return err
        }
-       if !difficulty.CheckProofOfWork(&b.ID, parent.CalcNextSeed(), b.BlockHeader.Bits) {
-               return errWorkProof
-       }
        return nil
 }
 
index 48049bc..4abf744 100644 (file)
@@ -6,13 +6,11 @@ import (
        "time"
 
        "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/mining/tensority"
        "github.com/bytom/bytom/protocol/bc"
        "github.com/bytom/bytom/protocol/bc/types"
        "github.com/bytom/bytom/protocol/state"
        "github.com/bytom/bytom/protocol/vm"
        "github.com/bytom/bytom/protocol/vm/vmutil"
-       "github.com/bytom/bytom/testutil"
 )
 
 func TestCheckBlockTime(t *testing.T) {
@@ -110,8 +108,6 @@ func TestCheckCoinbaseAmount(t *testing.T) {
 }
 
 func TestValidateBlockHeader(t *testing.T) {
-       iniTtensority()
-
        cases := []struct {
                desc   string
                block  *bc.Block
@@ -148,7 +144,6 @@ func TestValidateBlockHeader(t *testing.T) {
                        parent: &state.BlockNode{
                                Version: 1,
                                Height:  19,
-                               Bits:    2305843009214532812,
                        },
                        err: errBadBits,
                },
@@ -167,28 +162,6 @@ func TestValidateBlockHeader(t *testing.T) {
                        err: errMismatchedBlock,
                },
                {
-                       desc: "check work proof fail (blocktest#1011)",
-                       block: &bc.Block{
-                               ID: bc.Hash{V0: 0},
-                               BlockHeader: &bc.BlockHeader{
-                                       Version:         1,
-                                       Height:          1,
-                                       Timestamp:       1523352601,
-                                       PreviousBlockId: &bc.Hash{V0: 0},
-                                       Bits:            2305843009214532812,
-                               },
-                       },
-                       parent: &state.BlockNode{
-                               Version:   1,
-                               Height:    0,
-                               Timestamp: 1523352600,
-                               Hash:      bc.Hash{V0: 0},
-                               Seed:      &bc.Hash{V1: 1},
-                               Bits:      2305843009214532812,
-                       },
-                       err: errWorkProof,
-               },
-               {
                        block: &bc.Block{
                                ID: bc.Hash{V0: 1},
                                BlockHeader: &bc.BlockHeader{
@@ -196,7 +169,6 @@ func TestValidateBlockHeader(t *testing.T) {
                                        Height:          1,
                                        Timestamp:       1523352601,
                                        PreviousBlockId: &bc.Hash{V0: 0},
-                                       Bits:            2305843009214532812,
                                },
                        },
                        parent: &state.BlockNode{
@@ -205,7 +177,6 @@ func TestValidateBlockHeader(t *testing.T) {
                                Timestamp: 1523352600,
                                Hash:      bc.Hash{V0: 0},
                                Seed:      &bc.Hash{V1: 1},
-                               Bits:      2305843009214532812,
                        },
                        err: nil,
                },
@@ -259,8 +230,6 @@ func TestValidateBlockHeader(t *testing.T) {
 
 // TestValidateBlock test the ValidateBlock function
 func TestValidateBlock(t *testing.T) {
-       iniTtensority()
-
        cp, _ := vmutil.DefaultCoinbaseProgram()
        cases := []struct {
                desc   string
@@ -295,7 +264,6 @@ func TestValidateBlock(t *testing.T) {
                                Timestamp: 1523352600,
                                Hash:      bc.Hash{V0: 0},
                                Seed:      &bc.Hash{V1: 1},
-                               Bits:      2305843009214532812,
                        },
                        err: errMismatchedMerkleRoot,
                },
@@ -327,7 +295,6 @@ func TestValidateBlock(t *testing.T) {
                                Timestamp: 1523352600,
                                Hash:      bc.Hash{V0: 0},
                                Seed:      &bc.Hash{V1: 1},
-                               Bits:      2305843009214532812,
                        },
                        err: errMismatchedMerkleRoot,
                },
@@ -340,7 +307,6 @@ func TestValidateBlock(t *testing.T) {
                                        Height:          1,
                                        Timestamp:       1523352601,
                                        PreviousBlockId: &bc.Hash{V0: 0},
-                                       Bits:            2305843009214532812,
                                },
                                Transactions: []*bc.Tx{
                                        types.MapTx(&types.TxData{
@@ -363,7 +329,6 @@ func TestValidateBlock(t *testing.T) {
                                Timestamp: 1523352600,
                                Hash:      bc.Hash{V0: 0},
                                Seed:      &bc.Hash{V1: 1},
-                               Bits:      2305843009214532812,
                        },
                        err: ErrWrongCoinbaseTransaction,
                },
@@ -379,8 +344,6 @@ func TestValidateBlock(t *testing.T) {
 
 // TestGasOverBlockLimit check if the gas of the block has the max limit (blocktest#1012)
 func TestGasOverBlockLimit(t *testing.T) {
-       iniTtensority()
-
        cp, _ := vmutil.DefaultCoinbaseProgram()
        parent := &state.BlockNode{
                Version:   1,
@@ -388,7 +351,6 @@ func TestGasOverBlockLimit(t *testing.T) {
                Timestamp: 1523352600,
                Hash:      bc.Hash{V0: 0},
                Seed:      &bc.Hash{V1: 1},
-               Bits:      2305843009214532812,
        }
        block := &bc.Block{
                ID: bc.Hash{V0: 1},
@@ -430,8 +392,6 @@ func TestGasOverBlockLimit(t *testing.T) {
 
 // TestSetTransactionStatus verify the transaction status is set correctly (blocktest#1010)
 func TestSetTransactionStatus(t *testing.T) {
-       iniTtensority()
-
        cp, _ := vmutil.DefaultCoinbaseProgram()
        parent := &state.BlockNode{
                Version:   1,
@@ -439,7 +399,6 @@ func TestSetTransactionStatus(t *testing.T) {
                Timestamp: 1523352600,
                Hash:      bc.Hash{V0: 0},
                Seed:      &bc.Hash{V1: 1},
-               Bits:      2305843009214532812,
        }
        block := &bc.Block{
                ID: bc.Hash{V0: 1},
@@ -500,11 +459,3 @@ func TestSetTransactionStatus(t *testing.T) {
                }
        }
 }
-
-func iniTtensority() {
-       // add (hash, seed) --> (tensority hash) to the  tensority cache for avoid
-       // real matrix calculate cost.
-       tensority.AIHash.AddCache(&bc.Hash{V0: 0}, &bc.Hash{}, testutil.MaxHash)
-       tensority.AIHash.AddCache(&bc.Hash{V0: 1}, &bc.Hash{}, testutil.MinHash)
-       tensority.AIHash.AddCache(&bc.Hash{V0: 1}, consensus.InitialSeed, testutil.MinHash)
-}
index a94a0fd..7ee7052 100644 (file)
 package test
 
-import (
-       "fmt"
-       "io/ioutil"
-       "os"
-       "testing"
-       "time"
-
-       "github.com/bytom/bytom/account"
-       "github.com/bytom/bytom/blockchain/pseudohsm"
-       "github.com/bytom/bytom/blockchain/signers"
-       "github.com/bytom/bytom/blockchain/txbuilder"
-       "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/consensus/difficulty"
-       "github.com/bytom/bytom/crypto/ed25519/chainkd"
-       "github.com/bytom/bytom/database"
-       "github.com/bytom/bytom/database/storage"
-       "github.com/bytom/bytom/event"
-       "github.com/bytom/bytom/mining"
-       "github.com/bytom/bytom/protocol"
-       "github.com/bytom/bytom/protocol/bc"
-       "github.com/bytom/bytom/protocol/bc/types"
-       "github.com/bytom/bytom/protocol/state"
-       dbm "github.com/bytom/bytom/database/leveldb"
-)
-
-func BenchmarkChain_CoinBaseTx_NoAsset(b *testing.B) {
-       benchInsertChain(b, 0, 0, "")
-}
-
-func BenchmarkChain_BtmTx_NoAsset_BASE(b *testing.B) {
-       benchInsertChain(b, 1, 0, "")
-}
-
-func BenchmarkChain_5000BtmTx_NoAsset_BASE(b *testing.B) {
-       benchInsertChain(b, 5000, 0, "")
-}
-
-func BenchmarkChain_5000BtmTx_1Asset_BASE(b *testing.B) {
-       benchInsertChain(b, 5000, 1, "")
-}
-
-// standard Transaction
-func BenchmarkChain_BtmTx_NoAsset_P2PKH(b *testing.B) {
-       benchInsertChain(b, 1000, 0, "P2PKH")
-}
-
-func BenchmarkChain_BtmTx_1Asset_P2PKH(b *testing.B) {
-       benchInsertChain(b, 1000, 1, "P2PKH")
-}
-
-func BenchmarkChain_BtmTx_NoAsset_P2SH(b *testing.B) {
-       benchInsertChain(b, 100, 0, "P2SH")
-}
-
-func BenchmarkChain_BtmTx_1Asset_P2SH(b *testing.B) {
-       benchInsertChain(b, 100, 1, "P2SH")
-}
-
-func BenchmarkChain_BtmTx_NoAsset_MultiSign(b *testing.B) {
-       benchInsertChain(b, 100, 0, "MultiSign")
-}
-
-func BenchmarkChain_BtmTx_1Asset_MultiSign(b *testing.B) {
-       benchInsertChain(b, 100, 1, "MultiSign")
-}
-
-func benchInsertChain(b *testing.B, blockTxNumber int, otherAssetNum int, txType string) {
-       b.StopTimer()
-       testNumber := b.N
-       totalTxNumber := testNumber * blockTxNumber
-
-       dirPath, err := ioutil.TempDir(".", "testDB")
-       if err != nil {
-               b.Fatal("create dirPath err:", err)
-       }
-       defer os.RemoveAll(dirPath)
-
-       testDB := dbm.NewDB("testdb", "leveldb", dirPath)
-       defer testDB.Close()
-
-       // Generate a chain test data.
-       chain, txs, txPool, err := GenerateChainData(dirPath, testDB, totalTxNumber, otherAssetNum, txType)
-       if err != nil {
-               b.Fatal("GenerateChainData err:", err)
-       }
-
-       b.ReportAllocs()
-       b.StartTimer()
-
-       for i := 0; i < b.N; i++ {
-               testTxs := txs[blockTxNumber*i : blockTxNumber*(i+1)]
-               if err := InsertChain(chain, txPool, testTxs); err != nil {
-                       b.Fatal("Failed to insert block into chain:", err)
-               }
-       }
-}
-
-func GenerateChainData(dirPath string, testDB dbm.DB, txNumber, otherAssetNum int, txType string) (*protocol.Chain, []*types.Tx, *protocol.TxPool, error) {
-       var err error
-
-       // generate transactions
-       txs := []*types.Tx{}
-       switch txType {
-       case "P2PKH":
-               txs, err = MockTxsP2PKH(dirPath, testDB, txNumber, otherAssetNum)
-               if err != nil {
-                       return nil, nil, nil, err
-               }
-       case "P2SH":
-               txs, err = MockTxsP2SH(dirPath, testDB, txNumber, otherAssetNum)
-               if err != nil {
-                       return nil, nil, nil, err
-               }
-       case "MultiSign":
-               txs, err = MockTxsMultiSign(dirPath, testDB, txNumber, otherAssetNum)
-               if err != nil {
-                       return nil, nil, nil, err
-               }
-       default:
-               txs, err = CreateTxbyNum(txNumber, otherAssetNum)
-               if err != nil {
-                       return nil, nil, nil, err
-               }
-       }
-
-       // init UtxoViewpoint
-       utxoView := state.NewUtxoViewpoint()
-       utxoEntry := storage.NewUtxoEntry(false, 1, false)
-       for _, tx := range txs {
-               for _, id := range tx.SpentOutputIDs {
-                       utxoView.Entries[id] = utxoEntry
-               }
-       }
-
-       if err := SetUtxoView(testDB, utxoView); err != nil {
-               return nil, nil, nil, err
-       }
-
-       store := database.NewStore(testDB)
-       dispatcher := event.NewDispatcher()
-       txPool := protocol.NewTxPool(store, dispatcher)
-       chain, err := protocol.NewChain(store, txPool)
-       if err != nil {
-               return nil, nil, nil, err
-       }
-
-       go processNewTxch(txPool)
-
-       return chain, txs, txPool, nil
-}
-
-func InsertChain(chain *protocol.Chain, txPool *protocol.TxPool, txs []*types.Tx) error {
-       for _, tx := range txs {
-               if err := txbuilder.FinalizeTx(nil, chain, tx); err != nil {
-                       return err
-               }
-       }
-
-       block, err := mining.NewBlockTemplate(chain, txPool, nil)
-       if err != nil {
-               return err
-       }
-
-       blockSize, err := block.MarshalText()
-       if err != nil {
-               return err
-       }
-
-       fmt.Println("blocksize:", uint64(len(blockSize)))
-       fmt.Println("block tx count:", uint64(len(block.Transactions)))
-       fmt.Println("coinbase txsize:", uint64(block.Transactions[0].SerializedSize))
-       if len(block.Transactions) > 1 {
-               fmt.Println("txsize:", uint64(block.Transactions[1].SerializedSize))
-       }
-
-       seed, err := chain.CalcNextSeed(&block.PreviousBlockHash)
-       if err != nil {
-               return err
-       }
-
-       if err := SolveBlock(seed, block); err != nil {
-               return err
-       }
-
-       if _, err := chain.ProcessBlock(block); err != nil {
-               return err
-       }
-
-       return nil
-}
-
-func processNewTxch(txPool *protocol.TxPool) {
-}
-
-func SolveBlock(seed *bc.Hash, block *types.Block) error {
-       maxNonce := ^uint64(0) // 2^64 - 1
-       header := &block.BlockHeader
-       for i := uint64(0); i < maxNonce; i++ {
-               header.Nonce = i
-               headerHash := header.Hash()
-               if difficulty.CheckProofOfWork(&headerHash, seed, header.Bits) {
-                       return nil
-               }
-       }
-       return nil
-}
-
-func MockSimpleUtxo(index uint64, assetID *bc.AssetID, amount uint64, ctrlProg *account.CtrlProgram) *account.UTXO {
-       if ctrlProg == nil {
-               ctrlProg = &account.CtrlProgram{
-                       AccountID:      "",
-                       Address:        "",
-                       KeyIndex:       uint64(0),
-                       ControlProgram: []byte{81},
-                       Change:         false,
-               }
-       }
-
-       utxo := &account.UTXO{
-               OutputID:            bc.Hash{V0: 1},
-               SourceID:            bc.Hash{V0: 1},
-               AssetID:             *assetID,
-               Amount:              amount,
-               SourcePos:           index,
-               ControlProgram:      ctrlProg.ControlProgram,
-               ControlProgramIndex: ctrlProg.KeyIndex,
-               AccountID:           ctrlProg.AccountID,
-               Address:             ctrlProg.Address,
-               ValidHeight:         0,
-       }
-
-       return utxo
-}
-
-func GenerateBaseUtxos(num int, amount uint64, ctrlProg *account.CtrlProgram) []*account.UTXO {
-       utxos := []*account.UTXO{}
-       for i := 0; i < num; i++ {
-               utxo := MockSimpleUtxo(uint64(i), consensus.BTMAssetID, amount, ctrlProg)
-               utxos = append(utxos, utxo)
-       }
-
-       return utxos
-}
-
-func GenerateOtherUtxos(typeCount, num int, amount uint64, ctrlProg *account.CtrlProgram) []*account.UTXO {
-       utxos := []*account.UTXO{}
-
-       assetID := &bc.AssetID{
-               V0: uint64(typeCount),
-               V1: uint64(1),
-               V2: uint64(0),
-               V3: uint64(1),
-       }
-
-       for i := 0; i < num; i++ {
-               utxo := MockSimpleUtxo(uint64(typeCount*num+i), assetID, amount, ctrlProg)
-               utxos = append(utxos, utxo)
-       }
-
-       return utxos
-}
-
-func AddTxInputFromUtxo(utxo *account.UTXO, singer *signers.Signer) (*types.TxInput, *txbuilder.SigningInstruction, error) {
-       txInput, signInst, err := account.UtxoToInputs(singer, utxo)
-       if err != nil {
-               return nil, nil, err
-       }
-
-       return txInput, signInst, nil
-}
-
-func AddTxOutput(assetID bc.AssetID, amount uint64, controlProgram []byte) *types.TxOutput {
-       out := types.NewTxOutput(assetID, amount, controlProgram)
-       return out
-}
-
-func CreateTxBuilder(baseUtxo *account.UTXO, btmServiceFlag bool, signer *signers.Signer) (*txbuilder.TemplateBuilder, error) {
-       tplBuilder := txbuilder.NewBuilder(time.Now())
-
-       // add input
-       txInput, signInst, err := AddTxInputFromUtxo(baseUtxo, signer)
-       if err != nil {
-               return nil, err
-       }
-       tplBuilder.AddInput(txInput, signInst)
-
-       // if the btm is the service charge, didn't need to add the output
-       if btmServiceFlag {
-               txOutput := AddTxOutput(baseUtxo.AssetID, 100, baseUtxo.ControlProgram)
-               tplBuilder.AddOutput(txOutput)
-       }
-
-       return tplBuilder, nil
-}
-
-func AddTxBuilder(tplBuilder *txbuilder.TemplateBuilder, utxo *account.UTXO, signer *signers.Signer) error {
-       txInput, signInst, err := AddTxInputFromUtxo(utxo, signer)
-       if err != nil {
-               return err
-       }
-       tplBuilder.AddInput(txInput, signInst)
-
-       txOutput := AddTxOutput(utxo.AssetID, utxo.Amount, utxo.ControlProgram)
-       tplBuilder.AddOutput(txOutput)
-
-       return nil
-}
-
-func BuildTx(baseUtxo *account.UTXO, otherUtxos []*account.UTXO, signer *signers.Signer) (*txbuilder.Template, error) {
-       btmServiceFlag := false
-       if otherUtxos == nil || len(otherUtxos) == 0 {
-               btmServiceFlag = true
-       }
-
-       tplBuilder, err := CreateTxBuilder(baseUtxo, btmServiceFlag, signer)
-       if err != nil {
-               return nil, err
-       }
-
-       for _, u := range otherUtxos {
-               if err := AddTxBuilder(tplBuilder, u, signer); err != nil {
-                       return nil, err
-               }
-       }
-
-       tpl, _, err := tplBuilder.Build()
-       if err != nil {
-               return nil, err
-       }
-
-       return tpl, nil
-}
-
-func GenetrateTxbyUtxo(baseUtxo []*account.UTXO, otherUtxo [][]*account.UTXO) ([]*types.Tx, error) {
-       tmpUtxo := []*account.UTXO{}
-       txs := []*types.Tx{}
-       otherUtxoFlag := true
-
-       if len(otherUtxo) == 0 || len(otherUtxo) != len(baseUtxo) {
-               otherUtxoFlag = false
-       }
-
-       for i := 0; i < len(baseUtxo); i++ {
-               if otherUtxoFlag {
-                       tmpUtxo = otherUtxo[i]
-               } else {
-                       tmpUtxo = nil
-               }
-
-               tpl, err := BuildTx(baseUtxo[i], tmpUtxo, nil)
-               if err != nil {
-                       return nil, err
-               }
-
-               txs = append(txs, tpl.Transaction)
-       }
-
-       return txs, nil
-}
-
-func CreateTxbyNum(txNumber, otherAssetNum int) ([]*types.Tx, error) {
-       baseUtxos := GenerateBaseUtxos(txNumber, 1000000000, nil)
-       otherUtxos := make([][]*account.UTXO, 0, txNumber)
-       if otherAssetNum != 0 {
-               for i := 0; i < txNumber; i++ {
-                       utxos := GenerateOtherUtxos(i, otherAssetNum, 6000, nil)
-                       otherUtxos = append(otherUtxos, utxos)
-               }
-       }
-
-       txs, err := GenetrateTxbyUtxo(baseUtxos, otherUtxos)
-       if err != nil {
-               return nil, err
-       }
-
-       return txs, nil
-}
-
-func SetUtxoView(db dbm.DB, view *state.UtxoViewpoint) error {
-       batch := db.NewBatch()
-       if err := database.SaveUtxoView(batch, view); err != nil {
-               return err
-       }
-       batch.Write()
-       return nil
-}
-
-//-------------------------Mock actual transaction----------------------------------
-func MockTxsP2PKH(keyDirPath string, testDB dbm.DB, txNumber, otherAssetNum int) ([]*types.Tx, error) {
-       accountManager := account.NewManager(testDB, nil)
-       hsm, err := pseudohsm.New(keyDirPath)
-       if err != nil {
-               return nil, err
-       }
-
-       xpub, _, err := hsm.XCreate("TestP2PKH", "password", "en")
-       if err != nil {
-               return nil, err
-       }
-
-       txs := []*types.Tx{}
-       for i := 0; i < txNumber; i++ {
-               testAccountAlias := fmt.Sprintf("testAccount%d", i)
-               testAccount, err := accountManager.Create([]chainkd.XPub{xpub.XPub}, 1, testAccountAlias, signers.BIP0044)
-               if err != nil {
-                       return nil, err
-               }
-
-               controlProg, err := accountManager.CreateAddress(testAccount.ID, false)
-               if err != nil {
-                       return nil, err
-               }
-
-               utxo := MockSimpleUtxo(0, consensus.BTMAssetID, 1000000000, controlProg)
-               otherUtxos := GenerateOtherUtxos(i, otherAssetNum, 6000, controlProg)
-               tpl, err := BuildTx(utxo, otherUtxos, testAccount.Signer)
-               if err != nil {
-                       return nil, err
-               }
-
-               if _, err := MockSign(tpl, hsm, "password"); err != nil {
-                       return nil, err
-               }
-
-               txs = append(txs, tpl.Transaction)
-       }
-
-       return txs, nil
-}
-
-func MockTxsP2SH(keyDirPath string, testDB dbm.DB, txNumber, otherAssetNum int) ([]*types.Tx, error) {
-       accountManager := account.NewManager(testDB, nil)
-       hsm, err := pseudohsm.New(keyDirPath)
-       if err != nil {
-               return nil, err
-       }
-
-       xpub1, _, err := hsm.XCreate("TestP2SH1", "password", "en")
-       if err != nil {
-               return nil, err
-       }
-
-       xpub2, _, err := hsm.XCreate("TestP2SH2", "password", "en")
-       if err != nil {
-               return nil, err
-       }
-
-       txs := []*types.Tx{}
-       for i := 0; i < txNumber; i++ {
-               testAccountAlias := fmt.Sprintf("testAccount%d", i)
-               testAccount, err := accountManager.Create([]chainkd.XPub{xpub1.XPub, xpub2.XPub}, 2, testAccountAlias, signers.BIP0044)
-               if err != nil {
-                       return nil, err
-               }
-
-               controlProg, err := accountManager.CreateAddress(testAccount.ID, false)
-               if err != nil {
-                       return nil, err
-               }
-
-               utxo := MockSimpleUtxo(0, consensus.BTMAssetID, 1000000000, controlProg)
-               otherUtxos := GenerateOtherUtxos(i, otherAssetNum, 6000, controlProg)
-               tpl, err := BuildTx(utxo, otherUtxos, testAccount.Signer)
-               if err != nil {
-                       return nil, err
-               }
-
-               if _, err := MockSign(tpl, hsm, "password"); err != nil {
-                       return nil, err
-               }
-
-               txs = append(txs, tpl.Transaction)
-       }
-
-       return txs, nil
-}
-
-func MockTxsMultiSign(keyDirPath string, testDB dbm.DB, txNumber, otherAssetNum int) ([]*types.Tx, error) {
-       accountManager := account.NewManager(testDB, nil)
-       hsm, err := pseudohsm.New(keyDirPath)
-       if err != nil {
-               return nil, err
-       }
-
-       xpub1, _, err := hsm.XCreate("TestMultilNodeSign1", "password1", "en")
-       if err != nil {
-               return nil, err
-       }
-
-       xpub2, _, err := hsm.XCreate("TestMultilNodeSign2", "password2", "en")
-       if err != nil {
-               return nil, err
-       }
-       txs := []*types.Tx{}
-       for i := 0; i < txNumber; i++ {
-               testAccountAlias := fmt.Sprintf("testAccount%d", i)
-               testAccount, err := accountManager.Create([]chainkd.XPub{xpub1.XPub, xpub2.XPub}, 2, testAccountAlias, signers.BIP0044)
-               if err != nil {
-                       return nil, err
-               }
-
-               controlProg, err := accountManager.CreateAddress(testAccount.ID, false)
-               if err != nil {
-                       return nil, err
-               }
-
-               utxo := MockSimpleUtxo(0, consensus.BTMAssetID, 1000000000, controlProg)
-               otherUtxos := GenerateOtherUtxos(i, otherAssetNum, 6000, controlProg)
-               tpl, err := BuildTx(utxo, otherUtxos, testAccount.Signer)
-               if err != nil {
-                       return nil, err
-               }
-
-               if _, err := MockSign(tpl, hsm, "password1"); err != nil {
-                       return nil, err
-               }
-
-               if _, err := MockSign(tpl, hsm, "password2"); err != nil {
-                       return nil, err
-               }
-
-               txs = append(txs, tpl.Transaction)
-       }
-
-       return txs, nil
-}
+// import (
+//     "fmt"
+//     "io/ioutil"
+//     "os"
+//     "testing"
+//     "time"
+
+//     "github.com/bytom/bytom/account"
+//     "github.com/bytom/bytom/blockchain/pseudohsm"
+//     "github.com/bytom/bytom/blockchain/signers"
+//     "github.com/bytom/bytom/blockchain/txbuilder"
+//     "github.com/bytom/bytom/consensus"
+//     "github.com/bytom/bytom/crypto/ed25519/chainkd"
+//     "github.com/bytom/bytom/database"
+//     dbm "github.com/bytom/bytom/database/leveldb"
+//     "github.com/bytom/bytom/database/storage"
+//     "github.com/bytom/bytom/event"
+//     "github.com/bytom/bytom/protocol"
+//     "github.com/bytom/bytom/protocol/bc"
+//     "github.com/bytom/bytom/protocol/bc/types"
+//     "github.com/bytom/bytom/protocol/state"
+// )
+
+// func BenchmarkChain_CoinBaseTx_NoAsset(b *testing.B) {
+//     benchInsertChain(b, 0, 0, "")
+// }
+
+// func BenchmarkChain_BtmTx_NoAsset_BASE(b *testing.B) {
+//     benchInsertChain(b, 1, 0, "")
+// }
+
+// func BenchmarkChain_5000BtmTx_NoAsset_BASE(b *testing.B) {
+//     benchInsertChain(b, 5000, 0, "")
+// }
+
+// func BenchmarkChain_5000BtmTx_1Asset_BASE(b *testing.B) {
+//     benchInsertChain(b, 5000, 1, "")
+// }
+
+// // standard Transaction
+// func BenchmarkChain_BtmTx_NoAsset_P2PKH(b *testing.B) {
+//     benchInsertChain(b, 1000, 0, "P2PKH")
+// }
+
+// func BenchmarkChain_BtmTx_1Asset_P2PKH(b *testing.B) {
+//     benchInsertChain(b, 1000, 1, "P2PKH")
+// }
+
+// func BenchmarkChain_BtmTx_NoAsset_P2SH(b *testing.B) {
+//     benchInsertChain(b, 100, 0, "P2SH")
+// }
+
+// func BenchmarkChain_BtmTx_1Asset_P2SH(b *testing.B) {
+//     benchInsertChain(b, 100, 1, "P2SH")
+// }
+
+// func BenchmarkChain_BtmTx_NoAsset_MultiSign(b *testing.B) {
+//     benchInsertChain(b, 100, 0, "MultiSign")
+// }
+
+// func BenchmarkChain_BtmTx_1Asset_MultiSign(b *testing.B) {
+//     benchInsertChain(b, 100, 1, "MultiSign")
+// }
+
+// // func benchInsertChain(b *testing.B, blockTxNumber int, otherAssetNum int, txType string) {
+// //  b.StopTimer()
+// //  testNumber := b.N
+// //  totalTxNumber := testNumber * blockTxNumber
+
+// //  dirPath, err := ioutil.TempDir(".", "testDB")
+// //  if err != nil {
+// //          b.Fatal("create dirPath err:", err)
+// //  }
+// //  defer os.RemoveAll(dirPath)
+
+// //  testDB := dbm.NewDB("testdb", "leveldb", dirPath)
+// //  defer testDB.Close()
+
+// //  // Generate a chain test data.
+// //  chain, txs, txPool, err := GenerateChainData(dirPath, testDB, totalTxNumber, otherAssetNum, txType)
+// //  if err != nil {
+// //          b.Fatal("GenerateChainData err:", err)
+// //  }
+
+// //  b.ReportAllocs()
+// //  b.StartTimer()
+
+// //  for i := 0; i < b.N; i++ {
+// //          testTxs := txs[blockTxNumber*i : blockTxNumber*(i+1)]
+// //          if err := InsertChain(chain, txPool, testTxs); err != nil {
+// //                  b.Fatal("Failed to insert block into chain:", err)
+// //          }
+// //  }
+// // }
+
+// func GenerateChainData(dirPath string, testDB dbm.DB, txNumber, otherAssetNum int, txType string) (*protocol.Chain, []*types.Tx, *protocol.TxPool, error) {
+//     var err error
+
+//     // generate transactions
+//     txs := []*types.Tx{}
+//     switch txType {
+//     case "P2PKH":
+//             txs, err = MockTxsP2PKH(dirPath, testDB, txNumber, otherAssetNum)
+//             if err != nil {
+//                     return nil, nil, nil, err
+//             }
+//     case "P2SH":
+//             txs, err = MockTxsP2SH(dirPath, testDB, txNumber, otherAssetNum)
+//             if err != nil {
+//                     return nil, nil, nil, err
+//             }
+//     case "MultiSign":
+//             txs, err = MockTxsMultiSign(dirPath, testDB, txNumber, otherAssetNum)
+//             if err != nil {
+//                     return nil, nil, nil, err
+//             }
+//     default:
+//             txs, err = CreateTxbyNum(txNumber, otherAssetNum)
+//             if err != nil {
+//                     return nil, nil, nil, err
+//             }
+//     }
+
+//     // init UtxoViewpoint
+//     utxoView := state.NewUtxoViewpoint()
+//     utxoEntry := storage.NewUtxoEntry(false, 1, false)
+//     for _, tx := range txs {
+//             for _, id := range tx.SpentOutputIDs {
+//                     utxoView.Entries[id] = utxoEntry
+//             }
+//     }
+
+//     if err := SetUtxoView(testDB, utxoView); err != nil {
+//             return nil, nil, nil, err
+//     }
+
+//     store := database.NewStore(testDB)
+//     dispatcher := event.NewDispatcher()
+//     txPool := protocol.NewTxPool(store, dispatcher)
+//     chain, err := protocol.NewChain(store, txPool)
+//     if err != nil {
+//             return nil, nil, nil, err
+//     }
+
+//     go processNewTxch(txPool)
+
+//     return chain, txs, txPool, nil
+// }
+
+// // func InsertChain(chain *protocol.Chain, txPool *protocol.TxPool, txs []*types.Tx) error {
+// //  for _, tx := range txs {
+// //          if err := txbuilder.FinalizeTx(nil, chain, tx); err != nil {
+// //                  return err
+// //          }
+// //  }
+
+// //  block, err := mining.NewBlockTemplate(chain, txPool, nil)
+// //  if err != nil {
+// //          return err
+// //  }
+
+// //  blockSize, err := block.MarshalText()
+// //  if err != nil {
+// //          return err
+// //  }
+
+// //  fmt.Println("blocksize:", uint64(len(blockSize)))
+// //  fmt.Println("block tx count:", uint64(len(block.Transactions)))
+// //  fmt.Println("coinbase txsize:", uint64(block.Transactions[0].SerializedSize))
+// //  if len(block.Transactions) > 1 {
+// //          fmt.Println("txsize:", uint64(block.Transactions[1].SerializedSize))
+// //  }
+
+// //  seed, err := chain.CalcNextSeed(&block.PreviousBlockHash)
+// //  if err != nil {
+// //          return err
+// //  }
+
+// //  if err := SolveBlock(seed, block); err != nil {
+// //          return err
+// //  }
+
+// //  if _, err := chain.ProcessBlock(block); err != nil {
+// //          return err
+// //  }
+
+// //  return nil
+// // }
+
+// func processNewTxch(txPool *protocol.TxPool) {
+// }
+
+// // func SolveBlock(seed *bc.Hash, block *types.Block) error {
+// //  maxNonce := ^uint64(0) // 2^64 - 1
+// //  header := &block.BlockHeader
+// //  for i := uint64(0); i < maxNonce; i++ {
+// //          header.Nonce = i
+// //          headerHash := header.Hash()
+// //          if difficulty.CheckProofOfWork(&headerHash, seed, header.Bits) {
+// //                  return nil
+// //          }
+// //  }
+// //  return nil
+// // }
+
+// func MockSimpleUtxo(index uint64, assetID *bc.AssetID, amount uint64, ctrlProg *account.CtrlProgram) *account.UTXO {
+//     if ctrlProg == nil {
+//             ctrlProg = &account.CtrlProgram{
+//                     AccountID:      "",
+//                     Address:        "",
+//                     KeyIndex:       uint64(0),
+//                     ControlProgram: []byte{81},
+//                     Change:         false,
+//             }
+//     }
+
+//     utxo := &account.UTXO{
+//             OutputID:            bc.Hash{V0: 1},
+//             SourceID:            bc.Hash{V0: 1},
+//             AssetID:             *assetID,
+//             Amount:              amount,
+//             SourcePos:           index,
+//             ControlProgram:      ctrlProg.ControlProgram,
+//             ControlProgramIndex: ctrlProg.KeyIndex,
+//             AccountID:           ctrlProg.AccountID,
+//             Address:             ctrlProg.Address,
+//             ValidHeight:         0,
+//     }
+
+//     return utxo
+// }
+
+// func GenerateBaseUtxos(num int, amount uint64, ctrlProg *account.CtrlProgram) []*account.UTXO {
+//     utxos := []*account.UTXO{}
+//     for i := 0; i < num; i++ {
+//             utxo := MockSimpleUtxo(uint64(i), consensus.BTMAssetID, amount, ctrlProg)
+//             utxos = append(utxos, utxo)
+//     }
+
+//     return utxos
+// }
+
+// func GenerateOtherUtxos(typeCount, num int, amount uint64, ctrlProg *account.CtrlProgram) []*account.UTXO {
+//     utxos := []*account.UTXO{}
+
+//     assetID := &bc.AssetID{
+//             V0: uint64(typeCount),
+//             V1: uint64(1),
+//             V2: uint64(0),
+//             V3: uint64(1),
+//     }
+
+//     for i := 0; i < num; i++ {
+//             utxo := MockSimpleUtxo(uint64(typeCount*num+i), assetID, amount, ctrlProg)
+//             utxos = append(utxos, utxo)
+//     }
+
+//     return utxos
+// }
+
+// func AddTxInputFromUtxo(utxo *account.UTXO, singer *signers.Signer) (*types.TxInput, *txbuilder.SigningInstruction, error) {
+//     txInput, signInst, err := account.UtxoToInputs(singer, utxo)
+//     if err != nil {
+//             return nil, nil, err
+//     }
+
+//     return txInput, signInst, nil
+// }
+
+// func AddTxOutput(assetID bc.AssetID, amount uint64, controlProgram []byte) *types.TxOutput {
+//     out := types.NewTxOutput(assetID, amount, controlProgram)
+//     return out
+// }
+
+// func CreateTxBuilder(baseUtxo *account.UTXO, btmServiceFlag bool, signer *signers.Signer) (*txbuilder.TemplateBuilder, error) {
+//     tplBuilder := txbuilder.NewBuilder(time.Now())
+
+//     // add input
+//     txInput, signInst, err := AddTxInputFromUtxo(baseUtxo, signer)
+//     if err != nil {
+//             return nil, err
+//     }
+//     tplBuilder.AddInput(txInput, signInst)
+
+//     // if the btm is the service charge, didn't need to add the output
+//     if btmServiceFlag {
+//             txOutput := AddTxOutput(baseUtxo.AssetID, 100, baseUtxo.ControlProgram)
+//             tplBuilder.AddOutput(txOutput)
+//     }
+
+//     return tplBuilder, nil
+// }
+
+// func AddTxBuilder(tplBuilder *txbuilder.TemplateBuilder, utxo *account.UTXO, signer *signers.Signer) error {
+//     txInput, signInst, err := AddTxInputFromUtxo(utxo, signer)
+//     if err != nil {
+//             return err
+//     }
+//     tplBuilder.AddInput(txInput, signInst)
+
+//     txOutput := AddTxOutput(utxo.AssetID, utxo.Amount, utxo.ControlProgram)
+//     tplBuilder.AddOutput(txOutput)
+
+//     return nil
+// }
+
+// func BuildTx(baseUtxo *account.UTXO, otherUtxos []*account.UTXO, signer *signers.Signer) (*txbuilder.Template, error) {
+//     btmServiceFlag := false
+//     if otherUtxos == nil || len(otherUtxos) == 0 {
+//             btmServiceFlag = true
+//     }
+
+//     tplBuilder, err := CreateTxBuilder(baseUtxo, btmServiceFlag, signer)
+//     if err != nil {
+//             return nil, err
+//     }
+
+//     for _, u := range otherUtxos {
+//             if err := AddTxBuilder(tplBuilder, u, signer); err != nil {
+//                     return nil, err
+//             }
+//     }
+
+//     tpl, _, err := tplBuilder.Build()
+//     if err != nil {
+//             return nil, err
+//     }
+
+//     return tpl, nil
+// }
+
+// func GenetrateTxbyUtxo(baseUtxo []*account.UTXO, otherUtxo [][]*account.UTXO) ([]*types.Tx, error) {
+//     tmpUtxo := []*account.UTXO{}
+//     txs := []*types.Tx{}
+//     otherUtxoFlag := true
+
+//     if len(otherUtxo) == 0 || len(otherUtxo) != len(baseUtxo) {
+//             otherUtxoFlag = false
+//     }
+
+//     for i := 0; i < len(baseUtxo); i++ {
+//             if otherUtxoFlag {
+//                     tmpUtxo = otherUtxo[i]
+//             } else {
+//                     tmpUtxo = nil
+//             }
+
+//             tpl, err := BuildTx(baseUtxo[i], tmpUtxo, nil)
+//             if err != nil {
+//                     return nil, err
+//             }
+
+//             txs = append(txs, tpl.Transaction)
+//     }
+
+//     return txs, nil
+// }
+
+// func CreateTxbyNum(txNumber, otherAssetNum int) ([]*types.Tx, error) {
+//     baseUtxos := GenerateBaseUtxos(txNumber, 1000000000, nil)
+//     otherUtxos := make([][]*account.UTXO, 0, txNumber)
+//     if otherAssetNum != 0 {
+//             for i := 0; i < txNumber; i++ {
+//                     utxos := GenerateOtherUtxos(i, otherAssetNum, 6000, nil)
+//                     otherUtxos = append(otherUtxos, utxos)
+//             }
+//     }
+
+//     txs, err := GenetrateTxbyUtxo(baseUtxos, otherUtxos)
+//     if err != nil {
+//             return nil, err
+//     }
+
+//     return txs, nil
+// }
+
+// func SetUtxoView(db dbm.DB, view *state.UtxoViewpoint) error {
+//     batch := db.NewBatch()
+//     if err := database.SaveUtxoView(batch, view); err != nil {
+//             return err
+//     }
+//     batch.Write()
+//     return nil
+// }
+
+// //-------------------------Mock actual transaction----------------------------------
+// func MockTxsP2PKH(keyDirPath string, testDB dbm.DB, txNumber, otherAssetNum int) ([]*types.Tx, error) {
+//     accountManager := account.NewManager(testDB, nil)
+//     hsm, err := pseudohsm.New(keyDirPath)
+//     if err != nil {
+//             return nil, err
+//     }
+
+//     xpub, _, err := hsm.XCreate("TestP2PKH", "password", "en")
+//     if err != nil {
+//             return nil, err
+//     }
+
+//     txs := []*types.Tx{}
+//     for i := 0; i < txNumber; i++ {
+//             testAccountAlias := fmt.Sprintf("testAccount%d", i)
+//             testAccount, err := accountManager.Create([]chainkd.XPub{xpub.XPub}, 1, testAccountAlias, signers.BIP0044)
+//             if err != nil {
+//                     return nil, err
+//             }
+
+//             controlProg, err := accountManager.CreateAddress(testAccount.ID, false)
+//             if err != nil {
+//                     return nil, err
+//             }
+
+//             utxo := MockSimpleUtxo(0, consensus.BTMAssetID, 1000000000, controlProg)
+//             otherUtxos := GenerateOtherUtxos(i, otherAssetNum, 6000, controlProg)
+//             tpl, err := BuildTx(utxo, otherUtxos, testAccount.Signer)
+//             if err != nil {
+//                     return nil, err
+//             }
+
+//             if _, err := MockSign(tpl, hsm, "password"); err != nil {
+//                     return nil, err
+//             }
+
+//             txs = append(txs, tpl.Transaction)
+//     }
+
+//     return txs, nil
+// }
+
+// func MockTxsP2SH(keyDirPath string, testDB dbm.DB, txNumber, otherAssetNum int) ([]*types.Tx, error) {
+//     accountManager := account.NewManager(testDB, nil)
+//     hsm, err := pseudohsm.New(keyDirPath)
+//     if err != nil {
+//             return nil, err
+//     }
+
+//     xpub1, _, err := hsm.XCreate("TestP2SH1", "password", "en")
+//     if err != nil {
+//             return nil, err
+//     }
+
+//     xpub2, _, err := hsm.XCreate("TestP2SH2", "password", "en")
+//     if err != nil {
+//             return nil, err
+//     }
+
+//     txs := []*types.Tx{}
+//     for i := 0; i < txNumber; i++ {
+//             testAccountAlias := fmt.Sprintf("testAccount%d", i)
+//             testAccount, err := accountManager.Create([]chainkd.XPub{xpub1.XPub, xpub2.XPub}, 2, testAccountAlias, signers.BIP0044)
+//             if err != nil {
+//                     return nil, err
+//             }
+
+//             controlProg, err := accountManager.CreateAddress(testAccount.ID, false)
+//             if err != nil {
+//                     return nil, err
+//             }
+
+//             utxo := MockSimpleUtxo(0, consensus.BTMAssetID, 1000000000, controlProg)
+//             otherUtxos := GenerateOtherUtxos(i, otherAssetNum, 6000, controlProg)
+//             tpl, err := BuildTx(utxo, otherUtxos, testAccount.Signer)
+//             if err != nil {
+//                     return nil, err
+//             }
+
+//             if _, err := MockSign(tpl, hsm, "password"); err != nil {
+//                     return nil, err
+//             }
+
+//             txs = append(txs, tpl.Transaction)
+//     }
+
+//     return txs, nil
+// }
+
+// func MockTxsMultiSign(keyDirPath string, testDB dbm.DB, txNumber, otherAssetNum int) ([]*types.Tx, error) {
+//     accountManager := account.NewManager(testDB, nil)
+//     hsm, err := pseudohsm.New(keyDirPath)
+//     if err != nil {
+//             return nil, err
+//     }
+
+//     xpub1, _, err := hsm.XCreate("TestMultilNodeSign1", "password1", "en")
+//     if err != nil {
+//             return nil, err
+//     }
+
+//     xpub2, _, err := hsm.XCreate("TestMultilNodeSign2", "password2", "en")
+//     if err != nil {
+//             return nil, err
+//     }
+//     txs := []*types.Tx{}
+//     for i := 0; i < txNumber; i++ {
+//             testAccountAlias := fmt.Sprintf("testAccount%d", i)
+//             testAccount, err := accountManager.Create([]chainkd.XPub{xpub1.XPub, xpub2.XPub}, 2, testAccountAlias, signers.BIP0044)
+//             if err != nil {
+//                     return nil, err
+//             }
+
+//             controlProg, err := accountManager.CreateAddress(testAccount.ID, false)
+//             if err != nil {
+//                     return nil, err
+//             }
+
+//             utxo := MockSimpleUtxo(0, consensus.BTMAssetID, 1000000000, controlProg)
+//             otherUtxos := GenerateOtherUtxos(i, otherAssetNum, 6000, controlProg)
+//             tpl, err := BuildTx(utxo, otherUtxos, testAccount.Signer)
+//             if err != nil {
+//                     return nil, err
+//             }
+
+//             if _, err := MockSign(tpl, hsm, "password1"); err != nil {
+//                     return nil, err
+//             }
+
+//             if _, err := MockSign(tpl, hsm, "password2"); err != nil {
+//                     return nil, err
+//             }
+
+//             txs = append(txs, tpl.Transaction)
+//     }
+
+//     return txs, nil
+// }
index 66d3454..1e0db0c 100644 (file)
@@ -1,7 +1,6 @@
 package test
 
 import (
-       "github.com/bytom/bytom/mining/tensority"
        "github.com/bytom/bytom/protocol"
        "github.com/bytom/bytom/protocol/bc"
        "github.com/bytom/bytom/protocol/bc/types"
@@ -20,11 +19,6 @@ func NewBlock(chain *protocol.Chain, txs []*types.Tx, controlProgram []byte) (*t
        }
 
        preBlockHeader := chain.BestBlockHeader()
-       preBlockHash := preBlockHeader.Hash()
-       nextBits, err := chain.CalcNextBits(&preBlockHash)
-       if err != nil {
-               return nil, err
-       }
 
        b := &types.Block{
                BlockHeader: types.BlockHeader{
@@ -33,7 +27,6 @@ func NewBlock(chain *protocol.Chain, txs []*types.Tx, controlProgram []byte) (*t
                        PreviousBlockHash: preBlockHeader.Hash(),
                        Timestamp:         preBlockHeader.Timestamp + 1,
                        BlockCommitment:   types.BlockCommitment{},
-                       Bits:              nextBits,
                },
                Transactions: []*types.Tx{nil},
        }
@@ -111,6 +104,6 @@ func SolveAndUpdate(chain *protocol.Chain, block *types.Block) error {
 
 // Solve simulate solve difficulty by add result to cache
 func Solve(seed *bc.Hash, block *types.Block) {
-       hash := block.BlockHeader.Hash()
-       tensority.AIHash.AddCache(&hash, seed, &bc.Hash{})
+       // hash := block.BlockHeader.Hash()
+       // tensority.AIHash.AddCache(&hash, seed, &bc.Hash{})
 }
index 84dc1a0..9de664a 100644 (file)
@@ -41,7 +41,6 @@ func init() {
                                                Version:           1,
                                                Timestamp:         1556431597,
                                                Nonce:             5,
-                                               Bits:              2305843009214532812,
                                                PreviousBlockHash: testutil.MustDecodeHash("ce4fe9431cd0225b3a811f8f8ec922f2b07a921bb12a8dddae9a85540072c770"),
                                        },
                                        Transactions: []*types.Tx{
@@ -67,7 +66,6 @@ func init() {
                                                Version:           1,
                                                Timestamp:         1556431697,
                                                Nonce:             36,
-                                               Bits:              2305843009214532812,
                                                PreviousBlockHash: testutil.MustDecodeHash("ce4fe9431cd0225b3a811f8f8ec922f2b07a921bb12a8dddae9a85540072c770"),
                                        },
                                        Transactions: []*types.Tx{
@@ -96,7 +94,6 @@ func init() {
                                                Version:           1,
                                                Timestamp:         1556431604,
                                                Nonce:             0,
-                                               Bits:              2305843009214532812,
                                                PreviousBlockHash: testutil.MustDecodeHash("2eaf7f40b0a0d4a5025f3d5d9b8589d3db1634f7b55089ca59253a9c587266b2"),
                                        },
                                        Transactions: []*types.Tx{
@@ -123,7 +120,6 @@ func init() {
                                                Version:           1,
                                                Timestamp:         1556431604,
                                                Nonce:             12,
-                                               Bits:              2305843009214532812,
                                                PreviousBlockHash: testutil.MustDecodeHash("2eaf7f40b0a0d4a5025f3d5d9b8589d3db1634f7b55089ca59253a9c587266b2"),
                                        },
                                        Transactions: []*types.Tx{
@@ -168,7 +164,6 @@ func init() {
                                                Version:           1,
                                                Timestamp:         1556431607,
                                                Nonce:             4,
-                                               Bits:              2305843009214532812,
                                                PreviousBlockHash: testutil.MustDecodeHash("2eaf7f40b0a0d4a5025f3d5d9b8589d3db1634f7b55089ca59253a9c587266b2"),
                                        },
                                        Transactions: []*types.Tx{
@@ -213,7 +208,6 @@ func init() {
                                                Version:           1,
                                                Timestamp:         1556431607,
                                                Nonce:             17,
-                                               Bits:              2305843009214532812,
                                                PreviousBlockHash: testutil.MustDecodeHash("2eaf7f40b0a0d4a5025f3d5d9b8589d3db1634f7b55089ca59253a9c587266b2"),
                                        },
                                        Transactions: []*types.Tx{
@@ -266,7 +260,6 @@ func init() {
                                                Version:           1,
                                                Timestamp:         1556431607,
                                                Nonce:             4,
-                                               Bits:              2305843009214532812,
                                                PreviousBlockHash: testutil.MustDecodeHash("2eaf7f40b0a0d4a5025f3d5d9b8589d3db1634f7b55089ca59253a9c587266b2"),
                                        },
                                        Transactions: []*types.Tx{
@@ -320,7 +313,6 @@ func init() {
                                                Version:           1,
                                                Timestamp:         1556431607,
                                                Nonce:             4,
-                                               Bits:              2305843009214532812,
                                                PreviousBlockHash: testutil.MustDecodeHash("2eaf7f40b0a0d4a5025f3d5d9b8589d3db1634f7b55089ca59253a9c587266b2"),
                                        },
                                        Transactions: []*types.Tx{
@@ -375,7 +367,6 @@ func init() {
                                                Version:           1,
                                                Timestamp:         1556431607,
                                                Nonce:             12,
-                                               Bits:              2305843009214532812,
                                                PreviousBlockHash: testutil.MustDecodeHash("2eaf7f40b0a0d4a5025f3d5d9b8589d3db1634f7b55089ca59253a9c587266b2"),
                                        },
                                        Transactions: []*types.Tx{
@@ -433,7 +424,6 @@ func init() {
                                                Version:           1,
                                                Timestamp:         1556431640,
                                                Nonce:             0,
-                                               Bits:              2305843009214532812,
                                                PreviousBlockHash: testutil.MustDecodeHash("09c6064f4f1e7325440c45df03e97f97dbfbb66033315a384308256038af6c30"),
                                        },
                                        Transactions: []*types.Tx{
@@ -460,7 +450,6 @@ func init() {
                                                Version:           1,
                                                Timestamp:         1556431640,
                                                Nonce:             5,
-                                               Bits:              2305843009214532812,
                                                PreviousBlockHash: testutil.MustDecodeHash("33f56264283cc12e3b232068caa13c1fd052c21b231a94e8c0a40bac25629f88"),
                                        },
                                        Transactions: []*types.Tx{
@@ -493,7 +482,6 @@ func TestProcessBlock(t *testing.T) {
                                BlockHeader: types.BlockHeader{
                                        Height:            1,
                                        Version:           1,
-                                       Bits:              2305843009214532812,
                                        PreviousBlockHash: blockMap[0][0].block.Hash(),
                                },
                        },
index 12d180f..601894b 100644 (file)
@@ -8,6 +8,11 @@ import (
        "github.com/bytom/bytom/protocol/bc/types"
 )
 
+var (
+       ErrFoundHeaderByHash   = errors.New("can't find header by hash")
+       ErrFoundHeaderByHeight = errors.New("can't find header by height")
+)
+
 type Chain struct {
        bestBlockHeader *types.BlockHeader
        heightMap       map[uint64]*types.Block
@@ -24,6 +29,10 @@ func NewChain() *Chain {
        }
 }
 
+func (c *Chain) LastIrreversibleHeader() *types.BlockHeader {
+       return nil
+}
+
 func (c *Chain) BestBlockHeader() *types.BlockHeader {
        return c.bestBlockHeader
 }
diff --git a/test/mock/mempool.go b/test/mock/mempool.go
new file mode 100644 (file)
index 0000000..c4f920a
--- /dev/null
@@ -0,0 +1,28 @@
+package mock
+
+import (
+       "github.com/bytom/bytom/protocol"
+       "github.com/bytom/bytom/protocol/bc/types"
+)
+
+type Mempool struct {
+       txs []*protocol.TxDesc
+}
+
+func newMempool() *Mempool {
+       return &Mempool{
+               txs: []*protocol.TxDesc{},
+       }
+}
+
+func (m *Mempool) AddTx(tx *types.Tx) {
+       m.txs = append(m.txs, &protocol.TxDesc{Tx: tx})
+}
+
+func (m *Mempool) GetTransactions() []*protocol.TxDesc {
+       return m.txs
+}
+
+func (m *Mempool) IsDust(tx *types.Tx) bool {
+       return false
+}
diff --git a/test/performance/mining_test.go b/test/performance/mining_test.go
deleted file mode 100644 (file)
index eec2e9f..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-package performance
-
-import (
-       "os"
-       "testing"
-
-       "github.com/bytom/bytom/account"
-       "github.com/bytom/bytom/mining"
-       "github.com/bytom/bytom/test"
-       dbm "github.com/bytom/bytom/database/leveldb"
-)
-
-// Function NewBlockTemplate's benchmark - 0.05s
-func BenchmarkNewBlockTpl(b *testing.B) {
-       testDB := dbm.NewDB("testdb", "leveldb", "temp")
-       defer os.RemoveAll("temp")
-
-       chain, _, txPool, err := test.MockChain(testDB)
-       if err != nil {
-               b.Fatal(err)
-       }
-       accountManager := account.NewManager(testDB, chain)
-
-       b.ResetTimer()
-       for i := 0; i < b.N; i++ {
-               mining.NewBlockTemplate(chain, txPool, accountManager)
-       }
-}
index eeeb1f7..34dc757 100644 (file)
@@ -4,7 +4,6 @@ import (
        "encoding/hex"
 
        "github.com/bytom/bytom/consensus"
-       "github.com/bytom/bytom/consensus/difficulty"
        "github.com/bytom/bytom/protocol/bc"
        "github.com/bytom/bytom/protocol/bc/types"
        "github.com/bytom/bytom/protocol/state"
@@ -56,18 +55,15 @@ func blockNode(header *bc.BlockHeader) *state.BlockNode {
                Height:            header.Height,
                PreviousBlockHash: *header.PreviousBlockId,
                Timestamp:         header.Timestamp,
-               Bits:              header.Bits,
                Nonce:             header.Nonce,
        }
        return &state.BlockNode{
                Parent:    nil,
                Hash:      h.Hash(),
-               WorkSum:   difficulty.CalcWork(h.Bits),
                Version:   h.Version,
                Height:    h.Height,
                Timestamp: h.Timestamp,
                Nonce:     h.Nonce,
-               Bits:      h.Bits,
        }
 }
 
index 0d0666e..40b97e5 100644 (file)
@@ -389,7 +389,6 @@ func mockSingleBlock(tx *types.Tx) *types.Block {
                BlockHeader: types.BlockHeader{
                        Version: 1,
                        Height:  1,
-                       Bits:    2305843009230471167,
                },
                Transactions: []*types.Tx{config.GenesisTx(), tx},
        }