OSDN Git Service

fix log
[bytom/vapor.git] / toolbar / precog / monitor / monitor.go
index 392495c..e934990 100644 (file)
 package monitor
 
 import (
-       // "encoding/binary"
-       // "encoding/hex"
-       // "io/ioutil"
        "fmt"
-       "net"
        "os"
+       "os/user"
        "strings"
-       "time"
+       "sync"
 
        "github.com/jinzhu/gorm"
        log "github.com/sirupsen/logrus"
-       // dbm "github.com/vapor/database/leveldb"
 
        vaporCfg "github.com/vapor/config"
-       "github.com/vapor/consensus"
        "github.com/vapor/crypto/ed25519/chainkd"
+       dbm "github.com/vapor/database/leveldb"
        "github.com/vapor/event"
-       "github.com/vapor/p2p"
-       // "github.com/vapor/protocol/bc/types"
-       // conn "github.com/vapor/p2p/connection"
-       "github.com/vapor/netsync/peers"
-       // "github.com/vapor/consensus"
-       // "github.com/vapor/crypto/sha3pool"
-       // "github.com/vapor/netsync/consensusmgr"
        "github.com/vapor/netsync/chainmgr"
+       "github.com/vapor/netsync/consensusmgr"
+       "github.com/vapor/netsync/peers"
+       "github.com/vapor/p2p"
        "github.com/vapor/p2p/discover/dht"
        "github.com/vapor/p2p/discover/mdns"
        "github.com/vapor/p2p/signlib"
        "github.com/vapor/test/mock"
        "github.com/vapor/toolbar/precog/config"
-       "github.com/vapor/toolbar/precog/database/orm"
-)
-
-var (
-       nodesToDiscv = 150
-       discvFreqSec = 60
 )
 
 type monitor struct {
+       *sync.RWMutex
        cfg     *config.Config
        db      *gorm.DB
        nodeCfg *vaporCfg.Config
        sw      *p2p.Switch
-       discvCh chan *dht.Node
        privKey chainkd.XPrv
+       chain   *mock.Chain
+       txPool  *mock.Mempool
+       // discvMap maps a node's public key to the node itself
+       discvMap       map[string]*dht.Node
+       dialCh         chan struct{}
+       checkStatusCh  chan struct{}
+       bestHeightSeen uint64
 }
 
-// TODO: set SF myself?
 func NewMonitor(cfg *config.Config, db *gorm.DB) *monitor {
-       //TODO: for test
-       cfg.CheckFreqSeconds = 1
+       dbPath, err := makePath()
+       if err != nil {
+               log.WithFields(log.Fields{"err": err}).Fatal("makePath")
+       }
 
        nodeCfg := &vaporCfg.Config{
                BaseConfig: vaporCfg.DefaultBaseConfig(),
                P2P:        vaporCfg.DefaultP2PConfig(),
                Federation: vaporCfg.DefaultFederationConfig(),
        }
-       nodeCfg.DBPath = "vapor_precog_data"
+       nodeCfg.DBPath = dbPath
        nodeCfg.ChainID = "mainnet"
-       discvCh := make(chan *dht.Node)
        privKey, err := signlib.NewPrivKey()
        if err != nil {
-               log.Fatal(err)
+               log.WithFields(log.Fields{"err": err}).Fatal("NewPrivKey")
+       }
+
+       chain, txPool, err := mockChainAndPool()
+       if err != nil {
+               log.WithFields(log.Fields{"err": err}).Fatal("mockChainAndPool")
        }
 
        return &monitor{
-               cfg:     cfg,
-               db:      db,
-               nodeCfg: nodeCfg,
-               discvCh: discvCh,
-               privKey: privKey.(chainkd.XPrv),
+               RWMutex:        &sync.RWMutex{},
+               cfg:            cfg,
+               db:             db,
+               nodeCfg:        nodeCfg,
+               privKey:        privKey.(chainkd.XPrv),
+               chain:          chain,
+               txPool:         txPool,
+               discvMap:       make(map[string]*dht.Node),
+               dialCh:         make(chan struct{}, 1),
+               checkStatusCh:  make(chan struct{}, 1),
+               bestHeightSeen: uint64(0),
        }
 }
 
-func (m *monitor) Run() {
-       defer os.RemoveAll(m.nodeCfg.DBPath)
+func makePath() (string, error) {
+       usr, err := user.Current()
+       if err != nil {
+               return "", err
+       }
+
+       dataPath := usr.HomeDir + "/.vapor/precog"
+       if err := os.MkdirAll(dataPath, os.ModePerm); err != nil {
+               return "", err
+       }
 
+       return dataPath, nil
+}
+
+func (m *monitor) Run() {
        var seeds []string
        for _, node := range m.cfg.Nodes {
-               seeds = append(seeds, fmt.Sprintf("%s:%d", node.Host, node.Port))
+               seeds = append(seeds, fmt.Sprintf("%s:%d", node.IP, node.Port))
                if err := m.upSertNode(&node); err != nil {
-                       log.Error(err)
+                       log.WithFields(log.Fields{
+                               "node": node,
+                               "err":  err,
+                       }).Error("upSertNode")
                }
        }
        m.nodeCfg.P2P.Seeds = strings.Join(seeds, ",")
        if err := m.makeSwitch(); err != nil {
-               log.Fatal(err)
+               log.WithFields(log.Fields{"err": err}).Fatal("makeSwitch")
        }
 
+       m.dialCh <- struct{}{}
        go m.discoveryRoutine()
-       go m.collectDiscoveredNodes()
        go m.connectNodesRoutine()
        go m.checkStatusRoutine()
 }
 
-// create or update: https://github.com/jinzhu/gorm/issues/1307
-func (m *monitor) upSertNode(node *config.Node) error {
-       if node.XPub != nil {
-               node.PublicKey = fmt.Sprintf("%v", node.XPub.PublicKey().String())
-       }
-
-       ormNode := &orm.Node{PublicKey: node.PublicKey}
-       if err := m.db.Where(&orm.Node{PublicKey: node.PublicKey}).First(ormNode).Error; err != nil && err != gorm.ErrRecordNotFound {
-               return err
-       }
-
-       if node.Alias != "" {
-               ormNode.Alias = node.Alias
-       }
-       if node.XPub != nil {
-               ormNode.Xpub = node.XPub.String()
-       }
-       ormNode.Host = node.Host
-       ormNode.Port = node.Port
-       return m.db.Where(&orm.Node{PublicKey: ormNode.PublicKey}).
-               Assign(&orm.Node{
-                       Xpub:  ormNode.Xpub,
-                       Alias: ormNode.Alias,
-                       Host:  ormNode.Host,
-                       Port:  ormNode.Port,
-               }).FirstOrCreate(ormNode).Error
-}
-
 func (m *monitor) makeSwitch() error {
        l, listenAddr := p2p.GetListener(m.nodeCfg.P2P)
        discv, err := dht.NewDiscover(m.nodeCfg, m.privKey, l.ExternalAddress().Port, m.cfg.NetworkID)
@@ -133,135 +124,89 @@ func (m *monitor) makeSwitch() error {
 
        // no need for lanDiscv, but passing &mdns.LANDiscover{} will cause NilPointer
        lanDiscv := mdns.NewLANDiscover(mdns.NewProtocol(), int(l.ExternalAddress().Port))
-       sw, err := p2p.NewSwitch(m.nodeCfg, discv, lanDiscv, l, m.privKey, listenAddr, m.cfg.NetworkID)
+       m.sw, err = p2p.NewSwitch(m.nodeCfg, discv, lanDiscv, l, m.privKey, listenAddr, m.cfg.NetworkID)
        if err != nil {
                return err
        }
 
-       m.sw = sw
        return nil
 }
 
-func (m *monitor) discoveryRoutine() {
-       ticker := time.NewTicker(time.Duration(discvFreqSec) * time.Second)
-       for range ticker.C {
-               nodes := make([]*dht.Node, nodesToDiscv)
-               n := m.sw.GetDiscv().ReadRandomNodes(nodes)
-               for i := 0; i < n; i++ {
-                       m.discvCh <- nodes[i]
-               }
-       }
-}
-
-func (m *monitor) collectDiscoveredNodes() {
-       // nodeMap maps a node's public key to the node itself
-       nodeMap := make(map[string]*dht.Node)
-       for node := range m.discvCh {
-               if n, ok := nodeMap[node.ID.String()]; ok && n.String() == node.String() {
-                       continue
-               }
-               log.Info("discover new node: ", node)
-
-               if err := m.upSertNode(&config.Node{
-                       PublicKey: node.ID.String(),
-                       Host:      node.IP.String(),
-                       Port:      node.TCP,
-               }); err != nil {
-                       log.Error(err)
-               }
-
-               nodeMap[node.ID.String()] = node
-       }
-}
-
-func (m *monitor) connectNodesRoutine() {
-       // TODO: change name?
-       ticker := time.NewTicker(time.Duration(m.cfg.CheckFreqSeconds) * time.Second)
-       for ; true; <-ticker.C {
-               if err := m.dialNodes(); err != nil {
-                       log.Error(err)
-               }
-       }
-}
-
-func (m *monitor) dialNodes() error {
-       var nodes []*orm.Node
-       if err := m.db.Model(&orm.Node{}).Find(&nodes).Error; err != nil {
+func (m *monitor) prepareReactors(peers *peers.PeerSet) error {
+       dispatcher := event.NewDispatcher()
+       // add ConsensusReactor for consensusChannel
+       _ = consensusmgr.NewManager(m.sw, m.chain, peers, dispatcher)
+       fastSyncDB := dbm.NewDB("fastsync", m.nodeCfg.DBBackend, m.nodeCfg.DBDir())
+       // add ProtocolReactor to handle msgs
+       if _, err := chainmgr.NewManager(m.nodeCfg, m.sw, m.chain, m.txPool, dispatcher, peers, fastSyncDB); err != nil {
                return err
        }
 
-       addresses := make([]*p2p.NetAddress, 0)
-       for i := 0; i < len(nodes); i++ {
-               ips, err := net.LookupIP(nodes[i].Host)
-               if err != nil {
-                       log.Error(err)
-                       continue
-               }
-               if len(ips) == 0 {
-                       log.Errorf("fail to look up ip for %s", nodes[i].Host)
-                       continue
+       for label, reactor := range m.sw.GetReactors() {
+               log.WithFields(log.Fields{
+                       "label":   label,
+                       "reactor": reactor,
+               }).Debug("start reactor")
+               if _, err := reactor.Start(); err != nil {
+                       return err
                }
-
-               address := p2p.NewNetAddressIPPort(ips[0], nodes[i].Port)
-               addresses = append(addresses, address)
        }
 
-       m.sw.DialPeers(addresses)
-       return nil
+       m.sw.GetSecurity().RegisterFilter(m.sw.GetNodeInfo())
+       m.sw.GetSecurity().RegisterFilter(m.sw.GetPeers())
+       return m.sw.GetSecurity().Start()
 }
 
 func (m *monitor) checkStatusRoutine() {
        peers := peers.NewPeerSet(m.sw)
-       dispatcher := event.NewDispatcher()
-       // TODO: mockchain?
-       // TODO: ???
-       // consensusMgr := consensusmgr.NewManager(sw, chain, peers, dispatcher)
-       // consensusMgr := consensusmgr.NewManager(m.sw, nil, peers, dispatcher)
-       // consensusMgr.Start()
-       //
-
-       // chainMgr, err := chainmgr.NewManager(m.nodeCfg, m.sw, chain, txPool, dispatcher, peers, fastSyncDB)
-       txPool := &mock.Mempool{}
-       chainMgr, err := chainmgr.NewManager(m.nodeCfg, m.sw, mock.NewChain(txPool), txPool, dispatcher, peers, &mockFastSyncDB{})
-       if err != nil {
-               log.Fatal(err)
+       if err := m.prepareReactors(peers); err != nil {
+               log.WithFields(log.Fields{"err": err}).Fatal("prepareReactors")
        }
 
-       for k, v := range m.sw.GetReactors() {
-               log.Debug("start", k, ",", v)
-               v.Start()
-       }
-       ticker := time.NewTicker(time.Duration(m.cfg.CheckFreqSeconds) * time.Second)
-       for ; true; <-ticker.C {
-               for _, v := range m.sw.GetReactors() {
-                       for _, peer := range m.sw.GetPeers().List() {
-                               log.Debug("AddPeer for", v, peer)
-                               // TODO: if not in sw
-                               v.AddPeer(peer)
+       for range m.checkStatusCh {
+               for _, peer := range m.sw.GetPeers().List() {
+                       peer.Start()
+                       peers.AddPeer(peer)
+               }
+               log.WithFields(log.Fields{
+                       "num":   len(m.sw.GetPeers().List()),
+                       "peers": m.sw.GetPeers().List(),
+               }).Info("connected peers")
+
+               for _, peer := range m.sw.GetPeers().List() {
+                       p := peers.GetPeer(peer.ID())
+                       if p == nil {
+                               continue
+                       }
+
+                       if err := p.SendStatus(m.chain.BestBlockHeader(), m.chain.LastIrreversibleHeader()); err != nil {
+                               log.WithFields(log.Fields{
+                                       "peer": p,
+                                       "err":  err,
+                               }).Error("SendStatus")
+                               peers.RemovePeer(p.ID())
                        }
                }
 
-               // TODO: SFSPV?
-               log.Debug("best", peers.BestPeer(consensus.SFFullNode))
                for _, peerInfo := range peers.GetPeerInfos() {
-                       log.Debug(peerInfo)
+                       if peerInfo.Height > m.bestHeightSeen {
+                               m.bestHeightSeen = peerInfo.Height
+                       }
                }
-       }
-}
+               log.Info("bestHeight: ", m.bestHeightSeen)
+               m.processPeerInfos(peers.GetPeerInfos())
 
-// TODO:
-// implement logic first, and then refactor
-// /home/gavin/work/go/src/github.com/vapor/
-// p2p/test_util.go
-// p2p/switch_test.go
-// syncManager
-// notificationMgr???
-// mock chain????
-// mock chain????
+               for _, peer := range m.sw.GetPeers().List() {
+                       p := peers.GetPeer(peer.ID())
+                       if p == nil {
+                               continue
+                       }
 
-// TODO: dial nodes
-// TODO: get lantency
-// TODO: get best_height
-// TODO: decide check_height("best best_height" - "confirmations")
-// TODO: get blockhash by check_height, get latency
-// TODO: update lantency, active_time and status
+                       peers.RemovePeer(p.ID())
+               }
+               log.Info("Disconnect all peers.")
+
+               m.Unlock()
+               m.dialCh <- struct{}{}
+       }
+}