10 "github.com/jinzhu/gorm"
11 log "github.com/sirupsen/logrus"
13 vaporCfg "github.com/vapor/config"
14 "github.com/vapor/crypto/ed25519/chainkd"
15 dbm "github.com/vapor/database/leveldb"
16 "github.com/vapor/event"
17 "github.com/vapor/netsync/chainmgr"
18 "github.com/vapor/netsync/consensusmgr"
19 "github.com/vapor/netsync/peers"
20 "github.com/vapor/p2p"
21 "github.com/vapor/p2p/discover/dht"
22 "github.com/vapor/p2p/discover/mdns"
23 "github.com/vapor/p2p/signlib"
24 "github.com/vapor/test/mock"
25 "github.com/vapor/toolbar/precog/config"
// nodeCfg is the embedded vapor node configuration the monitor runs with
// (populated in NewMonitor: mainnet chain ID, DB path under the user home).
32 nodeCfg *vaporCfg.Config
37 // discvMap maps a node's public key to the node itself
38 discvMap map[string]*dht.Node
// checkStatusCh signals checkStatusRoutine to run one status-check pass
// (buffered with capacity 1 — see NewMonitor).
40 checkStatusCh chan struct{}
// NewMonitor builds a monitor from the precog config and the gorm DB handle.
// It assembles an embedded vapor node config (default base/P2P/federation
// settings, ChainID "mainnet", DB files under the path from makePath),
// creates a fresh p2p private key, and wires a mock chain and tx pool
// (the monitor never validates real blocks — it only probes peers).
44 func NewMonitor(cfg *config.Config, db *gorm.DB) *monitor {
45 dbPath, err := makePath()
50 nodeCfg := &vaporCfg.Config{
51 BaseConfig: vaporCfg.DefaultBaseConfig(),
52 P2P: vaporCfg.DefaultP2PConfig(),
53 Federation: vaporCfg.DefaultFederationConfig(),
55 nodeCfg.DBPath = dbPath
56 nodeCfg.ChainID = "mainnet"
57 privKey, err := signlib.NewPrivKey()
62 chain, txPool, err := mockChainAndPool()
// Fields below initialize the returned monitor value.
68 RWMutex: &sync.RWMutex{},
// NOTE(review): unchecked type assertion — panics if the key is not a
// chainkd.XPrv; presumably signlib.NewPrivKey always returns one. Verify.
72 privKey: privKey.(chainkd.XPrv),
75 discvMap: make(map[string]*dht.Node),
// Both channels are buffered (cap 1) so a signal can be queued without
// blocking the sender when the consumer loop is mid-iteration.
76 dialCh: make(chan struct{}, 1),
77 checkStatusCh: make(chan struct{}, 1),
78 bestHeightSeen: uint64(0),
// makePath returns the monitor's data directory, creating it if needed.
// The directory is "<current user's home>/.precog".
82 func makePath() (string, error) {
83 usr, err := user.Current()
// NOTE(review): string concatenation with "/" — filepath.Join would be
// OS-correct, though this tool plausibly only targets Unix-like systems.
88 dataPath := usr.HomeDir + "/.precog"
89 if err := os.MkdirAll(dataPath, os.ModePerm); err != nil {
// Run starts the monitor: it upserts every configured node into the DB,
// joins their "ip:port" addresses into the embedded node's P2P seed list,
// builds the p2p switch, queues an initial dial signal, and launches the
// discovery / connect / status-check background goroutines.
96 func (m *monitor) Run() {
98 for _, node := range m.cfg.Nodes {
99 seeds = append(seeds, fmt.Sprintf("%s:%d", node.IP, node.Port))
// NOTE(review): &node aliases the loop variable, which (pre-Go 1.22) is
// reused across iterations — safe only if upSertNode does not retain the
// pointer past the call. Confirm against upSertNode's implementation.
100 if err := m.upSertNode(&node); err != nil {
104 m.nodeCfg.P2P.Seeds = strings.Join(seeds, ",")
105 if err := m.makeSwitch(); err != nil {
// Seed the dial loop once so connectNodesRoutine dials immediately.
109 m.dialCh <- struct{}{}
110 go m.discoveryRoutine()
111 go m.connectNodesRoutine()
112 go m.checkStatusRoutine()
// makeSwitch constructs the p2p switch the monitor uses to reach peers:
// a TCP listener from the node's P2P config, DHT-based discovery keyed by
// the monitor's private key, and a (functionally unused) LAN discovery.
115 func (m *monitor) makeSwitch() error {
116 l, listenAddr := p2p.GetListener(m.nodeCfg.P2P)
117 discv, err := dht.NewDiscover(m.nodeCfg, m.privKey, l.ExternalAddress().Port, m.cfg.NetworkID)
122 // LAN discovery is not actually needed here, but p2p.NewSwitch dereferences it internally, so a bare &mdns.LANDiscover{} would cause a nil-pointer panic; construct a real one instead.
123 lanDiscv := mdns.NewLANDiscover(mdns.NewProtocol(), int(l.ExternalAddress().Port))
124 sw, err := p2p.NewSwitch(m.nodeCfg, discv, lanDiscv, l, m.privKey, listenAddr, m.cfg.NetworkID)
// prepareReactors registers the consensus and chain-manager reactors on
// the switch, starts every reactor, then installs the switch's own node
// info and peer set as security filters and starts the security module.
133 func (m *monitor) prepareReactors(peers *peers.PeerSet) error {
134 dispatcher := event.NewDispatcher()
135 // add ConsensusReactor for consensusChannel; the manager handle itself
// is deliberately discarded — registration with the switch is the point.
136 _ = consensusmgr.NewManager(m.sw, m.chain, peers, dispatcher)
137 fastSyncDB := dbm.NewDB("fastsync", m.nodeCfg.DBBackend, m.nodeCfg.DBDir())
138 // add ProtocolReactor to handle msgs
139 _, err := chainmgr.NewManager(m.nodeCfg, m.sw, m.chain, m.txPool, dispatcher, peers, fastSyncDB)
// Start every reactor registered above (and any the switch added itself).
144 for label, reactor := range m.sw.GetReactors() {
145 log.Debugf("start reactor: (%s:%v)", label, reactor)
146 if _, err := reactor.Start(); err != nil {
151 m.sw.GetSecurity().RegisterFilter(m.sw.GetNodeInfo())
152 m.sw.GetSecurity().RegisterFilter(m.sw.GetPeers())
153 return m.sw.GetSecurity().Start()
// checkStatusRoutine is the monitor's status-polling loop. After wiring
// the reactors onto a fresh peer set, each signal on checkStatusCh makes
// it: send a status request to every connected peer, record the best
// height seen, hand the peer infos to processPeerInfos, disconnect all
// peers, and re-arm dialCh so the connect routine dials the next batch.
// Fix vs. original: corrected the misspelled log message
// ("Disonnect" -> "Disconnect"); all other statements are unchanged.
156 func (m *monitor) checkStatusRoutine() {
// NOTE(review): this local shadows the imported peers package for the
// rest of the function — intentional here, but easy to misread.
157 peers := peers.NewPeerSet(m.sw)
158 if err := m.prepareReactors(peers); err != nil {
162 for range m.checkStatusCh {
163 for _, peer := range m.sw.GetPeers().List() {
167 log.Infof("%d connected peers: %v", len(m.sw.GetPeers().List()), m.sw.GetPeers().List())
// Ask each tracked peer for its status; drop peers that fail to respond.
169 for _, peer := range m.sw.GetPeers().List() {
170 p := peers.GetPeer(peer.ID())
175 if err := p.SendStatus(m.chain.BestBlockHeader(), m.chain.LastIrreversibleHeader()); err != nil {
177 peers.RemovePeer(p.ID())
// Track the highest block height reported by any peer this round.
181 for _, peerInfo := range peers.GetPeerInfos() {
182 if peerInfo.Height > m.bestHeightSeen {
183 m.bestHeightSeen = peerInfo.Height
186 log.Info("bestHeight: ", m.bestHeightSeen)
187 m.processPeerInfos(peers.GetPeerInfos())
// Tear down every connection so the next dial cycle starts clean.
189 for _, peer := range m.sw.GetPeers().List() {
190 p := peers.GetPeer(peer.ID())
195 peers.RemovePeer(p.ID())
197 log.Info("Disconnect all peers.")
// Re-arm the dial loop for the next round of connections.
200 m.dialCh <- struct{}{}