10 "github.com/jinzhu/gorm"
11 log "github.com/sirupsen/logrus"
13 vaporCfg "github.com/vapor/config"
14 "github.com/vapor/crypto/ed25519/chainkd"
15 dbm "github.com/vapor/database/leveldb"
16 "github.com/vapor/event"
17 "github.com/vapor/netsync/chainmgr"
18 "github.com/vapor/netsync/consensusmgr"
19 "github.com/vapor/netsync/peers"
20 "github.com/vapor/p2p"
21 "github.com/vapor/p2p/discover/dht"
22 "github.com/vapor/p2p/discover/mdns"
23 "github.com/vapor/p2p/signlib"
24 "github.com/vapor/test/mock"
25 "github.com/vapor/toolbar/precog/config"
// vapor node configuration driving the embedded p2p switch
32 nodeCfg *vaporCfg.Config
37 // discvMap maps a node's public key to the node itself
38 discvMap map[string]*dht.Node
// signals checkStatusRoutine to run one status-probe round
40 checkStatusCh chan struct{}
// NewMonitor constructs the precog monitor: it prepares a vapor node config
// pointed at a dedicated data dir, generates a fresh p2p private key, and
// uses a mocked chain/txpool so no real chain state is required.
// NOTE(review): several original lines are elided in this view; comments
// describe only the code that is visible here.
43 // TODO: set myself as SPV?
44 func NewMonitor(cfg *config.Config, db *gorm.DB) *monitor {
// check frequency is hard-coded to 15s, overriding whatever was loaded
46 cfg.CheckFreqSeconds = 15
48 dbPath, err := makePath()
// start from vapor defaults, then point storage/chain at precog + mainnet
53 nodeCfg := &vaporCfg.Config{
54 BaseConfig: vaporCfg.DefaultBaseConfig(),
55 P2P: vaporCfg.DefaultP2PConfig(),
56 Federation: vaporCfg.DefaultFederationConfig(),
58 nodeCfg.DBPath = dbPath
59 nodeCfg.ChainID = "mainnet"
// fresh p2p identity for this monitor instance
60 privKey, err := signlib.NewPrivKey()
// mocked chain and tx pool — the monitor does not sync a real chain
65 chain, txPool, err := mockChainAndPool()
71 RWMutex: &sync.RWMutex{},
// assumes signlib.NewPrivKey returns a chainkd.XPrv — the assertion panics otherwise
75 privKey: privKey.(chainkd.XPrv),
78 discvMap: make(map[string]*dht.Node),
// buffered (cap 1) so a signal send never blocks when the consumer is idle
79 dialCh: make(chan struct{}, 1),
80 checkStatusCh: make(chan struct{}, 1),
// makePath ensures the per-user precog data directory exists and returns
// its path (used as the vapor node's DBPath).
84 func makePath() (string, error) {
85 usr, err := user.Current()
// NOTE(review): prefer filepath.Join(usr.HomeDir, ".precog") — raw "/"
// concatenation assumes a Unix path separator; confirm target platforms.
90 dataPath := usr.HomeDir + "/.precog"
91 if err := os.MkdirAll(dataPath, os.ModePerm); err != nil {
// Run seeds the p2p switch from the configured node list, persists each
// configured node, then launches the discovery, dial, and status-check
// goroutines. The initial dialCh send primes the first dial round.
98 func (m *monitor) Run() {
100 for _, node := range m.cfg.Nodes {
101 seeds = append(seeds, fmt.Sprintf("%s:%d", node.Host, node.Port))
// NOTE(review): &node takes the address of the loop variable — before
// Go 1.22 every iteration passes the same pointer; confirm upSertNode
// copies the value before the next iteration overwrites it.
102 if err := m.upSertNode(&node); err != nil {
106 m.nodeCfg.P2P.Seeds = strings.Join(seeds, ",")
107 if err := m.makeSwitch(); err != nil {
// kick off the first dial round before the routines start consuming
111 m.dialCh <- struct{}{}
112 go m.discoveryRoutine()
113 go m.connectNodesRoutine()
114 go m.checkStatusRoutine()
// makeSwitch builds the DHT (kademlia) discovery, a LAN (mDNS) discovery,
// and the p2p switch over the configured listener. The switch is stored on
// the monitor (assignment not visible in this view).
117 func (m *monitor) makeSwitch() error {
118 l, listenAddr := p2p.GetListener(m.nodeCfg.P2P)
119 discv, err := dht.NewDiscover(m.nodeCfg, m.privKey, l.ExternalAddress().Port, m.cfg.NetworkID)
124 // lanDiscv itself is not needed by the monitor, but passing a bare &mdns.LANDiscover{} would cause a nil-pointer dereference inside the switch, so build a real one
125 lanDiscv := mdns.NewLANDiscover(mdns.NewProtocol(), int(l.ExternalAddress().Port))
126 sw, err := p2p.NewSwitch(m.nodeCfg, discv, lanDiscv, l, m.privKey, listenAddr, m.cfg.NetworkID)
// prepareReactors registers the consensus and chain-manager reactors on the
// switch, starts every registered reactor, then installs the security
// filters and starts the security layer.
135 func (m *monitor) prepareReactors(peers *peers.PeerSet) error {
136 dispatcher := event.NewDispatcher()
137 // add ConsensusReactor for consensusChannel
138 _ = consensusmgr.NewManager(m.sw, m.chain, peers, dispatcher)
139 fastSyncDB := dbm.NewDB("fastsync", m.nodeCfg.DBBackend, m.nodeCfg.DBDir())
140 // add ProtocolReactor to handle msgs
141 _, err := chainmgr.NewManager(m.nodeCfg, m.sw, m.chain, m.txPool, dispatcher, peers, fastSyncDB)
// start every reactor registered on the switch
146 for label, reactor := range m.sw.GetReactors() {
147 log.Debugf("start reactor: (%s:%v)", label, reactor)
148 if _, err := reactor.Start(); err != nil {
// NOTE(review): presumably these filters exclude our own node and
// already-connected peers from dialing — confirm against the p2p security API
153 m.sw.GetSecurity().RegisterFilter(m.sw.GetNodeInfo())
154 m.sw.GetSecurity().RegisterFilter(m.sw.GetPeers())
155 return m.sw.GetSecurity().Start()
// checkStatusRoutine drives the periodic health check. After wiring the
// reactors onto a fresh PeerSet, each receive on checkStatusCh triggers one
// round: exchange status with every connected peer, record the best height
// seen so far, process peer infos, then disconnect all peers and signal the
// next dial round via dialCh.
// NOTE(review): several original lines are elided in this view; comments
// describe only the code that is visible here.
158 func (m *monitor) checkStatusRoutine() {
159 peers := peers.NewPeerSet(m.sw)
160 if err := m.prepareReactors(peers); err != nil {
// bestHeight persists across rounds: it is the max height ever reported
164 bestHeight := uint64(0)
165 for range m.checkStatusCh {
166 for _, peer := range m.sw.GetPeers().List() {
170 log.Infof("%d connected peers: %v", len(m.sw.GetPeers().List()), m.sw.GetPeers().List())
// send our status to every connected peer; drop peers that fail
172 for _, peer := range m.sw.GetPeers().List() {
173 p := peers.GetPeer(peer.ID())
178 if err := p.SendStatus(m.chain.BestBlockHeader(), m.chain.LastIrreversibleHeader()); err != nil {
180 peers.RemovePeer(p.ID())
// track the highest block height reported by any peer
184 for _, peerInfo := range peers.GetPeerInfos() {
185 if peerInfo.Height > bestHeight {
186 bestHeight = peerInfo.Height
189 log.Info("bestHeight: ", bestHeight)
190 m.processPeerInfos(peers.GetPeerInfos())
// drop every peer so the next dial round starts from a clean slate
192 for _, peer := range m.sw.GetPeers().List() {
193 p := peers.GetPeer(peer.ID())
198 peers.RemovePeer(p.ID())
// fixed typo in log message: "Disonnect" -> "Disconnect"
200 log.Info("Disconnect all peers.")
202 m.dialCh <- struct{}{}