13 "github.com/jinzhu/gorm"
14 log "github.com/sirupsen/logrus"
15 // dbm "github.com/vapor/database/leveldb"
17 vaporCfg "github.com/vapor/config"
18 "github.com/vapor/consensus"
19 "github.com/vapor/crypto/ed25519/chainkd"
20 dbm "github.com/vapor/database/leveldb"
21 "github.com/vapor/event"
22 "github.com/vapor/p2p"
23 // "github.com/vapor/protocol/bc/types"
24 // conn "github.com/vapor/p2p/connection"
25 "github.com/vapor/netsync/peers"
26 // "github.com/vapor/consensus"
27 // "github.com/vapor/crypto/sha3pool"
28 // "github.com/vapor/netsync/consensusmgr"
29 "github.com/vapor/netsync/chainmgr"
30 "github.com/vapor/p2p/discover/dht"
31 "github.com/vapor/p2p/discover/mdns"
32 "github.com/vapor/p2p/signlib"
33 "github.com/vapor/test/mock"
34 "github.com/vapor/toolbar/precog/config"
35 "github.com/vapor/toolbar/precog/database/orm"
// NOTE(review): fields of the `monitor` struct — the struct header and the
// remaining fields (db, sw, privKey, cfg are referenced by the methods below)
// are elided from this view; confirm against the full file.
46 nodeCfg *vaporCfg.Config
// channel on which discovered DHT nodes are handed from discoveryRoutine
// to collectDiscoveredNodes
48 discvCh chan *dht.Node
// NewMonitor builds a precog monitor from the toolbar config and a gorm DB
// handle. NOTE(review): several lines are elided from this view (the closing
// of the vaporCfg.Config literal, the error handling for NewPrivKey, and the
// remaining fields of the returned monitor literal) — comments describe only
// what is visible.
52 // TODO: set SF myself?
53 func NewMonitor(cfg *config.Config, db *gorm.DB) *monitor {
// force a fixed 1s check frequency, overriding whatever the caller supplied
55 cfg.CheckFreqSeconds = 1
// build a throwaway vapor node config from library defaults
57 nodeCfg := &vaporCfg.Config{
58 BaseConfig: vaporCfg.DefaultBaseConfig(),
59 P2P: vaporCfg.DefaultP2PConfig(),
60 Federation: vaporCfg.DefaultFederationConfig(),
// dedicated scratch DB path; Run() removes it on exit (see defer there)
62 nodeCfg.DBPath = "vapor_precog_data"
63 nodeCfg.ChainID = "mainnet"
// unbuffered: discoveryRoutine blocks until collectDiscoveredNodes receives
64 discvCh := make(chan *dht.Node)
65 privKey, err := signlib.NewPrivKey()
// NOTE(review): err must be checked in the elided lines before this assertion;
// the type assertion to chainkd.XPrv will panic if the key is another type —
// confirm the concrete type signlib.NewPrivKey returns.
75 privKey: privKey.(chainkd.XPrv),
// Run seeds the P2P switch from the configured nodes, then starts the four
// worker goroutines. NOTE(review): error-handling bodies and the declaration
// of `seeds` are elided from this view.
79 func (m *monitor) Run() {
// scratch chain DB is deleted when Run returns
80 defer os.RemoveAll(m.nodeCfg.DBPath)
83 for _, node := range m.cfg.Nodes {
84 seeds = append(seeds, fmt.Sprintf("%s:%d", node.Host, node.Port))
// NOTE(review): &node takes the address of the loop variable — on Go < 1.22
// every iteration aliases the same variable; safe only because upSertNode is
// called synchronously within the iteration. Confirm toolchain version.
85 if err := m.upSertNode(&node); err != nil {
// hand the comma-joined seed list to the node config before building the switch
89 m.nodeCfg.P2P.Seeds = strings.Join(seeds, ",")
90 if err := m.makeSwitch(); err != nil {
// NOTE(review): these goroutines have no stop signal or WaitGroup visible in
// this view — their lifetime appears tied to the process; verify.
94 go m.discoveryRoutine()
95 go m.collectDiscoveredNodes()
96 go m.connectNodesRoutine()
97 go m.checkStatusRoutine()
100 // create or update: https://github.com/jinzhu/gorm/issues/1307
// upSertNode inserts the node keyed by PublicKey, or updates its mutable
// columns (Alias, Xpub, Host, Port) if a row already exists. NOTE(review):
// the Assign(...) call and some orm.Node fields in the update literal are in
// elided lines.
101 func (m *monitor) upSertNode(node *config.Node) error {
// derive PublicKey from XPub when one is configured
102 if node.XPub != nil {
// NOTE(review): fmt.Sprintf("%v", ...String()) is redundant — String()
// already yields the string; cosmetic only, left as-is in this doc pass.
103 node.PublicKey = fmt.Sprintf("%v", node.XPub.PublicKey().String())
106 ormNode := &orm.Node{PublicKey: node.PublicKey}
// load the existing row if any; ErrRecordNotFound is expected and tolerated
107 if err := m.db.Where(&orm.Node{PublicKey: node.PublicKey}).First(ormNode).Error; err != nil && err != gorm.ErrRecordNotFound {
// only overwrite Alias/Xpub when the incoming config actually provides them
111 if node.Alias != "" {
112 ormNode.Alias = node.Alias
114 if node.XPub != nil {
115 ormNode.Xpub = node.XPub.String()
117 ormNode.Host = node.Host
118 ormNode.Port = node.Port
// FirstOrCreate keyed on PublicKey implements the create-or-update from the
// gorm issue linked above
119 return m.db.Where(&orm.Node{PublicKey: ormNode.PublicKey}).
122 Alias: ormNode.Alias,
125 }).FirstOrCreate(ormNode).Error
// makeSwitch builds the P2P listener, DHT discovery, LAN discovery, and the
// switch itself, storing the switch on m (assignment is in elided lines).
128 func (m *monitor) makeSwitch() error {
129 l, listenAddr := p2p.GetListener(m.nodeCfg.P2P)
130 discv, err := dht.NewDiscover(m.nodeCfg, m.privKey, l.ExternalAddress().Port, m.cfg.NetworkID)
// a real LANDiscover instance is required even though LAN discovery is unused
// here — a zero-value struct would nil-pointer inside the switch
135 // no need for lanDiscv, but passing &mdns.LANDiscover{} will cause NilPointer
136 lanDiscv := mdns.NewLANDiscover(mdns.NewProtocol(), int(l.ExternalAddress().Port))
137 sw, err := p2p.NewSwitch(m.nodeCfg, discv, lanDiscv, l, m.privKey, listenAddr, m.cfg.NetworkID)
// discoveryRoutine periodically samples random nodes from the switch's DHT
// table and forwards them to discvCh. NOTE(review): the ticker-driven loop
// header is in elided lines; presumably the body below runs once per tick.
146 func (m *monitor) discoveryRoutine() {
147 ticker := time.NewTicker(time.Duration(discvFreqSec) * time.Second)
149 nodes := make([]*dht.Node, nodesToDiscv)
// ReadRandomNodes fills the slice and returns how many entries are valid
150 n := m.sw.GetDiscv().ReadRandomNodes(nodes)
151 for i := 0; i < n; i++ {
// blocks until collectDiscoveredNodes receives (discvCh is unbuffered)
152 m.discvCh <- nodes[i]
// collectDiscoveredNodes drains discvCh, upserting each newly seen (or
// changed) node into the DB and caching it by public key to suppress
// duplicates. Runs until discvCh is closed (no close is visible in this view).
157 func (m *monitor) collectDiscoveredNodes() {
158 // nodeMap maps a node's public key to the node itself
159 nodeMap := make(map[string]*dht.Node)
160 for node := range m.discvCh {
// skip nodes we have already recorded with an identical address string;
// a changed String() (e.g. new IP/port) falls through and re-upserts
161 if n, ok := nodeMap[node.ID.String()]; ok && n.String() == node.String() {
164 log.Info("discover new node: ", node)
166 if err := m.upSertNode(&config.Node{
167 PublicKey: node.ID.String(),
168 Host: node.IP.String(),
// remember the node so the dedupe check above short-circuits next time
174 nodeMap[node.ID.String()] = node
// connectNodesRoutine dials all known nodes immediately and then once per
// CheckFreqSeconds tick. The `for ; true; <-ticker.C` shape makes the first
// iteration run without waiting for the first tick.
178 func (m *monitor) connectNodesRoutine() {
179 // TODO: change name?
180 ticker := time.NewTicker(time.Duration(m.cfg.CheckFreqSeconds) * time.Second)
181 for ; true; <-ticker.C {
182 if err := m.dialNodes(); err != nil {
// dialNodes loads every node row from the DB, resolves each host to an IP,
// and asks the switch to dial the resulting addresses. NOTE(review): the
// handling between the LookupIP error check and the log line is elided.
188 func (m *monitor) dialNodes() error {
189 var nodes []*orm.Node
190 if err := m.db.Model(&orm.Node{}).Find(&nodes).Error; err != nil {
194 addresses := make([]*p2p.NetAddress, 0)
195 for i := 0; i < len(nodes); i++ {
196 ips, err := net.LookupIP(nodes[i].Host)
202 log.Errorf("fail to look up ip for %s", nodes[i].Host)
// NOTE(review): uses the first resolved IP unconditionally; for dual-stack
// hosts this may pick an IPv6 address — confirm that is acceptable here.
206 address := p2p.NewNetAddressIPPort(ips[0], nodes[i].Port)
207 addresses = append(addresses, address)
210 m.sw.DialPeers(addresses)
// checkStatusRoutine wires a mock chain manager onto the switch so peer
// status can be observed, then polls the switch's reactors and peers every
// CheckFreqSeconds. NOTE(review): this function is heavily elided in this
// view (error handling, reactor start calls, loop bodies) — comments are
// limited to the visible lines.
214 func (m *monitor) checkStatusRoutine() {
215 peers := peers.NewPeerSet(m.sw)
216 dispatcher := event.NewDispatcher()
// earlier experiments with a consensus manager, kept for reference
219 // consensusMgr := consensusmgr.NewManager(sw, chain, peers, dispatcher)
220 // consensusMgr := consensusmgr.NewManager(m.sw, nil, peers, dispatcher)
221 // consensusMgr.Start()
224 // chainMgr, err := chainmgr.NewManager(m.nodeCfg, m.sw, chain, txPool, dispatcher, peers, fastSyncDB)
// mock mempool/chain stand in for a real node; only peer plumbing is needed
225 txPool := &mock.Mempool{}
// NOTE(review): no Close() for fastSyncDB is visible in this view — possible
// resource leak; confirm in the elided lines.
226 fastSyncDB := dbm.NewDB("fastsync", m.nodeCfg.DBBackend, m.nodeCfg.DBDir())
227 chainMgr, err := chainmgr.NewManager(m.nodeCfg, m.sw, mock.NewChain(txPool), txPool, dispatcher, peers, fastSyncDB)
234 for k, v := range m.sw.GetReactors() {
235 log.Debug("start", k, ",", v)
// first poll runs immediately; subsequent polls wait on the ticker
238 ticker := time.NewTicker(time.Duration(m.cfg.CheckFreqSeconds) * time.Second)
239 for ; true; <-ticker.C {
240 for _, v := range m.sw.GetReactors() {
241 for _, peer := range m.sw.GetPeers().List() {
242 log.Debug("AddPeer for", v, peer)
243 // TODO: if not in sw
249 log.Debug("best", peers.BestPeer(consensus.SFFullNode))
250 for _, peerInfo := range peers.GetPeerInfos() {
257 // implement logic first, and then refactor
258 // /home/gavin/work/go/src/github.com/vapor/
260 // p2p/switch_test.go
262 // notificationMgr???
267 // TODO: get latency
268 // TODO: get best_height
269 // TODO: decide check_height("best best_height" - "confirmations")
270 // TODO: get blockhash by check_height, get latency
271 // TODO: update latency, active_time and status