14 "github.com/jinzhu/gorm"
15 log "github.com/sirupsen/logrus"
16 // dbm "github.com/vapor/database/leveldb"
18 vaporCfg "github.com/vapor/config"
19 "github.com/vapor/consensus"
20 "github.com/vapor/crypto/ed25519/chainkd"
21 dbm "github.com/vapor/database/leveldb"
22 "github.com/vapor/event"
23 "github.com/vapor/p2p"
24 "github.com/vapor/protocol/bc/types"
25 // conn "github.com/vapor/p2p/connection"
26 "github.com/vapor/netsync/peers"
27 // "github.com/vapor/consensus"
28 // "github.com/vapor/crypto/sha3pool"
29 // "github.com/vapor/netsync/consensusmgr"
30 "github.com/vapor/netsync/chainmgr"
31 "github.com/vapor/p2p/discover/dht"
32 "github.com/vapor/p2p/discover/mdns"
33 "github.com/vapor/p2p/signlib"
34 "github.com/vapor/test/mock"
35 "github.com/vapor/toolbar/precog/config"
36 "github.com/vapor/toolbar/precog/database/orm"

// monitor drives a throwaway vapor node that dials the configured and
// discovered peers and records their status in the database.
type monitor struct {
	cfg     *config.Config
	db      *gorm.DB
	nodeCfg *vaporCfg.Config
	sw      *p2p.Switch
	discvCh chan *dht.Node
	privKey chainkd.XPrv
}

// TODO: set the service flag (SF) myself?
func NewMonitor(cfg *config.Config, db *gorm.DB) *monitor {
	// force a 1s check interval; the configured value is ignored for now
	cfg.CheckFreqSeconds = 1

	tmpDir, err := ioutil.TempDir(".", "vpPrecog")
	if err != nil {
		log.Fatalf("failed to create temporary data folder: %v", err)
	}

	nodeCfg := &vaporCfg.Config{
		BaseConfig: vaporCfg.DefaultBaseConfig(),
		P2P:        vaporCfg.DefaultP2PConfig(),
		Federation: vaporCfg.DefaultFederationConfig(),
	}
	nodeCfg.DBPath = tmpDir
	nodeCfg.ChainID = "mainnet"
	discvCh := make(chan *dht.Node)
	privKey, err := signlib.NewPrivKey()
	if err != nil {
		log.Fatal(err)
	}

	return &monitor{
		cfg:     cfg,
		db:      db,
		nodeCfg: nodeCfg,
		discvCh: discvCh,
		privKey: privKey.(chainkd.XPrv),
	}
}

// Run seeds the switch with the configured nodes, boots it, and kicks off
// the four worker goroutines: discovery, node collection, dialing, and
// status checking.
func (m *monitor) Run() {
	defer os.RemoveAll(m.nodeCfg.DBPath)

	var seeds []string
	for _, node := range m.cfg.Nodes {
		seeds = append(seeds, fmt.Sprintf("%s:%d", node.Host, node.Port))
		if err := m.upSertNode(&node); err != nil {
			log.Error(err)
		}
	}
	m.nodeCfg.P2P.Seeds = strings.Join(seeds, ",")
	if err := m.makeSwitch(); err != nil {
		log.Fatal(err)
	}

	go m.discoveryRoutine()
	go m.collectDiscoveredNodes()
	go m.connectNodesRoutine()
	go m.checkStatusRoutine()
}
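
// Presumed usage from the toolbar entry point (not shown in this excerpt):
//
//	monitor.NewMonitor(cfg, db).Run()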

// upSertNode creates the node row if it is missing, or updates it in place:
// https://github.com/jinzhu/gorm/issues/1307
func (m *monitor) upSertNode(node *config.Node) error {
	if node.XPub != nil {
		node.PublicKey = node.XPub.PublicKey().String()
	}

	ormNode := &orm.Node{PublicKey: node.PublicKey}
	if err := m.db.Where(&orm.Node{PublicKey: node.PublicKey}).First(ormNode).Error; err != nil && err != gorm.ErrRecordNotFound {
		return err
	}

	if node.Alias != "" {
		ormNode.Alias = node.Alias
	}
	if node.XPub != nil {
		ormNode.Xpub = node.XPub.String()
	}
	ormNode.Host = node.Host
	ormNode.Port = node.Port
	return m.db.Where(&orm.Node{PublicKey: ormNode.PublicKey}).
		Assign(&orm.Node{
			Xpub:  ormNode.Xpub,
			Alias: ormNode.Alias,
			Host:  ormNode.Host,
			Port:  ormNode.Port,
		}).FirstOrCreate(ormNode).Error
}
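
// For reference: Where(...).Assign(...).FirstOrCreate(...) is gorm's
// create-or-update idiom. It selects by public key, inserts the row if
// nothing matches, and otherwise applies the Assign values as an update,
// so xpub, alias, host and port always reflect the latest config/discovery.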

// makeSwitch assembles the p2p switch with kademlia (dht) discovery enabled.
func (m *monitor) makeSwitch() error {
	l, listenAddr := p2p.GetListener(m.nodeCfg.P2P)
	discv, err := dht.NewDiscover(m.nodeCfg, m.privKey, l.ExternalAddress().Port, m.cfg.NetworkID)
	if err != nil {
		return err
	}

	// no need for LAN discovery here, but passing a bare &mdns.LANDiscover{}
	// would cause a nil-pointer panic inside the switch
	lanDiscv := mdns.NewLANDiscover(mdns.NewProtocol(), int(l.ExternalAddress().Port))
	sw, err := p2p.NewSwitch(m.nodeCfg, discv, lanDiscv, l, m.privKey, listenAddr, m.cfg.NetworkID)
	if err != nil {
		return err
	}

	m.sw = sw
	return nil
}

// discoveryRoutine periodically reads random nodes out of the dht discovery
// table and feeds them to collectDiscoveredNodes.
func (m *monitor) discoveryRoutine() {
	ticker := time.NewTicker(time.Duration(discvFreqSec) * time.Second)
	for range ticker.C {
		nodes := make([]*dht.Node, nodesToDiscv)
		n := m.sw.GetDiscv().ReadRandomNodes(nodes)
		for i := 0; i < n; i++ {
			m.discvCh <- nodes[i]
		}
	}
}
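
// Note: ReadRandomNodes fills the slice with a random snapshot of the
// discovery table and returns the number of entries written, which is why
// only the first n elements are sent on the channel.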

// collectDiscoveredNodes upserts every newly discovered node into the
// database, deduplicating by public key.
func (m *monitor) collectDiscoveredNodes() {
	// nodeMap maps a node's public key to the node itself
	nodeMap := make(map[string]*dht.Node)
	for node := range m.discvCh {
		if n, ok := nodeMap[node.ID.String()]; ok && n.String() == node.String() {
			continue
		}
		log.Infof("discovered new node: %v", node)

		if err := m.upSertNode(&config.Node{
			PublicKey: node.ID.String(),
			Host:      node.IP.String(),
			Port:      node.TCP,
		}); err != nil {
			log.Error(err)
			continue
		}

		nodeMap[node.ID.String()] = node
	}
}

// TODO: rename? it dials rather than just connects
// connectNodesRoutine dials every known node once per check interval.
func (m *monitor) connectNodesRoutine() {
	ticker := time.NewTicker(time.Duration(m.cfg.CheckFreqSeconds) * time.Second)
	// the loop body runs once immediately, then once per tick
	for ; true; <-ticker.C {
		if err := m.dialNodes(); err != nil {
			log.Error(err)
		}
	}
}

// dialNodes resolves every node stored in the database and asks the switch
// to dial them.
func (m *monitor) dialNodes() error {
	var nodes []*orm.Node
	if err := m.db.Model(&orm.Node{}).Find(&nodes).Error; err != nil {
		return err
	}

	addresses := make([]*p2p.NetAddress, 0)
	for i := 0; i < len(nodes); i++ {
		ips, err := net.LookupIP(nodes[i].Host)
		if err != nil {
			log.Error(err)
			continue
		}
		if len(ips) == 0 {
			log.Errorf("failed to look up IP for %s", nodes[i].Host)
			continue
		}

		address := p2p.NewNetAddressIPPort(ips[0], nodes[i].Port)
		addresses = append(addresses, address)
	}

	m.sw.DialPeers(addresses)
	return nil
}

// getGenesisBlock decodes the hard-coded, hex-serialized mainnet genesis
// block, which seeds the mock chain in checkStatusRoutine.
func (m *monitor) getGenesisBlock() (*types.Block, error) {
	genesisBlock := &types.Block{}
	if err := genesisBlock.UnmarshalText([]byte("030100000000000000000000000000000000000000000000000000000000000000000082bfe3f4bf2d4052415e796436f587fac94677b20f027e910b70e2c220c411c0e87c37e0e1cc2ec9c377e5192668bc0a367e4a4764f11e7c725ecced1d7b6a492974fab1b6d5bc01000107010001012402220020f86826d640810eb08a2bfb706e0092273e05e9a7d3d71f9d53f4f6cc2e3d6c6a0001013b0039ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00011600148c9d063ff74ee6d9ffa88d83aeb038068366c4c400")); err != nil {
		return nil, err
	}

	return genesisBlock, nil
}

// checkStatusRoutine runs a chain manager on top of a mock chain that only
// ever holds the genesis block, then keeps polling the connected peers.
func (m *monitor) checkStatusRoutine() {
	// TODO: wire up a consensusmgr.Manager as well?
	txPool := &mock.Mempool{}
	mockChain := mock.NewChain(txPool)
	dispatcher := event.NewDispatcher()
	peerSet := peers.NewPeerSet(m.sw)
	fastSyncDB := dbm.NewDB("fastsync", m.nodeCfg.DBBackend, m.nodeCfg.DBDir())
	chainMgr, err := chainmgr.NewManager(m.nodeCfg, m.sw, mockChain, txPool, dispatcher, peerSet, fastSyncDB)
	if err != nil {
		log.Fatal(err)
	}

	genesisBlock, err := m.getGenesisBlock()
	if err != nil {
		log.Fatal(err)
	}
	mockChain.SetBlockByHeight(genesisBlock.BlockHeader.Height, genesisBlock)
	mockChain.SetBestBlockHeader(&genesisBlock.BlockHeader)
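
	// With the mock chain pinned at genesis, every real peer always reports
	// a higher best height, so status traffic keeps flowing without the
	// monitor ever syncing blocks.
	// The chain manager start-up is elided in this excerpt; the call below
	// is an assumed reconstruction (chainmgr.Manager is a service with a
	// Start method).
	if err := chainMgr.Start(); err != nil {
		log.Fatal(err)
	}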

	ticker := time.NewTicker(time.Duration(m.cfg.CheckFreqSeconds) * time.Second)
	for ; true; <-ticker.C {
		for _, reactor := range m.sw.GetReactors() {
			for _, peer := range m.sw.GetPeers().List() {
				log.Debug("AddPeer for ", reactor, peer)
				// TODO: only AddPeer if it is not in the switch yet
				reactor.AddPeer(peer)
			}
		}

		log.Debug("best peer: ", peerSet.BestPeer(consensus.SFFullNode))
		for _, peerInfo := range peerSet.GetPeerInfos() {
			log.Debugf("%+v", peerInfo)
		}
	}
}

// implement the logic first, then refactor
// TODO: measure latency
// TODO: get best_height
// TODO: decide check_height ("best best_height" - "confirmations")
// TODO: get the block hash at check_height, measure the latency
// TODO: update latency, active_time and status
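
// A minimal sketch of the check the TODOs above describe. Everything in it
// is assumed for illustration and not part of the existing code: the
// confirmations constant, the orm column names, and the status value.
func (m *monitor) checkNodeStatus(node *orm.Node, bestHeight uint64) {
	const confirmations = 6 // assumed; should come from config

	// assumed step: ask the peer for the block hash at check_height and
	// time the round trip; only the elapsed time is recorded here
	start := time.Now()
	var checkHeight uint64
	if bestHeight > confirmations {
		checkHeight = bestHeight - confirmations
	}
	_ = checkHeight
	latency := time.Since(start)

	// assumed columns on the nodes table: best_height, latency_ms,
	// active_time, status
	if err := m.db.Model(node).Updates(map[string]interface{}{
		"best_height": bestHeight,
		"latency_ms":  latency.Milliseconds(),
		"active_time": time.Now(),
		"status":      "active",
	}).Error; err != nil {
		log.Error(err)
	}
}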