package monitor
import (
- // "encoding/binary"
- // "encoding/hex"
- // "io/ioutil"
"fmt"
- "net"
"os"
+ "os/user"
"strings"
- "time"
+ "sync"
"github.com/jinzhu/gorm"
log "github.com/sirupsen/logrus"
- // dbm "github.com/vapor/database/leveldb"
vaporCfg "github.com/vapor/config"
"github.com/vapor/crypto/ed25519/chainkd"
+ dbm "github.com/vapor/database/leveldb"
+ "github.com/vapor/event"
+ "github.com/vapor/netsync/chainmgr"
+ "github.com/vapor/netsync/consensusmgr"
+ "github.com/vapor/netsync/peers"
"github.com/vapor/p2p"
- // conn "github.com/vapor/p2p/connection"
- // "github.com/vapor/consensus"
- // "github.com/vapor/crypto/sha3pool"
"github.com/vapor/p2p/discover/dht"
"github.com/vapor/p2p/discover/mdns"
"github.com/vapor/p2p/signlib"
+ "github.com/vapor/test/mock"
"github.com/vapor/toolbar/precog/config"
- "github.com/vapor/toolbar/precog/database/orm"
-)
-
-var (
- nodesToDiscv = 150
- discvFreqSec = 1
)
type monitor struct {
+ *sync.RWMutex
cfg *config.Config
db *gorm.DB
nodeCfg *vaporCfg.Config
sw *p2p.Switch
- discvCh chan *dht.Node
privKey chainkd.XPrv
+ chain *mock.Chain
+ txPool *mock.Mempool
+ // discvMap maps a node's public key to the node itself
+ discvMap map[string]*dht.Node
+ dialCh chan struct{}
+ checkStatusCh chan struct{}
}
+// TODO: set myself as SPV?
func NewMonitor(cfg *config.Config, db *gorm.DB) *monitor {
//TODO: for test
- cfg.CheckFreqSeconds = 1
+ cfg.CheckFreqSeconds = 15
+
+ dbPath, err := makePath()
+ if err != nil {
+ log.Fatal(err)
+ }
nodeCfg := &vaporCfg.Config{
BaseConfig: vaporCfg.DefaultBaseConfig(),
P2P: vaporCfg.DefaultP2PConfig(),
Federation: vaporCfg.DefaultFederationConfig(),
}
- nodeCfg.DBPath = "vapor_precog_data"
+ nodeCfg.DBPath = dbPath
nodeCfg.ChainID = "mainnet"
- discvCh := make(chan *dht.Node)
privKey, err := signlib.NewPrivKey()
if err != nil {
log.Fatal(err)
}
+ chain, txPool, err := mockChainAndPool()
+ if err != nil {
+ log.Fatal(err)
+ }
+
return &monitor{
- cfg: cfg,
- db: db,
- nodeCfg: nodeCfg,
- discvCh: discvCh,
- privKey: privKey.(chainkd.XPrv),
+ RWMutex: &sync.RWMutex{},
+ cfg: cfg,
+ db: db,
+ nodeCfg: nodeCfg,
+ privKey: privKey.(chainkd.XPrv),
+ chain: chain,
+ txPool: txPool,
+ discvMap: make(map[string]*dht.Node),
+ dialCh: make(chan struct{}, 1),
+ checkStatusCh: make(chan struct{}, 1),
}
}
-func (m *monitor) Run() {
- defer os.RemoveAll(m.nodeCfg.DBPath)
-
- var seeds []string
- for _, node := range m.cfg.Nodes {
- seeds = append(seeds, fmt.Sprintf("%s:%d", node.Host, node.Port))
- m.upSertNode(&node)
+func makePath() (string, error) {
+ usr, err := user.Current()
+ if err != nil {
+ return "", err
}
- m.nodeCfg.P2P.Seeds = strings.Join(seeds, ",")
- m.makeSwitch()
- go m.discoveryRoutine()
- go m.collectDiscv()
-
- ticker := time.NewTicker(time.Duration(m.cfg.CheckFreqSeconds) * time.Second)
- for ; true; <-ticker.C {
- // TODO: lock?
- m.monitorRountine()
+ dataPath := usr.HomeDir + "/.precog"
+ if err := os.MkdirAll(dataPath, os.ModePerm); err != nil {
+ return "", err
}
+
+ return dataPath, nil
}
-// create or update: https://github.com/jinzhu/gorm/issues/1307
-func (m *monitor) upSertNode(node *config.Node) error {
- if node.XPub != nil {
- node.PublicKey = fmt.Sprintf("%v", node.XPub.PublicKey().String())
+func (m *monitor) Run() {
+ var seeds []string
+ for _, node := range m.cfg.Nodes {
+ seeds = append(seeds, fmt.Sprintf("%s:%d", node.Host, node.Port))
+ if err := m.upSertNode(&node); err != nil {
+ log.Error(err)
+ }
}
-
- ormNode := &orm.Node{PublicKey: node.PublicKey}
- if err := m.db.Where(&orm.Node{PublicKey: node.PublicKey}).First(ormNode).Error; err != nil && err != gorm.ErrRecordNotFound {
- return err
+ m.nodeCfg.P2P.Seeds = strings.Join(seeds, ",")
+ if err := m.makeSwitch(); err != nil {
+ log.Fatal(err)
}
- if node.Alias != "" {
- ormNode.Alias = node.Alias
- }
- if node.XPub != nil {
- ormNode.Xpub = node.XPub.String()
- }
- ormNode.Host = node.Host
- ormNode.Port = node.Port
- return m.db.Where(&orm.Node{PublicKey: ormNode.PublicKey}).
- Assign(&orm.Node{
- Xpub: ormNode.Xpub,
- Alias: ormNode.Alias,
- Host: ormNode.Host,
- Port: ormNode.Port,
- }).FirstOrCreate(ormNode).Error
+ m.dialCh <- struct{}{}
+ go m.discoveryRoutine()
+ go m.connectNodesRoutine()
+ go m.checkStatusRoutine()
}
-func (m *monitor) makeSwitch() {
+func (m *monitor) makeSwitch() error {
l, listenAddr := p2p.GetListener(m.nodeCfg.P2P)
discv, err := dht.NewDiscover(m.nodeCfg, m.privKey, l.ExternalAddress().Port, m.cfg.NetworkID)
if err != nil {
- log.Fatal(err)
+ return err
}
// no need for lanDiscv, but passing &mdns.LANDiscover{} will cause NilPointer
lanDiscv := mdns.NewLANDiscover(mdns.NewProtocol(), int(l.ExternalAddress().Port))
sw, err := p2p.NewSwitch(m.nodeCfg, discv, lanDiscv, l, m.privKey, listenAddr, m.cfg.NetworkID)
if err != nil {
- log.Fatal(err)
+ return err
}
m.sw = sw
+ return nil
}
-func (m *monitor) discoveryRoutine() {
- ticker := time.NewTicker(time.Duration(discvFreqSec) * time.Second)
- for range ticker.C {
- nodes := make([]*dht.Node, nodesToDiscv)
- n := m.sw.GetDiscv().ReadRandomNodes(nodes)
- for i := 0; i < n; i++ {
- m.discvCh <- nodes[i]
- }
+func (m *monitor) prepareReactors(peers *peers.PeerSet) error {
+ dispatcher := event.NewDispatcher()
+ // add ConsensusReactor for consensusChannel
+ _ = consensusmgr.NewManager(m.sw, m.chain, peers, dispatcher)
+ fastSyncDB := dbm.NewDB("fastsync", m.nodeCfg.DBBackend, m.nodeCfg.DBDir())
+ // add ProtocolReactor to handle msgs
+ _, err := chainmgr.NewManager(m.nodeCfg, m.sw, m.chain, m.txPool, dispatcher, peers, fastSyncDB)
+ if err != nil {
+ return err
}
-}
-func (m *monitor) collectDiscv() {
- // nodeMap maps a node's public key to the node itself
- nodeMap := make(map[string]*dht.Node)
- for node := range m.discvCh {
- if n, ok := nodeMap[node.ID.String()]; ok && n.String() == node.String() {
- continue
+ for label, reactor := range m.sw.GetReactors() {
+ log.Debugf("start reactor: (%s:%v)", label, reactor)
+ if _, err := reactor.Start(); err != nil {
+ return err
}
- log.Info("discover new node: ", node)
-
- m.upSertNode(&config.Node{
- PublicKey: node.ID.String(),
- Host: node.IP.String(),
- Port: node.TCP,
- })
- nodeMap[node.ID.String()] = node
}
+
+ m.sw.GetSecurity().RegisterFilter(m.sw.GetNodeInfo())
+ m.sw.GetSecurity().RegisterFilter(m.sw.GetPeers())
+ return m.sw.GetSecurity().Start()
}
-func (m *monitor) monitorRountine() error {
- var nodes []*orm.Node
- if err := m.db.Model(&orm.Node{}).Find(&nodes).Error; err != nil {
- return err
+func (m *monitor) checkStatusRoutine() {
+ peers := peers.NewPeerSet(m.sw)
+ if err := m.prepareReactors(peers); err != nil {
+ log.Fatal(err)
}
- addresses := make([]*p2p.NetAddress, 0)
- for i := 0; i < len(nodes); i++ {
- ips, err := net.LookupIP(nodes[i].Host)
- if err != nil || len(ips) == 0 {
- continue
+ bestHeight := uint64(0)
+ for range m.checkStatusCh {
+ for _, peer := range m.sw.GetPeers().List() {
+ peer.Start()
+ peers.AddPeer(peer)
+ }
+ log.Infof("%d connected peers: %v", len(m.sw.GetPeers().List()), m.sw.GetPeers().List())
+
+ for _, peer := range m.sw.GetPeers().List() {
+ p := peers.GetPeer(peer.ID())
+ if p == nil {
+ continue
+ }
+
+ if err := p.SendStatus(m.chain.BestBlockHeader(), m.chain.LastIrreversibleHeader()); err != nil {
+ log.Error(err)
+ peers.RemovePeer(p.ID())
+ }
}
- address := p2p.NewNetAddressIPPort(ips[0], nodes[i].Port)
- addresses = append(addresses, address)
- }
+ for _, peerInfo := range peers.GetPeerInfos() {
+ if peerInfo.Height > bestHeight {
+ bestHeight = peerInfo.Height
+ }
+ }
+ log.Info("bestHeight: ", bestHeight)
+ m.processPeerInfos(peers.GetPeerInfos())
- m.sw.DialPeers(addresses)
- return nil
-}
+ for _, peer := range m.sw.GetPeers().List() {
+ p := peers.GetPeer(peer.ID())
+ if p == nil {
+ continue
+ }
-// TODO:
-// implement logic first, and then refactor
-// /home/gavin/work/go/src/github.com/vapor/
-// p2p/test_util.go
-// p2p/switch_test.go
-// syncManager
-// notificationMgr
-
-// TODO: dail nodes, get lantency & best_height
-// TODO: decide check_height("best best_height" - "confirmations")
-// TODO: get blockhash by check_height, get latency
-// TODO: update lantency, active_time and status
+ peers.RemovePeer(p.ID())
+ }
+ log.Info("Disconnect all peers.")
+ // NOTE(review): no matching m.Lock() in this routine — presumably acquired in processPeerInfos; verify pairing.
+ m.Unlock()
+ m.dialCh <- struct{}{}
+ }
+}