"os"
"os/user"
"strings"
- "sync"
"github.com/jinzhu/gorm"
log "github.com/sirupsen/logrus"
"github.com/vapor/toolbar/precog/config"
)
+// TODO:
+// 1. moniker should be safe in theory, but remember to test that this change does not break the vapor node
+// 2. StatusMap seems fine; double check whether other well-known projects use the same naming
+// 3. toolbar/precog/monitor/stats.go FirstOrCreate&Update — would a plain save cause any problem here?
+// 4. open question: should the unit of identity be the ip or the pubkey? if the same pubkey appears at 2 different ips, could the data get mixed up?
+// 5. toolbar/precog/monitor/stats.go err := m.db.Model(&orm.NodeLiveness{}).Joins("join nodes on nodes.id = node_livenesses.node_id"). since ormNode is already passed in, don't we already have the node id?
+// 6. NodeLiveness should probably store each communication record, while aggregate statistics belong on the node?
+// 7. why does m need a lock at all? one routine is node discovery, the other is liveness probing, and they only interact through the database, right?
+
type monitor struct {
- *sync.RWMutex
- cfg *config.Config
- db *gorm.DB
- nodeCfg *vaporCfg.Config
- sw *p2p.Switch
- privKey chainkd.XPrv
- chain *mock.Chain
- txPool *mock.Mempool
- // discvMap maps a node's public key to the node itself
- discvMap map[string]*dht.Node
- dialCh chan struct{}
- // TODO: maybe remove?
- checkStatusCh chan struct{}
+ cfg *config.Config
+ db *gorm.DB
+ nodeCfg *vaporCfg.Config
+ sw *p2p.Switch
+ privKey chainkd.XPrv
+ chain *mock.Chain
+ txPool *mock.Mempool
+ bestHeightSeen uint64
+ peers *peers.PeerSet
}
-// TODO: set myself as SPV?
func NewMonitor(cfg *config.Config, db *gorm.DB) *monitor {
- //TODO: for test
- cfg.CheckFreqSeconds = 15
-
dbPath, err := makePath()
if err != nil {
- log.Fatal(err)
+ log.WithFields(log.Fields{"err": err}).Fatal("makePath")
}
nodeCfg := &vaporCfg.Config{
nodeCfg.ChainID = "mainnet"
privKey, err := signlib.NewPrivKey()
if err != nil {
- log.Fatal(err)
+ log.WithFields(log.Fields{"err": err}).Fatal("NewPrivKey")
}
chain, txPool, err := mockChainAndPool()
if err != nil {
- log.Fatal(err)
+ log.WithFields(log.Fields{"err": err}).Fatal("mockChainAndPool")
}
return &monitor{
- RWMutex: &sync.RWMutex{},
- cfg: cfg,
- db: db,
- nodeCfg: nodeCfg,
- privKey: privKey.(chainkd.XPrv),
- chain: chain,
- txPool: txPool,
- discvMap: make(map[string]*dht.Node),
- dialCh: make(chan struct{}, 1),
- checkStatusCh: make(chan struct{}, 1),
+ cfg: cfg,
+ db: db,
+ nodeCfg: nodeCfg,
+ privKey: privKey.(chainkd.XPrv),
+ chain: chain,
+ txPool: txPool,
+ bestHeightSeen: uint64(0),
}
}
return "", err
}
- dataPath := usr.HomeDir + "/.precog"
+ dataPath := usr.HomeDir + "/.vapor/precog"
if err := os.MkdirAll(dataPath, os.ModePerm); err != nil {
return "", err
}
}
func (m *monitor) Run() {
- var seeds []string
- for _, node := range m.cfg.Nodes {
- seeds = append(seeds, fmt.Sprintf("%s:%d", node.Host, node.Port))
- if err := m.upSertNode(&node); err != nil {
- log.Error(err)
- }
- }
- m.nodeCfg.P2P.Seeds = strings.Join(seeds, ",")
if err := m.makeSwitch(); err != nil {
- log.Fatal(err)
+ log.WithFields(log.Fields{"err": err}).Fatal("makeSwitch")
}
- m.dialCh <- struct{}{}
go m.discoveryRoutine()
- go m.connectNodesRoutine()
- go m.checkStatusRoutine()
+ go m.connectionRoutine()
}
func (m *monitor) makeSwitch() error {
+ var seeds []string
+ for _, node := range m.cfg.Nodes {
+ seeds = append(seeds, fmt.Sprintf("%s:%d", node.IP, node.Port))
+ }
+ m.nodeCfg.P2P.Seeds = strings.Join(seeds, ",")
+
l, listenAddr := p2p.GetListener(m.nodeCfg.P2P)
discv, err := dht.NewDiscover(m.nodeCfg, m.privKey, l.ExternalAddress().Port, m.cfg.NetworkID)
if err != nil {
// no need for lanDiscv, but passing &mdns.LANDiscover{} will cause NilPointer
lanDiscv := mdns.NewLANDiscover(mdns.NewProtocol(), int(l.ExternalAddress().Port))
- sw, err := p2p.NewSwitch(m.nodeCfg, discv, lanDiscv, l, m.privKey, listenAddr, m.cfg.NetworkID)
+ m.sw, err = p2p.NewSwitch(m.nodeCfg, discv, lanDiscv, l, m.privKey, listenAddr, m.cfg.NetworkID)
if err != nil {
return err
}
- m.sw = sw
- return nil
+ m.peers = peers.NewPeerSet(m.sw)
+ return m.prepareReactors(m.peers)
}
func (m *monitor) prepareReactors(peers *peers.PeerSet) error {
_ = consensusmgr.NewManager(m.sw, m.chain, peers, dispatcher)
fastSyncDB := dbm.NewDB("fastsync", m.nodeCfg.DBBackend, m.nodeCfg.DBDir())
// add ProtocolReactor to handle msgs
- _, err := chainmgr.NewManager(m.nodeCfg, m.sw, m.chain, m.txPool, dispatcher, peers, fastSyncDB)
- if err != nil {
+ if _, err := chainmgr.NewManager(m.nodeCfg, m.sw, m.chain, m.txPool, dispatcher, peers, fastSyncDB); err != nil {
return err
}
for label, reactor := range m.sw.GetReactors() {
- log.Debugf("start reactor: (%s:%v)", label, reactor)
+ log.WithFields(log.Fields{"label": label, "reactor": reactor}).Debug("start reactor")
if _, err := reactor.Start(); err != nil {
- return nil
+ return err
}
}
m.sw.GetSecurity().RegisterFilter(m.sw.GetPeers())
return m.sw.GetSecurity().Start()
}
-
-func (m *monitor) checkStatusRoutine() {
- peers := peers.NewPeerSet(m.sw)
- if err := m.prepareReactors(peers); err != nil {
- log.Fatal(err)
- }
-
- bestHeight := uint64(0)
- for range m.checkStatusCh {
- for _, peer := range m.sw.GetPeers().List() {
- peer.Start()
- peers.AddPeer(peer)
- }
- log.Infof("%d connected peers: %v", len(m.sw.GetPeers().List()), m.sw.GetPeers().List())
-
- for _, peer := range m.sw.GetPeers().List() {
- p := peers.GetPeer(peer.ID())
- if p == nil {
- continue
- }
-
- if err := p.SendStatus(m.chain.BestBlockHeader(), m.chain.LastIrreversibleHeader()); err != nil {
- log.Error(err)
- peers.RemovePeer(p.ID())
- }
- }
-
- for _, peerInfo := range peers.GetPeerInfos() {
- if peerInfo.Height > bestHeight {
- bestHeight = peerInfo.Height
- }
- }
- log.Info("bestHeight: ", bestHeight)
- m.processPeerInfos(peers.GetPeerInfos())
-
- for _, peer := range m.sw.GetPeers().List() {
- p := peers.GetPeer(peer.ID())
- if p == nil {
- continue
- }
-
- peers.RemovePeer(p.ID())
- }
- log.Info("Disonnect all peers.")
- m.Unlock()
- m.dialCh <- struct{}{}
- }
-}