11 log "github.com/sirupsen/logrus"
12 "github.com/tendermint/go-wire"
13 "golang.org/x/crypto/sha3"
15 "github.com/vapor/common"
16 "github.com/vapor/crypto/ed25519"
17 "github.com/vapor/p2p/netutil"
// NOTE(review): this file is a sampled/truncated view — the leading number
// on each line is the original line number and many intermediate lines are
// missing. All comments below describe only the visible code.

// Sentinel errors produced by the node state machine and packet handling.
21 errInvalidEvent = errors.New("invalid in current state")
22 errNoQuery = errors.New("no pending query")
23 errWrongAddress = errors.New("unknown sender address")
// Table maintenance cadence: a full refresh hourly, one bucket per minute.
27 autoRefreshInterval = 1 * time.Hour
28 bucketRefreshInterval = 1 * time.Minute
// Maximum age of seed entries accepted from the node database (see refresh).
30 seedMaxAge = 5 * 24 * time.Hour
// Enables the "*R"/"*MR"/"*W" radius/wait-period debug lines in loop().
35 printTestImgLogs = false
38 // Network manages the table and all protocol interaction.
40 db *nodeDB // database of known nodes
42 netrestrict *netutil.Netlist // optional whitelist; neighbours outside it are rejected (see internNodeFromNeighbours)
44 closed chan struct{} // closed when loop is done
45 closeReq chan struct{} // 'request to close'
46 refreshReq chan []*Node // lookups ask for refresh on this channel
47 refreshResp chan (<-chan struct{}) // ...and get the channel to block on from this one
48 read chan ingressPacket // ingress packets arrive here
49 timeout chan timeoutEvent // expired timers from timedEvent are delivered here
50 queryReq chan *findnodeQuery // lookups submit findnode queries on this channel
51 tableOpReq chan func() // closures to run on the loop goroutine (see reqTableOp)
52 tableOpResp chan struct{} // signals completion of a tableOpReq closure
53 topicRegisterReq chan topicRegisterReq // RegisterTopic submissions
54 topicSearchReq chan topicSearchReq // SearchTopic submissions
56 // State of the main loop.
59 ticketStore *ticketStore // topic registration tickets and radius tracking
61 nodes map[NodeID]*Node // tracks active nodes with state != known
62 timeoutTimers map[timeoutEvent]*time.Timer
64 // Revalidation queues.
65 // Nodes put on these queues will be pinged eventually.
66 slowRevalidateQueue []*Node
67 fastRevalidateQueue []*Node
69 // Buffers for state transition.
70 sendBuf []*ingressPacket
73 // transport is implemented by the UDP transport.
74 // it is an interface so we can test without opening lots of UDP
75 // sockets and without generating a private key.
76 type transport interface {
// sendPing returns the hash of the sent ping packet; it is stored in
// nodeNetGuts.pingEcho and later matched against pong ReplyTok.
77 sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) (hash []byte)
78 sendNeighbours(remote *Node, nodes []*Node)
79 sendFindnodeHash(remote *Node, target common.Hash)
80 sendTopicRegister(remote *Node, topics []Topic, topicIdx int, pong []byte)
81 sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node)
// send is the generic variant used e.g. for pong and topicQuery packets.
83 send(remote *Node, ptype nodeEvent, p interface{}) (hash []byte)
85 localAddr() *net.UDPAddr
// findnodeQuery tracks one outstanding findnode request against a remote
// node. Remaining fields are not visible in this excerpt.
90 type findnodeQuery struct {
94 nresults int // counter for received nodes
// topicRegisterReq toggles registration of a topic (add=true/false in loop).
97 type topicRegisterReq struct {
// topicSearchReq carries topic, found/lookup channels and a search period
// (delay) — see SearchTopic and the loop handler.
102 type topicSearchReq struct {
// topicSearchResult pairs a finished search lookup with the nodes it found.
109 type topicSearchResult struct {
// timeoutEvent keys the timeoutTimers map: (event, node) pairs.
114 type timeoutEvent struct {
// newNetwork builds a Network around the given transport. The node ID is
// derived from the public key, the node DB is opened unless dbPath is the
// sentinel "<no database>", and all loop channels are allocated here.
// NOTE(review): several lines (ourID/db declarations, error return, struct
// literal header) are missing from this excerpt.
119 func newNetwork(conn transport, ourPubkey ed25519.PublicKey, dbPath string, netrestrict *netutil.Netlist) (*Network, error) {
121 copy(ourID[:], ourPubkey[:nodeIDBits])
124 if dbPath != "<no database>" {
126 if db, err = newNodeDB(dbPath, Version, ourID); err != nil {
131 tab := newTable(ourID, conn.localAddr())
135 netrestrict: netrestrict,
137 topictab: newTopicTable(db, tab.self),
138 ticketStore: newTicketStore(),
139 refreshReq: make(chan []*Node),
140 refreshResp: make(chan (<-chan struct{})),
141 closed: make(chan struct{}),
142 closeReq: make(chan struct{}),
143 read: make(chan ingressPacket, 100),
144 timeout: make(chan timeoutEvent),
145 timeoutTimers: make(map[timeoutEvent]*time.Timer),
146 tableOpReq: make(chan func()),
147 tableOpResp: make(chan struct{}),
148 queryReq: make(chan *findnodeQuery),
149 topicRegisterReq: make(chan topicRegisterReq),
150 topicSearchReq: make(chan topicSearchReq),
151 nodes: make(map[NodeID]*Node),
157 // Close terminates the network listener and flushes the node database.
158 func (net *Network) Close() {
// Signal the loop goroutine; presumably a select with <-net.closed as the
// alternative (surrounding lines missing from this view).
162 case net.closeReq <- struct{}{}:
167 // Self returns the local node.
168 // The returned node should not be modified by the caller.
169 func (net *Network) Self() *Node {
// selfIP returns the local node's advertised IP address.
173 func (net *Network) selfIP() net.IP {
174 return net.tab.self.IP
177 // ReadRandomNodes fills the given slice with random nodes from the
178 // table. It will not write the same node more than once. The nodes in
179 // the slice are copies and can be modified by the caller.
180 func (net *Network) ReadRandomNodes(buf []*Node) (n int) {
// Runs on the loop goroutine so the table is accessed without locking.
181 net.reqTableOp(func() { n = net.tab.readRandomNodes(buf) })
185 // SetFallbackNodes sets the initial points of contact. These nodes
186 // are used to connect to the network if the table is empty and there
187 // are no known nodes in the database.
188 func (net *Network) SetFallbackNodes(nodes []*Node) error {
189 nursery := make([]*Node, 0, len(nodes))
190 for _, n := range nodes {
191 if err := n.validateComplete(); err != nil {
192 return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
194 // Recompute cpy.sha because the node might not have been
195 // created by NewNode or ParseNode.
// NOTE(review): cpy is presumably a value copy of *n made on a line not
// visible in this excerpt.
197 cpy.sha = common.BytesToHash(n.ID[:])
198 nursery = append(nursery, &cpy)
// Hand the nursery to the loop, which triggers a refresh with these seeds.
200 net.reqRefresh(nursery)
204 // Resolve searches for a specific node with the given ID.
205 // It returns nil if the node could not be found.
206 func (net *Network) Resolve(targetID NodeID) *Node {
// stopOnMatch=true: lookup aborts as soon as the exact target is seen.
207 result := net.lookup(common.BytesToHash(targetID[:]), true)
208 for _, n := range result {
209 if n.ID == targetID {
216 // Lookup performs a network search for nodes close
217 // to the given target. It approaches the target by querying
218 // nodes that are closer to it on each iteration.
219 // The given target does not need to be an actual node
222 // The local node may be included in the result.
223 func (net *Network) Lookup(targetID NodeID) []*Node {
224 return net.lookup(common.BytesToHash(targetID[:]), false)
// lookup runs an iterative Kademlia-style search toward target, querying up
// to alpha nodes concurrently and converging on the bucketSize closest
// entries. With stopOnMatch it returns early when a node whose sha equals
// the target is found.
227 func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node {
229 asked = make(map[NodeID]bool)
230 seen = make(map[NodeID]bool)
231 reply = make(chan []*Node, alpha)
232 result = nodesByDistance{target: target}
235 // Get initial answers from the local node.
236 result.push(net.tab.self, bucketSize)
238 // Ask the α closest nodes that we haven't asked yet.
239 for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
240 n := result.entries[i]
244 net.reqQueryFindnode(n, target, reply)
247 if pendingQueries == 0 {
248 // We have asked all closest nodes, stop the search.
251 // Wait for the next reply.
253 case nodes := <-reply:
254 for _, n := range nodes {
255 if n != nil && !seen[n.ID] {
257 result.push(n, bucketSize)
258 if stopOnMatch && n.sha == target {
259 return result.entries
264 case <-time.After(respTimeout):
265 // forget all pending requests, start new ones
// A fresh channel abandons any stragglers from the timed-out round.
267 reply = make(chan []*Node, alpha)
270 return result.entries
// RegisterTopic registers this node under the given topic until stop is
// closed. The second select (visible below) presumably sends the matching
// deregistration request on shutdown — intermediate lines are missing.
273 func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) {
275 case net.topicRegisterReq <- topicRegisterReq{true, topic}:
283 case net.topicRegisterReq <- topicRegisterReq{false, topic}:
// SearchTopic searches for peers advertising the topic. Each value read
// from setPeriod re-submits the search with a new delay; a zero delay
// cancels it (see the loop handler).
289 func (net *Network) SearchTopic(topic Topic, setPeriod <-chan time.Duration, found chan<- *Node, lookup chan<- bool) {
294 case delay, ok := <-setPeriod:
296 case net.topicSearchReq <- topicSearchReq{topic: topic, found: found, lookup: lookup, delay: delay}:
// reqRefresh asks the loop to start a refresh (optionally seeding it with a
// nursery) and returns the channel that is closed when the refresh is done.
307 func (net *Network) reqRefresh(nursery []*Node) <-chan struct{} {
309 case net.refreshReq <- nursery:
310 return <-net.refreshResp
// reqQueryFindnode submits a findnode query to the loop goroutine; replies
// arrive asynchronously on the supplied channel.
316 func (net *Network) reqQueryFindnode(n *Node, target common.Hash, reply chan []*Node) bool {
317 q := &findnodeQuery{remote: n, target: target, reply: reply}
319 case net.queryReq <- q:
// reqReadPacket hands an ingress packet to the loop for processing.
326 func (net *Network) reqReadPacket(pkt ingressPacket) {
328 case net.read <- pkt:
// reqTableOp runs f on the loop goroutine and blocks until it completes,
// serializing all table access without locks.
333 func (net *Network) reqTableOp(f func()) (called bool) {
335 case net.tableOpReq <- f:
343 // TODO: external address handling.
// topicSearchInfo is the loop's per-topic search bookkeeping.
345 type topicSearchInfo struct {
346 lookupChn chan<- bool
// maxSearchCount caps concurrent topic-search lookups in the loop.
350 const maxSearchCount = 5
// loop is the Network's single event-processing goroutine. Every state
// transition, table operation, refresh and topic registration/search is
// serialized through the select cases below, which is why code running on
// this goroutine may mutate Node/Table/Network without locking (see the
// comment above refresh()).
// NOTE(review): many lines of this function are missing from the excerpt
// (select headers, closing braces); comments describe only visible code.
352 func (net *Network) loop() {
354 refreshTimer = time.NewTicker(autoRefreshInterval)
355 bucketRefreshTimer = time.NewTimer(bucketRefreshInterval)
356 refreshDone chan struct{} // closed when the 'refresh' lookup has ended
359 // Tracking the next ticket to register.
361 nextTicket *ticketRef
362 nextRegisterTimer *time.Timer
363 nextRegisterTime <-chan time.Time
// Presumably part of a deferred cleanup stopping outstanding timers —
// the surrounding defer is not visible here.
366 if nextRegisterTimer != nil {
367 nextRegisterTimer.Stop()
370 bucketRefreshTimer.Stop()
// resetNextTicket re-arms the registration timer for whichever ticket the
// store wants to register next, stopping any previously armed timer.
372 resetNextTicket := func() {
373 ticket, timeout := net.ticketStore.nextFilteredTicket()
374 if nextTicket != ticket {
376 if nextRegisterTimer != nil {
377 nextRegisterTimer.Stop()
378 nextRegisterTime = nil
381 nextRegisterTimer = time.NewTimer(timeout)
382 nextRegisterTime = nextRegisterTimer.C
387 // Tracking registration and search lookups.
389 topicRegisterLookupTarget lookupInfo
390 topicRegisterLookupDone chan []*Node
391 topicRegisterLookupTick = time.NewTimer(0)
392 searchReqWhenRefreshDone []topicSearchReq
393 searchInfo = make(map[Topic]topicSearchInfo)
394 activeSearchCount int
396 topicSearchLookupDone := make(chan topicSearchResult, 100)
397 topicSearch := make(chan Topic, 100)
398 <-topicRegisterLookupTick.C // drain the initial zero-delay tick so Reset is safe later
400 statsDump := time.NewTicker(10 * time.Second)
401 defer statsDump.Stop()
// Shutdown requested via Close().
409 log.WithFields(log.Fields{"module": logModule}).Debug("close request")
412 // Ingress packet handling.
413 case pkt := <-net.read:
414 log.WithFields(log.Fields{"module": logModule}).Debug("read from net")
415 n := net.internNode(&pkt)
418 if err := net.handle(n, pkt.ev, &pkt); err != nil {
421 log.WithFields(log.Fields{"module": logModule, "node num": net.tab.count, "event": pkt.ev, "remote id": hex.EncodeToString(pkt.remoteID[:8]), "remote addr": pkt.remoteAddr, "pre state": prestate, "node state": n.state, "status": status}).Debug("handle ingress msg")
423 // TODO: persist state if n.state goes >= known, delete if it goes <= known
425 // State transition timeouts.
426 case timeout := <-net.timeout:
427 log.WithFields(log.Fields{"module": logModule}).Debug("net timeout")
428 if net.timeoutTimers[timeout] == nil {
429 // Stale timer (was aborted).
432 delete(net.timeoutTimers, timeout)
433 prestate := timeout.node.state
435 if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
438 log.WithFields(log.Fields{"module": logModule, "node num": net.tab.count, "event": timeout.ev, "node id": hex.EncodeToString(timeout.node.ID[:8]), "node addr": timeout.node.addr(), "pre state": prestate, "node state": timeout.node.state, "status": status}).Debug("handle timeout")
// Querying: queue the findnode on the target node's deferred-query list.
441 case q := <-net.queryReq:
442 log.WithFields(log.Fields{"module": logModule}).Debug("net query request")
444 q.remote.deferQuery(q)
447 // Interacting with the table.
448 case f := <-net.tableOpReq:
449 log.WithFields(log.Fields{"module": logModule}).Debug("net table operate request")
451 net.tableOpResp <- struct{}{}
453 // Topic registration stuff.
454 case req := <-net.topicRegisterReq:
455 log.WithFields(log.Fields{"module": logModule, "topic": req.topic}).Debug("net topic register request")
// Deregistration path (req.add == false on a missing line, presumably).
457 net.ticketStore.removeRegisterTopic(req.topic)
460 net.ticketStore.addTopic(req.topic, true)
461 // If we're currently waiting idle (nothing to look up), give the ticket store a
462 // chance to start it sooner. This should speed up convergence of the radius
463 // determination for new topics.
464 // if topicRegisterLookupDone == nil {
465 if topicRegisterLookupTarget.target == (common.Hash{}) {
466 log.WithFields(log.Fields{"module": logModule, "topic": req.topic}).Debug("topic register lookup target null")
// Standard pattern for resetting a Timer: Stop, drain, then Reset.
467 if topicRegisterLookupTick.Stop() {
468 <-topicRegisterLookupTick.C
470 target, delay := net.ticketStore.nextRegisterLookup()
471 topicRegisterLookupTarget = target
472 topicRegisterLookupTick.Reset(delay)
475 case nodes := <-topicRegisterLookupDone:
476 log.WithFields(log.Fields{"module": logModule}).Debug("topic register lookup done")
477 net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte {
478 net.ping(n, n.addr())
481 target, delay := net.ticketStore.nextRegisterLookup()
482 topicRegisterLookupTarget = target
483 topicRegisterLookupTick.Reset(delay)
484 topicRegisterLookupDone = nil
486 case <-topicRegisterLookupTick.C:
487 log.WithFields(log.Fields{"module": logModule}).Debug("topic register lookup tick")
488 if (topicRegisterLookupTarget.target == common.Hash{}) {
489 target, delay := net.ticketStore.nextRegisterLookup()
490 topicRegisterLookupTarget = target
491 topicRegisterLookupTick.Reset(delay)
492 topicRegisterLookupDone = nil
494 topicRegisterLookupDone = make(chan []*Node)
495 target := topicRegisterLookupTarget.target
496 go func() { topicRegisterLookupDone <- net.lookup(target, false) }()
// Time to send the next queued topic-register packet.
499 case <-nextRegisterTime:
500 log.WithFields(log.Fields{"module": logModule}).Debug("next register time")
501 net.ticketStore.ticketRegistered(*nextTicket)
502 net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
504 case req := <-net.topicSearchReq:
// Searches arriving during a refresh are parked until it finishes.
505 if refreshDone == nil {
506 log.WithFields(log.Fields{"module": logModule, "topic": req.topic}).Debug("net topic rearch req")
507 info, ok := searchInfo[req.topic]
509 if req.delay == time.Duration(0) {
// Zero delay cancels an existing search for this topic.
510 delete(searchInfo, req.topic)
511 net.ticketStore.removeSearchTopic(req.topic)
513 info.period = req.delay
514 searchInfo[req.topic] = info
518 if req.delay != time.Duration(0) {
519 var info topicSearchInfo
520 info.period = req.delay
521 info.lookupChn = req.lookup
522 searchInfo[req.topic] = info
523 net.ticketStore.addSearchTopic(req.topic, req.found)
524 topicSearch <- req.topic
527 searchReqWhenRefreshDone = append(searchReqWhenRefreshDone, req)
530 case topic := <-topicSearch:
// Bounded concurrency: at most maxSearchCount lookups in flight.
531 if activeSearchCount < maxSearchCount {
533 target := net.ticketStore.nextSearchLookup(topic)
535 nodes := net.lookup(target.target, false)
536 topicSearchLookupDone <- topicSearchResult{target: target, nodes: nodes}
539 period := searchInfo[topic].period
540 if period != time.Duration(0) {
547 case res := <-topicSearchLookupDone:
549 if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
550 lookupChn <- net.ticketStore.radius[res.target.topic].converged
552 net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
553 if n.state != nil && n.state.canQuery {
554 return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
// Node not queryable yet: ping it to start verification first.
556 if n.state == unknown {
557 net.ping(n, n.addr())
// Periodic debug stats (10s ticker).
564 log.WithFields(log.Fields{"module": logModule}).Debug("stats dump clock")
565 /*r, ok := net.ticketStore.radius[testTopic]
567 fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now())
569 topics := len(net.ticketStore.tickets)
570 tickets := len(net.ticketStore.nodes)
571 rad := r.radius / (maxRadius/10000+1)
572 fmt.Printf("(%x) topics:%d radius:%d tickets:%d @ %v\n", net.tab.self.ID[:8], topics, rad, tickets, time.Now())
576 for topic, r := range net.ticketStore.radius {
577 if printTestImgLogs {
578 rad := r.radius / (maxRadius/1000000 + 1)
579 minrad := r.minRadius / (maxRadius/1000000 + 1)
580 log.WithFields(log.Fields{"module": logModule}).Debugf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], rad)
581 log.WithFields(log.Fields{"module": logModule}).Debugf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], minrad)
584 for topic, t := range net.topictab.topics {
585 wp := t.wcl.nextWaitPeriod(tm)
586 if printTestImgLogs {
587 log.WithFields(log.Fields{"module": logModule}).Debugf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000)
591 // Periodic / lookup-initiated bucket refresh.
592 case <-refreshTimer.C:
593 log.WithFields(log.Fields{"module": logModule}).Debug("refresh timer clock")
594 // TODO: ideally we would start the refresh timer after
595 // fallback nodes have been set for the first time.
596 if refreshDone == nil {
597 refreshDone = make(chan struct{})
598 net.refresh(refreshDone)
600 case <-bucketRefreshTimer.C:
601 target := net.tab.chooseBucketRefreshTarget()
603 net.lookup(target, false)
604 bucketRefreshTimer.Reset(bucketRefreshInterval)
606 case newNursery := <-net.refreshReq:
607 log.WithFields(log.Fields{"module": logModule}).Debug("net refresh request")
608 if newNursery != nil {
609 net.nursery = newNursery
611 if refreshDone == nil {
612 refreshDone = make(chan struct{})
613 net.refresh(refreshDone)
615 net.refreshResp <- refreshDone
// Refresh finished: release parked search requests, or retry if the
// table is still empty.
617 log.WithFields(log.Fields{"module": logModule, "table size": net.tab.count}).Debug("net refresh done")
618 if net.tab.count != 0 {
620 list := searchReqWhenRefreshDone
621 searchReqWhenRefreshDone = nil
623 for _, req := range list {
624 net.topicSearchReq <- req
628 refreshDone = make(chan struct{})
629 net.refresh(refreshDone)
// Shutdown path after the select loop exits.
633 log.WithFields(log.Fields{"module": logModule}).Debug("loop stopped,shutting down")
637 if refreshDone != nil {
638 // TODO: wait for pending refresh.
641 // Cancel all pending timeouts.
642 for _, timer := range net.timeoutTimers {
651 // Everything below runs on the Network.loop goroutine
652 // and can modify Node, Table and Network at any time without locking.
// refresh seeds the table from the node database (and the nursery, on lines
// not visible here), starts verification of unknown seeds, then performs a
// self-lookup to fill the buckets. done is closed when the refresh ends; if
// no seeds are found it is closed after a 10s delay instead.
654 func (net *Network) refresh(done chan<- struct{}) {
657 seeds = net.db.querySeeds(seedCount, seedMaxAge)
663 log.WithFields(log.Fields{"module": logModule}).Debug("no seed nodes found")
664 time.AfterFunc(time.Second*10, func() { close(done) })
667 for _, n := range seeds {
668 n = net.internNodeFromDB(n)
669 if n.state == unknown {
670 net.transition(n, verifyinit)
672 // Force-add the seed node so Lookup does something.
673 // It will be deleted again if verification fails.
676 // Start self lookup to fill up the buckets.
678 net.Lookup(net.tab.self.ID)
// internNode returns the tracked *Node for an ingress packet's sender,
// updating its endpoint in place, or creates and registers a new one.
// Note the TCP port is assumed equal to the observed UDP port here.
685 func (net *Network) internNode(pkt *ingressPacket) *Node {
686 if n := net.nodes[pkt.remoteID]; n != nil {
687 n.IP = pkt.remoteAddr.IP
688 n.UDP = uint16(pkt.remoteAddr.Port)
689 n.TCP = uint16(pkt.remoteAddr.Port)
692 n := NewNode(pkt.remoteID, pkt.remoteAddr.IP, uint16(pkt.remoteAddr.Port), uint16(pkt.remoteAddr.Port))
694 net.nodes[pkt.remoteID] = n
// internNodeFromDB returns the tracked *Node for a database seed entry,
// creating a fresh copy of the DB record if it is not tracked yet.
698 func (net *Network) internNodeFromDB(dbn *Node) *Node {
699 if n := net.nodes[dbn.ID]; n != nil {
702 n := NewNode(dbn.ID, dbn.IP, dbn.UDP, dbn.TCP)
// internNodeFromNeighbours interns a node received in a neighbours packet,
// rejecting self, low ports and addresses outside the netrestrict whitelist.
// For already-known nodes an address change is rejected; otherwise it is
// accepted (lines applying the new endpoint are not visible here).
708 func (net *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn rpcNode) (n *Node, err error) {
709 if rn.ID == net.tab.self.ID {
710 return nil, errors.New("is self")
712 if rn.UDP <= lowPort {
713 return nil, errors.New("low port")
717 // We haven't seen this node before.
718 n, err = nodeFromRPC(sender, rn)
719 if net.netrestrict != nil && !net.netrestrict.Contains(n.IP) {
720 return n, errors.New("not contained in netrestrict whitelist")
728 if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
729 if n.state == known {
730 // reject address change if node is known by us
731 err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
733 // accept otherwise; this will be handled nicer with signed ENRs
742 // nodeNetGuts is embedded in Node and contains fields.
743 type nodeNetGuts struct {
744 // This is a cached copy of sha3(ID) which is used for node
745 // distance calculations. This is part of Node in order to make it
746 // possible to write tests that need a node at a certain distance.
747 // In those tests, the content of sha will not actually correspond
751 // State machine fields. Access to these fields
752 // is restricted to the Network.loop goroutine.
754 pingEcho []byte // hash of last ping sent by us
755 pingTopics []Topic // topic set sent by us in last ping
756 deferredQueries []*findnodeQuery // queries that can't be sent yet
757 pendingNeighbours *findnodeQuery // current query, waiting for reply
// deferQuery queues a findnode query until the node can accept it.
761 func (n *nodeNetGuts) deferQuery(q *findnodeQuery) {
762 n.deferredQueries = append(n.deferredQueries, q)
// startNextQuery pops the oldest deferred query and attempts to start it;
// the query stays queued if start() reports it could not be sent yet.
765 func (n *nodeNetGuts) startNextQuery(net *Network) {
766 if len(n.deferredQueries) == 0 {
769 nextq := n.deferredQueries[0]
770 if nextq.start(net) {
// Dequeue in place, reusing the backing array.
771 n.deferredQueries = append(n.deferredQueries[:0], n.deferredQueries[1:]...)
// start attempts to issue the query. Self-queries are answered from the
// local table immediately; queryable remotes get a findnode packet plus a
// response timeout; unknown remotes are pushed into verification first and
// the query is retried later (return values are on lines not visible here).
775 func (q *findnodeQuery) start(net *Network) bool {
776 // Satisfy queries against the local node directly.
777 if q.remote == net.tab.self {
778 log.WithFields(log.Fields{"module": logModule}).Debug("findnodeQuery self")
779 closest := net.tab.closest(common.BytesToHash(q.target[:]), bucketSize)
781 q.reply <- closest.entries
784 if q.remote.state.canQuery && q.remote.pendingNeighbours == nil {
785 log.WithFields(log.Fields{"module": logModule, "remote peer": q.remote.ID, "targetID": q.target}).Debug("find node query")
786 net.conn.sendFindnodeHash(q.remote, q.target)
787 net.timedEvent(respTimeout, q.remote, neighboursTimeout)
788 q.remote.pendingNeighbours = q
791 // If the node is not known yet, it won't accept queries.
792 // Initiate the transition to known.
793 // The request will be sent later when the node reaches known state.
794 if q.remote.state == unknown {
795 log.WithFields(log.Fields{"module": logModule, "id": q.remote.ID, "status": "unknown->verify init"}).Debug("find node query")
796 net.transition(q.remote, verifyinit)
801 // Node Events (the input to the state machine).
805 //go:generate stringer -type=nodeEvent
808 invalidEvent nodeEvent = iota // zero is reserved
810 // Packet type events.
811 // These correspond to packet types in the UDP protocol.
821 // Non-packet events.
822 // Event values in this category are allocated outside
823 // the packet type range (packet types are encoded as a single byte).
824 pongTimeout nodeEvent = iota + 256
829 // Node State Machine.
// nodeState describes one state: its event handler returns the next state,
// and enter (optional) runs on transition into the state.
831 type nodeState struct {
833 handle func(*Network, *Node, nodeEvent, *ingressPacket) (next *nodeState, err error)
834 enter func(*Network, *Node)
838 func (s *nodeState) String() string {
// The state singletons, wired up below.
844 verifyinit *nodeState
845 verifywait *nodeState
846 remoteverifywait *nodeState
849 unresponsive *nodeState
// State definitions. Each handler reacts to packet/timeout events and
// returns the next state; returning errInvalidEvent leaves the state
// unchanged. NOTE(review): the case labels of several switches and the
// "known" state's header are on lines not visible in this excerpt.
853 unknown = &nodeState{
855 enter: func(net *Network, n *Node) {
858 // Abort active queries.
859 for _, q := range n.deferredQueries {
862 n.deferredQueries = nil
863 if n.pendingNeighbours != nil {
// A nil reply unblocks the waiting lookup without results.
864 n.pendingNeighbours.reply <- nil
865 n.pendingNeighbours = nil
869 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
// An incoming ping from an unknown node: answer it and ping back to
// verify the sender, then wait for its pong.
872 net.handlePing(n, pkt)
873 net.ping(n, pkt.remoteAddr)
874 return verifywait, nil
876 return unknown, errInvalidEvent
881 verifyinit = &nodeState{
// We initiated verification: ping the node on entry.
883 enter: func(net *Network, n *Node) {
884 net.ping(n, n.addr())
886 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
889 net.handlePing(n, pkt)
890 return verifywait, nil
892 err := net.handleKnownPong(n, pkt)
893 return remoteverifywait, err
897 return verifyinit, errInvalidEvent
902 verifywait = &nodeState{
904 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
907 net.handlePing(n, pkt)
908 return verifywait, nil
910 err := net.handleKnownPong(n, pkt)
915 return verifywait, errInvalidEvent
920 remoteverifywait = &nodeState{
921 name: "remoteverifywait",
// Give the remote side respTimeout to finish verifying us.
922 enter: func(net *Network, n *Node) {
923 net.timedEvent(respTimeout, n, pingTimeout)
925 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
928 net.handlePing(n, pkt)
929 return remoteverifywait, nil
933 return remoteverifywait, errInvalidEvent
// Entry into the (not fully visible) "known" state: flush deferred
// queries and insert into the table, contesting the evicted node if the
// bucket was full.
941 enter: func(net *Network, n *Node) {
943 n.startNextQuery(net)
944 // Insert into the table and start revalidation of the last node
945 // in the bucket if it is full.
946 last := net.tab.add(n)
947 if last != nil && last.state == known {
948 // TODO: do this asynchronously
949 net.transition(last, contested)
952 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
953 if err := net.db.updateNode(n); err != nil {
959 net.handlePing(n, pkt)
962 err := net.handleKnownPong(n, pkt)
965 return net.handleQueryEvent(n, ev, pkt)
970 contested = &nodeState{
// contested: the node is about to be evicted; ping it once more and let
// the pong/timeout outcome decide between staying known and eviction.
973 enter: func(net *Network, n *Node) {
975 net.ping(n, n.addr())
977 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
980 // Node is still alive.
981 err := net.handleKnownPong(n, pkt)
984 net.tab.deleteReplace(n)
985 return unresponsive, nil
987 net.handlePing(n, pkt)
988 return contested, nil
990 return net.handleQueryEvent(n, ev, pkt)
995 unresponsive = &nodeState{
996 name: "unresponsive",
998 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
// Drop the node from the database; it may re-verify itself later.
999 net.db.deleteNode(n.ID)
1003 net.handlePing(n, pkt)
1006 err := net.handleKnownPong(n, pkt)
1009 return net.handleQueryEvent(n, ev, pkt)
1015 // handle processes packets sent by n and events related to n.
// It validates the packet, dispatches to the current state's handler and
// applies the resulting transition. Runs on the loop goroutine only.
1016 func (net *Network) handle(n *Node, ev nodeEvent, pkt *ingressPacket) error {
1017 //fmt.Println("handle", n.addr().String(), n.state, ev)
1019 if err := net.checkPacket(n, ev, pkt); err != nil {
1020 //fmt.Println("check err:", err)
1023 // Start the background expiration goroutine after the first
1024 // successful communication. Subsequent calls have no effect if it
1025 // is already running. We do this here instead of somewhere else
1026 // so that the search for seed nodes also considers older nodes
1027 // that would otherwise be removed by the expirer.
1029 net.db.ensureExpirer()
// Defensive default for nodes without a state yet (original "???" note).
1033 n.state = unknown //???
1035 next, err := n.state.handle(net, n, ev, pkt)
1036 net.transition(n, next)
1037 //fmt.Println("new state:", n.state)
// checkPacket performs replay-prevention checks before state handling.
// Pongs must echo the hash of our last ping (pingEcho); other packet
// types currently pass through (see the TODOs).
1041 func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error {
1042 // Replay prevention checks.
1044 case pingPacket, findnodeHashPacket, neighborsPacket:
1045 // TODO: check date is > last date seen
1046 // TODO: check ping version
1048 if !bytes.Equal(pkt.data.(*pong).ReplyTok, n.pingEcho) {
1049 // fmt.Println("pong reply token mismatch")
1050 return fmt.Errorf("pong reply token mismatch")
1054 // Address validation.
1055 // TODO: Ideally we would do the following:
1056 // - reject all packets with wrong address except ping.
1057 // - for ping with new address, transition to verifywait but keep the
1058 // previous node (with old address) around. if the new one reaches known,
// transition moves n into state next, running the state's enter hook
// when the state actually changes.
1063 func (net *Network) transition(n *Node, next *nodeState) {
1064 if n.state != next {
1066 if next.enter != nil {
1071 // TODO: persist/unpersist node
// timedEvent schedules ev to be delivered for n after d, via the loop's
// net.timeout channel; the timer is tracked so it can be aborted.
1074 func (net *Network) timedEvent(d time.Duration, n *Node, ev nodeEvent) {
1075 timeout := timeoutEvent{ev, n}
1076 net.timeoutTimers[timeout] = time.AfterFunc(d, func() {
1078 case net.timeout <- timeout:
// abortTimedEvent stops and forgets a previously scheduled timedEvent.
1084 func (net *Network) abortTimedEvent(n *Node, ev nodeEvent) {
1085 timer := net.timeoutTimers[timeoutEvent{ev, n}]
1088 delete(net.timeoutTimers, timeoutEvent{ev, n})
// ping sends a ping to n unless one is already outstanding (pingEcho set)
// or n is ourselves. The sent packet hash is stored for pong matching and
// a pongTimeout is armed.
1092 func (net *Network) ping(n *Node, addr *net.UDPAddr) {
1093 //fmt.Println("ping", n.addr().String(), n.ID.String(), n.sha.Hex())
1094 if n.pingEcho != nil || n.ID == net.tab.self.ID {
1095 //fmt.Println(" not sent")
1098 log.WithFields(log.Fields{"module": logModule, "node": n.ID}).Debug("Pinging remote node")
1099 n.pingTopics = net.ticketStore.regTopicSet()
1100 n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics)
1101 net.timedEvent(respTimeout, n, pongTimeout)
// handlePing answers an incoming ping with a pong carrying a topic ticket
// for the topics advertised in the ping.
1104 func (net *Network) handlePing(n *Node, pkt *ingressPacket) {
1105 log.WithFields(log.Fields{"module": logModule, "node": n.ID}).Debug("Handling remote ping")
1106 ping := pkt.data.(*ping)
// Trust the TCP port claimed in the ping payload.
1107 n.TCP = ping.From.TCP
1108 t := net.topictab.getTicket(n, ping.Topics)
1111 To: makeEndpoint(n.addr(), n.TCP), // TODO: maybe use known TCP port from DB
1113 Expiration: uint64(time.Now().Add(expiration).Unix()),
1115 ticketToPong(t, pong)
1116 net.conn.send(n, pongPacket, pong)
// handleKnownPong processes a pong matching our outstanding ping: cancels
// the pong timeout, converts the pong into a topic ticket (if we pinged
// with topics) and records the pong time in the node database.
1119 func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error {
1120 log.WithFields(log.Fields{"module": logModule, "node": n.ID}).Debug("Handling known pong")
1121 net.abortTimedEvent(n, pongTimeout)
1123 ticket, err := pongToTicket(now, n.pingTopics, n, pkt)
1125 // fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data)
1126 net.ticketStore.addTicket(now, pkt.data.(*pong).ReplyTok, ticket)
1128 log.WithFields(log.Fields{"module": logModule, "error": err}).Debug("Failed to convert pong to ticket")
1132 net.db.updateLastPong(n.ID, time.Now())
// handleQueryEvent handles the query-related packet/timeout events shared
// by the known/contested/unresponsive states: findnode (by ID and by hash),
// neighbours replies and their timeout, and the topic register/query/nodes
// packets. Returns the node's next state.
1136 func (net *Network) handleQueryEvent(n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
1138 case findnodePacket:
1139 target := common.BytesToHash(pkt.data.(*findnode).Target[:])
1140 results := net.tab.closest(target, bucketSize).entries
1141 net.conn.sendNeighbours(n, results)
1143 case neighborsPacket:
1144 err := net.handleNeighboursPacket(n, pkt)
1146 case neighboursTimeout:
// No reply in time: unblock the waiting lookup with nil results.
1147 if n.pendingNeighbours != nil {
1148 n.pendingNeighbours.reply <- nil
1149 n.pendingNeighbours = nil
1152 if n.queryTimeouts > maxFindnodeFailures && n.state == known {
1153 return contested, errors.New("too many timeouts")
1159 case findnodeHashPacket:
1160 results := net.tab.closest(pkt.data.(*findnodeHash).Target, bucketSize).entries
1161 net.conn.sendNeighbours(n, results)
1163 case topicRegisterPacket:
1164 //fmt.Println("got topicRegisterPacket")
1165 regdata := pkt.data.(*topicRegister)
1166 pong, err := net.checkTopicRegister(regdata, net.conn.getNetID())
1169 return n.state, fmt.Errorf("bad waiting ticket: %v", err)
1171 net.topictab.useTicket(n, pong.TicketSerial, regdata.Topics, int(regdata.Idx), pong.Expiration, pong.WaitPeriods)
1173 case topicQueryPacket:
1174 // TODO: handle expiration
1175 topic := pkt.data.(*topicQuery).Topic
1176 results := net.topictab.getEntries(topic)
1177 if _, ok := net.ticketStore.tickets[topic]; ok {
1178 results = append(results, net.tab.self) // we're not registering in our own table but if we're advertising, return ourselves too
1180 if len(results) > 10 {
1181 results = results[:10]
1183 var hash common.Hash
1184 copy(hash[:], pkt.hash)
1185 net.conn.sendTopicNodes(n, hash, results)
1187 case topicNodesPacket:
1188 p := pkt.data.(*topicNodes)
1189 if net.ticketStore.gotTopicNodes(n, p.Echo, p.Nodes) {
1191 if n.queryTimeouts > maxFindnodeFailures && n.state == known {
1192 return contested, errors.New("too many timeouts")
// Unhandled event: stay in the current state.
1198 return n.state, errInvalidEvent
// checkTopicRegister validates the pong embedded in a topic-register
// packet: it must decode, be a pong we signed, cover the claimed topic set
// (hash match) and carry an in-range topic index.
1202 func (net *Network) checkTopicRegister(data *topicRegister, netID uint64) (*pong, error) {
1203 var pongpkt ingressPacket
1204 if err := decodePacket(data.Pong, &pongpkt, netID); err != nil {
1207 if pongpkt.ev != pongPacket {
1208 return nil, errors.New("is not pong packet")
1210 if pongpkt.remoteID != net.tab.self.ID {
1211 return nil, errors.New("not signed by us")
1213 // check that we previously authorised all topics
1214 // that the other side is trying to register.
1215 hash, _, _ := wireHash(data.Topics)
1216 if hash != pongpkt.data.(*pong).TopicHash {
1217 return nil, errors.New("topic hash mismatch")
1219 if int(data.Idx) < 0 || int(data.Idx) >= len(data.Topics) {
1220 return nil, errors.New("topic index out of range")
1222 return pongpkt.data.(*pong), nil
// wireHash serializes x with go-wire into a hash writer (hw, declared on a
// line not visible here) and returns the digest plus bytes written.
1225 func wireHash(x interface{}) (h common.Hash, n int, err error) {
1227 wire.WriteBinary(x, hw, &n, &err)
// handleNeighboursPacket processes a neighbours reply for the pending
// findnode query on n: interns each returned node, kicks off verification
// of unknown ones, and delivers the batch to the waiting lookup.
// NOTE(review): the function continues past the end of this excerpt.
1232 func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error {
// No query outstanding — presumably returns errNoQuery (line not visible).
1233 if n.pendingNeighbours == nil {
1236 net.abortTimedEvent(n, neighboursTimeout)
1238 req := pkt.data.(*neighbors)
1239 nodes := make([]*Node, len(req.Nodes))
1240 for i, rn := range req.Nodes {
1241 nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn)
1243 log.WithFields(log.Fields{"module": logModule, "ip": rn.IP, "id:": n.ID[:8], "addr:": pkt.remoteAddr, "error": err}).Debug("invalid neighbour")
1247 // Start validation of query results immediately.
1248 // This fills the table quickly.
1249 // TODO: generates way too many packets, maybe do it via queue.
1250 if nn.state == unknown {
1251 net.transition(nn, verifyinit)
1254 // TODO: don't ignore second packet
1255 n.pendingNeighbours.reply <- nodes
1256 n.pendingNeighbours = nil
1257 // Now that this query is done, start the next one.
1258 n.startNextQuery(net)