11 log "github.com/sirupsen/logrus"
12 "github.com/tendermint/go-wire"
13 "golang.org/x/crypto/sha3"
15 "github.com/vapor/common"
16 "github.com/vapor/crypto/ed25519"
17 "github.com/vapor/p2p/netutil"
// Sentinel errors returned by the node state machine and the query paths.
21 errInvalidEvent = errors.New("invalid in current state")
22 errNoQuery = errors.New("no pending query")
23 errWrongAddress = errors.New("unknown sender address")
// Timing constants for the loop's periodic maintenance.
27 autoRefreshInterval = 1 * time.Hour // cadence of the full-table refresh (refreshTimer in loop)
28 bucketRefreshInterval = 1 * time.Minute // cadence of the single-bucket refresh lookup
30 seedMaxAge = 5 * 24 * time.Hour // max age of DB entries considered as seeds by refresh
35 printTestImgLogs = false // enables the *R/*MR/*W topic-radius debug lines in loop's stats dump
38 // Network manages the table and all protocol interaction.
// NOTE(review): this listing has elided lines (gaps in the embedded
// numbering), so the field list below is incomplete as shown.
40 db *nodeDB // database of known nodes
42 netrestrict *netutil.Netlist // optional IP whitelist; nil disables the check
44 closed chan struct{} // closed when loop is done
45 closeReq chan struct{} // 'request to close'
46 refreshReq chan []*Node // lookups ask for refresh on this channel
47 refreshResp chan (<-chan struct{}) // ...and get the channel to block on from this one
48 read chan ingressPacket // ingress packets arrive here
49 timeout chan timeoutEvent // expired timedEvent timers are delivered here
50 queryReq chan *findnodeQuery // lookups submit findnode queries on this channel
51 tableOpReq chan func() // table operations are executed on the loop goroutine via this channel
52 tableOpResp chan struct{} // ...and completion is signalled on this one
53 topicRegisterReq chan topicRegisterReq
54 topicSearchReq chan topicSearchReq
56 // State of the main loop.
59 ticketStore *ticketStore
61 nodes map[NodeID]*Node // tracks active nodes with state != known
62 timeoutTimers map[timeoutEvent]*time.Timer // pending per-node event timers (see timedEvent/abortTimedEvent)
64 // Revalidation queues.
65 // Nodes put on these queues will be pinged eventually.
66 slowRevalidateQueue []*Node
67 fastRevalidateQueue []*Node
69 // Buffers for state transition.
70 sendBuf []*ingressPacket
73 // transport is implemented by the UDP transport.
74 // it is an interface so we can test without opening lots of UDP
75 // sockets and without generating a private key.
76 type transport interface {
77 sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) (hash []byte)
78 sendNeighbours(remote *Node, nodes []*Node)
79 sendFindnodeHash(remote *Node, target common.Hash)
80 sendTopicRegister(remote *Node, topics []Topic, topicIdx int, pong []byte)
81 sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node)
// send transmits an arbitrary packet type; used below for pong and
// topic-query packets. Returns the packet hash (used as a reply token).
83 send(remote *Node, ptype nodeEvent, p interface{}) (hash []byte)
85 localAddr() *net.UDPAddr
// NOTE(review): the field lists of the types below are partially elided
// in this listing; comments describe usage visible elsewhere in the file.
// findnodeQuery is a pending findnode request (remote, target, reply chan).
89 type findnodeQuery struct {
93 nresults int // counter for received nodes
// topicRegisterReq asks the loop to add/remove a topic registration
// (constructed as {add bool, topic} at the RegisterTopic call sites).
96 type topicRegisterReq struct {
// topicSearchReq carries topic, found/lookup channels and a delay period.
101 type topicSearchReq struct {
// topicSearchResult pairs a lookup target with the nodes found for it.
108 type topicSearchResult struct {
// timeoutEvent keys a pending timer: {event, node}.
113 type timeoutEvent struct {
// newNetwork builds a Network around the given transport: it derives the
// local node ID from the public key, opens the node database (unless
// dbPath is the sentinel "<no database>"), creates the table, and
// allocates all loop channels and maps. Several lines are elided here.
118 func newNetwork(conn transport, ourPubkey ed25519.PublicKey, dbPath string, netrestrict *netutil.Netlist) (*Network, error) {
120 copy(ourID[:], ourPubkey[:nodeIDBits])
123 if dbPath != "<no database>" {
125 if db, err = newNodeDB(dbPath, Version, ourID); err != nil {
130 tab := newTable(ourID, conn.localAddr())
134 netrestrict: netrestrict,
136 topictab: newTopicTable(db, tab.self),
137 ticketStore: newTicketStore(),
138 refreshReq: make(chan []*Node),
139 refreshResp: make(chan (<-chan struct{})),
140 closed: make(chan struct{}),
141 closeReq: make(chan struct{}),
142 read: make(chan ingressPacket, 100),
143 timeout: make(chan timeoutEvent),
144 timeoutTimers: make(map[timeoutEvent]*time.Timer),
145 tableOpReq: make(chan func()),
146 tableOpResp: make(chan struct{}),
147 queryReq: make(chan *findnodeQuery),
148 topicRegisterReq: make(chan topicRegisterReq),
149 topicSearchReq: make(chan topicSearchReq),
150 nodes: make(map[NodeID]*Node),
156 // Close terminates the network listener and flushes the node database.
157 func (net *Network) Close() {
// Signals the loop goroutine; the remaining select cases are elided here.
161 case net.closeReq <- struct{}{}:
166 // Self returns the local node.
167 // The returned node should not be modified by the caller.
168 func (net *Network) Self() *Node {
// selfIP returns the IP address of the local table entry.
172 func (net *Network) selfIP() net.IP {
173 return net.tab.self.IP
176 // ReadRandomNodes fills the given slice with random nodes from the
177 // table. It will not write the same node more than once. The nodes in
178 // the slice are copies and can be modified by the caller.
179 func (net *Network) ReadRandomNodes(buf []*Node) (n int) {
// Delegates to the table on the loop goroutine via reqTableOp.
180 net.reqTableOp(func() { n = net.tab.readRandomNodes(buf) })
184 // SetFallbackNodes sets the initial points of contact. These nodes
185 // are used to connect to the network if the table is empty and there
186 // are no known nodes in the database.
187 func (net *Network) SetFallbackNodes(nodes []*Node) error {
188 nursery := make([]*Node, 0, len(nodes))
189 for _, n := range nodes {
// Reject nodes with incomplete endpoint information up front.
190 if err := n.validateComplete(); err != nil {
191 return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
193 // Recompute cpy.sha because the node might not have been
194 // created by NewNode or ParseNode.
// NOTE(review): the declaration of cpy is elided in this listing.
196 cpy.sha = common.BytesToHash(n.ID[:])
197 nursery = append(nursery, &cpy)
// Hand the nursery to the loop, which starts a refresh with it.
199 net.reqRefresh(nursery)
203 // Resolve searches for a specific node with the given ID.
204 // It returns nil if the node could not be found.
205 func (net *Network) Resolve(targetID NodeID) *Node {
// stopOnMatch=true makes the lookup return as soon as the exact node is seen.
206 result := net.lookup(common.BytesToHash(targetID[:]), true)
207 for _, n := range result {
208 if n.ID == targetID {
215 // Lookup performs a network search for nodes close
216 // to the given target. It approaches the target by querying
217 // nodes that are closer to it on each iteration.
218 // The given target does not need to be an actual node
221 // The local node may be included in the result.
222 func (net *Network) Lookup(targetID NodeID) []*Node {
223 return net.lookup(common.BytesToHash(targetID[:]), false)
// lookup runs the iterative Kademlia-style search: repeatedly ask the
// alpha closest un-asked nodes for neighbours until no closer nodes are
// found. If stopOnMatch is set, returns early when a node whose sha
// equals the target is seen. Several lines are elided in this listing.
226 func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node {
228 asked = make(map[NodeID]bool)
229 seen = make(map[NodeID]bool)
230 reply = make(chan []*Node, alpha)
231 result = nodesByDistance{target: target}
234 // Get initial answers from the local node.
235 result.push(net.tab.self, bucketSize)
237 // Ask the α closest nodes that we haven't asked yet.
238 for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
239 n := result.entries[i]
243 net.reqQueryFindnode(n, target, reply)
246 if pendingQueries == 0 {
247 // We have asked all closest nodes, stop the search.
250 // Wait for the next reply.
252 case nodes := <-reply:
253 for _, n := range nodes {
254 if n != nil && !seen[n.ID] {
256 result.push(n, bucketSize)
257 if stopOnMatch && n.sha == target {
258 return result.entries
263 case <-time.After(respTimeout):
264 // forget all pending requests, start new ones
// A fresh reply channel orphans any late answers to the old queries.
266 reply = make(chan []*Node, alpha)
269 return result.entries
// RegisterTopic registers the topic with the loop until stop is closed;
// the {true,...}/{false,...} requests add and remove the registration.
272 func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) {
274 case net.topicRegisterReq <- topicRegisterReq{true, topic}:
282 case net.topicRegisterReq <- topicRegisterReq{false, topic}:
// SearchTopic forwards search-period updates from setPeriod to the loop.
// Found nodes are delivered on found; lookup receives radius-convergence
// status. A zero period (sent by the loop path) cancels the search.
288 func (net *Network) SearchTopic(topic Topic, setPeriod <-chan time.Duration, found chan<- *Node, lookup chan<- bool) {
293 case delay, ok := <-setPeriod:
295 case net.topicSearchReq <- topicSearchReq{topic: topic, found: found, lookup: lookup, delay: delay}:
// reqRefresh asks the loop to start a refresh with the given nursery and
// returns the channel that is closed when that refresh has finished.
306 func (net *Network) reqRefresh(nursery []*Node) <-chan struct{} {
308 case net.refreshReq <- nursery:
309 return <-net.refreshResp
// reqQueryFindnode submits a findnode query to the loop; replies (or nil
// on failure/timeout) arrive on the supplied reply channel.
315 func (net *Network) reqQueryFindnode(n *Node, target common.Hash, reply chan []*Node) bool {
316 q := &findnodeQuery{remote: n, target: target, reply: reply}
318 case net.queryReq <- q:
// reqReadPacket hands an ingress packet to the loop for processing.
325 func (net *Network) reqReadPacket(pkt ingressPacket) {
327 case net.read <- pkt:
// reqTableOp runs f on the loop goroutine (which owns the table) and
// reports whether it was actually executed.
332 func (net *Network) reqTableOp(f func()) (called bool) {
334 case net.tableOpReq <- f:
342 // TODO: external address handling.
// topicSearchInfo is the loop's per-topic search bookkeeping.
344 type topicSearchInfo struct {
345 lookupChn chan<- bool // receives radius-convergence status after each lookup
// maxSearchCount caps the number of concurrently running topic search lookups.
349 const maxSearchCount = 5
// loop is the Network's main event goroutine. It owns the table, the
// ticket store and all node state; everything it calls may mutate them
// without locking. It multiplexes ingress packets, timeouts, queries,
// table ops, topic registration/search, and periodic refresh.
// NOTE(review): many lines of this function are elided in this listing.
351 func (net *Network) loop() {
353 refreshTimer = time.NewTicker(autoRefreshInterval)
354 bucketRefreshTimer = time.NewTimer(bucketRefreshInterval)
355 refreshDone chan struct{} // closed when the 'refresh' lookup has ended
358 // Tracking the next ticket to register.
360 nextTicket *ticketRef
361 nextRegisterTimer *time.Timer
362 nextRegisterTime <-chan time.Time
365 if nextRegisterTimer != nil {
366 nextRegisterTimer.Stop()
369 bucketRefreshTimer.Stop()
// resetNextTicket re-arms the registration timer for the next filtered
// ticket from the ticket store, stopping any previously armed timer.
371 resetNextTicket := func() {
372 ticket, timeout := net.ticketStore.nextFilteredTicket()
373 if nextTicket != ticket {
375 if nextRegisterTimer != nil {
376 nextRegisterTimer.Stop()
377 nextRegisterTime = nil
380 nextRegisterTimer = time.NewTimer(timeout)
381 nextRegisterTime = nextRegisterTimer.C
386 // Tracking registration and search lookups.
388 topicRegisterLookupTarget lookupInfo
389 topicRegisterLookupDone chan []*Node
390 topicRegisterLookupTick = time.NewTimer(0)
391 searchReqWhenRefreshDone []topicSearchReq
392 searchInfo = make(map[Topic]topicSearchInfo)
393 activeSearchCount int
395 topicSearchLookupDone := make(chan topicSearchResult, 100)
396 topicSearch := make(chan Topic, 100)
// Drain the timer's initial (0-delay) tick so later Stop/Reset is safe.
397 <-topicRegisterLookupTick.C
399 statsDump := time.NewTicker(10 * time.Second)
400 defer statsDump.Stop()
408 log.WithFields(log.Fields{"module": logModule}).Debug("close request")
411 // Ingress packet handling.
412 case pkt := <-net.read:
413 log.WithFields(log.Fields{"module": logModule}).Debug("read from net")
414 n := net.internNode(&pkt)
417 if err := net.handle(n, pkt.ev, &pkt); err != nil {
420 log.WithFields(log.Fields{"module": logModule, "node num": net.tab.count, "event": pkt.ev, "remote id": hex.EncodeToString(pkt.remoteID[:8]), "remote addr": pkt.remoteAddr, "pre state": prestate, "node state": n.state, "status": status}).Debug("handle ingress msg")
422 // TODO: persist state if n.state goes >= known, delete if it goes <= known
424 // State transition timeouts.
425 case timeout := <-net.timeout:
426 log.WithFields(log.Fields{"module": logModule}).Debug("net timeout")
427 if net.timeoutTimers[timeout] == nil {
428 // Stale timer (was aborted).
431 delete(net.timeoutTimers, timeout)
432 prestate := timeout.node.state
434 if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
437 log.WithFields(log.Fields{"module": logModule, "node num": net.tab.count, "event": timeout.ev, "node id": hex.EncodeToString(timeout.node.ID[:8]), "node addr": timeout.node.addr(), "pre state": prestate, "node state": timeout.node.state, "status": status}).Debug("handle timeout")
440 case q := <-net.queryReq:
441 log.WithFields(log.Fields{"module": logModule}).Debug("net query request")
443 q.remote.deferQuery(q)
446 // Interacting with the table.
447 case f := <-net.tableOpReq:
448 log.WithFields(log.Fields{"module": logModule}).Debug("net table operate request")
450 net.tableOpResp <- struct{}{}
452 // Topic registration stuff.
453 case req := <-net.topicRegisterReq:
454 log.WithFields(log.Fields{"module": logModule, "topic": req.topic}).Debug("net topic register request")
456 net.ticketStore.removeRegisterTopic(req.topic)
459 net.ticketStore.addTopic(req.topic, true)
460 // If we're currently waiting idle (nothing to look up), give the ticket store a
461 // chance to start it sooner. This should speed up convergence of the radius
462 // determination for new topics.
463 // if topicRegisterLookupDone == nil {
464 if topicRegisterLookupTarget.target == (common.Hash{}) {
465 log.WithFields(log.Fields{"module": logModule, "topic": req.topic}).Debug("topic register lookup target null")
466 if topicRegisterLookupTick.Stop() {
467 <-topicRegisterLookupTick.C
469 target, delay := net.ticketStore.nextRegisterLookup()
470 topicRegisterLookupTarget = target
471 topicRegisterLookupTick.Reset(delay)
474 case nodes := <-topicRegisterLookupDone:
475 log.WithFields(log.Fields{"module": logModule}).Debug("topic register lookup done")
476 net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte {
477 net.ping(n, n.addr())
480 target, delay := net.ticketStore.nextRegisterLookup()
481 topicRegisterLookupTarget = target
482 topicRegisterLookupTick.Reset(delay)
483 topicRegisterLookupDone = nil
485 case <-topicRegisterLookupTick.C:
486 log.WithFields(log.Fields{"module": logModule}).Debug("topic register lookup tick")
487 if (topicRegisterLookupTarget.target == common.Hash{}) {
488 target, delay := net.ticketStore.nextRegisterLookup()
489 topicRegisterLookupTarget = target
490 topicRegisterLookupTick.Reset(delay)
491 topicRegisterLookupDone = nil
493 topicRegisterLookupDone = make(chan []*Node)
494 target := topicRegisterLookupTarget.target
495 go func() { topicRegisterLookupDone <- net.lookup(target, false) }()
498 case <-nextRegisterTime:
499 log.WithFields(log.Fields{"module": logModule}).Debug("next register time")
500 net.ticketStore.ticketRegistered(*nextTicket)
501 net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
503 case req := <-net.topicSearchReq:
// Searches are deferred while a refresh is running (see below).
504 if refreshDone == nil {
505 log.WithFields(log.Fields{"module": logModule, "topic": req.topic}).Debug("net topic rearch req")
506 info, ok := searchInfo[req.topic]
508 if req.delay == time.Duration(0) {
509 delete(searchInfo, req.topic)
510 net.ticketStore.removeSearchTopic(req.topic)
512 info.period = req.delay
513 searchInfo[req.topic] = info
517 if req.delay != time.Duration(0) {
518 var info topicSearchInfo
519 info.period = req.delay
520 info.lookupChn = req.lookup
521 searchInfo[req.topic] = info
522 net.ticketStore.addSearchTopic(req.topic, req.found)
523 topicSearch <- req.topic
526 searchReqWhenRefreshDone = append(searchReqWhenRefreshDone, req)
529 case topic := <-topicSearch:
530 if activeSearchCount < maxSearchCount {
532 target := net.ticketStore.nextSearchLookup(topic)
534 nodes := net.lookup(target.target, false)
535 topicSearchLookupDone <- topicSearchResult{target: target, nodes: nodes}
538 period := searchInfo[topic].period
539 if period != time.Duration(0) {
546 case res := <-topicSearchLookupDone:
548 if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
549 lookupChn <- net.ticketStore.radius[res.target.topic].converged
551 net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
552 if n.state != nil && n.state.canQuery {
553 return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
555 if n.state == unknown {
556 net.ping(n, n.addr())
563 log.WithFields(log.Fields{"module": logModule}).Debug("stats dump clock")
564 /*r, ok := net.ticketStore.radius[testTopic]
566 fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now())
568 topics := len(net.ticketStore.tickets)
569 tickets := len(net.ticketStore.nodes)
570 rad := r.radius / (maxRadius/10000+1)
571 fmt.Printf("(%x) topics:%d radius:%d tickets:%d @ %v\n", net.tab.self.ID[:8], topics, rad, tickets, time.Now())
575 for topic, r := range net.ticketStore.radius {
576 if printTestImgLogs {
577 rad := r.radius / (maxRadius/1000000 + 1)
578 minrad := r.minRadius / (maxRadius/1000000 + 1)
579 log.WithFields(log.Fields{"module": logModule}).Debugf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], rad)
580 log.WithFields(log.Fields{"module": logModule}).Debugf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], minrad)
583 for topic, t := range net.topictab.topics {
584 wp := t.wcl.nextWaitPeriod(tm)
585 if printTestImgLogs {
586 log.WithFields(log.Fields{"module": logModule}).Debugf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000)
590 // Periodic / lookup-initiated bucket refresh.
591 case <-refreshTimer.C:
592 log.WithFields(log.Fields{"module": logModule}).Debug("refresh timer clock")
593 // TODO: ideally we would start the refresh timer after
594 // fallback nodes have been set for the first time.
595 if refreshDone == nil {
596 refreshDone = make(chan struct{})
597 net.refresh(refreshDone)
599 case <-bucketRefreshTimer.C:
600 target := net.tab.chooseBucketRefreshTarget()
602 net.lookup(target, false)
603 bucketRefreshTimer.Reset(bucketRefreshInterval)
605 case newNursery := <-net.refreshReq:
606 log.WithFields(log.Fields{"module": logModule}).Debug("net refresh request")
607 if newNursery != nil {
608 net.nursery = newNursery
610 if refreshDone == nil {
611 refreshDone = make(chan struct{})
612 net.refresh(refreshDone)
// Reply with the done channel so reqRefresh callers can block on it.
614 net.refreshResp <- refreshDone
616 log.WithFields(log.Fields{"module": logModule, "table size": net.tab.count}).Debug("net refresh done")
617 if net.tab.count != 0 {
// Refresh succeeded: replay the topic-search requests deferred above.
619 list := searchReqWhenRefreshDone
620 searchReqWhenRefreshDone = nil
622 for _, req := range list {
623 net.topicSearchReq <- req
627 refreshDone = make(chan struct{})
628 net.refresh(refreshDone)
632 log.WithFields(log.Fields{"module": logModule}).Debug("loop stopped,shutting down")
636 if refreshDone != nil {
637 // TODO: wait for pending refresh.
640 // Cancel all pending timeouts.
641 for _, timer := range net.timeoutTimers {
650 // Everything below runs on the Network.loop goroutine
651 // and can modify Node, Table and Network at any time without locking.
// refresh seeds the table from the node DB (falling back as elided here),
// kicks verification of unknown seeds, and starts a self-lookup to fill
// buckets. done is closed when the refresh has finished; if no seeds are
// found it is closed after a 10s delay instead.
653 func (net *Network) refresh(done chan<- struct{}) {
656 seeds = net.db.querySeeds(seedCount, seedMaxAge)
662 log.WithFields(log.Fields{"module": logModule}).Debug("no seed nodes found")
663 time.AfterFunc(time.Second*10, func() { close(done) })
666 for _, n := range seeds {
667 n = net.internNodeFromDB(n)
668 if n.state == unknown {
669 net.transition(n, verifyinit)
671 // Force-add the seed node so Lookup does something.
672 // It will be deleted again if verification fails.
675 // Start self lookup to fill up the buckets.
677 net.Lookup(net.tab.self.ID)
// internNode returns the tracked node for an ingress packet's sender,
// updating its endpoint from the packet, or creates and registers a new
// one. NOTE(review): TCP is set to the sender's UDP port here — presumably
// a placeholder until the real TCP port is learned; confirm against callers.
684 func (net *Network) internNode(pkt *ingressPacket) *Node {
685 if n := net.nodes[pkt.remoteID]; n != nil {
686 n.IP = pkt.remoteAddr.IP
687 n.UDP = uint16(pkt.remoteAddr.Port)
688 n.TCP = uint16(pkt.remoteAddr.Port)
691 n := NewNode(pkt.remoteID, pkt.remoteAddr.IP, uint16(pkt.remoteAddr.Port), uint16(pkt.remoteAddr.Port))
693 net.nodes[pkt.remoteID] = n
// internNodeFromDB returns the tracked node matching a DB entry, or
// creates one from the DB record's endpoint.
697 func (net *Network) internNodeFromDB(dbn *Node) *Node {
698 if n := net.nodes[dbn.ID]; n != nil {
701 n := NewNode(dbn.ID, dbn.IP, dbn.UDP, dbn.TCP)
// internNodeFromNeighbours validates and interns a node received in a
// neighbours packet: rejects self, low ports, and (for new nodes) IPs
// outside the netrestrict whitelist; for already-known nodes it rejects
// endpoint changes. Several lines are elided in this listing.
707 func (net *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn rpcNode) (n *Node, err error) {
708 if rn.ID == net.tab.self.ID {
709 return nil, errors.New("is self")
711 if rn.UDP <= lowPort {
712 return nil, errors.New("low port")
716 // We haven't seen this node before.
717 n, err = nodeFromRPC(sender, rn)
718 if net.netrestrict != nil && !net.netrestrict.Contains(n.IP) {
719 return n, errors.New("not contained in netrestrict whitelist")
727 if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
728 if n.state == known {
729 // reject address change if node is known by us
730 err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
732 // accept otherwise; this will be handled nicer with signed ENRs
741 // nodeNetGuts is embedded in Node and contains fields.
742 type nodeNetGuts struct {
743 // This is a cached copy of sha3(ID) which is used for node
744 // distance calculations. This is part of Node in order to make it
745 // possible to write tests that need a node at a certain distance.
746 // In those tests, the content of sha will not actually correspond
750 // State machine fields. Access to these fields
751 // is restricted to the Network.loop goroutine.
753 pingEcho []byte // hash of last ping sent by us
754 pingTopics []Topic // topic set sent by us in last ping
755 deferredQueries []*findnodeQuery // queries that can't be sent yet
756 pendingNeighbours *findnodeQuery // current query, waiting for reply
// deferQuery queues a findnode query to be sent once the node can accept it.
760 func (n *nodeNetGuts) deferQuery(q *findnodeQuery) {
761 n.deferredQueries = append(n.deferredQueries, q)
// startNextQuery attempts the oldest deferred query and, if it was
// actually started, removes it from the queue (shifting in place).
764 func (n *nodeNetGuts) startNextQuery(net *Network) {
765 if len(n.deferredQueries) == 0 {
768 nextq := n.deferredQueries[0]
769 if nextq.start(net) {
770 n.deferredQueries = append(n.deferredQueries[:0], n.deferredQueries[1:]...)
// start tries to issue the query: answers self-queries from the local
// table, sends findnode (with a timeout event) when the remote can be
// queried and has no query in flight, and otherwise begins verification
// of unknown nodes so the query can be sent later.
774 func (q *findnodeQuery) start(net *Network) bool {
775 // Satisfy queries against the local node directly.
776 if q.remote == net.tab.self {
777 log.WithFields(log.Fields{"module": logModule}).Debug("findnodeQuery self")
778 closest := net.tab.closest(common.BytesToHash(q.target[:]), bucketSize)
780 q.reply <- closest.entries
783 if q.remote.state.canQuery && q.remote.pendingNeighbours == nil {
784 log.WithFields(log.Fields{"module": logModule, "remote peer": q.remote.ID, "targetID": q.target}).Debug("find node query")
785 net.conn.sendFindnodeHash(q.remote, q.target)
786 net.timedEvent(respTimeout, q.remote, neighboursTimeout)
787 q.remote.pendingNeighbours = q
790 // If the node is not known yet, it won't accept queries.
791 // Initiate the transition to known.
792 // The request will be sent later when the node reaches known state.
793 if q.remote.state == unknown {
794 log.WithFields(log.Fields{"module": logModule, "id": q.remote.ID, "status": "unknown->verify init"}).Debug("find node query")
795 net.transition(q.remote, verifyinit)
800 // Node Events (the input to the state machine).
804 //go:generate stringer -type=nodeEvent
807 invalidEvent nodeEvent = iota // zero is reserved
809 // Packet type events.
810 // These correspond to packet types in the UDP protocol.
820 // Non-packet events.
821 // Event values in this category are allocated outside
822 // the packet type range (packet types are encoded as a single byte).
823 pongTimeout nodeEvent = iota + 256
828 // Node State Machine.
// nodeState describes one state: handle processes events/packets and
// returns the next state; enter (optional) runs on transition into it.
830 type nodeState struct {
832 handle func(*Network, *Node, nodeEvent, *ingressPacket) (next *nodeState, err error)
833 enter func(*Network, *Node)
837 func (s *nodeState) String() string {
// Forward declarations of the states, assigned below.
843 verifyinit *nodeState
844 verifywait *nodeState
845 remoteverifywait *nodeState
848 unresponsive *nodeState
// State definitions. NOTE(review): several states' headers and the
// closing/return lines are elided in this listing.
// unknown: nothing verified yet; entering it aborts deferred queries and
// any pending neighbours request by replying nil.
852 unknown = &nodeState{
854 enter: func(net *Network, n *Node) {
857 // Abort active queries.
858 for _, q := range n.deferredQueries {
861 n.deferredQueries = nil
862 if n.pendingNeighbours != nil {
863 n.pendingNeighbours.reply <- nil
864 n.pendingNeighbours = nil
868 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
871 net.handlePing(n, pkt)
872 net.ping(n, pkt.remoteAddr)
873 return verifywait, nil
875 return unknown, errInvalidEvent
// verifyinit: we initiated verification by pinging the node on entry.
880 verifyinit = &nodeState{
882 enter: func(net *Network, n *Node) {
883 net.ping(n, n.addr())
885 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
888 net.handlePing(n, pkt)
889 return verifywait, nil
891 err := net.handleKnownPong(n, pkt)
892 return remoteverifywait, err
896 return verifyinit, errInvalidEvent
// verifywait: waiting for the remote's pong to our ping.
901 verifywait = &nodeState{
903 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
906 net.handlePing(n, pkt)
907 return verifywait, nil
909 err := net.handleKnownPong(n, pkt)
914 return verifywait, errInvalidEvent
// remoteverifywait: we are verified; give the remote time to verify us
// (a pingTimeout event is armed on entry).
919 remoteverifywait = &nodeState{
920 name: "remoteverifywait",
921 enter: func(net *Network, n *Node) {
922 net.timedEvent(respTimeout, n, pingTimeout)
924 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
927 net.handlePing(n, pkt)
928 return remoteverifywait, nil
932 return remoteverifywait, errInvalidEvent
// (known state; header elided.) On entry: flush deferred queries, add to
// the table, and contest the evicted candidate if the bucket was full.
940 enter: func(net *Network, n *Node) {
942 n.startNextQuery(net)
943 // Insert into the table and start revalidation of the last node
944 // in the bucket if it is full.
945 last := net.tab.add(n)
946 if last != nil && last.state == known {
947 // TODO: do this asynchronously
948 net.transition(last, contested)
951 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
952 if err := net.db.updateNode(n); err != nil {
958 net.handlePing(n, pkt)
961 err := net.handleKnownPong(n, pkt)
964 return net.handleQueryEvent(n, ev, pkt)
// contested: pinged to decide whether it keeps its bucket slot; a pong
// keeps it, a timeout (elided case) replaces it and marks it unresponsive.
969 contested = &nodeState{
972 enter: func(net *Network, n *Node) {
974 net.ping(n, n.addr())
976 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
979 // Node is still alive.
980 err := net.handleKnownPong(n, pkt)
983 net.tab.deleteReplace(n)
984 return unresponsive, nil
986 net.handlePing(n, pkt)
987 return contested, nil
989 return net.handleQueryEvent(n, ev, pkt)
// unresponsive: failed revalidation; its DB record is deleted on events.
994 unresponsive = &nodeState{
995 name: "unresponsive",
997 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
998 net.db.deleteNode(n.ID)
1002 net.handlePing(n, pkt)
1005 err := net.handleKnownPong(n, pkt)
1008 return net.handleQueryEvent(n, ev, pkt)
1014 // handle processes packets sent by n and events related to n.
// It validates the packet, dispatches to the current state's handle
// function, and applies the resulting transition.
1015 func (net *Network) handle(n *Node, ev nodeEvent, pkt *ingressPacket) error {
1016 //fmt.Println("handle", n.addr().String(), n.state, ev)
1018 if err := net.checkPacket(n, ev, pkt); err != nil {
1019 //fmt.Println("check err:", err)
1022 // Start the background expiration goroutine after the first
1023 // successful communication. Subsequent calls have no effect if it
1024 // is already running. We do this here instead of somewhere else
1025 // so that the search for seed nodes also considers older nodes
1026 // that would otherwise be removed by the expirer.
1028 net.db.ensureExpirer()
// Default nil state to unknown so the dispatch below is safe.
1032 n.state = unknown //???
1034 next, err := n.state.handle(net, n, ev, pkt)
1035 net.transition(n, next)
1036 //fmt.Println("new state:", n.state)
// checkPacket performs replay-prevention checks before state dispatch:
// pong packets must echo the hash of our last ping (pingEcho).
1040 func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error {
1041 // Replay prevention checks.
1043 case pingPacket, findnodeHashPacket, neighborsPacket:
1044 // TODO: check date is > last date seen
1045 // TODO: check ping version
1047 if !bytes.Equal(pkt.data.(*pong).ReplyTok, n.pingEcho) {
1048 // fmt.Println("pong reply token mismatch")
1049 return fmt.Errorf("pong reply token mismatch")
1053 // Address validation.
1054 // TODO: Ideally we would do the following:
1055 // - reject all packets with wrong address except ping.
1056 // - for ping with new address, transition to verifywait but keep the
1057 // previous node (with old address) around. if the new one reaches known,
// transition moves n to the next state, running the state's enter hook
// (if any) only when the state actually changes.
1062 func (net *Network) transition(n *Node, next *nodeState) {
1063 if n.state != next {
1065 if next.enter != nil {
1070 // TODO: persist/unpersist node
// timedEvent arms a timer that delivers ev for n on net.timeout after d,
// registering it in timeoutTimers so abortTimedEvent can cancel it.
1073 func (net *Network) timedEvent(d time.Duration, n *Node, ev nodeEvent) {
1074 timeout := timeoutEvent{ev, n}
1075 net.timeoutTimers[timeout] = time.AfterFunc(d, func() {
1077 case net.timeout <- timeout:
// abortTimedEvent stops and unregisters the pending timer for (ev, n).
1083 func (net *Network) abortTimedEvent(n *Node, ev nodeEvent) {
1084 timer := net.timeoutTimers[timeoutEvent{ev, n}]
1087 delete(net.timeoutTimers, timeoutEvent{ev, n})
// ping sends a ping to n (skipping self and nodes with a ping already in
// flight, i.e. pingEcho != nil), records the echo hash and topic set for
// pong matching, and arms a pongTimeout event.
1091 func (net *Network) ping(n *Node, addr *net.UDPAddr) {
1092 //fmt.Println("ping", n.addr().String(), n.ID.String(), n.sha.Hex())
1093 if n.pingEcho != nil || n.ID == net.tab.self.ID {
1094 //fmt.Println(" not sent")
1097 log.WithFields(log.Fields{"module": logModule, "node": n.ID}).Debug("Pinging remote node")
1098 n.pingTopics = net.ticketStore.regTopicSet()
1099 n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics)
1100 net.timedEvent(respTimeout, n, pongTimeout)
// handlePing answers a remote ping: updates the node's TCP port from the
// ping, obtains a topic ticket, and replies with a pong carrying it.
1103 func (net *Network) handlePing(n *Node, pkt *ingressPacket) {
1104 log.WithFields(log.Fields{"module": logModule, "node": n.ID}).Debug("Handling remote ping")
1105 ping := pkt.data.(*ping)
1106 n.TCP = ping.From.TCP
1107 t := net.topictab.getTicket(n, ping.Topics)
1110 To: makeEndpoint(n.addr(), n.TCP), // TODO: maybe use known TCP port from DB
1112 Expiration: uint64(time.Now().Add(expiration).Unix()),
1114 ticketToPong(t, pong)
1115 net.conn.send(n, pongPacket, pong)
// handleKnownPong processes a pong matching our outstanding ping: cancels
// the pongTimeout, converts the pong into a topic ticket (stored in the
// ticket store on success), and records the pong time in the DB.
1118 func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error {
1119 log.WithFields(log.Fields{"module": logModule, "node": n.ID}).Debug("Handling known pong")
1120 net.abortTimedEvent(n, pongTimeout)
1122 ticket, err := pongToTicket(now, n.pingTopics, n, pkt)
1124 // fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data)
1125 net.ticketStore.addTicket(now, pkt.data.(*pong).ReplyTok, ticket)
1127 log.WithFields(log.Fields{"module": logModule, "error": err}).Debug("Failed to convert pong to ticket")
1131 net.db.updateLastPong(n.ID, time.Now())
// handleQueryEvent processes query-related packets and timeouts for a
// node that accepts queries, returning the node's next state.
// NOTE(review): several case bodies are partially elided in this listing.
1135 func (net *Network) handleQueryEvent(n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
1137 case findnodePacket:
1138 target := common.BytesToHash(pkt.data.(*findnode).Target[:])
1139 results := net.tab.closest(target, bucketSize).entries
1140 net.conn.sendNeighbours(n, results)
1142 case neighborsPacket:
1143 err := net.handleNeighboursPacket(n, pkt)
1145 case neighboursTimeout:
// Give up on the in-flight query; nil tells the waiting lookup it failed.
1146 if n.pendingNeighbours != nil {
1147 n.pendingNeighbours.reply <- nil
1148 n.pendingNeighbours = nil
1151 if n.queryTimeouts > maxFindnodeFailures && n.state == known {
1152 return contested, errors.New("too many timeouts")
1158 case findnodeHashPacket:
1159 results := net.tab.closest(pkt.data.(*findnodeHash).Target, bucketSize).entries
1160 net.conn.sendNeighbours(n, results)
1162 case topicRegisterPacket:
1163 //fmt.Println("got topicRegisterPacket")
1164 regdata := pkt.data.(*topicRegister)
1165 pong, err := net.checkTopicRegister(regdata)
1168 return n.state, fmt.Errorf("bad waiting ticket: %v", err)
1170 net.topictab.useTicket(n, pong.TicketSerial, regdata.Topics, int(regdata.Idx), pong.Expiration, pong.WaitPeriods)
1172 case topicQueryPacket:
1173 // TODO: handle expiration
1174 topic := pkt.data.(*topicQuery).Topic
1175 results := net.topictab.getEntries(topic)
1176 if _, ok := net.ticketStore.tickets[topic]; ok {
1177 results = append(results, net.tab.self) // we're not registering in our own table but if we're advertising, return ourselves too
1179 if len(results) > 10 {
1180 results = results[:10]
1182 var hash common.Hash
1183 copy(hash[:], pkt.hash)
1184 net.conn.sendTopicNodes(n, hash, results)
1186 case topicNodesPacket:
1187 p := pkt.data.(*topicNodes)
1188 if net.ticketStore.gotTopicNodes(n, p.Echo, p.Nodes) {
1190 if n.queryTimeouts > maxFindnodeFailures && n.state == known {
1191 return contested, errors.New("too many timeouts")
1197 return n.state, errInvalidEvent
// checkTopicRegister validates a topic-register request: the embedded
// pong must decode, be a pong packet signed by us, match the topic hash
// we authorised, and carry an in-range topic index.
1201 func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
1202 var pongpkt ingressPacket
1203 if err := decodePacket(data.Pong, &pongpkt); err != nil {
1206 if pongpkt.ev != pongPacket {
1207 return nil, errors.New("is not pong packet")
1209 if pongpkt.remoteID != net.tab.self.ID {
1210 return nil, errors.New("not signed by us")
1212 // check that we previously authorised all topics
1213 // that the other side is trying to register.
1214 hash, _, _ := wireHash(data.Topics)
1215 if hash != pongpkt.data.(*pong).TopicHash {
1216 return nil, errors.New("topic hash mismatch")
1218 if int(data.Idx) < 0 || int(data.Idx) >= len(data.Topics) {
1219 return nil, errors.New("topic index out of range")
1221 return pongpkt.data.(*pong), nil
// wireHash serialises x with go-wire into a hash writer (elided here) and
// returns the resulting hash, the byte count, and any encoding error.
1224 func wireHash(x interface{}) (h common.Hash, n int, err error) {
1226 wire.WriteBinary(x, hw, &n, &err)
// handleNeighboursPacket delivers a neighbours reply to the pending
// findnode query: interns each returned node (starting verification of
// unknown ones), replies to the waiting lookup, and starts the node's
// next deferred query.
1231 func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error {
1232 if n.pendingNeighbours == nil {
1235 net.abortTimedEvent(n, neighboursTimeout)
1237 req := pkt.data.(*neighbors)
1238 nodes := make([]*Node, len(req.Nodes))
1239 for i, rn := range req.Nodes {
1240 nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn)
1242 log.WithFields(log.Fields{"module": logModule, "ip": rn.IP, "id:": n.ID[:8], "addr:": pkt.remoteAddr, "error": err}).Debug("invalid neighbour")
1246 // Start validation of query results immediately.
1247 // This fills the table quickly.
1248 // TODO: generates way too many packets, maybe do it via queue.
1249 if nn.state == unknown {
1250 net.transition(nn, verifyinit)
1253 // TODO: don't ignore second packet
1254 n.pendingNeighbours.reply <- nodes
1255 n.pendingNeighbours = nil
1256 // Now that this query is done, start the next one.
1257 n.startNextQuery(net)