1 // Modified for Tendermint
2 // Originally Copyright (c) 2013-2014 Conformal Systems LLC.
3 // https://github.com/conformal/btcd/blob/master/LICENSE
17 log "github.com/sirupsen/logrus"
18 crypto "github.com/tendermint/go-crypto"
19 cmn "github.com/tendermint/tmlibs/common"
// NOTE(review): entries of the package-level const block; the enclosing
// `const (` / `)` lines and several entries are elided in this listing.
23 // addresses under which the address manager will claim to need more addresses.
24 needAddressThreshold = 1000
26 // interval used to dump the address cache to disk for future use.
27 dumpAddressInterval = time.Minute * 2
29 // max addresses in each old address bucket.
32 // buckets we split old addresses over.
35 // max addresses in each new address bucket.
38 // buckets that we spread new addresses over.
41 // old buckets over which an address group will be spread.
42 oldBucketsPerGroup = 4
44 // new buckets over which a source address group will be spread.
45 newBucketsPerGroup = 32
47 // buckets a frequently seen new address may end up in.
48 maxNewBucketsPerAddress = 4
50 // days before which we assume an address has vanished
51 // if we have not seen it announced in that long.
54 // tries without a single success before we assume an address is bad.
57 // max failures we will accept without a success before considering an address bad.
60 // days since the last success before we will consider evicting an address.
63 // % of total addresses known returned by GetSelection.
64 getSelectionPercent = 23
66 // min addresses that must be returned by GetSelection. Useful for bootstrapping.
69 // max addresses returned by GetSelection
70 // NOTE: this must match "maxPexMessageSize"
73 // current version of the on-disk format.
74 serializationVersion = 1
82 // AddrBook - concurrency safe peer address manager.
83 type AddrBook struct {
// When true, non-routable addresses are rejected (see addAddress/groupKey).
88 routabilityStrict bool
// Our own listener addresses; AddAddress ignores anything found here.
91 ourAddrs map[string]*NetAddress
92 addrLookup map[string]*knownAddress // new & old
// Bucketed views of the addresses: "new" (unproven) and "old" (marked good).
93 addrNew []map[string]*knownAddress
94 addrOld []map[string]*knownAddress
100 // NewAddrBook creates a new address book.
101 // Use Start to begin processing asynchronous address updates.
// filePath is where the book is persisted; routabilityStrict enables
// routability filtering of added addresses.
102 func NewAddrBook(filePath string, routabilityStrict bool) *AddrBook {
// Dedicated PRNG seeded from wall clock; not crypto-grade, used only for
// bucket/peer selection.
104 rand: rand.New(rand.NewSource(time.Now().UnixNano())),
105 ourAddrs: make(map[string]*NetAddress),
106 addrLookup: make(map[string]*knownAddress),
108 routabilityStrict: routabilityStrict,
// Wire up the lifecycle (OnStart/OnStop) via the common BaseService.
111 am.BaseService = *cmn.NewBaseService(nil, "AddrBook", am)
// init allocates the key and the empty new/old bucket maps.
115 // When modifying this, don't forget to update loadFromFile()
116 func (a *AddrBook) init() {
// Random hex key used to salt the bucket-placement hashes (calcNewBucket /
// calcOldBucket) so bucket assignment differs per node.
117 a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits
119 a.addrNew = make([]map[string]*knownAddress, newBucketCount)
120 for i := range a.addrNew {
121 a.addrNew[i] = make(map[string]*knownAddress)
124 a.addrOld = make([]map[string]*knownAddress, oldBucketCount)
125 for i := range a.addrOld {
126 a.addrOld[i] = make(map[string]*knownAddress)
130 // OnStart implements Service. It loads the persisted book from disk.
131 func (a *AddrBook) OnStart() error {
// NOTE(review): both return values below are discarded; loadFromFile
// reports false when the file is absent — confirm the elided lines
// return an error where appropriate.
132 a.BaseService.OnStart()
133 a.loadFromFile(a.filePath)
139 // OnStop implements Service.
140 func (a *AddrBook) OnStop() {
141 a.BaseService.OnStop()
// Wait blocks until the background save routine has finished
// (body elided in this listing — presumably waits on a WaitGroup; verify).
144 func (a *AddrBook) Wait() {
// AddOurAddress registers one of our own listener addresses so that
// addAddress can ignore it when peers gossip it back to us.
148 func (a *AddrBook) AddOurAddress(addr *NetAddress) {
151 log.WithField("addr", addr).Info("Add our address to book")
153 a.ourAddrs[addr.String()] = addr
// OurAddresses returns a snapshot slice of our registered listener addresses.
156 func (a *AddrBook) OurAddresses() []*NetAddress {
157 addrs := []*NetAddress{}
161 for _, addr := range a.ourAddrs {
162 addrs = append(addrs, addr)
// AddAddress records addr as learned from src. Thin locking/logging wrapper
// around addAddress.
167 // NOTE: addr must not be nil
168 func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) {
171 log.WithFields(log.Fields{
174 }).Debug("Add address to book")
175 a.addAddress(addr, src)
// NeedMoreAddrs reports whether the book holds fewer addresses than the
// needAddressThreshold (1000) and should solicit more from peers.
178 func (a *AddrBook) NeedMoreAddrs() bool {
179 return a.Size() < needAddressThreshold
// Size returns the total number of known addresses (locking wrapper
// around size(); body elided in this listing).
182 func (a *AddrBook) Size() int {
// size returns nNew + nOld; caller must hold the lock.
188 func (a *AddrBook) size() int {
189 return a.nNew + a.nOld
192 // Pick an address to connect to with new/old bias.
// newBias is the percentage (0-100) bias toward new addresses; the sqrt
// weighting dampens the effect of one side vastly outnumbering the other.
193 func (a *AddrBook) PickAddress(newBias int) *NetAddress {
207 // Bias between new and old addresses.
208 oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
209 newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)
// Randomly choose the old or new side in proportion to the correlations.
211 if (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation {
212 // pick random Old bucket.
213 var bucket map[string]*knownAddress = nil
// Retry up to oldBucketCount times to land on a non-empty bucket.
215 for len(bucket) == 0 && num < oldBucketCount {
216 bucket = a.addrOld[a.rand.Intn(len(a.addrOld))]
219 if num == oldBucketCount {
222 // pick a random ka from bucket.
// Map iteration order is random in Go, but randIndex is used for an
// unbiased uniform pick regardless.
223 randIndex := a.rand.Intn(len(bucket))
224 for _, ka := range bucket {
// Unreachable: the loop above always returns for a non-empty bucket.
230 cmn.PanicSanity("Should not happen")
232 // pick random New bucket.
233 var bucket map[string]*knownAddress = nil
235 for len(bucket) == 0 && num < newBucketCount {
236 bucket = a.addrNew[a.rand.Intn(len(a.addrNew))]
239 if num == newBucketCount {
242 // pick a random ka from bucket.
243 randIndex := a.rand.Intn(len(bucket))
244 for _, ka := range bucket {
250 cmn.PanicSanity("Should not happen")
// MarkGood marks a successful connection to addr; the elided body
// presumably promotes the entry toward the old buckets (see moveToOld).
255 func (a *AddrBook) MarkGood(addr *NetAddress) {
258 ka := a.addrLookup[addr.String()]
// MarkAttempt records a (possibly failed) connection attempt to addr.
268 func (a *AddrBook) MarkAttempt(addr *NetAddress) {
271 ka := a.addrLookup[addr.String()]
278 // MarkBad currently just ejects the address. In the future, consider
// tracking bad peers separately instead of removing them outright.
280 func (a *AddrBook) MarkBad(addr *NetAddress) {
281 a.RemoveAddress(addr)
284 // RemoveAddress removes the address from the book.
// No-op if the address is unknown (nil lookup is guarded by elided lines).
285 func (a *AddrBook) RemoveAddress(addr *NetAddress) {
288 ka := a.addrLookup[addr.String()]
292 log.WithField("addr", addr).Info("Remove address from book")
293 a.removeFromAllBuckets(ka)
298 // GetSelection randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
// Returns max(minGetSelection, 23% of known), capped at maxGetSelection.
299 func (a *AddrBook) GetSelection() []*NetAddress {
307 allAddr := make([]*NetAddress, a.size())
309 for _, v := range a.addrLookup {
314 numAddresses := cmn.MaxInt(
315 cmn.MinInt(minGetSelection, len(allAddr)),
316 len(allAddr)*getSelectionPercent/100)
317 numAddresses = cmn.MinInt(maxGetSelection, numAddresses)
319 // Fisher-Yates shuffle the array. We only need to do the first
320 // `numAddresses' since we are throwing the rest.
321 for i := 0; i < numAddresses; i++ {
322 // pick a number between current index and the end
// NOTE(review): uses the package-level math/rand source here, whereas the
// rest of the book uses a.rand — inconsistent but harmless; confirm intent.
323 j := rand.Intn(len(allAddr)-i) + i
324 allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
327 // slice off the limit we are willing to share.
328 return allAddr[:numAddresses]
331 /* Loading & Saving */
// addrBookJSON is the on-disk representation (see serializationVersion).
333 type addrBookJSON struct {
// Flat list of all known addresses; bucket membership is stored on each
// knownAddress (Buckets/BucketType) and rebuilt by loadFromFile.
335 Addrs []*knownAddress
// saveToFile serializes the book to filePath as indented JSON.
// Errors are logged, not returned — persistence is best-effort.
338 func (a *AddrBook) saveToFile(filePath string) {
339 log.WithField("size", a.Size()).Info("Saving AddrBook to file")
344 addrs := []*knownAddress{}
345 for _, ka := range a.addrLookup {
346 addrs = append(addrs, ka)
349 aJSON := &addrBookJSON{
354 jsonBytes, err := json.MarshalIndent(aJSON, "", "\t")
356 log.WithField("err", err).Error("Failed to save AddrBook to file")
// Atomic write avoids a torn file if we crash mid-save.
359 err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644)
361 log.WithFields(log.Fields{
364 }).Error("Failed to save AddrBook to file")
// loadFromFile restores the book (lookup table, buckets, counters) from
// the JSON written by saveToFile.
368 // Returns false if file does not exist.
369 // cmn.Panics if file is corrupt.
370 func (a *AddrBook) loadFromFile(filePath string) bool {
371 // If doesn't exist, do nothing.
372 _, err := os.Stat(filePath)
373 if os.IsNotExist(err) {
377 // Load addrBookJSON{}
378 r, err := os.Open(filePath)
380 cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err))
// NOTE(review): r should be closed after decoding — confirm the elided
// lines contain a Close/defer.
383 aJSON := &addrBookJSON{}
384 dec := json.NewDecoder(r)
385 err = dec.Decode(aJSON)
387 cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err))
390 // Restore all the fields...
393 // Restore .addrNew & .addrOld
394 for _, ka := range aJSON.Addrs {
// Re-insert into every bucket the address belonged to when saved.
395 for _, bucketIndex := range ka.Buckets {
396 bucket := a.getBucket(ka.BucketType, bucketIndex)
397 bucket[ka.Addr.String()] = ka
399 a.addrLookup[ka.Addr.String()] = ka
400 if ka.BucketType == bucketTypeNew {
409 // Save saves the book.
410 func (a *AddrBook) Save() {
411 log.WithField("size", a.Size()).Info("Saving AddrBook to file")
412 a.saveToFile(a.filePath)
415 /* Private methods */
// saveRoutine periodically (every dumpAddressInterval) flushes the book to
// disk, and performs a final save before exiting.
417 func (a *AddrBook) saveRoutine() {
418 dumpAddressTicker := time.NewTicker(dumpAddressInterval)
422 case <-dumpAddressTicker.C:
423 a.saveToFile(a.filePath)
// Shutdown path: stop the ticker so it can be GC'd, then do one last save.
428 dumpAddressTicker.Stop()
429 a.saveToFile(a.filePath)
431 log.Info("Address handler done")
// getBucket returns the bucket map for (bucketType, bucketIdx).
// Panics on an unknown bucket type.
434 func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
437 return a.addrNew[bucketIdx]
439 return a.addrOld[bucketIdx]
441 cmn.PanicSanity("Should not happen")
446 // Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full.
447 // NOTE: currently it always returns true.
448 func (a *AddrBook) addToNewBucket(ka *knownAddress, bucketIdx int) bool {
// Sanity: an address already promoted to old must never re-enter new buckets.
451 log.Error(cmn.Fmt("Cannot add address already in old bucket to a new bucket: %v", ka))
455 addrStr := ka.Addr.String()
456 bucket := a.getBucket(bucketTypeNew, bucketIdx)
// Idempotent: already present in this bucket.
459 if _, ok := bucket[addrStr]; ok {
463 // Enforce max addresses.
464 if len(bucket) > newBucketSize {
465 log.Info("new bucket is full, expiring old ")
466 a.expireNew(bucketIdx)
// First bucket reference for this ka -> it newly counts toward nNew.
471 if ka.addBucketRef(bucketIdx) == 1 {
475 // Ensure in addrLookup
476 a.addrLookup[addrStr] = ka
481 // Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full.
// Preconditions (enforced below): ka must already be of bucketTypeOld and
// must not currently reference any bucket (old addresses live in exactly one
// old bucket).
482 func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
485 log.Error(cmn.Fmt("Cannot add new address to old bucket: %v", ka))
488 if len(ka.Buckets) != 0 {
489 log.Error(cmn.Fmt("Cannot add already old address to another old bucket: %v", ka))
493 addrStr := ka.Addr.String()
// BUGFIX: was getBucket(bucketTypeNew, bucketIdx). This function only ever
// handles old addresses (see the sanity checks above), so it must operate on
// the old bucket set — otherwise the entry is stored in, and capacity-checked
// against, addrNew while removeFromBucket/removeFromAllBuckets later look it
// up under bucketTypeOld, leaking the entry and corrupting nOld accounting.
494 bucket := a.getBucket(bucketTypeOld, bucketIdx)
// Idempotent: already present in this bucket.
497 if _, ok := bucket[addrStr]; ok {
501 // Enforce max addresses.
502 if len(bucket) > oldBucketSize {
// First bucket reference for this ka -> it newly counts toward nOld.
508 if ka.addBucketRef(bucketIdx) == 1 {
512 // Ensure in addrLookup
513 a.addrLookup[addrStr] = ka
// removeFromBucket removes ka from one specific bucket, decrements the
// matching counter when its last bucket reference disappears, and drops it
// from addrLookup at that point.
518 func (a *AddrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
519 if ka.BucketType != bucketType {
520 log.Error(cmn.Fmt("Bucket type mismatch: %v", ka))
523 bucket := a.getBucket(bucketType, bucketIdx)
524 delete(bucket, ka.Addr.String())
// Zero remaining references: the address is fully gone from the book.
525 if ka.removeBucketRef(bucketIdx) == 0 {
526 if bucketType == bucketTypeNew {
531 delete(a.addrLookup, ka.Addr.String())
// removeFromAllBuckets removes ka from every bucket it references, fixes up
// the nNew/nOld counter, and drops it from addrLookup.
535 func (a *AddrBook) removeFromAllBuckets(ka *knownAddress) {
536 for _, bucketIdx := range ka.Buckets {
537 bucket := a.getBucket(ka.BucketType, bucketIdx)
538 delete(bucket, ka.Addr.String())
541 if ka.BucketType == bucketTypeNew {
546 delete(a.addrLookup, ka.Addr.String())
// pickOldest returns the entry in the given bucket with the earliest
// LastAttempt time (the staleness-based eviction candidate).
549 func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
550 bucket := a.getBucket(bucketType, bucketIdx)
551 var oldest *knownAddress
552 for _, ka := range bucket {
553 if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) {
// addAddress is the unexported insert path used by AddAddress: filters
// non-routable/own addresses, probabilistically limits how many new buckets
// an already-known address occupies, and places the entry in a new bucket.
560 func (a *AddrBook) addAddress(addr, src *NetAddress) {
561 if a.routabilityStrict && !addr.Routable() {
562 log.Error(cmn.Fmt("Cannot add non-routable address %v", addr))
565 if _, ok := a.ourAddrs[addr.String()]; ok {
566 // Ignore our own listener address.
570 ka := a.addrLookup[addr.String()]
577 // Already in max new buckets.
578 if len(ka.Buckets) == maxNewBucketsPerAddress {
581 // The more entries we have, the less likely we are to add more.
// P(add another bucket ref) = 1 / (2 * current bucket count).
582 factor := int32(2 * len(ka.Buckets))
583 if a.rand.Int31n(factor) != 0 {
// Previously unknown address: create a fresh knownAddress record.
587 ka = newKnownAddress(addr, src)
590 bucket := a.calcNewBucket(addr, src)
591 a.addToNewBucket(ka, bucket)
593 log.Info("Added new address ", "address:", addr, " total:", a.size())
596 // Make space in the new buckets by expiring the really bad entries.
597 // If no bad entries are available we remove the oldest.
598 func (a *AddrBook) expireNew(bucketIdx int) {
599 for addrStr, ka := range a.addrNew[bucketIdx] {
600 // If an entry is bad, throw it away
602 log.Info(cmn.Fmt("expiring bad address %v", addrStr))
603 a.removeFromBucket(ka, bucketTypeNew, bucketIdx)
608 // If we haven't thrown out a bad entry, throw out the oldest entry
609 oldest := a.pickOldest(bucketTypeNew, bucketIdx)
610 a.removeFromBucket(oldest, bucketTypeNew, bucketIdx)
613 // Promotes an address from new to old.
614 // TODO: Move to old probabilistically.
615 // The better a node is, the less likely it should be evicted from an old bucket.
616 func (a *AddrBook) moveToOld(ka *knownAddress) {
// Sanity checks: must currently be a new address living in >= 1 new bucket.
619 log.Error(cmn.Fmt("Cannot promote address that is already old %v", ka))
622 if len(ka.Buckets) == 0 {
623 log.Error(cmn.Fmt("Cannot promote address that isn't in any new buckets %v", ka))
627 // Remember one of the buckets in which ka is in.
// Kept as a guaranteed-free fallback slot for a displaced old entry below.
628 freedBucket := ka.Buckets[0]
629 // Remove from all (new) buckets.
630 a.removeFromAllBuckets(ka)
631 // It's officially old now.
632 ka.BucketType = bucketTypeOld
634 // Try to add it to its oldBucket destination.
635 oldBucketIdx := a.calcOldBucket(ka.Addr)
636 added := a.addToOldBucket(ka, oldBucketIdx)
638 // No room, must evict something
639 oldest := a.pickOldest(bucketTypeOld, oldBucketIdx)
640 a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx)
641 // Find new bucket to put oldest in
// The evicted old entry is demoted back into the new bucket set.
642 newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src)
643 added := a.addToNewBucket(oldest, newBucketIdx)
644 // No space in newBucket either, just put it in freedBucket from above.
646 added := a.addToNewBucket(oldest, freedBucket)
648 log.Error(cmn.Fmt("Could not migrate oldest %v to freedBucket %v", oldest, freedBucket))
651 // Finally, add to bucket again.
652 added = a.addToOldBucket(ka, oldBucketIdx)
654 log.Error(cmn.Fmt("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx))
// calcNewBucket deterministically maps (addr, src) to a new-bucket index.
// Salted with a.key so the mapping differs per node (hardens against an
// attacker steering addresses into chosen buckets).
659 // doublesha256( key + sourcegroup +
660 // int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets
661 func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
663 data1 = append(data1, []byte(a.key)...)
664 data1 = append(data1, []byte(a.groupKey(addr))...)
665 data1 = append(data1, []byte(a.groupKey(src))...)
666 hash1 := doubleSha256(data1)
667 hash64 := binary.BigEndian.Uint64(hash1)
// Restrict a single source group to newBucketsPerGroup (32) distinct buckets.
668 hash64 %= newBucketsPerGroup
670 binary.BigEndian.PutUint64(hashbuf[:], hash64)
672 data2 = append(data2, []byte(a.key)...)
673 data2 = append(data2, a.groupKey(src)...)
674 data2 = append(data2, hashbuf[:]...)
676 hash2 := doubleSha256(data2)
677 return int(binary.BigEndian.Uint64(hash2) % newBucketCount)
// calcOldBucket deterministically maps addr to an old-bucket index, salted
// with a.key (same scheme as calcNewBucket but keyed on the address itself).
680 // doublesha256( key + group +
681 // int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets
682 func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
684 data1 = append(data1, []byte(a.key)...)
685 data1 = append(data1, []byte(addr.String())...)
686 hash1 := doubleSha256(data1)
687 hash64 := binary.BigEndian.Uint64(hash1)
// Restrict a single address group to oldBucketsPerGroup (4) distinct buckets.
688 hash64 %= oldBucketsPerGroup
690 binary.BigEndian.PutUint64(hashbuf[:], hash64)
692 data2 = append(data2, []byte(a.key)...)
693 data2 = append(data2, a.groupKey(addr)...)
694 data2 = append(data2, hashbuf[:]...)
696 hash2 := doubleSha256(data2)
697 return int(binary.BigEndian.Uint64(hash2) % oldBucketCount)
700 // Return a string representing the network group of this address.
701 // This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
702 // "local" for a local address and the string "unroutable" for an unroutable
704 func (a *AddrBook) groupKey(na *NetAddress) string {
705 if a.routabilityStrict && na.Local() {
708 if a.routabilityStrict && !na.Routable() {
// Plain IPv4: group by /16.
712 if ipv4 := na.IP.To4(); ipv4 != nil {
713 return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String()
715 if na.RFC6145() || na.RFC6052() {
716 // last four bytes are the ip address
717 ip := net.IP(na.IP[12:16])
718 return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
// NOTE(review): [2:7] takes FIVE bytes; an embedded IPv4 address is four
// bytes, so [2:6] looks intended — verify against the elided RFC check above.
722 ip := net.IP(na.IP[2:7])
723 return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
727 // teredo tunnels have the last 4 bytes as the v4 address XOR
729 ip := net.IP(make([]byte, 4))
// NOTE(review): the loop variable shadows the builtin `byte` type here.
730 for i, byte := range na.IP[12:16] {
733 return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
736 // OK, so now we know ourselves to be a IPv6 address.
737 // bitcoind uses /32 for everything, except for Hurricane Electric's
738 // (he.net) IP range, which it uses /36 for.
740 heNet := &net.IPNet{IP: net.ParseIP("2001:470::"),
741 Mask: net.CIDRMask(32, 128)}
742 if heNet.Contains(na.IP) {
746 return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String()
749 //-----------------------------------------------------------------------------
754 tracks information about a known network address that is used
755 to determine how viable an address is.
757 type knownAddress struct {
// Timestamps driving staleness/badness decisions (see isBad, pickOldest).
761 LastAttempt time.Time
762 LastSuccess time.Time
// newKnownAddress builds a fresh record for addr learned from src.
// New entries start in the "new" bucket class with LastAttempt set to now.
767 func newKnownAddress(addr *NetAddress, src *NetAddress) *knownAddress {
768 return &knownAddress{
772 LastAttempt: time.Now(),
773 BucketType: bucketTypeNew,
// isOld reports whether this address lives in the old (proven) bucket class.
778 func (ka *knownAddress) isOld() bool {
779 return ka.BucketType == bucketTypeOld
// isNew reports whether this address lives in the new (unproven) bucket class.
782 func (ka *knownAddress) isNew() bool {
783 return ka.BucketType == bucketTypeNew
// markAttempt records a connection attempt (body elided in this listing —
// presumably updates LastAttempt/Attempts; verify).
786 func (ka *knownAddress) markAttempt() {
// markGood records a successful connection (body elided — presumably
// updates LastSuccess and resets Attempts; verify).
792 func (ka *knownAddress) markGood() {
// addBucketRef records that ka now lives in bucketIdx and returns the new
// reference count. Duplicate indices are not re-added (the early path below).
799 func (ka *knownAddress) addBucketRef(bucketIdx int) int {
800 for _, bucket := range ka.Buckets {
801 if bucket == bucketIdx {
802 // TODO refactor to return error?
803 // log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka))
807 ka.Buckets = append(ka.Buckets, bucketIdx)
808 return len(ka.Buckets)
// removeBucketRef drops bucketIdx from ka.Buckets (by rebuilding the slice
// without it) and returns the remaining reference count.
811 func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
813 for _, bucket := range ka.Buckets {
814 if bucket != bucketIdx {
815 buckets = append(buckets, bucket)
// Exactly one element must have been removed; otherwise the index was absent.
818 if len(buckets) != len(ka.Buckets)-1 {
819 // TODO refactor to return error?
820 // log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka))
824 return len(ka.Buckets)
828 An address is bad if the address in question has not been tried in the last
829 minute and meets one of the following criteria:
831 1) It claims to be from the future
832 2) It hasn't been seen in over a month
833 3) It has failed at least three times and never succeeded
834 4) It has failed ten times in the last week
836 All addresses that meet these criteria are assumed to be worthless and not
837 worth keeping hold of.
839 func (ka *knownAddress) isBad() bool {
840 // Has been attempted in the last minute --> good
// Recently-tried addresses are never judged bad, to avoid thrashing.
841 if ka.LastAttempt.After(time.Now().Add(-1 * time.Minute)) {
// Not announced within numMissingDays --> bad (criterion 2).
846 if ka.LastAttempt.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
// Never succeeded after numRetries attempts --> bad (criterion 3).
851 if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries {
855 // Hasn't succeeded in too long?
// Stale success plus too many failures --> bad (criterion 4).
856 if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
857 ka.Attempts >= maxFailures {