1 // Modified for Tendermint
2 // Originally Copyright (c) 2013-2014 Conformal Systems LLC.
3 // https://github.com/conformal/btcd/blob/master/LICENSE
17 log "github.com/sirupsen/logrus"
18 crypto "github.com/tendermint/go-crypto"
19 cmn "github.com/tendermint/tmlibs/common"
23 // addresses under which the address manager will claim to need more addresses.
24 needAddressThreshold = 1000
26 // interval used to dump the address cache to disk for future use.
27 dumpAddressInterval = time.Minute * 2
29 // max addresses in each old address bucket.
32 // buckets we split old addresses over.
35 // max addresses in each new address bucket.
38 // buckets that we spread new addresses over.
41 // old buckets over which an address group will be spread.
42 oldBucketsPerGroup = 4
44 // new buckets over which a source address group will be spread.
45 newBucketsPerGroup = 32
47 // buckets a frequently seen new address may end up in.
48 maxNewBucketsPerAddress = 4
50 // days before which we assume an address has vanished
51 // if we have not seen it announced in that long.
54 // tries without a single success before we assume an address is bad.
57 // max failures we will accept without a success before considering an address bad.
60 // days since the last success before we will consider evicting an address.
63 // % of total addresses known returned by GetSelection.
64 getSelectionPercent = 23
66 // min addresses that must be returned by GetSelection. Useful for bootstrapping.
69 // max addresses returned by GetSelection
70 // NOTE: this must match "maxPexMessageSize"
73 // current version of the on-disk format.
74 serializationVersion = 1
82 // AddrBook - concurrency safe peer address manager.
83 type AddrBook struct {
88 routabilityStrict bool // when true, non-routable addresses are rejected (see addAddress/groupKey)
91 ourAddrs map[string]*NetAddress // our own listener addresses, keyed by String(); never re-added to the book
92 addrLookup map[string]*knownAddress // new & old
93 addrNew []map[string]*knownAddress // "new" buckets: addresses heard about but not yet proven good
94 addrOld []map[string]*knownAddress // "old" buckets: addresses we have successfully connected to
100 // NewAddrBook creates a new address book.
101 // Use Start to begin processing asynchronous address updates.
102 func NewAddrBook(filePath string, routabilityStrict bool) *AddrBook {
// Private RNG seeded from the clock; used for all bucket/address selection.
104 rand: rand.New(rand.NewSource(time.Now().UnixNano())),
105 ourAddrs: make(map[string]*NetAddress),
106 addrLookup: make(map[string]*knownAddress),
108 routabilityStrict: routabilityStrict,
// Hook into the tmlibs BaseService lifecycle (OnStart/OnStop below).
111 am.BaseService = *cmn.NewBaseService(nil, "AddrBook", am)
115 // When modifying this, don't forget to update loadFromFile()
116 func (a *AddrBook) init() {
// Random 96-bit key mixed into the bucket hashes (calcNewBucket/calcOldBucket)
// so remote peers cannot predict or force bucket placement.
117 a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits
// Allocate the fixed set of "new" buckets.
119 a.addrNew = make([]map[string]*knownAddress, newBucketCount)
120 for i := range a.addrNew {
121 a.addrNew[i] = make(map[string]*knownAddress)
// Allocate the fixed set of "old" (tried) buckets.
124 a.addrOld = make([]map[string]*knownAddress, oldBucketCount)
125 for i := range a.addrOld {
126 a.addrOld[i] = make(map[string]*knownAddress)
130 // OnStart implements Service.
131 func (a *AddrBook) OnStart() error {
132 a.BaseService.OnStart()
// Restore any previously saved book from disk (no-op if the file is absent).
133 a.loadFromFile(a.filePath)
139 // OnStop implements Service.
140 func (a *AddrBook) OnStop() {
141 a.BaseService.OnStop()
// Wait blocks until background processing has finished — presumably until
// saveRoutine exits; TODO confirm the synchronization mechanism.
144 func (a *AddrBook) Wait() {
// AddOurAddress registers one of our own listener addresses so that
// addAddress will ignore it if a peer gossips it back to us.
148 func (a *AddrBook) AddOurAddress(addr *NetAddress) {
151 log.WithField("addr", addr).Info("Add our address to book")
// Keyed by the canonical string form; addAddress checks the same key.
153 a.ourAddrs[addr.String()] = addr
// OurAddresses returns a snapshot slice of our registered listener addresses.
156 func (a *AddrBook) OurAddresses() []*NetAddress {
157 addrs := []*NetAddress{}
158 for _, addr := range a.ourAddrs {
159 addrs = append(addrs, addr)
164 // NOTE: addr must not be nil
// AddAddress records addr, as reported by source peer src, in the book.
165 func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) {
168 log.WithFields(log.Fields{
171 }).Debug("Add address to book")
// Delegate the actual bucket placement to the private helper.
172 a.addAddress(addr, src)
// NeedMoreAddrs reports whether the book holds fewer than
// needAddressThreshold addresses and should solicit more from peers.
175 func (a *AddrBook) NeedMoreAddrs() bool {
176 return a.Size() < needAddressThreshold
// Size returns the total number of addresses in the book (new + old).
179 func (a *AddrBook) Size() int {
// size is the unexported counterpart of Size — presumably called with the
// book's lock already held; TODO confirm the locking convention.
185 func (a *AddrBook) size() int {
186 return a.nNew + a.nOld
189 // Pick an address to connect to with new/old bias.
// newBias is the percentage weight (0-100) toward the new buckets; the
// sqrt() dampens the effect of one side vastly outnumbering the other.
190 func (a *AddrBook) PickAddress(newBias int) *NetAddress {
204 // Bias between new and old addresses.
205 oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
206 newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)
// One weighted coin flip decides which side we draw from.
208 if (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation {
209 // pick random Old bucket.
210 var bucket map[string]*knownAddress = nil
// Retry random buckets until a non-empty one is found, bounded by oldBucketCount.
212 for len(bucket) == 0 && num < oldBucketCount {
213 bucket = a.addrOld[a.rand.Intn(len(a.addrOld))]
216 if num == oldBucketCount {
219 // pick a random ka from bucket.
220 randIndex := a.rand.Intn(len(bucket))
// Walk the map until the randIndex-th entry; Go map order is already random,
// but randIndex makes the draw independent of runtime iteration bias.
221 for _, ka := range bucket {
227 cmn.PanicSanity("Should not happen")
229 // pick random New bucket.
230 var bucket map[string]*knownAddress = nil
232 for len(bucket) == 0 && num < newBucketCount {
233 bucket = a.addrNew[a.rand.Intn(len(a.addrNew))]
236 if num == newBucketCount {
239 // pick a random ka from bucket.
240 randIndex := a.rand.Intn(len(bucket))
241 for _, ka := range bucket {
247 cmn.PanicSanity("Should not happen")
// MarkGood records a successful connection to addr, making it a candidate
// for promotion to the old (tried) buckets.
252 func (a *AddrBook) MarkGood(addr *NetAddress) {
255 ka := a.addrLookup[addr.String()]
// MarkAttempt records a connection attempt to addr (regardless of outcome).
265 func (a *AddrBook) MarkAttempt(addr *NetAddress) {
268 ka := a.addrLookup[addr.String()]
275 // MarkBad currently just ejects the address. In the future, consider
// tracking misbehavior and only then removing/banning.
277 func (a *AddrBook) MarkBad(addr *NetAddress) {
278 a.RemoveAddress(addr)
281 // RemoveAddress removes the address from the book.
282 func (a *AddrBook) RemoveAddress(addr *NetAddress) {
285 ka := a.addrLookup[addr.String()]
// An unknown address is a no-op — presumably a nil check follows the lookup.
289 log.WithField("addr", addr).Info("Remove address from book")
// Drop from every bucket and from addrLookup.
290 a.removeFromAllBuckets(ka)
295 // GetSelection randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
296 func (a *AddrBook) GetSelection() []*NetAddress {
// Flatten all known addresses into one slice.
304 allAddr := make([]*NetAddress, a.size())
306 for _, v := range a.addrLookup {
// Target count: max(minGetSelection, getSelectionPercent% of all), capped at maxGetSelection.
311 numAddresses := cmn.MaxInt(
312 cmn.MinInt(minGetSelection, len(allAddr)),
313 len(allAddr)*getSelectionPercent/100)
314 numAddresses = cmn.MinInt(maxGetSelection, numAddresses)
316 // Fisher-Yates shuffle the array. We only need to do the first
317 // `numAddresses' since we are throwing the rest.
318 for i := 0; i < numAddresses; i++ {
319 // pick a number between current index and the end
// NOTE(review): this uses the package-level math/rand, unlike every other
// draw in this book which uses the seeded a.rand — confirm whether intentional.
320 j := rand.Intn(len(allAddr)-i) + i
321 allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
324 // slice off the limit we are willing to share.
325 return allAddr[:numAddresses]
328 /* Loading & Saving */
// addrBookJSON is the on-disk (JSON) representation of the address book,
// written by saveToFile and read back by loadFromFile.
330 type addrBookJSON struct {
332 Addrs []*knownAddress
// saveToFile serializes the book to filePath as indented JSON, using an
// atomic write so a crash cannot leave a torn file on disk.
335 func (a *AddrBook) saveToFile(filePath string) {
336 log.WithField("size", a.Size()).Info("Saving AddrBook to file")
// Flatten the lookup table; each knownAddress carries its own bucket refs,
// so the bucket structure can be rebuilt on load.
341 addrs := []*knownAddress{}
342 for _, ka := range a.addrLookup {
343 addrs = append(addrs, ka)
346 aJSON := &addrBookJSON{
351 jsonBytes, err := json.MarshalIndent(aJSON, "", "\t")
// Failures are logged, not fatal — the book keeps serving from memory.
353 log.WithField("err", err).Error("Failed to save AddrBook to file")
356 err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644)
358 log.WithFields(log.Fields{
361 }).Error("Failed to save AddrBook to file")
365 // Returns false if file does not exist.
366 // cmn.Panics if file is corrupt.
367 func (a *AddrBook) loadFromFile(filePath string) bool {
368 // If doesn't exist, do nothing.
369 _, err := os.Stat(filePath)
370 if os.IsNotExist(err) {
374 // Load addrBookJSON{}
375 r, err := os.Open(filePath)
377 cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err))
// NOTE(review): no r.Close() is visible in this code — confirm a deferred
// Close exists, otherwise every load leaks a file descriptor.
380 aJSON := &addrBookJSON{}
381 dec := json.NewDecoder(r)
382 err = dec.Decode(aJSON)
384 cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err))
387 // Restore all the fields...
390 // Restore .addrNew & .addrOld
// Rebuild bucket membership and the lookup table from the flat address list.
391 for _, ka := range aJSON.Addrs {
392 for _, bucketIndex := range ka.Buckets {
393 bucket := a.getBucket(ka.BucketType, bucketIndex)
394 bucket[ka.Addr.String()] = ka
396 a.addrLookup[ka.Addr.String()] = ka
// Restore the new/old counters according to each address's bucket type.
397 if ka.BucketType == bucketTypeNew {
406 // Save saves the book.
407 func (a *AddrBook) Save() {
// NOTE(review): saveToFile logs this exact message too, so Save logs it twice.
408 log.WithField("size", a.Size()).Info("Saving AddrBook to file")
409 a.saveToFile(a.filePath)
412 /* Private methods */
// saveRoutine periodically flushes the book to disk every dumpAddressInterval
// until told to stop, then performs one final save before exiting.
414 func (a *AddrBook) saveRoutine() {
415 dumpAddressTicker := time.NewTicker(dumpAddressInterval)
419 case <-dumpAddressTicker.C:
420 a.saveToFile(a.filePath)
// Shutdown path: stop the ticker and write a final snapshot.
425 dumpAddressTicker.Stop()
426 a.saveToFile(a.filePath)
428 log.Info("Address handler done")
// getBucket returns the bucket at bucketIdx for the given bucket type
// (bucketTypeNew or bucketTypeOld); panics on any other type.
431 func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
434 return a.addrNew[bucketIdx]
436 return a.addrOld[bucketIdx]
438 cmn.PanicSanity("Should not happen")
443 // Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full.
444 // NOTE: currently it always returns true.
445 func (a *AddrBook) addToNewBucket(ka *knownAddress, bucketIdx int) bool {
// Sanity check: old addresses must never be re-filed as new.
448 log.Error(cmn.Fmt("Cannot add address already in old bucket to a new bucket: %v", ka))
452 addrStr := ka.Addr.String()
453 bucket := a.getBucket(bucketTypeNew, bucketIdx)
// Already present in this bucket: nothing to do.
456 if _, ok := bucket[addrStr]; ok {
460 // Enforce max addresses.
461 if len(bucket) > newBucketSize {
462 log.Info("new bucket is full, expiring old ")
463 a.expireNew(bucketIdx)
// First bucket reference means this address is brand new to the book.
468 if ka.addBucketRef(bucketIdx) == 1 {
472 // Ensure in addrLookup
473 a.addrLookup[addrStr] = ka
478 // Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full.
479 func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
// Sanity: only addresses already marked old belong in old buckets.
482 log.Error(cmn.Fmt("Cannot add new address to old bucket: %v", ka))
// An old address may live in at most one old bucket.
485 if len(ka.Buckets) != 0 {
486 log.Error(cmn.Fmt("Cannot add already old address to another old bucket: %v", ka))
490 addrStr := ka.Addr.String()
// BUG FIX: previously fetched a.getBucket(bucketTypeNew, bucketIdx). This
// function fills OLD buckets — the oldBucketSize check below operates on
// this very bucket — so it must fetch the old bucket, not the new one.
491 bucket := a.getBucket(bucketTypeOld, bucketIdx)
// Already present in this bucket: nothing to do.
494 if _, ok := bucket[addrStr]; ok {
498 // Enforce max addresses.
499 if len(bucket) > oldBucketSize {
// First bucket reference updates the old-address counter.
505 if ka.addBucketRef(bucketIdx) == 1 {
509 // Ensure in addrLookup
510 a.addrLookup[addrStr] = ka
// removeFromBucket deletes ka from a single bucket; when that was its last
// bucket reference, the address is also dropped from addrLookup.
515 func (a *AddrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
516 if ka.BucketType != bucketType {
517 log.Error(cmn.Fmt("Bucket type mismatch: %v", ka))
520 bucket := a.getBucket(bucketType, bucketIdx)
521 delete(bucket, ka.Addr.String())
// Zero remaining refs: the address is gone from the book entirely.
522 if ka.removeBucketRef(bucketIdx) == 0 {
523 if bucketType == bucketTypeNew {
528 delete(a.addrLookup, ka.Addr.String())
// removeFromAllBuckets deletes ka from every bucket it references, updates
// the new/old counter for its bucket type, and drops it from addrLookup.
532 func (a *AddrBook) removeFromAllBuckets(ka *knownAddress) {
533 for _, bucketIdx := range ka.Buckets {
534 bucket := a.getBucket(ka.BucketType, bucketIdx)
535 delete(bucket, ka.Addr.String())
538 if ka.BucketType == bucketTypeNew {
543 delete(a.addrLookup, ka.Addr.String())
// pickOldest returns the entry in the given bucket with the earliest
// LastAttempt (the least recently tried address).
546 func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
547 bucket := a.getBucket(bucketType, bucketIdx)
548 var oldest *knownAddress
549 for _, ka := range bucket {
550 if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) {
// addAddress performs the actual insertion of addr (reported by src) into a
// new bucket — presumably called with the book's lock held; TODO confirm.
557 func (a *AddrBook) addAddress(addr, src *NetAddress) {
558 if a.routabilityStrict && !addr.Routable() {
559 log.Error(cmn.Fmt("Cannot add non-routable address %v", addr))
562 if _, ok := a.ourAddrs[addr.String()]; ok {
563 // Ignore our own listener address.
567 ka := a.addrLookup[addr.String()]
574 // Already in max new buckets.
575 if len(ka.Buckets) == maxNewBucketsPerAddress {
578 // The more entries we have, the less likely we are to add more.
// Probability of adding another bucket ref is 1/(2*len(Buckets)).
579 factor := int32(2 * len(ka.Buckets))
580 if a.rand.Int31n(factor) != 0 {
// Unknown address: create a fresh record starting life as "new".
584 ka = newKnownAddress(addr, src)
// Bucket index is a keyed hash of (addr, src) — see calcNewBucket.
587 bucket := a.calcNewBucket(addr, src)
588 a.addToNewBucket(ka, bucket)
590 log.Info("Added new address ", "address:", addr, " total:", a.size())
593 // Make space in the new buckets by expiring the really bad entries.
594 // If no bad entries are available we remove the oldest.
595 func (a *AddrBook) expireNew(bucketIdx int) {
596 for addrStr, ka := range a.addrNew[bucketIdx] {
597 // If an entry is bad, throw it away
599 log.Info(cmn.Fmt("expiring bad address %v", addrStr))
600 a.removeFromBucket(ka, bucketTypeNew, bucketIdx)
605 // If we haven't thrown out a bad entry, throw out the oldest entry
606 oldest := a.pickOldest(bucketTypeNew, bucketIdx)
607 a.removeFromBucket(oldest, bucketTypeNew, bucketIdx)
610 // Promotes an address from new to old.
611 // TODO: Move to old probabilistically.
612 // The better a node is, the less likely it should be evicted from an old bucket.
613 func (a *AddrBook) moveToOld(ka *knownAddress) {
// Guard: must currently be a "new" address living in at least one bucket.
616 log.Error(cmn.Fmt("Cannot promote address that is already old %v", ka))
619 if len(ka.Buckets) == 0 {
620 log.Error(cmn.Fmt("Cannot promote address that isn't in any new buckets %v", ka))
624 // Remember one of the buckets in which ka is in.
625 freedBucket := ka.Buckets[0]
626 // Remove from all (new) buckets.
627 a.removeFromAllBuckets(ka)
628 // It's officially old now.
629 ka.BucketType = bucketTypeOld
631 // Try to add it to its oldBucket destination.
632 oldBucketIdx := a.calcOldBucket(ka.Addr)
633 added := a.addToOldBucket(ka, oldBucketIdx)
635 // No room, must evict something
// Evict the least-recently-tried old entry and demote it back to new.
636 oldest := a.pickOldest(bucketTypeOld, oldBucketIdx)
637 a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx)
638 // Find new bucket to put oldest in
// NOTE(review): `added :=` here (and again below) declares a fresh variable
// shadowing the outer `added` — confirm the shadowing is intentional.
639 newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src)
640 added := a.addToNewBucket(oldest, newBucketIdx)
641 // No space in newBucket either, just put it in freedBucket from above.
643 added := a.addToNewBucket(oldest, freedBucket)
645 log.Error(cmn.Fmt("Could not migrate oldest %v to freedBucket %v", oldest, freedBucket))
648 // Finally, add to bucket again.
649 added = a.addToOldBucket(ka, oldBucketIdx)
651 log.Error(cmn.Fmt("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx))
656 // doublesha256( key + sourcegroup +
657 // int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets
// calcNewBucket deterministically maps (addr, src) to a new-bucket index.
// Mixing in a.key keeps the placement unpredictable to other peers.
658 func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
// Inner hash over key + group(addr) + group(src).
660 data1 = append(data1, []byte(a.key)...)
661 data1 = append(data1, []byte(a.groupKey(addr))...)
662 data1 = append(data1, []byte(a.groupKey(src))...)
663 hash1 := doubleSha256(data1)
664 hash64 := binary.BigEndian.Uint64(hash1)
// Reduce to one of the slots allotted to this source group.
665 hash64 %= newBucketsPerGroup
667 binary.BigEndian.PutUint64(hashbuf[:], hash64)
// Outer hash over key + group(src) + slot selects the actual bucket.
669 data2 = append(data2, []byte(a.key)...)
670 data2 = append(data2, a.groupKey(src)...)
671 data2 = append(data2, hashbuf[:]...)
673 hash2 := doubleSha256(data2)
674 return int(binary.BigEndian.Uint64(hash2) % newBucketCount)
677 // doublesha256( key + group +
678 // int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets
// calcOldBucket deterministically maps addr to an old-bucket index; same
// keyed double-hash construction as calcNewBucket but keyed on the address
// itself rather than on the source.
679 func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
// Inner hash over key + addr string.
681 data1 = append(data1, []byte(a.key)...)
682 data1 = append(data1, []byte(addr.String())...)
683 hash1 := doubleSha256(data1)
684 hash64 := binary.BigEndian.Uint64(hash1)
// Reduce to one of the slots allotted to this address group.
685 hash64 %= oldBucketsPerGroup
687 binary.BigEndian.PutUint64(hashbuf[:], hash64)
// Outer hash over key + group(addr) + slot selects the actual bucket.
689 data2 = append(data2, []byte(a.key)...)
690 data2 = append(data2, a.groupKey(addr)...)
691 data2 = append(data2, hashbuf[:]...)
693 hash2 := doubleSha256(data2)
694 return int(binary.BigEndian.Uint64(hash2) % oldBucketCount)
697 // Return a string representing the network group of this address.
698 // This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
699 // "local" for a local address and the string "unroutable" for an unroutable
701 func (a *AddrBook) groupKey(na *NetAddress) string {
702 if a.routabilityStrict && na.Local() {
705 if a.routabilityStrict && !na.Routable() {
// Plain IPv4: group by /16.
709 if ipv4 := na.IP.To4(); ipv4 != nil {
710 return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String()
// IPv4-embedded IPv6 (RFC6145/RFC6052): group by the embedded v4 /16.
712 if na.RFC6145() || na.RFC6052() {
713 // last four bytes are the ip address
714 ip := net.IP(na.IP[12:16])
715 return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
// NOTE(review): na.IP[2:7] yields FIVE bytes; an embedded IPv4 address is
// four bytes, so [2:6] looks intended — confirm against the 6to4 layout.
719 ip := net.IP(na.IP[2:7])
720 return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
724 // teredo tunnels have the last 4 bytes as the v4 address XOR
726 ip := net.IP(make([]byte, 4))
727 for i, byte := range na.IP[12:16] {
730 return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
733 // OK, so now we know ourselves to be a IPv6 address.
734 // bitcoind uses /32 for everything, except for Hurricane Electric's
735 // (he.net) IP range, which it uses /36 for.
737 heNet := &net.IPNet{IP: net.ParseIP("2001:470::"),
738 Mask: net.CIDRMask(32, 128)}
739 if heNet.Contains(na.IP) {
743 return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String()
746 //-----------------------------------------------------------------------------
751 tracks information about a known network address that is used
752 to determine how viable an address is.
754 type knownAddress struct {
758 LastAttempt time.Time // time of the most recent connection attempt
759 LastSuccess time.Time // time of the most recent successful connection; zero value = never
// newKnownAddress builds a fresh record for addr (reported by src); every
// address starts life in the "new" buckets.
764 func newKnownAddress(addr *NetAddress, src *NetAddress) *knownAddress {
765 return &knownAddress{
769 LastAttempt: time.Now(),
770 BucketType: bucketTypeNew,
// isOld reports whether ka lives in the old (tried) buckets.
775 func (ka *knownAddress) isOld() bool {
776 return ka.BucketType == bucketTypeOld
// isNew reports whether ka lives in the new (untried) buckets.
779 func (ka *knownAddress) isNew() bool {
780 return ka.BucketType == bucketTypeNew
// markAttempt records a connection attempt on this address.
783 func (ka *knownAddress) markAttempt() {
// markGood records a successful connection to this address.
789 func (ka *knownAddress) markGood() {
// addBucketRef records that ka now lives in bucketIdx (idempotently) and
// returns the resulting number of bucket references.
796 func (ka *knownAddress) addBucketRef(bucketIdx int) int {
// Already referenced: do not add a duplicate entry.
797 for _, bucket := range ka.Buckets {
798 if bucket == bucketIdx {
799 // TODO refactor to return error?
800 // log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka))
804 ka.Buckets = append(ka.Buckets, bucketIdx)
805 return len(ka.Buckets)
// removeBucketRef removes bucketIdx from ka.Buckets and returns the number
// of bucket references that remain.
808 func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
// Rebuild the slice without bucketIdx.
810 for _, bucket := range ka.Buckets {
811 if bucket != bucketIdx {
812 buckets = append(buckets, bucket)
// If nothing was removed, bucketIdx was not actually referenced.
815 if len(buckets) != len(ka.Buckets)-1 {
816 // TODO refactor to return error?
817 // log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka))
821 return len(ka.Buckets)
825 An address is bad if the address in question has not been tried in the last
826 minute and meets one of the following criteria:
828 1) It claims to be from the future
829 2) It hasn't been seen in over a month
830 3) It has failed at least three times and never succeeded
831 4) It has failed ten times in the last week
833 All addresses that meet these criteria are assumed to be worthless and not
834 worth keeping hold of.
836 func (ka *knownAddress) isBad() bool {
837 // Has been attempted in the last minute --> good
838 if ka.LastAttempt.After(time.Now().Add(-1 * time.Minute)) {
// Not attempted for numMissingDays --> bad.
843 if ka.LastAttempt.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
// Tried numRetries times without ever succeeding --> bad.
848 if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries {
852 // Hasn't succeeded in too long?
// No success within minBadDays AND at least maxFailures attempts --> bad.
853 if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
854 ka.Attempts >= maxFailures {