1 // Modified for Tendermint
2 // Originally Copyright (c) 2013-2014 Conformal Systems LLC.
3 // https://github.com/conformal/btcd/blob/master/LICENSE
17 log "github.com/sirupsen/logrus"
18 crypto "github.com/tendermint/go-crypto"
19 cmn "github.com/tendermint/tmlibs/common"
23 // addresses under which the address manager will claim to need more addresses.
24 needAddressThreshold = 1000
26 // interval used to dump the address cache to disk for future use.
27 dumpAddressInterval = time.Minute * 2
29 // max addresses in each old address bucket.
32 // buckets we split old addresses over.
35 // max addresses in each new address bucket.
38 // buckets that we spread new addresses over.
41 // old buckets over which an address group will be spread.
42 oldBucketsPerGroup = 4
44 // new buckets over which an source address group will be spread.
45 newBucketsPerGroup = 32
47 // buckets a frequently seen new address may end up in.
48 maxNewBucketsPerAddress = 4
50 // days before which we assume an address has vanished
51 // if we have not seen it announced in that long.
54 // tries without a single success before we assume an address is bad.
57 // max failures we will accept without a success before considering an address bad.
60 // days since the last success before we will consider evicting an address.
63 // % of total addresses known returned by GetSelection.
64 getSelectionPercent = 23
66 // min addresses that must be returned by GetSelection. Useful for bootstrapping.
69 // max addresses returned by GetSelection
70 // NOTE: this must match "maxPexMessageSize"
73 // current version of the on-disk format.
74 serializationVersion = 1
// AddrBook - concurrency safe peer address manager.
type AddrBook struct {
	// When true, non-routable addresses are rejected (see addAddress/groupKey).
	routabilityStrict bool
	// Our own listener addresses; addAddress ignores these.
	ourAddrs map[string]*NetAddress
	addrLookup map[string]*knownAddress // new & old
	// Bucketed views of the same addresses: an address lives in up to
	// maxNewBucketsPerAddress new buckets, or one old bucket after promotion.
	addrNew []map[string]*knownAddress
	addrOld []map[string]*knownAddress
// NewAddrBook creates a new address book.
// Use Start to begin processing asynchronous address updates.
func NewAddrBook(filePath string, routabilityStrict bool) *AddrBook {
	// math/rand seeded from the wall clock: fine for bucket selection,
	// not cryptographically secure (the secret is a.key, set in init).
	rand: rand.New(rand.NewSource(time.Now().UnixNano())),
	ourAddrs: make(map[string]*NetAddress),
	addrLookup: make(map[string]*knownAddress),
	routabilityStrict: routabilityStrict,
	am.BaseService = *cmn.NewBaseService(nil, "AddrBook", am)
// When modifying this, don't forget to update loadFromFile()
// init allocates the empty new/old bucket maps and a fresh secret hash key
// used by calcNewBucket/calcOldBucket.
func (a *AddrBook) init() {
	a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits
	// New addr buckets
	a.addrNew = make([]map[string]*knownAddress, newBucketCount)
	for i := range a.addrNew {
		a.addrNew[i] = make(map[string]*knownAddress)
	// Old addr buckets
	a.addrOld = make([]map[string]*knownAddress, oldBucketCount)
	for i := range a.addrOld {
		a.addrOld[i] = make(map[string]*knownAddress)
// OnStart implements Service.
// Loads any previously saved book from disk before updates begin.
func (a *AddrBook) OnStart() error {
	// NOTE(review): the error from BaseService.OnStart is discarded here —
	// confirm it cannot fail, or propagate it.
	a.BaseService.OnStart()
	a.loadFromFile(a.filePath)
// OnStop implements Service.
func (a *AddrBook) OnStop() {
	a.BaseService.OnStop()
// Wait blocks until shutdown work is finished (presumably waits for the
// save goroutine to exit — body elided in this view, confirm).
func (a *AddrBook) Wait() {
// AddOurAddress registers addr as one of our own listener addresses so that
// addAddress can ignore it when peers gossip it back to us.
func (a *AddrBook) AddOurAddress(addr *NetAddress) {
	log.WithField("addr", addr).Info("Add our address to book")
	a.ourAddrs[addr.String()] = addr
// OurAddresses returns the registered listener addresses as a fresh slice
// (callers may mutate the slice without affecting the book).
func (a *AddrBook) OurAddresses() []*NetAddress {
	addrs := []*NetAddress{}
	for _, addr := range a.ourAddrs {
		addrs = append(addrs, addr)
// AddAddress adds addr to the book, attributed to the peer src that told us
// about it. Thin locked wrapper over addAddress.
// NOTE: addr must not be nil
func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) {
	log.WithFields(log.Fields{
	}).Debug("Add address to book")
	a.addAddress(addr, src)
// NeedMoreAddrs reports whether the book holds fewer than
// needAddressThreshold addresses.
func (a *AddrBook) NeedMoreAddrs() bool {
	return a.Size() < needAddressThreshold
// Size returns the total number of known addresses.
func (a *AddrBook) Size() int {
// size = nNew + nOld.
// NOTE(review): presumably callers must hold the book's lock — confirm.
func (a *AddrBook) size() int {
	return a.nNew + a.nOld
// Pick an address to connect to with new/old bias.
// newBias is interpreted against 100: higher values favor new addresses,
// lower values favor old ones (sqrt-weighted by population size).
func (a *AddrBook) PickAddress(newBias int) *NetAddress {
	// Bias between new and old addresses.
	oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
	newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)
	// One weighted coin flip decides which side we draw from.
	if (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation {
		// pick random Old bucket.
		// NOTE(review): this retry loop assumes at least one non-empty old
		// bucket exists; otherwise it would spin forever. Presumably guarded
		// by an emptiness check in the elided lines — confirm.
		var bucket map[string]*knownAddress = nil
		for len(bucket) == 0 {
			bucket = a.addrOld[a.rand.Intn(len(a.addrOld))]
		// pick a random ka from bucket.
		randIndex := a.rand.Intn(len(bucket))
		for _, ka := range bucket {
		cmn.PanicSanity("Should not happen")
		// pick random New bucket (same scheme as the old branch above).
		var bucket map[string]*knownAddress = nil
		for len(bucket) == 0 {
			bucket = a.addrNew[a.rand.Intn(len(a.addrNew))]
		// pick a random ka from bucket.
		randIndex := a.rand.Intn(len(bucket))
		for _, ka := range bucket {
		cmn.PanicSanity("Should not happen")
// MarkGood records a successful connection to addr.
// (Remainder of the body elided in this view.)
func (a *AddrBook) MarkGood(addr *NetAddress) {
	ka := a.addrLookup[addr.String()]
// MarkAttempt records a connection attempt against addr.
// (Remainder of the body elided in this view.)
func (a *AddrBook) MarkAttempt(addr *NetAddress) {
	ka := a.addrLookup[addr.String()]
// MarkBad currently just ejects the address. In the future, consider
// keeping it around (marked bad) so it is not immediately re-added.
func (a *AddrBook) MarkBad(addr *NetAddress) {
	a.RemoveAddress(addr)
// RemoveAddress removes the address from the book.
func (a *AddrBook) RemoveAddress(addr *NetAddress) {
	ka := a.addrLookup[addr.String()]
	log.WithField("addr", addr).Info("Remove address from book")
	a.removeFromAllBuckets(ka)
// GetSelection randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
func (a *AddrBook) GetSelection() []*NetAddress {
	allAddr := make([]*NetAddress, a.size())
	for _, v := range a.addrLookup {
	// Target count: at least minGetSelection (bounded by what we have),
	// otherwise getSelectionPercent of the total, capped at maxGetSelection.
	numAddresses := cmn.MaxInt(
		cmn.MinInt(minGetSelection, len(allAddr)),
		len(allAddr)*getSelectionPercent/100)
	numAddresses = cmn.MinInt(maxGetSelection, numAddresses)
	// Fisher-Yates shuffle the array. We only need to do the first
	// `numAddresses' since we are throwing the rest.
	// NOTE(review): uses the global math/rand here, while the rest of the
	// file uses a.rand — confirm whether the inconsistency is intentional.
	for i := 0; i < numAddresses; i++ {
		// pick a number between current index and the end
		j := rand.Intn(len(allAddr)-i) + i
		allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
	// slice off the limit we are willing to share.
	return allAddr[:numAddresses]
318 /* Loading & Saving */
// addrBookJSON is the on-disk representation of the book
// (see serializationVersion and saveToFile/loadFromFile).
type addrBookJSON struct {
	Addrs []*knownAddress
// saveToFile serializes every known address to filePath as indented JSON,
// written via cmn.WriteFileAtomic. Errors are logged, not returned.
func (a *AddrBook) saveToFile(filePath string) {
	log.WithField("size", a.Size()).Info("Saving AddrBook to file")
	addrs := []*knownAddress{}
	for _, ka := range a.addrLookup {
		addrs = append(addrs, ka)
	aJSON := &addrBookJSON{
	jsonBytes, err := json.MarshalIndent(aJSON, "", "\t")
	log.WithField("err", err).Error("Failed to save AddrBook to file")
	err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644)
	log.WithFields(log.Fields{
	}).Error("Failed to save AddrBook to file")
// loadFromFile restores the book from filePath, rebuilding the buckets,
// addrLookup, and the new/old counters from the serialized addresses.
// Returns false if file does not exist.
// cmn.Panics if file is corrupt.
func (a *AddrBook) loadFromFile(filePath string) bool {
	// If doesn't exist, do nothing.
	_, err := os.Stat(filePath)
	if os.IsNotExist(err) {
	// Load addrBookJSON{}
	r, err := os.Open(filePath)
		cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err))
	// NOTE(review): r is never visibly closed — confirm a defer r.Close()
	// exists in the elided lines.
	aJSON := &addrBookJSON{}
	dec := json.NewDecoder(r)
	err = dec.Decode(aJSON)
		cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err))
	// Restore all the fields...
	// Restore .addrNew & .addrOld
	for _, ka := range aJSON.Addrs {
		for _, bucketIndex := range ka.Buckets {
			bucket := a.getBucket(ka.BucketType, bucketIndex)
			bucket[ka.Addr.String()] = ka
		a.addrLookup[ka.Addr.String()] = ka
		if ka.BucketType == bucketTypeNew {
// Save saves the book.
func (a *AddrBook) Save() {
	log.WithField("size", a.Size()).Info("Saving AddrBook to file")
	a.saveToFile(a.filePath)
/* Private methods */
// saveRoutine periodically dumps the book to disk (every
// dumpAddressInterval) and writes one final snapshot before exiting.
func (a *AddrBook) saveRoutine() {
	dumpAddressTicker := time.NewTicker(dumpAddressInterval)
		case <-dumpAddressTicker.C:
			a.saveToFile(a.filePath)
	// Shutdown path: stop the ticker and flush once more.
	dumpAddressTicker.Stop()
	a.saveToFile(a.filePath)
	log.Info("Address handler done")
// getBucket returns the bucket map for (bucketType, bucketIdx).
// Panics (sanity) on an unknown bucket type.
func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
	return a.addrNew[bucketIdx]
	return a.addrOld[bucketIdx]
	cmn.PanicSanity("Should not happen")
// Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full.
// NOTE: currently it always returns true.
func (a *AddrBook) addToNewBucket(ka *knownAddress, bucketIdx int) bool {
	// Sanity check: old addresses must not be re-filed as new.
	log.Error(cmn.Fmt("Cannot add address already in old bucket to a new bucket: %v", ka))
	addrStr := ka.Addr.String()
	bucket := a.getBucket(bucketTypeNew, bucketIdx)
	// Already exists?
	if _, ok := bucket[addrStr]; ok {
	// Enforce max addresses.
	if len(bucket) > newBucketSize {
		log.Info("new bucket is full, expiring old ")
		a.expireNew(bucketIdx)
	// First bucket reference for this ka: count it as a new address.
	if ka.addBucketRef(bucketIdx) == 1 {
	// Ensure in addrLookup
	a.addrLookup[addrStr] = ka
468 // Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full.
469 func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
472 log.Error(cmn.Fmt("Cannot add new address to old bucket: %v", ka))
475 if len(ka.Buckets) != 0 {
476 log.Error(cmn.Fmt("Cannot add already old address to another old bucket: %v", ka))
480 addrStr := ka.Addr.String()
481 bucket := a.getBucket(bucketTypeNew, bucketIdx)
484 if _, ok := bucket[addrStr]; ok {
488 // Enforce max addresses.
489 if len(bucket) > oldBucketSize {
495 if ka.addBucketRef(bucketIdx) == 1 {
499 // Ensure in addrLookup
500 a.addrLookup[addrStr] = ka
// removeFromBucket deletes ka from one bucket; when that was its last
// bucket reference, it is also dropped from addrLookup and the counters.
func (a *AddrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
	if ka.BucketType != bucketType {
		log.Error(cmn.Fmt("Bucket type mismatch: %v", ka))
	bucket := a.getBucket(bucketType, bucketIdx)
	delete(bucket, ka.Addr.String())
	if ka.removeBucketRef(bucketIdx) == 0 {
		if bucketType == bucketTypeNew {
		delete(a.addrLookup, ka.Addr.String())
// removeFromAllBuckets deletes ka from every bucket it references and
// from addrLookup.
func (a *AddrBook) removeFromAllBuckets(ka *knownAddress) {
	for _, bucketIdx := range ka.Buckets {
		bucket := a.getBucket(ka.BucketType, bucketIdx)
		delete(bucket, ka.Addr.String())
	if ka.BucketType == bucketTypeNew {
	delete(a.addrLookup, ka.Addr.String())
// pickOldest returns the address with the earliest LastAttempt in the
// given bucket (nil when the bucket is empty).
func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
	bucket := a.getBucket(bucketType, bucketIdx)
	var oldest *knownAddress
	for _, ka := range bucket {
		if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) {
// addAddress is the unexported worker for AddAddress: filters out
// non-routable (when strict) and our own addresses, probabilistically
// re-adds known addresses to extra new buckets, and files fresh ones.
func (a *AddrBook) addAddress(addr, src *NetAddress) {
	if a.routabilityStrict && !addr.Routable() {
		log.Error(cmn.Fmt("Cannot add non-routable address %v", addr))
	if _, ok := a.ourAddrs[addr.String()]; ok {
		// Ignore our own listener address.
	ka := a.addrLookup[addr.String()]
		// Already in max new buckets.
		if len(ka.Buckets) == maxNewBucketsPerAddress {
		// The more entries we have, the less likely we are to add more.
		factor := int32(2 * len(ka.Buckets))
		if a.rand.Int31n(factor) != 0 {
	ka = newKnownAddress(addr, src)
	bucket := a.calcNewBucket(addr, src)
	a.addToNewBucket(ka, bucket)
	// NOTE(review): mixed logging style — elsewhere this file uses
	// log.WithField(s); consider unifying.
	log.Info("Added new address ", "address:", addr, " total:", a.size())
// Make space in the new buckets by expiring the really bad entries.
// If no bad entries are available we remove the oldest.
// NOTE: Go map iteration order is random, so which bad entry gets
// expired first is arbitrary.
func (a *AddrBook) expireNew(bucketIdx int) {
	for addrStr, ka := range a.addrNew[bucketIdx] {
		// If an entry is bad, throw it away
			log.Info(cmn.Fmt("expiring bad address %v", addrStr))
			a.removeFromBucket(ka, bucketTypeNew, bucketIdx)
	// If we haven't thrown out a bad entry, throw out the oldest entry
	oldest := a.pickOldest(bucketTypeNew, bucketIdx)
	a.removeFromBucket(oldest, bucketTypeNew, bucketIdx)
// Promotes an address from new to old.
// TODO: Move to old probabilistically.
// The better a node is, the less likely it should be evicted from an old bucket.
func (a *AddrBook) moveToOld(ka *knownAddress) {
	// Sanity checks: ka must currently be new and be in at least one bucket.
	log.Error(cmn.Fmt("Cannot promote address that is already old %v", ka))
	if len(ka.Buckets) == 0 {
		log.Error(cmn.Fmt("Cannot promote address that isn't in any new buckets %v", ka))
	// Remember one of the buckets in which ka is in.
	freedBucket := ka.Buckets[0]
	// Remove from all (new) buckets.
	a.removeFromAllBuckets(ka)
	// It's officially old now.
	ka.BucketType = bucketTypeOld
	// Try to add it to its oldBucket destination.
	oldBucketIdx := a.calcOldBucket(ka.Addr)
	added := a.addToOldBucket(ka, oldBucketIdx)
		// No room, must evict something
		oldest := a.pickOldest(bucketTypeOld, oldBucketIdx)
		a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx)
		// Find new bucket to put oldest in
		newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src)
		// NOTE(review): the inner `added :=` declarations below shadow the
		// outer `added` — verify the shadowing is intentional.
		added := a.addToNewBucket(oldest, newBucketIdx)
		// No space in newBucket either, just put it in freedBucket from above.
			added := a.addToNewBucket(oldest, freedBucket)
				log.Error(cmn.Fmt("Could not migrate oldest %v to freedBucket %v", oldest, freedBucket))
		// Finally, add to bucket again.
		added = a.addToOldBucket(ka, oldBucketIdx)
			log.Error(cmn.Fmt("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx))
// calcNewBucket deterministically maps (addr, src) to a new-bucket index,
// keyed by the book's secret a.key so peers cannot target buckets:
// doublesha256( key + sourcegroup +
//   int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets
func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
	data1 = append(data1, []byte(a.key)...)
	data1 = append(data1, []byte(a.groupKey(addr))...)
	data1 = append(data1, []byte(a.groupKey(src))...)
	hash1 := doubleSha256(data1)
	hash64 := binary.BigEndian.Uint64(hash1)
	hash64 %= newBucketsPerGroup
	binary.BigEndian.PutUint64(hashbuf[:], hash64)
	data2 = append(data2, []byte(a.key)...)
	data2 = append(data2, a.groupKey(src)...)
	data2 = append(data2, hashbuf[:]...)
	hash2 := doubleSha256(data2)
	return int(binary.BigEndian.Uint64(hash2) % newBucketCount)
// calcOldBucket deterministically maps addr to an old-bucket index,
// keyed by the book's secret a.key (mirror of calcNewBucket):
// doublesha256( key + group +
//   int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets
func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
	data1 = append(data1, []byte(a.key)...)
	data1 = append(data1, []byte(addr.String())...)
	hash1 := doubleSha256(data1)
	hash64 := binary.BigEndian.Uint64(hash1)
	hash64 %= oldBucketsPerGroup
	binary.BigEndian.PutUint64(hashbuf[:], hash64)
	data2 = append(data2, []byte(a.key)...)
	data2 = append(data2, a.groupKey(addr)...)
	data2 = append(data2, hashbuf[:]...)
	hash2 := doubleSha256(data2)
	return int(binary.BigEndian.Uint64(hash2) % oldBucketCount)
// Return a string representing the network group of this address.
// This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address and the string "unroutable" for an unroutable
// address.
func (a *AddrBook) groupKey(na *NetAddress) string {
	if a.routabilityStrict && na.Local() {
	if a.routabilityStrict && !na.Routable() {
	if ipv4 := na.IP.To4(); ipv4 != nil {
		return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String()
	if na.RFC6145() || na.RFC6052() {
		// last four bytes are the ip address
		ip := net.IP(na.IP[12:16])
		return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
	// NOTE(review): na.IP[2:7] is a 5-byte slice — unusual for an IP; verify
	// against the embedded-IPv4 layout this branch is meant to handle.
	ip := net.IP(na.IP[2:7])
	return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
	// teredo tunnels have the last 4 bytes as the v4 address XOR
	ip := net.IP(make([]byte, 4))
	for i, byte := range na.IP[12:16] {
	return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
	// OK, so now we know ourselves to be a IPv6 address.
	// bitcoind uses /32 for everything, except for Hurricane Electric's
	// (he.net) IP range, which it uses /36 for.
	heNet := &net.IPNet{IP: net.ParseIP("2001:470::"),
		Mask: net.CIDRMask(32, 128)}
	if heNet.Contains(na.IP) {
	return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String()
//-----------------------------------------------------------------------------
/*
	knownAddress tracks information about a known network address
	that is used to determine how viable an address is.
*/
type knownAddress struct {
	LastAttempt time.Time
	LastSuccess time.Time
// newKnownAddress creates a knownAddress in the new-bucket state with
// LastAttempt primed to the current time.
func newKnownAddress(addr *NetAddress, src *NetAddress) *knownAddress {
	return &knownAddress{
		LastAttempt: time.Now(),
		BucketType: bucketTypeNew,
// isOld reports whether ka has been promoted to an old bucket.
func (ka *knownAddress) isOld() bool {
	return ka.BucketType == bucketTypeOld
// isNew reports whether ka is still in the new-bucket state.
func (ka *knownAddress) isNew() bool {
	return ka.BucketType == bucketTypeNew
// markAttempt records a connection attempt (body elided in this view).
func (ka *knownAddress) markAttempt() {
// markGood records a successful connection (body elided in this view).
func (ka *knownAddress) markGood() {
// addBucketRef records that ka now lives in bucketIdx (idempotent) and
// returns the resulting reference count, len(ka.Buckets).
func (ka *knownAddress) addBucketRef(bucketIdx int) int {
	for _, bucket := range ka.Buckets {
		if bucket == bucketIdx {
			// TODO refactor to return error?
			// log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka))
	ka.Buckets = append(ka.Buckets, bucketIdx)
	return len(ka.Buckets)
// removeBucketRef drops bucketIdx from ka.Buckets and returns the
// remaining reference count.
func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
	// Rebuild the list without bucketIdx.
	for _, bucket := range ka.Buckets {
		if bucket != bucketIdx {
			buckets = append(buckets, bucket)
	if len(buckets) != len(ka.Buckets)-1 {
		// TODO refactor to return error?
		// log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka))
	return len(ka.Buckets)
/*
	An address is bad if the address in question has not been tried in the last
	minute and meets one of the following criteria:

	1) It claims to be from the future
	2) It hasn't been seen in over a month
	3) It has failed at least three times and never succeeded
	4) It has failed ten times in the last week

	All addresses that meet these criteria are assumed to be worthless and not
	worth keeping hold of.
*/
func (ka *knownAddress) isBad() bool {
	// Has been attempted in the last minute --> good
	// NOTE(review): Before(now-1m) is true when the last attempt was MORE
	// than a minute ago, which contradicts the comment above — confirm the
	// intended direction of this check.
	if ka.LastAttempt.Before(time.Now().Add(-1 * time.Minute)) {
	// Criterion (2): not seen in over numMissingDays days?
	// NOTE(review): After(now-numMissingDays) is true when the address WAS
	// seen recently — verify this guard matches criterion (2) above.
	if ka.LastAttempt.After(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
	// Criterion (3): never succeeded after numRetries attempts.
	if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries {
	// Criterion (4): no success in minBadDays and too many failures.
	if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
		ka.Attempts >= maxFailures {