1 // Contains the node database, storing previously seen nodes and any collected
2 // metadata about them for QoS purposes.
15 log "github.com/sirupsen/logrus"
16 wire "github.com/tendermint/go-wire"
18 "github.com/vapor/crypto"
19 dbm "github.com/vapor/database/leveldb"
20 "github.com/vapor/errors"
// Tuning knobs for the node database lifecycle.
24 nodeDBNilNodeID = NodeID{} // Special node ID to use as a nil element.
25 nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
26 nodeDBCleanupCycle = time.Hour // Time period for running the expiration task.
29 // nodeDB stores all nodes we know about.
// It persists discovery metadata (last ping/pong times, findnode failures,
// topic tickets) per node ID so QoS information survives restarts.
31 lvl dbm.DB // Interface to the database itself
32 self NodeID // Own node id to prevent adding it into the database
33 runner sync.Once // Ensures we can start at most one expirer
34 quit chan struct{} // Channel to signal the expiring thread to stop
37 // Schema layout for the node database
39 nodeDBVersionKey = []byte("version") // Version of the database to flush if changes
40 nodeDBItemPrefix = []byte("n:") // Identifier to prefix node entries with
// Field suffixes appended after the node ID by makeKey; the ":discover"
// root holds the serialized node record itself.
42 nodeDBDiscoverRoot = ":discover"
43 nodeDBDiscoverPing = nodeDBDiscoverRoot + ":lastping"
44 nodeDBDiscoverPong = nodeDBDiscoverRoot + ":lastpong"
45 nodeDBDiscoverFindFails = nodeDBDiscoverRoot + ":findfail"
46 nodeDBTopicRegTickets = ":tickets"
49 // newNodeDB creates a new node database for storing and retrieving infos about
50 // known peers in the network. If no path is given, an in-memory, temporary
51 // database is constructed.
52 func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
// Empty path (guard is elided here): volatile in-memory store, cannot fail.
54 	return newMemoryNodeDB(self), nil
// Otherwise open (or create) the on-disk leveldb-backed database.
56 	return newPersistentNodeDB(path, version, self)
59 // newMemoryNodeDB creates a new in-memory node database without a persistent
// backend; its contents are lost when the process exits.
61 func newMemoryNodeDB(self NodeID) *nodeDB {
66 quit: make(chan struct{}),
70 // newPersistentNodeDB creates/opens a leveldb backed persistent node database,
71 // also flushing its contents in case of a version mismatch.
72 func newPersistentNodeDB(filePath string, version int, self NodeID) (*nodeDB, error) {
73 dir, file := path.Split(filePath)
// Refuse paths with no file component.
75 return nil, errors.New("unspecified db file name")
77 db := dbm.NewDB(file, dbm.GoLevelDBBackendStr, dir)
79 // The nodes contained in the cache correspond to a certain protocol version.
80 // Flush all nodes if the version doesn't match.
81 currentVer := make([]byte, binary.MaxVarintLen64)
82 currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]
84 blob := db.Get(nodeDBVersionKey)
// No version recorded yet: stamp the database with the current one.
86 db.Set(nodeDBVersionKey, currentVer)
87 } else if !bytes.Equal(blob, currentVer) {
// Version mismatch: wipe the on-disk files and recurse to recreate from
// scratch (the ".db" suffix matches dbm.NewDB's on-disk naming —
// NOTE(review): confirm against the dbm backend).
89 if err := os.RemoveAll(filePath + ".db"); err != nil {
92 return newPersistentNodeDB(filePath, version, self)
98 quit: make(chan struct{}),
102 // makeKey generates the leveldb key-blob from a node id and its particular
103 // field of interest.
104 func makeKey(id NodeID, field string) []byte {
// The nil node ID is special-cased (branch body elided; presumably the bare
// field string is used as the key — confirm against callers).
105 if bytes.Equal(id[:], nodeDBNilNodeID[:]) {
// Regular keys are "n:" + id bytes + field suffix.
108 return append(nodeDBItemPrefix, append(id[:], field...)...)
111 // splitKey tries to split a database key into a node id and a field part.
// It is the inverse of makeKey for keys carrying the "n:" item prefix.
112 func splitKey(key []byte) (id NodeID, field string) {
113 // If the key is not of a node, return it plainly
114 if !bytes.HasPrefix(key, nodeDBItemPrefix) {
115 return NodeID{}, string(key)
117 // Otherwise split the id and field
118 item := key[len(nodeDBItemPrefix):]
119 copy(id[:], item[:len(id)])
120 field = string(item[len(id):])
125 // fetchInt64 retrieves an integer instance associated with a particular
// database key; presumably returns 0 when the key is absent or the payload
// is malformed (those branches are elided here — confirm).
127 func (db *nodeDB) fetchInt64(key []byte) int64 {
128 blob := db.lvl.Get(key)
// Varint-decode the stored blob; handling of the read count is elided.
132 val, read := binary.Varint(blob)
139 // storeInt64 update a specific database entry to the current time instance as a
// varint-encoded int64 value.
141 func (db *nodeDB) storeInt64(key []byte, n int64) {
142 blob := make([]byte, binary.MaxVarintLen64)
// Trim the scratch buffer to the bytes PutVarint actually wrote.
143 blob = blob[:binary.PutVarint(blob, n)]
144 db.lvl.Set(key, blob)
147 // node retrieves a node with a given id from the database.
// Presumably returns nil when the node is unknown or fails to decode
// (those return paths are elided here).
148 func (db *nodeDB) node(id NodeID) *Node {
154 key := makeKey(id, nodeDBDiscoverRoot)
155 rawData := db.lvl.Get(key)
// Deserialize the record with go-wire; decode failures are logged below.
158 wire.ReadBinary(node, bytes.NewReader(rawData), 0, &n, &err)
160 log.WithFields(log.Fields{"module": logModule, "key": key, "node": node, "error": err}).Warn("get node from db err")
// Cache the sha256 of the node ID on the returned node.
164 node.sha = crypto.Sha256Hash(node.ID[:])
168 // updateNode inserts - potentially overwriting - a node into the peer database.
169 func (db *nodeDB) updateNode(node *Node) error {
// Serialize the node with go-wire into an in-memory buffer.
173 blob = new(bytes.Buffer)
176 wire.WriteBinary(node, blob, &n, &err)
// Persist under the node's ":discover" root key.
181 db.lvl.Set(makeKey(node.ID, nodeDBDiscoverRoot), blob.Bytes())
185 // deleteNode deletes all information/keys associated with a node.
186 func (db *nodeDB) deleteNode(id NodeID) {
// An empty field yields the node's bare key prefix, so the iterator covers
// every field stored for this id.
187 deleter := db.lvl.IteratorPrefix(makeKey(id, ""))
189 db.lvl.Delete(deleter.Key())
193 // ensureExpirer is a small helper method ensuring that the data expiration
194 // mechanism is running. If the expiration goroutine is already running, this
195 // method simply returns.
197 // The goal is to start the data evacuation only after the network successfully
198 // bootstrapped itself (to prevent dumping potentially useful seed nodes). Since
199 // it would require significant overhead to exactly trace the first successful
200 // convergence, it's simpler to "ensure" the correct state when an appropriate
201 // condition occurs (i.e. a successful bonding), and discard further events.
202 func (db *nodeDB) ensureExpirer() {
// sync.Once guarantees exactly one expirer goroutine per nodeDB instance.
203 db.runner.Do(func() { go db.expirer() })
206 // expirer should be started in a go routine, and is responsible for looping ad
207 // infinitum and dropping stale data from the database.
208 func (db *nodeDB) expirer() {
209 tick := time.NewTicker(nodeDBCleanupCycle)
// On each tick, purge entries older than nodeDBNodeExpiration; errors are
// logged and the loop continues.
214 if err := db.expireNodes(); err != nil {
215 log.WithFields(log.Fields{"module": logModule, "error": err}).Error("Failed to expire nodedb items")
223 // expireNodes iterates over the database and deletes all nodes that have not
224 // been seen (i.e. received a pong from) for some allotted time.
225 func (db *nodeDB) expireNodes() error {
226 threshold := time.Now().Add(-nodeDBNodeExpiration)
228 // Find discovered nodes that are older than the allowance
229 it := db.lvl.Iterator()
233 // Skip the item if not a discovery node
234 id, field := splitKey(it.Key())
235 if field != nodeDBDiscoverRoot {
238 // Skip the node if not expired yet (and not self)
// Our own entry is always eligible for removal, regardless of pong age.
239 if !bytes.Equal(id[:], db.self[:]) {
240 if seen := db.lastPong(id); seen.After(threshold) {
244 // Otherwise delete all associated information
251 // lastPing retrieves the time of the last ping packet send to a remote node,
252 // requesting binding.
// Presumably yields the Unix epoch when no ping was ever recorded (depends
// on fetchInt64's missing-key behavior, which is elided).
253 func (db *nodeDB) lastPing(id NodeID) time.Time {
254 return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
257 // updateLastPing updates the last time we tried contacting a remote node.
// Stored with second precision (Unix timestamp).
258 func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) {
259 db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
262 // lastPong retrieves the time of the last successful contact from remote node.
// This is the timestamp expireNodes and querySeeds use to judge freshness.
263 func (db *nodeDB) lastPong(id NodeID) time.Time {
264 return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
267 // updateLastPong updates the last time a remote node successfully contacted.
// Stored with second precision (Unix timestamp).
268 func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) {
269 db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
272 // findFails retrieves the number of findnode failures since bonding.
// Presumably 0 when nothing is recorded (see fetchInt64).
273 func (db *nodeDB) findFails(id NodeID) int {
274 return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
277 // updateFindFails updates the number of findnode failures since bonding.
// The counter is stored varint-encoded via storeInt64.
278 func (db *nodeDB) updateFindFails(id NodeID, fails int) {
279 db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
282 // querySeeds retrieves random nodes to be used as potential seed nodes
283 // for bootstrapping.
// At most n nodes no older than maxAge (by last pong) are returned.
284 func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
287 nodes = make([]*Node, 0, n)
288 it = db.lvl.Iterator()
// Give up after n*5 random probes even if fewer than n nodes were found,
// so a sparse database cannot make this loop spin forever.
294 for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
295 // Seek to a random entry. The first byte is incremented by a
296 // random amount each time in order to increase the likelihood
297 // of hitting all existing nodes in very small databases.
299 if _, err := rand.Read(id[:]); err != nil {
300 log.WithFields(log.Fields{"module": logModule, "error": err}).Warn("get rand date")
// NOTE(review): ctr is declared in elided code; only the low 4 bits of the
// random first byte are kept and offset by the rotating counter.
302 id[0] = ctr + id[0]%16
303 it.Seek(makeKey(id, nodeDBDiscoverRoot))
308 continue seek // iterator exhausted
// NOTE(review): the node variable here shadows the count parameter n
// (assigned in an elided line, presumably n := nextNode(it)).
313 if now.Sub(db.lastPong(n.ID)) > maxAge {
// Linear duplicate scan; fine for the small n used in bootstrapping.
316 for i := range nodes {
317 if nodes[i].ID == n.ID {
318 continue seek // duplicate
321 nodes = append(nodes, n)
// fetchTopicRegTickets returns the issued/used topic-registration ticket
// counters recorded for the given node (zero values when absent — the
// short-blob guard is elided here).
326 func (db *nodeDB) fetchTopicRegTickets(id NodeID) (issued, used uint32) {
327 key := makeKey(id, nodeDBTopicRegTickets)
328 blob := db.lvl.Get(key)
// Layout: two big-endian uint32s back-to-back, issued then used.
333 issued = binary.BigEndian.Uint32(blob[0:4])
334 used = binary.BigEndian.Uint32(blob[4:8])
// updateTopicRegTickets persists the issued/used topic-registration ticket
// counters for a node, using the same 8-byte big-endian layout that
// fetchTopicRegTickets reads back.
338 func (db *nodeDB) updateTopicRegTickets(id NodeID, issued, used uint32) {
339 key := makeKey(id, nodeDBTopicRegTickets)
340 blob := make([]byte, 8)
341 binary.BigEndian.PutUint32(blob[0:4], issued)
342 binary.BigEndian.PutUint32(blob[4:8], used)
343 db.lvl.Set(key, blob)
346 // reads the next node record from the iterator, skipping over other
// database entries (non-":discover" fields) and records that fail to decode.
348 func nextNode(it dbm.Iterator) *Node {
// Walk the iterator forward until a discovery root entry is found or the
// iterator is exhausted (the loop condition advances via it.Next()).
355 for end := false; !end; end = !it.Next() {
356 id, field := splitKey(it.Key())
357 if field != nodeDBDiscoverRoot {
// Decode with go-wire; invalid records are logged and skipped below.
361 wire.ReadBinary(node, bytes.NewReader(it.Value()), 0, &n, &err)
363 log.WithFields(log.Fields{"module": logModule, "id": id, "error": err}).Error("invalid node")
372 // close flushes and closes the database files.
373 func (db *nodeDB) close() {