// HardenedKeyStart bip32 hierarchical deterministic wallets
// keys with index ≥ 0x80000000 are hardened keys
HardenedKeyStart = 0x80000000
+ logModule = "account"
)
var (
for _, slice := range image.Slice {
if existed := m.db.Get(Key(slice.Account.ID)); existed != nil {
log.WithFields(log.Fields{
- "alias": slice.Account.Alias,
- "id": slice.Account.ID,
+ "module": logModule,
+ "alias": slice.Account.Alias,
+ "id": slice.Account.ID,
}).Warning("skip restore account due to already existed")
continue
}
for utxoIter.Next() {
u := &UTXO{}
if err := json.Unmarshal(utxoIter.Value(), u); err != nil {
- log.WithField("err", err).Error("utxoKeeper findUtxos fail on unmarshal utxo")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("utxoKeeper findUtxos fail on unmarshal utxo")
continue
}
appendUtxo(u)
// SUCCESS indicates the rpc calling is successful.
SUCCESS = "success"
// FAIL indicated the rpc calling is failed.
- FAIL = "fail"
+ FAIL = "fail"
+ logModule = "api"
)
// Response describes the response standard.
// StartServer start the server
func (a *API) StartServer(address string) {
- log.WithField("api address:", address).Info("Rpc listen")
+ log.WithFields(log.Fields{"module": logModule, "api address": address}).Info("Rpc listen")
listener, err := net.Listen("tcp", address)
if err != nil {
cmn.Exit(cmn.Fmt("Failed to register tcp port: %v", err))
// we call it.
go func() {
if err := a.server.Serve(listener); err != nil {
- log.WithField("error", errors.Wrap(err, "Serve")).Error("Rpc server")
+ log.WithFields(log.Fields{"module": logModule, "error": errors.Wrap(err, "Serve")}).Error("Rpc server")
}
}()
}
// TODO(tessr): check that this path exists; return early if this path isn't legit
req, err := authenticator.Authenticate(req)
if err != nil {
- log.WithField("error", errors.Wrap(err, "Serve")).Error("Authenticate fail")
+ log.WithFields(log.Fields{"module": logModule, "error": errors.Wrap(err, "Serve")}).Error("Authenticate fail")
err = errors.WithDetail(errNotAuthenticated, err.Error())
errorFormatter.Write(req.Context(), rw, err)
return
Filter string `json:"filter"`
}) Response {
if err := a.txFeedTracker.Create(ctx, in.Alias, in.Filter); err != nil {
- log.WithField("error", err).Error("Add TxFeed Failed")
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("Add TxFeed Failed")
return NewErrorResponse(err)
}
return NewSuccessResponse(nil)
return NewErrorResponse(err)
}
if err := a.txFeedTracker.Create(ctx, in.Alias, in.Filter); err != nil {
- log.WithField("error", err).Error("Update TxFeed Failed")
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("Update TxFeed Failed")
return NewErrorResponse(err)
}
return NewSuccessResponse(nil)
"github.com/bytom/crypto/ed25519/chainkd"
)
+const logModule = "pseudohsm"
+
// KeyImage is the struct for hold export key data
type KeyImage struct {
XKeys []*encryptedKeyJSON `json:"xkeys"`
copy(xPub[:], data)
if h.cache.hasKey(xPub) {
log.WithFields(log.Fields{
- "alias": xKey.Alias,
- "id": xKey.ID,
- "xPub": xKey.XPub,
+ "module": logModule,
+ "alias": xKey.Alias,
+ "id": xKey.ID,
+ "xPub": xKey.XPub,
}).Warning("skip restore key due to already existed")
continue
}
func (kc *keyCache) reload() {
keys, err := kc.scan()
if err != nil {
- log.WithField("load keys error", err).Error("can't load keys")
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("can't load keys")
}
kc.all = keys
sort.Sort(kc.all)
for _, k := range keys {
kc.byPubs[k.XPub] = append(kc.byPubs[k.XPub], k)
}
- log.WithField("cache has keys:", len(kc.all)).Debug("reloaded keys")
+ log.WithFields(log.Fields{"module": logModule, "cache has keys": len(kc.all)}).Debug("reloaded keys")
}
func (kc *keyCache) scan() ([]XPub, error) {
err = json.NewDecoder(buf).Decode(&keyJSON)
switch {
case err != nil:
- log.WithField("decode json err", err).Errorf("can't decode key %s: %v", path, err)
+ log.WithFields(log.Fields{"module": logModule, "decode json err": err}).Errorf("can't decode key %s: %v", path, err)
+
case (keyJSON.Alias == ""):
- log.WithField("can't decode key, key path:", path).Warn("missing or void alias")
+ log.WithFields(log.Fields{"module": logModule, "key path": path}).Warn("can't decode key: missing or void alias")
default:
keys = append(keys, XPub{XPub: keyJSON.XPub, Alias: keyJSON.Alias, File: path})
}
}
sigBytes, err := signFn(ctx, keyID.XPub, path, tpl.Hash(index).Byte32(), auth)
if err != nil {
- log.WithField("err", err).Warningf("computing signature %d", i)
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Warningf("computing signature %d", i)
continue
}
}
sigBytes, err := signFn(ctx, keyID.XPub, path, h, auth)
if err != nil {
- log.WithField("err", err).Warningf("computing signature %d", i)
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Warningf("computing signature %d", i)
continue
}
"github.com/bytom/protocol/vm"
)
+const logModule = "txbuilder"
+
// errors
var (
//ErrBadRefData means invalid reference data
for i, action := range actions {
err := action.Build(ctx, &builder)
if err != nil {
- log.WithFields(log.Fields{"action index": i, "error": err}).Error("Loop tx's action")
+ log.WithFields(log.Fields{"module": logModule, "action index": i, "error": err}).Error("Loop tx's action")
errs = append(errs, errors.WithDetailf(err, "action index %v", i))
}
}
const (
//FilterNumMax max txfeed filter amount.
FilterNumMax = 1024
+ logModule = "txfeed"
)
var (
//Delete delete txfeed with alias.
func (t *Tracker) Delete(ctx context.Context, alias string) error {
- log.WithField("delete", alias).Info("delete txfeed")
+ log.WithFields(log.Fields{"module": logModule, "delete": alias}).Info("delete txfeed")
if alias == "" {
return errors.WithDetail(ErrEmptyAlias, "del transaction feed with empty alias")
if err != nil {
return err
}
- log.WithField("filter", string(b)).Info("find new tx match filter")
+ log.WithFields(log.Fields{"module": logModule, "filter": string(b)}).Info("find new tx match filter")
t.txfeedCh <- tx
}
}
func initFiles(cmd *cobra.Command, args []string) {
configFilePath := path.Join(config.RootDir, "config.toml")
if _, err := os.Stat(configFilePath); !os.IsNotExist(err) {
- log.WithField("config", configFilePath).Info("Already exists config file.")
+ log.WithFields(log.Fields{"module": logModule, "config": configFilePath}).Info("Already exists config file.")
return
}
cfg.EnsureRoot(config.RootDir, "solonet")
}
- log.WithField("config", configFilePath).Info("Initialized bytom")
+ log.WithFields(log.Fields{"module": logModule, "config": configFilePath}).Info("Initialized bytom")
}
"github.com/bytom/node"
)
+const logModule = "cmd"
+
var runNodeCmd = &cobra.Command{
Use: "node",
Short: "Run the bytomd",
// Create & start node
n := node.NewNode(config)
if _, err := n.Start(); err != nil {
- log.WithField("err", err).Fatal("failed to start node")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Fatal("failed to start node")
}
nodeInfo := n.NodeInfo()
log.WithFields(log.Fields{
+ "module": logModule,
"version": nodeInfo.Version,
"network": nodeInfo.Network,
"duration": time.Since(startTime),
maxNonce = ^uint64(0) // 2^64 - 1
defaultNumWorkers = 1
hashUpdateSecs = 1
+ logModule = "cpuminer"
)
// CPUMiner provides facilities for solving blocks (mining) using the CPU in
if m.solveBlock(block, ticker, quit) {
if isOrphan, err := m.chain.ProcessBlock(block); err == nil {
log.WithFields(log.Fields{
+ "module": logModule,
"height": block.BlockHeader.Height,
"isOrphan": isOrphan,
"tx": len(block.Transactions),
// Broadcast the block and announce chain insertion event
if err = m.eventDispatcher.Post(event.NewMinedBlockEvent{Block: block}); err != nil {
- log.WithField("height", block.BlockHeader.Height).Errorf("Miner fail on post block, %v", err)
+ log.WithFields(log.Fields{"module": logModule, "height": block.BlockHeader.Height, "error": err}).Error("Miner fail on post block")
}
} else {
- log.WithField("height", block.BlockHeader.Height).Errorf("Miner fail on ProcessBlock, %v", err)
+ log.WithFields(log.Fields{"module": logModule, "height": block.BlockHeader.Height, "error": err}).Error("Miner fail on ProcessBlock")
}
}
}
"github.com/bytom/protocol/vm/vmutil"
)
+const logModule = "mining"
+
// createCoinbaseTx returns a coinbase transaction paying an appropriate subsidy
// based on the passed block height to the provided address. When the address
// is nil, the coinbase transaction will instead be redeemable by anyone.
}
func blkGenSkipTxForErr(txPool *protocol.TxPool, txHash *bc.Hash, err error) {
- log.WithField("error", err).Error("mining block generation: skip tx due to")
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("mining block generation: skip tx due to")
txPool.RemoveTransaction(txHash)
}
log "github.com/sirupsen/logrus"
)
+const logModule = "httpjson"
+
// ErrBadRequest indicates the user supplied malformed JSON input,
// possibly including a datatype that doesn't match what we expected.
var ErrBadRequest = errors.New("httpjson: bad request")
err := json.NewEncoder(w).Encode(Array(v))
if err != nil {
- log.WithField("error", err).Error("Error encountered during writing the Content-Type header using status")
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("Error encountered during writing the Content-Type header using status")
}
}
coreIDKey
// pathKey is the key for the request path being handled.
pathKey
+ logModule = "reqid"
)
// New generates a random request ID.
b := make([]byte, l)
_, err := rand.Read(b)
if err != nil {
- log.WithField("error", err).Info("error making reqID")
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Info("error making reqID")
}
return hex.EncodeToString(b)
}
}
if genesisHash := msg.GetGenesisHash(); sm.genesisHash != *genesisHash {
- log.WithFields(log.Fields{
- "module": logModule,
- "remote genesis": genesisHash.String(),
- "local genesis": sm.genesisHash.String(),
- }).Warn("fail hand shake due to differnt genesis")
+ log.WithFields(log.Fields{"module": logModule, "remote genesis": genesisHash.String(), "local genesis": sm.genesisHash.String()}).Warn("fail hand shake due to different genesis")
return
}
import (
"encoding/hex"
"net"
+ "reflect"
"sync"
log "github.com/sirupsen/logrus"
log.WithField("module", logModule).Warn("the size of filter address is greater than limit")
return
}
+
p.filterAdds.Add(hex.EncodeToString(address))
}
continue
}
if ok := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}); !ok {
+ log.WithFields(log.Fields{"module": logModule, "peer": peer.Addr(), "type": reflect.TypeOf(msg), "message": msg.String()}).Warning("send message to peer error")
ps.removePeer(peer.ID())
continue
}
msg := NewStatusResponseMessage(&bestBlock.BlockHeader, &genesisHash)
for _, peer := range peers {
if ok := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}); !ok {
+ log.WithFields(log.Fields{"module": logModule, "peer": peer.Addr(), "type": reflect.TypeOf(msg), "message": msg.String()}).Warning("send message to peer error")
ps.removePeer(peer.ID())
continue
}
continue
}
if ok := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}); !ok {
+ log.WithFields(log.Fields{
+ "module": logModule,
+ "peer": peer.Addr(),
+ "type": reflect.TypeOf(msg),
+ "message": msg.String(),
+ }).Warning("send message to peer error")
ps.removePeer(peer.ID())
continue
}
w "github.com/bytom/wallet"
)
-const webHost = "http://127.0.0.1"
+const (
+ webHost = "http://127.0.0.1"
+ logModule = "node"
+)
// Node represent bytom node
type Node struct {
txFeed = txfeed.NewTracker(txFeedDB, chain)
if err = txFeed.Prepare(ctx); err != nil {
- log.WithField("error", err).Error("start txfeed")
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("start txfeed")
return nil
}
assets = asset.NewRegistry(walletDB, chain)
wallet, err = w.NewWallet(walletDB, accounts, assets, hsm, chain, dispatcher)
if err != nil {
- log.WithField("error", err).Error("init NewWallet")
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("init NewWallet")
}
// trigger rescan wallet
if err == nil {
log.SetOutput(file)
} else {
- log.WithField("err", err).Info("using default")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Info("using default")
}
}
defaultRecvMessageCapacity = 22020096 // 21MB
defaultRecvRate = int64(512000) // 500KB/s
defaultSendTimeout = 10 * time.Second
+ logModule = "p2p/conn"
)
type receiveCbFunc func(chID byte, msgBytes []byte)
channel, ok := c.channelsIdx[chID]
if !ok {
- log.WithField("chID", chID).Error("cannot send bytes due to unknown channel")
+ log.WithFields(log.Fields{"module": logModule, "chID": chID}).Error("cannot send bytes due to unknown channel")
return false
}
if !channel.sendBytes(wire.BinaryBytes(msg)) {
- log.WithFields(log.Fields{"chID": chID, "conn": c, "msg": msg}).Error("MConnection send failed")
+ log.WithFields(log.Fields{"module": logModule, "chID": chID, "conn": c, "msg": msg}).Error("MConnection send failed")
return false
}
channel, ok := c.channelsIdx[chID]
if !ok {
- log.WithField("chID", chID).Error("cannot send bytes due to unknown channel")
+ log.WithFields(log.Fields{"module": logModule, "chID": chID}).Error("cannot send bytes due to unknown channel")
return false
}
func (c *MConnection) flush() {
if err := c.bufWriter.Flush(); err != nil {
- log.WithField("error", err).Error("MConnection flush failed")
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("MConnection flush failed")
}
}
c.recvMonitor.Update(int(n))
if err != nil {
if c.IsRunning() {
- log.WithFields(log.Fields{"conn": c, "error": err}).Error("Connection failed @ recvRoutine (reading byte)")
+ log.WithFields(log.Fields{"module": logModule, "conn": c, "error": err}).Error("Connection failed @ recvRoutine (reading byte)")
c.conn.Close()
c.stopForError(err)
}
// Read more depending on packet type.
switch pktType {
case packetTypePing:
- log.Debug("receive Ping")
+ log.WithFields(log.Fields{"module": logModule, "conn": c}).Debug("receive Ping")
select {
case c.pong <- struct{}{}:
default:
}
case packetTypePong:
- log.Debug("receive Pong")
+ log.WithFields(log.Fields{"module": logModule, "conn": c}).Debug("receive Pong")
case packetTypeMsg:
pkt, n, err := msgPacket{}, int(0), error(nil)
c.recvMonitor.Update(int(n))
if err != nil {
if c.IsRunning() {
- log.WithFields(log.Fields{"conn": c, "error": err}).Error("failed on recvRoutine")
+ log.WithFields(log.Fields{"module": logModule, "conn": c, "error": err}).Error("failed on recvRoutine")
c.stopForError(err)
}
return
msgBytes, err := channel.recvMsgPacket(pkt)
if err != nil {
if c.IsRunning() {
- log.WithFields(log.Fields{"conn": c, "error": err}).Error("failed on recvRoutine")
+ log.WithFields(log.Fields{"module": logModule, "conn": c, "error": err}).Error("failed on recvRoutine")
c.stopForError(err)
}
return
n, err := leastChannel.writeMsgPacketTo(c.bufWriter)
if err != nil {
- log.WithField("error", err).Error("failed to write msgPacket")
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("failed to write msgPacket")
c.stopForError(err)
return true
}
channel.updateStats()
}
case <-c.pingTimer.C:
- log.Debug("send Ping")
+ log.WithFields(log.Fields{"module": logModule, "conn": c}).Debug("send Ping")
wire.WriteByte(packetTypePing, c.bufWriter, &n, &err)
c.sendMonitor.Update(int(n))
c.flush()
case <-c.pong:
- log.Debug("send Pong")
+ log.WithFields(log.Fields{"module": logModule, "conn": c}).Debug("send Pong")
wire.WriteByte(packetTypePong, c.bufWriter, &n, &err)
c.sendMonitor.Update(int(n))
c.flush()
return
}
if err != nil {
- log.WithFields(log.Fields{"conn": c, "error": err}).Error("Connection failed @ sendRoutine")
+ log.WithFields(log.Fields{"module": logModule, "conn": c, "error": err}).Error("Connection failed @ sendRoutine")
c.stopForError(err)
return
}
// Version not found (i.e. empty cache), insert it
if err = db.Put(nodeDBVersionKey, currentVer, nil); err != nil {
if err := db.Close(); err != nil {
- log.Warn(fmt.Sprintf("db close err %v", err))
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Warn("db close err")
}
return nil, err
}
// Version present, flush if different
if !bytes.Equal(blob, currentVer) {
if err = db.Close(); err != nil {
- log.Warn(fmt.Sprintf("db close err %v", err))
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Warn("db close err")
}
if err = os.RemoveAll(path); err != nil {
return nil, err
key := makeKey(id, nodeDBDiscoverRoot)
rawData, err := db.lvl.Get(key, nil)
if err != nil {
- log.Warn(fmt.Sprintf("get node rawdata err %v", err))
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Warn("get node rawdata err")
return nil
}
wire.ReadBinary(node, bytes.NewReader(rawData), 0, &n, &err)
if err != nil {
- log.Warn(fmt.Sprintf("key %x (%T) %v", key, node, err))
+ log.WithFields(log.Fields{"module": logModule, "key": key, "node": node, "error": err}).Warn("get node from db err")
return nil
}
select {
case <-tick.C:
if err := db.expireNodes(); err != nil {
- log.Error(fmt.Sprintf("Failed to expire nodedb items: %v", err))
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("Failed to expire nodedb items")
}
case <-db.quit:
return
// of hitting all existing nodes in very small databases.
ctr := id[0]
if _, err := rand.Read(id[:]); err != nil {
- log.Warn("get rand date:", err)
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Warn("get rand date")
}
id[0] = ctr + id[0]%16
it.Seek(makeKey(id, nodeDBDiscoverRoot))
key := makeKey(id, nodeDBTopicRegTickets)
blob, err := db.lvl.Get(key, nil)
if err != nil {
- log.Warn("db get raw data:", err)
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Warn("db get raw data")
}
if len(blob) != 8 {
wire.ReadBinary(node, bytes.NewReader(it.Value()), 0, &n, &err)
if err != nil {
- log.Error("invalid node:", id, err)
+ log.WithFields(log.Fields{"module": logModule, "id": id, "error": err}).Error("invalid node")
continue
}
func (db *nodeDB) close() {
close(db.quit)
if err := db.lvl.Close(); err != nil {
- log.Warn("db close err:", err)
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Warn("db close err")
}
}
import (
"bytes"
+ "encoding/hex"
"errors"
"fmt"
"net"
select {
case <-net.closeReq:
- log.Debug("<-net.closeReq")
+ log.WithFields(log.Fields{"module": logModule}).Debug("close request")
break loop
// Ingress packet handling.
case pkt := <-net.read:
- //fmt.Println("read", pkt.ev)
- log.Debug("<-net.read")
+ log.WithFields(log.Fields{"module": logModule}).Debug("read from net")
n := net.internNode(&pkt)
prestate := n.state
status := "ok"
if err := net.handle(n, pkt.ev, &pkt); err != nil {
status = err.Error()
}
- log.Debug("", "msg", net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status)
+ log.WithFields(log.Fields{"module": logModule, "node num": net.tab.count, "event": pkt.ev, "remote id": hex.EncodeToString(pkt.remoteID[:8]), "remote addr": pkt.remoteAddr, "pre state": prestate, "node state": n.state, "status": status}).Debug("handle ingress msg")
// TODO: persist state if n.state goes >= known, delete if it goes <= known
// State transition timeouts.
case timeout := <-net.timeout:
- log.Debug("<-net.timeout")
+ log.WithFields(log.Fields{"module": logModule}).Debug("net timeout")
if net.timeoutTimers[timeout] == nil {
// Stale timer (was aborted).
continue
if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
status = err.Error()
}
- log.Debug("", "msg", net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status)
+ log.WithFields(log.Fields{"module": logModule, "node num": net.tab.count, "event": timeout.ev, "node id": hex.EncodeToString(timeout.node.ID[:8]), "node addr": timeout.node.addr(), "pre state": prestate, "node state": timeout.node.state, "status": status}).Debug("handle timeout")
// Querying.
case q := <-net.queryReq:
- log.Debug("<-net.queryReq")
+ log.WithFields(log.Fields{"module": logModule}).Debug("net query request")
if !q.start(net) {
q.remote.deferQuery(q)
}
// Interacting with the table.
case f := <-net.tableOpReq:
- log.Debug("<-net.tableOpReq")
+ log.WithFields(log.Fields{"module": logModule}).Debug("net table operate request")
f()
net.tableOpResp <- struct{}{}
// Topic registration stuff.
case req := <-net.topicRegisterReq:
- log.Debug("<-net.topicRegisterReq")
+ log.WithFields(log.Fields{"module": logModule, "topic": req.topic}).Debug("net topic register request")
if !req.add {
net.ticketStore.removeRegisterTopic(req.topic)
continue
// determination for new topics.
// if topicRegisterLookupDone == nil {
if topicRegisterLookupTarget.target == (common.Hash{}) {
- log.Debug("topicRegisterLookupTarget == null")
+ log.WithFields(log.Fields{"module": logModule, "topic": req.topic}).Debug("topic register lookup target null")
if topicRegisterLookupTick.Stop() {
<-topicRegisterLookupTick.C
}
}
case nodes := <-topicRegisterLookupDone:
- log.Debug("<-topicRegisterLookupDone")
+ log.WithFields(log.Fields{"module": logModule}).Debug("topic register lookup done")
net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte {
net.ping(n, n.addr())
return n.pingEcho
topicRegisterLookupDone = nil
case <-topicRegisterLookupTick.C:
- log.Debug("<-topicRegisterLookupTick")
+ log.WithFields(log.Fields{"module": logModule}).Debug("topic register lookup tick")
if (topicRegisterLookupTarget.target == common.Hash{}) {
target, delay := net.ticketStore.nextRegisterLookup()
topicRegisterLookupTarget = target
}
case <-nextRegisterTime:
- log.Debug("<-nextRegisterTime")
+ log.WithFields(log.Fields{"module": logModule}).Debug("next register time")
net.ticketStore.ticketRegistered(*nextTicket)
- //fmt.Println("sendTopicRegister", nextTicket.t.node.addr().String(), nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
case req := <-net.topicSearchReq:
if refreshDone == nil {
- log.Debug("<-net.topicSearchReq")
+ log.WithFields(log.Fields{"module": logModule, "topic": req.topic}).Debug("net topic search request")
info, ok := searchInfo[req.topic]
if ok {
if req.delay == time.Duration(0) {
})
case <-statsDump.C:
- log.Debug("<-statsDump.C")
+ log.WithFields(log.Fields{"module": logModule}).Debug("stats dump clock")
/*r, ok := net.ticketStore.radius[testTopic]
if !ok {
fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now())
if printTestImgLogs {
rad := r.radius / (maxRadius/1000000 + 1)
minrad := r.minRadius / (maxRadius/1000000 + 1)
- fmt.Printf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], rad)
- fmt.Printf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], minrad)
+ log.WithFields(log.Fields{"module": logModule}).Debugf("*R %d %v %016x %v", tm/1000000, topic, net.tab.self.sha[:8], rad)
+ log.WithFields(log.Fields{"module": logModule}).Debugf("*MR %d %v %016x %v", tm/1000000, topic, net.tab.self.sha[:8], minrad)
}
}
for topic, t := range net.topictab.topics {
wp := t.wcl.nextWaitPeriod(tm)
if printTestImgLogs {
- fmt.Printf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000)
+ log.WithFields(log.Fields{"module": logModule}).Debugf("*W %d %v %016x %d", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000)
}
}
// Periodic / lookup-initiated bucket refresh.
case <-refreshTimer.C:
- log.Debug("<-refreshTimer.C")
+ log.WithFields(log.Fields{"module": logModule}).Debug("refresh timer clock")
// TODO: ideally we would start the refresh timer after
// fallback nodes have been set for the first time.
if refreshDone == nil {
bucketRefreshTimer.Reset(bucketRefreshInterval)
}()
case newNursery := <-net.refreshReq:
- log.Debug("<-net.refreshReq")
+ log.WithFields(log.Fields{"module": logModule}).Debug("net refresh request")
if newNursery != nil {
net.nursery = newNursery
}
}
net.refreshResp <- refreshDone
case <-refreshDone:
- log.Debug("<-net.refreshDone", "table size", net.tab.count)
+ log.WithFields(log.Fields{"module": logModule, "table size": net.tab.count}).Debug("net refresh done")
if net.tab.count != 0 {
refreshDone = nil
list := searchReqWhenRefreshDone
}
}
}
- log.Debug("loop stopped")
-
- log.Debug(fmt.Sprintf("shutting down"))
+ log.WithFields(log.Fields{"module": logModule}).Debug("loop stopped,shutting down")
if net.conn != nil {
net.conn.Close()
}
seeds = net.nursery
}
if len(seeds) == 0 {
- log.Debug("no seed nodes found")
+ log.WithFields(log.Fields{"module": logModule}).Debug("no seed nodes found")
time.AfterFunc(time.Second*10, func() { close(done) })
return
}
func (q *findnodeQuery) start(net *Network) bool {
// Satisfy queries against the local node directly.
if q.remote == net.tab.self {
- log.Debug("findnodeQuery self")
+ log.WithFields(log.Fields{"module": logModule}).Debug("findnodeQuery self")
closest := net.tab.closest(common.BytesToHash(q.target[:]), bucketSize)
q.reply <- closest.entries
return true
}
if q.remote.state.canQuery && q.remote.pendingNeighbours == nil {
- log.Debug("findnodeQuery", "remote peer:", q.remote.ID, "targetID:", q.target)
+ log.WithFields(log.Fields{"module": logModule, "remote peer": q.remote.ID, "targetID": q.target}).Debug("find node query")
net.conn.sendFindnodeHash(q.remote, q.target)
net.timedEvent(respTimeout, q.remote, neighboursTimeout)
q.remote.pendingNeighbours = q
// Initiate the transition to known.
// The request will be sent later when the node reaches known state.
if q.remote.state == unknown {
- log.Debug("findnodeQuery", "id:", q.remote.ID, "status:", "unknown->verifyinit")
+ log.WithFields(log.Fields{"module": logModule, "id": q.remote.ID, "status": "unknown->verify init"}).Debug("find node query")
net.transition(q.remote, verifyinit)
}
return false
//fmt.Println(" not sent")
return
}
- log.Debug("Pinging remote node", "node", n.ID)
+ log.WithFields(log.Fields{"module": logModule, "node": n.ID}).Debug("Pinging remote node")
n.pingTopics = net.ticketStore.regTopicSet()
n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics)
net.timedEvent(respTimeout, n, pongTimeout)
}
func (net *Network) handlePing(n *Node, pkt *ingressPacket) {
- log.Debug("Handling remote ping", "node", n.ID)
+ log.WithFields(log.Fields{"module": logModule, "node": n.ID}).Debug("Handling remote ping")
ping := pkt.data.(*ping)
n.TCP = ping.From.TCP
t := net.topictab.getTicket(n, ping.Topics)
}
func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error {
- log.Debug("Handling known pong", "node", n.ID)
+ log.WithFields(log.Fields{"module": logModule, "node": n.ID}).Debug("Handling known pong")
net.abortTimedEvent(n, pongTimeout)
now := Now()
ticket, err := pongToTicket(now, n.pingTopics, n, pkt)
// fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data)
net.ticketStore.addTicket(now, pkt.data.(*pong).ReplyTok, ticket)
} else {
- log.Debug("Failed to convert pong to ticket", "err", err)
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Debug("Failed to convert pong to ticket")
}
n.pingEcho = nil
n.pingTopics = nil
for i, rn := range req.Nodes {
nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn)
if err != nil {
- log.Debug(fmt.Sprintf("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err))
+ log.WithFields(log.Fields{"module": logModule, "ip": rn.IP, "id": n.ID[:8], "addr": pkt.remoteAddr, "error": err}).Debug("invalid neighbour")
continue
}
nodes[i] = nn
howtofix := fmt.Sprintf("Please enable network time synchronisation in system settings")
separator := strings.Repeat("-", len(warning))
- log.Warn(separator)
- log.Warn(warning)
- log.Warn(howtofix)
- log.Warn(separator)
+ log.WithFields(log.Fields{"module": logModule}).Warn(separator)
+ log.WithFields(log.Fields{"module": logModule}).Warn(warning)
+ log.WithFields(log.Fields{"module": logModule}).Warn(howtofix)
+ log.WithFields(log.Fields{"module": logModule}).Warn(separator)
} else {
- log.Debug(fmt.Sprintf("Sanity NTP check reported %v drift, all ok", drift))
+ log.WithFields(log.Fields{"module": logModule, "drift": drift}).Debug("Sanity NTP check reported all ok")
}
}
import (
"crypto/rand"
"encoding/binary"
- "fmt"
"net"
"sort"
+ log "github.com/sirupsen/logrus"
+
"github.com/bytom/common"
"github.com/bytom/crypto"
)
return tab
}
-const printTable = false
-
// chooseBucketRefreshTarget selects random refresh targets to keep all Kademlia
// buckets filled with live connections and keep the network topology healthy.
// This requires selecting addresses closer to our own with a higher probability
// used buckets.
func (tab *Table) chooseBucketRefreshTarget() common.Hash {
entries := 0
- if printTable {
- fmt.Println()
- fmt.Println("self ", "id:", tab.self.ID, " hex:", crypto.Sha256Hash(tab.self.ID[:]).Hex())
- }
+ log.WithFields(log.Fields{"module": logModule, "self id": tab.self.ID, "hex": crypto.Sha256Hash(tab.self.ID[:]).Hex()}).Debug("refresh target: self")
for i, b := range &tab.buckets {
entries += len(b.entries)
- if printTable {
- for _, e := range b.entries {
- fmt.Println(i, e.state, e.addr().String(), e.ID.String(), e.sha.Hex())
- }
+ for _, e := range b.entries {
+ log.WithFields(log.Fields{"module": logModule, "bucket": i, "status": e.state, "addr": e.addr().String(), "id": e.ID.String(), "hex": e.sha.Hex()}).Debug("refresh target: bucket entry")
}
}
pong.Expiration = uint64(t.issueTime / AbsTime(time.Second))
pong.TopicHash, _, err = wireHash(t.topics)
if err != nil {
- log.Error("wireHash err:", err)
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("wireHash err")
}
pong.TicketSerial = t.serial
pong.WaitPeriods = make([]uint32, len(t.regTime))
// addTopic starts tracking a topic. If register is true,
// the local node will register the topic and tickets will be collected.
func (s *ticketStore) addTopic(topic Topic, register bool) {
- log.Debug("Adding discovery topic", "topic", topic, "register", register)
+ log.WithFields(log.Fields{"module": logModule, "topic": topic, "register": register}).Debug("Adding discovery topic")
if s.radius[topic] == nil {
s.radius[topic] = newTopicRadius(topic)
}
// removeRegisterTopic deletes all tickets for the given topic.
func (s *ticketStore) removeRegisterTopic(topic Topic) {
- log.Debug("Removing discovery topic", "topic", topic)
+ log.WithFields(log.Fields{"module": logModule, "topic": topic}).Debug("Removing discovery topic")
if s.tickets[topic] == nil {
- log.Warn("Removing non-existent discovery topic", "topic", topic)
+ log.WithFields(log.Fields{"module": logModule, "topic": topic}).Warn("Removing non-existent discovery topic")
return
}
for _, list := range s.tickets[topic].buckets {
// If the topic needs more tickets, return it
if s.tickets[topic].nextLookup < Now() {
next, delay := s.radius[topic].nextTarget(false), 100*time.Millisecond
- log.Debug("Found discovery topic to register", "topic", topic, "target", next.target, "delay", delay)
+ log.WithFields(log.Fields{"module": logModule, "topic": topic, "target": next.target, "delay": delay}).Debug("Found discovery topic to register")
return next, delay
}
}
// No registration topics found or all exhausted, sleep
delay := 40 * time.Second
- log.Debug("No topic found to register", "delay", delay)
+ log.WithFields(log.Fields{"module": logModule, "delay": delay}).Debug("No topic found to register")
return lookupInfo{}, delay
}
func (s *ticketStore) ticketsInWindow(topic Topic) []ticketRef {
// Sanity check that the topic still exists before operating on it
if s.tickets[topic] == nil {
- log.Warn("Listing non-existing discovery tickets", "topic", topic)
+ log.WithFields(log.Fields{"module": logModule, "topic": topic}).Warn("Listing non-existing discovery tickets")
return nil
}
// Gather all the tickers in the next time window
for idx := timeBucket(0); idx < timeWindow; idx++ {
tickets = append(tickets, buckets[s.lastBucketFetched+idx]...)
}
- log.Debug("Retrieved discovery registration tickets", "topic", topic, "from", s.lastBucketFetched, "tickets", len(tickets))
+ log.WithFields(log.Fields{"module": logModule, "topic": topic, "from": s.lastBucketFetched, "tickets": len(tickets)}).Debug("Retrieved discovery registration tickets")
return tickets
}
topic := r.t.topics[r.idx]
tickets := s.tickets[topic]
if tickets == nil {
- log.Warn("Adding ticket to non-existent topic", "topic", topic)
+ log.WithFields(log.Fields{"module": logModule, "topic": topic}).Warn("Adding ticket to non-existent topic")
return
}
bucket := timeBucket(r.t.regTime[r.idx] / AbsTime(ticketTimeBucketLen))
// removeTicket removes a ticket from the ticket store
func (s *ticketStore) removeTicketRef(ref ticketRef) {
- log.Debug("Removing discovery ticket reference", "node", ref.t.node.ID, "serial", ref.t.serial)
+ log.WithFields(log.Fields{"module": logModule, "node": ref.t.node.ID, "serial": ref.t.serial}).Debug("Removing discovery ticket reference")
// Make nextRegisterableTicket return the next available ticket.
s.nextTicketCached = nil
tickets := s.tickets[topic]
if tickets == nil {
- log.Debug("Removing tickets from unknown topic", "topic", topic)
+ log.WithFields(log.Fields{"module": logModule, "topic": topic}).Debug("Removing tickets from unknown topic")
return
}
bucket := timeBucket(ref.t.regTime[ref.idx] / AbsTime(ticketTimeBucketLen))
}
func (s *ticketStore) addTicket(localTime AbsTime, pingHash []byte, ticket *ticket) {
- log.Debug("Adding discovery ticket", "node", ticket.node.ID, "serial", ticket.serial)
+ log.WithFields(log.Fields{"module": logModule, "node": ticket.node.ID, "serial": ticket.serial}).Debug("Adding discovery ticket")
lastReq, ok := s.nodeLastReq[ticket.node]
if !(ok && bytes.Equal(pingHash, lastReq.pingHash)) {
func (s *ticketStore) getNodeTicket(node *Node) *ticket {
if s.nodes[node] == nil {
- log.Debug("Retrieving node ticket", "node", node.ID, "serial", nil)
+ log.WithFields(log.Fields{"module": logModule, "node": node.ID, "serial": nil}).Debug("Retrieving node ticket")
} else {
- log.Debug("Retrieving node ticket", "node", node.ID, "serial", s.nodes[node].serial)
+ log.WithFields(log.Fields{"module": logModule, "node": node.ID, "serial": s.nodes[node].serial}).Debug("Retrieving node ticket")
}
return s.nodes[node]
}
// It is assumed that topics and waitPeriods have the same length.
func (t *topicTable) useTicket(node *Node, serialNo uint32, topics []Topic, idx int, issueTime uint64, waitPeriods []uint32) (registered bool) {
- log.Debug("Using discovery ticket", "serial", serialNo, "topics", topics, "waits", waitPeriods)
- //fmt.Println("useTicket", serialNo, topics, waitPeriods)
+ log.WithFields(log.Fields{"module": logModule, "serial": serialNo, "topics": topics, "waits": waitPeriods}).Debug("Using discovery ticket")
t.collectGarbage()
-
n := t.getOrNewNode(node)
if serialNo < n.lastUsedTicket {
return false
if err != nil {
return nil, err
}
- log.Info("UDP listener up v5", "net", net.tab.self)
+ log.WithFields(log.Fields{"module": logModule, "net": net.tab.self}).Info("UDP listener up v5")
transport.net = net
go transport.readLoop()
return net, nil
if err != nil {
return hash, err
}
- log.Debug(fmt.Sprintf(">>> %v to %x@%v", nodeEvent(ptype), toid[:8], toaddr))
+ log.WithFields(log.Fields{"module": logModule, "event": nodeEvent(ptype), "to id": hex.EncodeToString(toid[:8]), "to addr": toaddr}).Debug("send packet")
if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil {
- log.Info(fmt.Sprint("UDP send failed:", err))
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Info("UDP send failed")
}
return hash, err
}
var size int
wire.WriteJSON(req, b, &size, &err)
if err != nil {
- log.Error(fmt.Sprint("error encoding packet:", err))
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("error encoding packet")
return nil, nil, err
}
packet := b.Bytes()
nbytes, from, err := t.conn.ReadFromUDP(buf)
if netutil.IsTemporaryError(err) {
// Ignore temporary read errors.
- log.Debug(fmt.Sprintf("Temporary read error: %v", err))
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Debug("Temporary read error")
continue
} else if err != nil {
// Shut down the loop for permament errors.
- log.Debug(fmt.Sprintf("Read error: %v", err))
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Debug("Read error")
return
}
t.handlePacket(from, buf[:nbytes])
func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
pkt := ingressPacket{remoteAddr: from}
if err := decodePacket(buf, &pkt); err != nil {
- log.Debug(fmt.Sprintf("Bad packet from %v: %v", from, err))
- //fmt.Println("bad packet", err)
+ log.WithFields(log.Fields{"module": logModule, "from": from, "error": err}).Error("Bad packet")
return err
}
t.net.reqReadPacket(pkt)
var err error
wire.ReadJSON(pkt.data, sigdata[1:], &err)
if err != nil {
- log.Error("wire readjson err:", err)
+ log.WithFields(log.Fields{"module": logModule, "error": err}).Error("wire readjson err")
}
return err
if !skipUPNP && (lAddrIP == "" || lAddrIP == "0.0.0.0") {
extAddr, err = getUPNPExternalAddress(lAddrPort, listenerPort)
upnpMap = err == nil
- log.WithField("err", err).Info("get UPNP external address")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Info("get UPNP external address")
}
if extAddr == nil {
"github.com/btcsuite/go-socks/socks"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
- crypto "github.com/tendermint/go-crypto"
- wire "github.com/tendermint/go-wire"
+ "github.com/tendermint/go-crypto"
+ "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/flowrate"
func() {
var n int
wire.ReadBinary(peerNodeInfo, pc.conn, maxNodeInfoSize, &n, &err2)
- log.WithField("address", peerNodeInfo.ListenAddr).Info("Peer handshake")
+ log.WithFields(log.Fields{"module": logModule, "address": peerNodeInfo.ListenAddr}).Info("Peer handshake")
})
if err1 != nil {
return peerNodeInfo, errors.Wrap(err1, "Error during handshake/write")
if !p.IsRunning() {
return false
}
+
+ log.WithFields(log.Fields{
+ "module": logModule,
+ "peer": p.Addr(),
+ "msg": msg,
+ }).Info("send message to peer")
return p.mconn.TrySend(chID, msg)
}
bannedPeerKey = "BannedPeer"
defaultBanDuration = time.Hour * 1
minNumOutboundPeers = 3
+ logModule = "p2p"
)
//pre-define errors for connecting fail
//DialPeerWithAddress dial node from net address
func (sw *Switch) DialPeerWithAddress(addr *NetAddress) error {
- log.Debug("Dialing peer address:", addr)
+ log.WithFields(log.Fields{"module": logModule, "address": addr}).Debug("Dialing peer")
sw.dialing.Set(addr.IP.String(), addr)
defer sw.dialing.Delete(addr.IP.String())
if err := sw.filterConnByIP(addr.IP.String()); err != nil {
pc, err := newOutboundPeerConn(addr, sw.nodePrivKey, sw.peerConfig)
if err != nil {
- log.WithFields(log.Fields{"address": addr, " err": err}).Error("DialPeer fail on newOutboundPeerConn")
+ log.WithFields(log.Fields{"module": logModule, "address": addr, "err": err}).Error("DialPeer fail on newOutboundPeerConn")
return err
}
if err = sw.AddPeer(pc); err != nil {
- log.WithFields(log.Fields{"address": addr, " err": err}).Error("DialPeer fail on switch AddPeer")
+ log.WithFields(log.Fields{"module": logModule, "address": addr, "err": err}).Error("DialPeer fail on switch AddPeer")
pc.CloseConn()
return err
}
- log.Debug("DialPeer added peer:", addr)
+ log.WithFields(log.Fields{"module": logModule, "address": addr, "peer num": sw.peers.Size()}).Debug("DialPeer added peer")
return nil
}
// StopPeerForError disconnects from a peer due to external error.
func (sw *Switch) StopPeerForError(peer *Peer, reason interface{}) {
- log.WithFields(log.Fields{"peer": peer, " err": reason}).Debug("stopping peer for error")
+ log.WithFields(log.Fields{"module": logModule, "peer": peer, "err": reason}).Debug("stopping peer for error")
sw.stopAndRemovePeer(peer, reason)
}
peerConn, err := newInboundPeerConn(conn, sw.nodePrivKey, sw.Config.P2P)
if err != nil {
if err := conn.Close(); err != nil {
- log.WithFields(log.Fields{"remote peer:": conn.RemoteAddr().String(), " err:": err}).Error("closes connection err")
+ log.WithFields(log.Fields{"module": logModule, "remote peer": conn.RemoteAddr().String(), "err": err}).Error("closes connection err")
}
return err
}
if err = sw.AddPeer(peerConn); err != nil {
if err := conn.Close(); err != nil {
- log.WithFields(log.Fields{"remote peer:": conn.RemoteAddr().String(), " err:": err}).Error("closes connection err")
+ log.WithFields(log.Fields{"module": logModule, "remote peer": conn.RemoteAddr().String(), "err": err}).Error("closes connection err")
}
return err
}
+ log.WithFields(log.Fields{"module": logModule, "address": conn.RemoteAddr().String(), "peer num": sw.peers.Size()}).Debug("add inbound peer")
return nil
}
// disconnect if we alrady have MaxNumPeers
if sw.peers.Size() >= sw.Config.P2P.MaxNumPeers {
if err := inConn.Close(); err != nil {
- log.WithFields(log.Fields{"remote peer:": inConn.RemoteAddr().String(), " err:": err}).Error("closes connection err")
+ log.WithFields(log.Fields{"module": logModule, "remote peer": inConn.RemoteAddr().String(), "err": err}).Error("closes connection err")
}
log.Info("Ignoring inbound connection: already have enough peers.")
continue
func (sw *Switch) dialPeerWorker(a *NetAddress, wg *sync.WaitGroup) {
if err := sw.DialPeerWithAddress(a); err != nil {
- log.WithFields(log.Fields{"addr": a, "err": err}).Error("dialPeerWorker fail on dial peer")
+ log.WithFields(log.Fields{"module": logModule, "addr": a, "err": err}).Error("dialPeerWorker fail on dial peer")
}
wg.Done()
}
func (sw *Switch) ensureOutboundPeers() {
numOutPeers, _, numDialing := sw.NumPeers()
numToDial := (minNumOutboundPeers - (numOutPeers + numDialing))
- log.WithFields(log.Fields{"numOutPeers": numOutPeers, "numDialing": numDialing, "numToDial": numToDial}).Debug("ensure peers")
+ log.WithFields(log.Fields{"module": logModule, "numOutPeers": numOutPeers, "numDialing": numDialing, "numToDial": numToDial}).Debug("ensure peers")
if numToDial <= 0 {
return
}
func (sw *Switch) startInitPeer(peer *Peer) error {
// spawn send/recv routines
if _, err := peer.Start(); err != nil {
- log.WithFields(log.Fields{"remote peer:": peer.RemoteAddr, " err:": err}).Error("init peer err")
+ log.WithFields(log.Fields{"module": logModule, "remote peer": peer.RemoteAddr, "err": err}).Error("init peer err")
}
for _, reactor := range sw.reactors {
sentStatus, receivedStatus := peer.TrafficStatus()
log.WithFields(log.Fields{
+ "module": logModule,
"address": peer.Addr().String(),
"reason": reason,
"duration": sentStatus.Duration.String(),
"total_received": receivedStatus.Bytes,
"average_sent_rate": sentStatus.AvgRate,
"average_received_rate": receivedStatus.AvgRate,
+ "peer num": sw.peers.Size(),
}).Info("disconnect with peer")
}
return err
}
- log.WithFields(log.Fields{"height": node.Height, "hash": node.Hash.String()}).Debug("detach from mainchain")
+ log.WithFields(log.Fields{"module": logModule, "height": node.Height, "hash": node.Hash.String()}).Debug("detach from mainchain")
}
for _, attachNode := range attachNodes {
return err
}
- log.WithFields(log.Fields{"height": node.Height, "hash": node.Hash.String()}).Debug("attach from mainchain")
+ log.WithFields(log.Fields{"module": logModule, "height": node.Height, "hash": node.Hash.String()}).Debug("attach from mainchain")
}
return c.setState(node, utxoView)
for _, prevOrphan := range prevOrphans {
orphanBlock, ok := c.orphanManage.Get(prevOrphan)
if !ok {
- log.WithFields(log.Fields{"hash": prevOrphan.String()}).Warning("saveSubBlock fail to get block from orphanManage")
+ log.WithFields(log.Fields{"module": logModule, "hash": prevOrphan.String()}).Warning("saveSubBlock fail to get block from orphanManage")
continue
}
if err := c.saveBlock(orphanBlock); err != nil {
- log.WithFields(log.Fields{"hash": prevOrphan.String(), "height": orphanBlock.Height}).Warning("saveSubBlock fail to save block")
+ log.WithFields(log.Fields{"module": logModule, "hash": prevOrphan.String(), "height": orphanBlock.Height}).Warning("saveSubBlock fail to save block")
continue
}
func (c *Chain) processBlock(block *types.Block) (bool, error) {
blockHash := block.Hash()
if c.BlockExist(&blockHash) {
- log.WithFields(log.Fields{"hash": blockHash.String(), "height": block.Height}).Info("block has been processed")
+ log.WithFields(log.Fields{"module": logModule, "hash": blockHash.String(), "height": block.Height}).Info("block has been processed")
return c.orphanManage.BlockExist(&blockHash), nil
}
bestNode := c.index.GetNode(&bestBlockHash)
if bestNode.Parent == c.bestNode {
- log.Debug("append block to the end of mainchain")
+ log.WithFields(log.Fields{"module": logModule}).Debug("append block to the end of mainchain")
return false, c.connectBlock(bestBlock)
}
if bestNode.Height > c.bestNode.Height && bestNode.WorkSum.Cmp(c.bestNode.WorkSum) >= 0 {
- log.Debug("start to reorganize chain")
+ log.WithFields(log.Fields{"module": logModule}).Debug("start to reorganize chain")
return false, c.reorganizeChain(bestNode)
}
return false, nil
}
if len(o.orphan) >= numOrphanBlockLimit {
- log.WithFields(log.Fields{"hash": blockHash.String(), "height": block.Height}).Info("the number of orphan blocks exceeds the limit")
+ log.WithFields(log.Fields{"module": logModule, "hash": blockHash.String(), "height": block.Height}).Info("the number of orphan blocks exceeds the limit")
return
}
o.orphan[blockHash] = &orphanBlock{block, time.Now().Add(orphanBlockTTL)}
o.prevOrphans[block.PreviousBlockHash] = append(o.prevOrphans[block.PreviousBlockHash], &blockHash)
- log.WithFields(log.Fields{"hash": blockHash.String(), "height": block.Height}).Info("add block to orphan")
+ log.WithFields(log.Fields{"module": logModule, "hash": blockHash.String(), "height": block.Height}).Info("add block to orphan")
}
// Delete will delete the block from OrphanManage
c.index.SetMainChain(node)
c.bestNode = node
- log.WithFields(log.Fields{"height": c.bestNode.Height, "hash": c.bestNode.Hash.String()}).Debug("chain best status has been update")
+ log.WithFields(log.Fields{"module": logModule, "height": c.bestNode.Height, "hash": c.bestNode.Hash.String()}).Debug("chain best status has been update")
c.cond.Broadcast()
return nil
}
}
if err != nil {
- log.WithFields(log.Fields{"tx_id": tx.Tx.ID.String(), "error": err}).Info("transaction status fail")
+ log.WithFields(log.Fields{"module": logModule, "tx_id": tx.Tx.ID.String(), "error": err}).Info("transaction status fail")
}
return c.txPool.ProcessTransaction(tx, err != nil, block.BlockHeader.Height, gasStatus.BTMValue)
const (
MsgNewTx = iota
MsgRemoveTx
+ logModule = "protocol"
)
var (
atomic.StoreInt64(&tp.lastUpdated, time.Now().Unix())
tp.eventDispatcher.Post(TxMsgEvent{TxMsg: &TxPoolMsg{TxDesc: txD, MsgType: MsgRemoveTx}})
- log.WithField("tx_id", txHash).Debug("remove tx from mempool")
+ log.WithFields(log.Fields{"module": logModule, "tx_id": txHash}).Debug("remove tx from mempool")
}
// GetTransaction return the TxDesc by hash
atomic.StoreInt64(&tp.lastUpdated, time.Now().Unix())
tp.eventDispatcher.Post(TxMsgEvent{TxMsg: &TxPoolMsg{TxDesc: txD, MsgType: MsgNewTx}})
- log.WithField("tx_id", tx.ID.String()).Debug("Add tx to mempool")
+ log.WithFields(log.Fields{"module": logModule, "tx_id": tx.ID.String()}).Debug("Add tx to mempool")
return nil
}
processOrphan := processOrphans[0]
requireParents, err := tp.checkOrphanUtxos(processOrphan.Tx)
if err != nil {
- log.WithField("err", err).Error("processOrphans got unexpect error")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("processOrphans got unexpect error")
continue
}
noUpdate uint16 = iota
hasUpdate
hasMUpdate
+ logModule = "version"
)
var (
}
if s.versionStatus != noUpdate {
log.WithFields(log.Fields{
+ "module": logModule,
"Current version": localVerStr,
"Newer version": remoteVerStr,
"seed": remoteAddr,
}
if err := w.AssetReg.SaveAsset(externalAsset, alias); err != nil {
- log.WithFields(log.Fields{"err": err, "assetID": alias}).Warning("fail on save external asset to internal asset DB")
+ log.WithFields(log.Fields{"module": logModule, "err": err, "assetID": alias}).Warning("fail on save external asset to internal asset DB")
}
return definitionByte
}
for _, tx := range annotatedTxs {
rawTx, err := json.Marshal(tx)
if err != nil {
- log.WithField("err", err).Error("inserting annotated_txs to db")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("inserting annotated_txs to db")
return err
}
// AddUnconfirmedTx handle wallet status update when tx add into txpool
func (w *Wallet) AddUnconfirmedTx(txD *protocol.TxDesc) {
if err := w.saveUnconfirmedTx(txD.Tx); err != nil {
- log.WithField("err", err).Error("wallet fail on saveUnconfirmedTx")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("wallet fail on saveUnconfirmedTx")
}
utxos := txOutToUtxos(txD.Tx, txD.StatusFail, 0)
//delUnconfirmedTx periodically delete locally stored timeout did not confirm txs
func (w *Wallet) delUnconfirmedTx() {
if err := w.delExpiredTxs(); err != nil {
- log.WithField("err", err).Error("wallet fail on delUnconfirmedTx")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("wallet fail on delUnconfirmedTx")
return
}
ticker := time.NewTicker(UnconfirmedTxCheckPeriod)
for {
<-ticker.C
if err := w.delExpiredTxs(); err != nil {
- log.WithField("err", err).Error("wallet fail on delUnconfirmedTx")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("wallet fail on delUnconfirmedTx")
}
}
}
for accountUtxoIter.Next() {
accountUtxo := &account.UTXO{}
if err := json.Unmarshal(accountUtxoIter.Value(), accountUtxo); err != nil {
- log.WithField("err", err).Warn("GetAccountUtxos fail on unmarshal utxo")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Warn("GetAccountUtxos fail on unmarshal utxo")
continue
}
for txIndex, tx := range b.Transactions {
statusFail, err := txStatus.GetStatus(txIndex)
if err != nil {
- log.WithField("err", err).Error("attachUtxos fail on get tx status")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("attachUtxos fail on get tx status")
continue
}
outputUtxos := txOutToUtxos(tx, statusFail, validHeight)
utxos := w.filterAccountUtxo(outputUtxos)
if err := batchSaveUtxos(utxos, batch); err != nil {
- log.WithField("err", err).Error("attachUtxos fail on batchSaveUtxos")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("attachUtxos fail on batchSaveUtxos")
}
}
}
statusFail, err := txStatus.GetStatus(txIndex)
if err != nil {
- log.WithField("err", err).Error("detachUtxos fail on get tx status")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("detachUtxos fail on get tx status")
continue
}
inputUtxos := txInToUtxos(tx, statusFail)
utxos := w.filterAccountUtxo(inputUtxos)
if err := batchSaveUtxos(utxos, batch); err != nil {
- log.WithField("err", err).Error("detachUtxos fail on batchSaveUtxos")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("detachUtxos fail on batchSaveUtxos")
return
}
}
cp := &account.CtrlProgram{}
if err := json.Unmarshal(data, cp); err != nil {
- log.WithField("err", err).Error("filterAccountUtxo fail on unmarshal control program")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("filterAccountUtxo fail on unmarshal control program")
continue
}
resOut, err := tx.Output(*sp.SpentOutputId)
if err != nil {
- log.WithField("err", err).Error("txInToUtxos fail on get resOut")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("txInToUtxos fail on get resOut")
continue
}
func (w *Wallet) commitWalletInfo(batch db.Batch) error {
rawWallet, err := json.Marshal(w.status)
if err != nil {
- log.WithField("err", err).Error("save wallet info")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("save wallet info")
return err
}
for !w.chain.InMainChain(w.status.BestHash) {
block, err := w.chain.GetBlockByHash(&w.status.BestHash)
if err != nil {
- log.WithField("err", err).Error("walletUpdater GetBlockByHash")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("walletUpdater GetBlockByHash")
return
}
if err := w.DetachBlock(block); err != nil {
- log.WithField("err", err).Error("walletUpdater detachBlock stop")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("walletUpdater detachBlock stop")
return
}
}
}
if err := w.AttachBlock(block); err != nil {
- log.WithField("err", err).Error("walletUpdater AttachBlock stop")
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Error("walletUpdater AttachBlock stop")
return
}
}