txs []*types.Tx
}
-func (m *Manager) syncTransactions(peerID string) {
- pending := m.txPool.GetTransactions()
+func (m *Manager) syncMempool(peerID string) {
+ pending := m.mempool.GetTransactions()
if len(pending) == 0 {
return
}
	m.txSyncCh <- &txSyncMsg{peerID, pending}
}
-func (m *Manager) txBroadcastLoop() {
+func (m *Manager) broadcastTxsLoop() {
for {
select {
case obj, ok := <-m.txMsgSub.Chan():
continue
}
}
- case <-m.quitSync:
+ case <-m.quit:
return
}
}
}
-// txSyncLoop takes care of the initial transaction sync for each new
+// syncMempoolLoop takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
-func (m *Manager) txSyncLoop() {
+func (m *Manager) syncMempoolLoop() {
pending := make(map[string]*txSyncMsg)
sending := false // whether a send is active
done := make(chan error, 1) // result of the send
if !sending {
send(msg)
}
-
case err := <-done:
sending = false
if err != nil {
if s := pick(); s != nil {
send(s)
}
+ case <-m.quit:
+ return
}
}
}