13 log "github.com/sirupsen/logrus"
14 wire "github.com/tendermint/go-wire"
15 cmn "github.com/tendermint/tmlibs/common"
16 flow "github.com/tendermint/tmlibs/flowrate"
// Tuning parameters: batching, buffer sizes, timer periods, and rate limits.
// numBatchMsgPackets caps how many msgPackets sendSomeMsgPackets writes per wake-up.
20 numBatchMsgPackets = 10
21 minReadBufferSize = 1024
22 minWriteBufferSize = 65536
// updateState is the chStatsTimer period; pingTimeout is the pingTimer period.
23 updateState = 2 * time.Second
24 pingTimeout = 40 * time.Second
// flushThrottle bounds how often buffered writes get flushed (see flushTimer).
25 flushThrottle = 100 * time.Millisecond
27 defaultSendQueueCapacity = 1
28 defaultSendRate = int64(512000) // 500KB/s
29 defaultRecvBufferCapacity = 4096
30 defaultRecvMessageCapacity = 22020096 // 21MB
31 defaultRecvRate = int64(512000) // 500KB/s
// defaultSendTimeout bounds how long Channel.sendBytes blocks before returning false.
32 defaultSendTimeout = 10 * time.Second
// receiveCbFunc is invoked with the channel id and the fully reassembled message bytes.
35 type receiveCbFunc func(chID byte, msgBytes []byte)
// errorCbFunc is invoked with the error value when the connection stops due to a failure.
36 type errorCbFunc func(interface{})
39 Each peer has one `MConnection` (multiplex connection) instance.
41 __multiplex__ *noun* a system or signal involving simultaneous transmission of
42 several messages along a single channel of communication.
44 Each `MConnection` handles message transmission on multiple abstract communication
45 `Channel`s. Each channel has a globally unique byte id.
46 The byte id and the relative priorities of each `Channel` are configured upon
47 initialization of the connection.
49 There are two methods for sending messages:
50 func (m MConnection) Send(chID byte, msg interface{}) bool {}
51 func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
53 `Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued
54 for the channel with the given id byte `chID`, or until the request times out.
55 The message `msg` is serialized using the `tendermint/wire` submodule's
56 `WriteBinary()` reflection routine.
58 `TrySend(chID, msg)` is a nonblocking call that returns false if the channel's
61 Inbound message bytes are handled with an onReceive callback function.
63 type MConnection struct {
// Buffered reader/writer wrapping the underlying net.Conn.
67 bufReader *bufio.Reader
68 bufWriter *bufio.Writer
// Flow monitors used to rate-limit sends and receives.
69 sendMonitor *flow.Monitor
70 recvMonitor *flow.Monitor
// channelsIdx maps a channel's byte id to its Channel for O(1) lookup in Send/TrySend.
74 channelsIdx map[byte]*Channel
75 onReceive receiveCbFunc
81 flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled.
82 pingTimer *time.Ticker // send pings periodically
83 chStatsTimer *time.Ticker // update channel stats periodically
86 // MConnConfig is a MConnection configuration.
87 type MConnConfig struct {
// Send/receive rate limits in bytes per second, enforced via the flow monitors.
88 SendRate int64 `mapstructure:"send_rate"`
89 RecvRate int64 `mapstructure:"recv_rate"`
92 // DefaultMConnConfig returns the default config.
// Defaults come from the package constants defaultSendRate/defaultRecvRate (500KB/s each).
93 func DefaultMConnConfig() *MConnConfig {
95 SendRate: defaultSendRate,
96 RecvRate: defaultRecvRate,
100 // NewMConnection wraps net.Conn and creates multiplex connection
// Convenience constructor: delegates to NewMConnectionWithConfig with DefaultMConnConfig().
101 func NewMConnection(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc) *MConnection {
102 return NewMConnectionWithConfig(
107 DefaultMConnConfig())
110 // NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config
111 func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, config *MConnConfig) *MConnection {
112 mconn := &MConnection{
// Buffered I/O over the raw conn; sizes come from the package constants.
114 bufReader: bufio.NewReaderSize(conn, minReadBufferSize),
115 bufWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
116 sendMonitor: flow.New(0, 0),
117 recvMonitor: flow.New(0, 0),
// send has capacity 1 so it acts as a coalescing wake-up signal; pong is unbuffered.
118 send: make(chan struct{}, 1),
119 pong: make(chan struct{}),
120 onReceive: onReceive,
124 pingTimer: time.NewTicker(pingTimeout),
125 chStatsTimer: time.NewTicker(updateState),
// Build the Channel list plus the byte-id index from the descriptors.
129 var channelsIdx = map[byte]*Channel{}
130 var channels = []*Channel{}
132 for _, desc := range chDescs {
133 descCopy := *desc // copy the desc else unsafe access across connections
134 channel := newChannel(mconn, &descCopy)
135 channelsIdx[channel.id] = channel
136 channels = append(channels, channel)
138 mconn.channels = channels
139 mconn.channelsIdx = channelsIdx
// Hook into the BaseService lifecycle (OnStart/OnStop).
141 mconn.BaseService = *cmn.NewBaseService(nil, "MConnection", mconn)
// OnStart implements BaseService: allocates the quit channel and the throttled
// flush timer. Presumably also spawns sendRoutine/recvRoutine — elided here; confirm.
146 func (c *MConnection) OnStart() error {
147 c.BaseService.OnStart()
148 c.quit = make(chan struct{})
149 c.flushTimer = cmn.NewThrottleTimer("flush", flushThrottle)
// OnStop implements BaseService and tears the connection down.
155 func (c *MConnection) OnStop() {
156 c.BaseService.OnStop()
162 // We can't close pong safely here because
163 // recvRoutine may write to it after we've stopped.
164 // Though it doesn't need to get closed at all,
165 // we close it @ recvRoutine.
// String returns a short identifier for logging, based on the remote address.
169 func (c *MConnection) String() string {
170 return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr())
// flush pushes any buffered bytes out to the underlying connection,
// logging (but not propagating) a flush error.
173 func (c *MConnection) flush() {
174 log.WithField("conn", c).Debug("Flush")
175 err := c.bufWriter.Flush()
177 log.WithField("error", err).Error("MConnection flush failed")
181 // Catch panics, usually caused by remote disconnects.
// Intended to be deferred by the connection goroutines; converts a panic
// value plus stack trace into a cmn.StackError.
182 func (c *MConnection) _recover() {
183 if r := recover(); r != nil {
184 stack := debug.Stack()
185 err := cmn.StackError{r, stack}
// stopForError shuts the connection down on error r. The CAS on c.errored
// guarantees the onError callback fires at most once per connection.
190 func (c *MConnection) stopForError(r interface{}) {
192 if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
193 if c.onError != nil {
199 // Queues a message to be sent to channel.
// Blocking variant: serializes msg with go-wire and waits (bounded by the
// channel's send timeout) for it to be queued on channel chID.
200 func (c *MConnection) Send(chID byte, msg interface{}) bool {
205 log.WithFields(log.Fields{
211 // Send message to channel.
212 channel, ok := c.channelsIdx[chID]
214 log.WithField("chID", chID).Error(cmn.Fmt("Cannot send bytes, unknown channel"))
218 success := channel.sendBytes(wire.BinaryBytes(msg))
220 // Wake up sendRoutine if necessary
// Non-blocking signal; the buffered send channel coalesces pending wake-ups.
222 case c.send <- struct{}{}:
226 log.WithFields(log.Fields{
230 }).Error("Send failed")
235 // Queues a message to be sent to channel.
236 // Nonblocking, returns true if successful.
237 func (c *MConnection) TrySend(chID byte, msg interface{}) bool {
242 log.WithFields(log.Fields{
248 // Send message to channel.
249 channel, ok := c.channelsIdx[chID]
251 log.WithField("chID", chID).Error(cmn.Fmt("cannot send bytes, unknown channel"))
// trySendBytes fails fast instead of blocking when the queue is full.
255 ok = channel.trySendBytes(wire.BinaryBytes(msg))
257 // Wake up sendRoutine if necessary
259 case c.send <- struct{}{}:
267 // CanSend returns true if you can send more data onto the chID, false
268 // otherwise. Use only as a heuristic.
// Unknown channel ids are logged and treated as not sendable.
269 func (c *MConnection) CanSend(chID byte) bool {
274 channel, ok := c.channelsIdx[chID]
276 log.WithField("chID", chID).Error(cmn.Fmt("Unknown channel"))
279 return channel.canSend()
282 // sendRoutine polls for packets to send from channels.
// Single writer goroutine: multiplexes timer events (flush, channel stats,
// ping), pong replies, and Send/TrySend wake-ups onto the shared bufWriter.
283 func (c *MConnection) sendRoutine() {
291 case <-c.flushTimer.Ch:
292 // NOTE: flushTimer.Set() must be called every time
293 // something is written to .bufWriter.
// Periodically decay per-channel stats used for priority scheduling.
295 case <-c.chStatsTimer.C:
296 for _, channel := range c.channels {
297 channel.updateStats()
// Heartbeat: write a ping packet and account its bytes to the send monitor.
299 case <-c.pingTimer.C:
300 log.Debug("Send Ping")
301 wire.WriteByte(packetTypePing, c.bufWriter, &n, &err)
302 c.sendMonitor.Update(int(n))
305 log.Debug("Send Pong")
306 wire.WriteByte(packetTypePong, c.bufWriter, &n, &err)
307 c.sendMonitor.Update(int(n))
312 // Send some msgPackets
313 eof := c.sendSomeMsgPackets()
315 // Keep sendRoutine awake.
// Re-signal ourselves when channels still have data pending (eof == false).
317 case c.send <- struct{}{}:
327 log.WithFields(log.Fields{
330 }).Error("Connection failed @ sendRoutine")
339 // Returns true if messages from channels were exhausted.
340 // Blocks in accordance to .sendMonitor throttling.
341 func (c *MConnection) sendSomeMsgPackets() bool {
342 // Block until .sendMonitor says we can write.
343 // Once we're ready we send more than we asked for,
344 // but amortized it should even out.
345 c.sendMonitor.Limit(maxMsgPacketTotalSize, atomic.LoadInt64(&c.config.SendRate), true)
347 // Now send some msgPackets.
// At most numBatchMsgPackets per call, so one burst cannot monopolize the writer.
348 for i := 0; i < numBatchMsgPackets; i++ {
349 if c.sendMsgPacket() {
356 // Returns true if messages from channels were exhausted.
// Picks the channel with the lowest recentlySent/priority ratio and writes
// one msgPacket from it to bufWriter.
357 func (c *MConnection) sendMsgPacket() bool {
358 // Choose a channel to create a msgPacket from.
359 // The chosen channel will be the one whose recentlySent/priority is the least.
360 var leastRatio float32 = math.MaxFloat32
361 var leastChannel *Channel
362 for _, channel := range c.channels {
363 // If nothing to send, skip this channel
364 if !channel.isSendPending() {
367 // Get ratio, and keep track of lowest ratio.
368 ratio := float32(channel.recentlySent) / float32(channel.priority)
369 if ratio < leastRatio {
371 leastChannel = channel
// No channel had anything pending: report exhausted.
376 if leastChannel == nil {
379 // c.Logger.Info("Found a msgPacket to send")
382 // Make & send a msgPacket from this channel
383 n, err := leastChannel.writeMsgPacketTo(c.bufWriter)
385 log.WithField("error", err).Error("Failed to write msgPacket")
// Account the written bytes toward the send rate limiter.
389 c.sendMonitor.Update(int(n))
394 // recvRoutine reads msgPackets and reconstructs the message using the channels' "recving" buffer.
395 // After a whole message has been assembled, it's pushed to onReceive().
396 // Blocks depending on how the connection is throttled.
397 func (c *MConnection) recvRoutine() {
402 // Block until .recvMonitor says we can read.
403 c.recvMonitor.Limit(maxMsgPacketTotalSize, atomic.LoadInt64(&c.config.RecvRate), true)
406 // Peek into bufReader for debugging
407 if numBytes := c.bufReader.Buffered(); numBytes > 0 {
// NOTE(review): log.Infof is called here (and below) without format verbs,
// so the trailing args are never interpolated into the message — consider
// log.WithField or adding %d/%X verbs.
408 log.Infof("Peek connection buffer numBytes:", numBytes)
409 bytes, err := c.bufReader.Peek(cmn.MinInt(numBytes, 100))
411 log.Infof("bytes:", bytes)
413 log.Warning("Error peeking connection buffer err:", err)
416 log.Warning("Received bytes number is:", numBytes)
// Read the packet-type byte and account it toward the recv rate limiter.
423 pktType := wire.ReadByte(c.bufReader, &n, &err)
424 c.recvMonitor.Update(int(n))
427 log.WithFields(log.Fields{
430 }).Error("Connection failed @ recvRoutine (reading byte)")
437 // Read more depending on packet type.
440 // TODO: prevent abuse, as they cause flush()'s.
441 log.Debug("Receive Ping")
445 log.Debug("Receive Pong")
// Message packet: decode (size-capped at maxMsgPacketTotalSize) and route it.
447 pkt, n, err := msgPacket{}, int(0), error(nil)
448 wire.ReadBinaryPtr(&pkt, c.bufReader, maxMsgPacketTotalSize, &n, &err)
449 c.recvMonitor.Update(int(n))
452 log.WithFields(log.Fields{
455 }).Error("Connection failed @ recvRoutine")
// An unknown channel id in a decoded packet is a protocol violation: panic.
460 channel, ok := c.channelsIdx[pkt.ChannelID]
461 if !ok || channel == nil {
462 cmn.PanicQ(cmn.Fmt("Unknown channel %X", pkt.ChannelID))
// Append the fragment; recvMsgPacket yields the full message once EOF is seen.
464 msgBytes, err := channel.recvMsgPacket(pkt)
467 log.WithFields(log.Fields{
470 }).Error("Connection failed @ recvRoutine")
476 log.WithFields(log.Fields{
477 "channelID": pkt.ChannelID,
478 "msgBytes": msgBytes,
479 }).Debug("Received bytes")
480 c.onReceive(pkt.ChannelID, msgBytes)
483 cmn.PanicSanity(cmn.Fmt("Unknown message type %X", pktType))
// Drain pong after the loop exits; per the OnStop note, pong is closed here.
489 for _ = range c.pong {
// ConnectionStatus is a snapshot of the connection's flow monitors and channels.
494 type ConnectionStatus struct {
495 SendMonitor flow.Status
496 RecvMonitor flow.Status
497 Channels []ChannelStatus
// ChannelStatus is a snapshot of one Channel's queue and priority state.
500 type ChannelStatus struct {
502 SendQueueCapacity int
// Status returns a point-in-time ConnectionStatus snapshot for this connection.
508 func (c *MConnection) Status() ConnectionStatus {
509 var status ConnectionStatus
510 status.SendMonitor = c.sendMonitor.Status()
511 status.RecvMonitor = c.recvMonitor.Status()
512 status.Channels = make([]ChannelStatus, len(c.channels))
513 for i, channel := range c.channels {
514 status.Channels[i] = ChannelStatus{
516 SendQueueCapacity: cap(channel.sendQueue),
// NOTE(review): sendQueueSize is written with atomics elsewhere; this plain
// read is racy, as the existing TODO acknowledges.
517 SendQueueSize: int(channel.sendQueueSize), // TODO use atomic
518 Priority: channel.priority,
519 RecentlySent: channel.recentlySent,
525 //-----------------------------------------------------------------------------
// ChannelDescriptor configures one multiplexed Channel; zero-valued
// capacities are filled in by FillDefaults.
527 type ChannelDescriptor struct {
530 SendQueueCapacity int
531 RecvBufferCapacity int
532 RecvMessageCapacity int
// FillDefaults replaces zero-valued capacities with the package defaults.
535 func (chDesc *ChannelDescriptor) FillDefaults() {
536 if chDesc.SendQueueCapacity == 0 {
537 chDesc.SendQueueCapacity = defaultSendQueueCapacity
539 if chDesc.RecvBufferCapacity == 0 {
540 chDesc.RecvBufferCapacity = defaultRecvBufferCapacity
542 if chDesc.RecvMessageCapacity == 0 {
543 chDesc.RecvMessageCapacity = defaultRecvMessageCapacity
548 // NOTE: not goroutine-safe.
// Channel is one logical stream multiplexed over the shared MConnection.
549 type Channel struct {
551 desc *ChannelDescriptor
// Outbound messages wait here until sendRoutine drains them.
553 sendQueue chan []byte
554 sendQueueSize int32 // atomic.
// Feeds the recentlySent/priority scheduling ratio in sendMsgPacket.
558 recentlySent int64 // exponential moving average
// newChannel validates the descriptor (priority must be > 0) and builds a Channel.
561 func newChannel(conn *MConnection, desc *ChannelDescriptor) *Channel {
563 if desc.Priority <= 0 {
// NOTE(review): typo in the panic message — "postive" should read "positive".
564 cmn.PanicSanity("Channel default priority must be a postive integer")
570 sendQueue: make(chan []byte, desc.SendQueueCapacity),
571 recving: make([]byte, 0, desc.RecvBufferCapacity),
572 priority: desc.Priority,
576 // Queues message to send to this channel.
578 // Times out (and returns false) after defaultSendTimeout
579 func (ch *Channel) sendBytes(bytes []byte) bool {
581 case ch.sendQueue <- bytes:
// Track queue depth atomically; decremented once the message fully drains.
582 atomic.AddInt32(&ch.sendQueueSize, 1)
584 case <-time.After(defaultSendTimeout):
589 // Queues message to send to this channel.
590 // Nonblocking, returns true if successful.
// Same bookkeeping as sendBytes but fails immediately when the queue is full.
592 func (ch *Channel) trySendBytes(bytes []byte) bool {
594 case ch.sendQueue <- bytes:
595 atomic.AddInt32(&ch.sendQueueSize, 1)
// loadSendQueueSize atomically reads the current outbound queue depth.
603 func (ch *Channel) loadSendQueueSize() (size int) {
604 return int(atomic.LoadInt32(&ch.sendQueueSize))
608 // Use only as a heuristic.
// NOTE(review): compares against the package defaultSendQueueCapacity, not
// this channel's configured desc.SendQueueCapacity — confirm this is intended.
609 func (ch *Channel) canSend() bool {
610 return ch.loadSendQueueSize() < defaultSendQueueCapacity
613 // Returns true if any msgPackets are pending to be sent.
614 // Call before calling nextMsgPacket()
// Side effect: when nothing is mid-flight, pops the next queued message into ch.sending.
616 func (ch *Channel) isSendPending() bool {
617 if len(ch.sending) == 0 {
618 if len(ch.sendQueue) == 0 {
621 ch.sending = <-ch.sendQueue
626 // Creates a new msgPacket to send.
627 // Not goroutine-safe
// Chops up to maxMsgPacketPayloadSize bytes off ch.sending. EOF=0x01 marks the
// final fragment of a message, at which point sendQueueSize is decremented.
628 func (ch *Channel) nextMsgPacket() msgPacket {
629 packet := msgPacket{}
630 packet.ChannelID = byte(ch.id)
631 packet.Bytes = ch.sending[:cmn.MinInt(maxMsgPacketPayloadSize, len(ch.sending))]
632 if len(ch.sending) <= maxMsgPacketPayloadSize {
633 packet.EOF = byte(0x01)
635 atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
637 packet.EOF = byte(0x00)
// Advance past the bytes just packetized.
638 ch.sending = ch.sending[cmn.MinInt(maxMsgPacketPayloadSize, len(ch.sending)):]
643 // Writes next msgPacket to w.
644 // Not goroutine-safe
645 func (ch *Channel) writeMsgPacketTo(w io.Writer) (n int, err error) {
646 packet := ch.nextMsgPacket()
// Frame: one type byte (packetTypeMsg) followed by the wire-encoded packet.
647 wire.WriteByte(packetTypeMsg, w, &n, &err)
648 wire.WriteBinary(packet, w, &n, &err)
// recentlySent feeds the priority scheduling ratio in sendMsgPacket.
650 ch.recentlySent += int64(n)
655 // Handles incoming msgPackets. Returns a msg bytes if msg is complete.
656 // Not goroutine-safe
657 func (ch *Channel) recvMsgPacket(packet msgPacket) ([]byte, error) {
// Reject messages that would exceed the configured per-message capacity.
658 if ch.desc.RecvMessageCapacity < len(ch.recving)+len(packet.Bytes) {
659 return nil, wire.ErrBinaryReadOverflow
661 ch.recving = append(ch.recving, packet.Bytes...)
662 if packet.EOF == byte(0x01) {
// NOTE(review): msgBytes aliases ch.recving's backing array, which is reused
// below via recving[:0]; later packets may overwrite the returned message.
// Confirm callers copy or fully consume it before the next packet arrives.
663 msgBytes := ch.recving
664 // clear the slice without re-allocating.
665 // http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go
666 // suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes,
667 // at which point the recving slice stops being used and should be garbage collected
668 ch.recving = ch.recving[:0] // make([]byte, 0, ch.desc.RecvBufferCapacity)
674 // Call this periodically to update stats for throttling purposes.
675 // Not goroutine-safe
676 func (ch *Channel) updateStats() {
677 // Exponential decay of stats.
// recentlySent decays by 20% each tick (driven by chStatsTimer in sendRoutine).
679 ch.recentlySent = int64(float64(ch.recentlySent) * 0.8)
682 //-----------------------------------------------------------------------------
// Wire-format constants: packet size bounds and the packet-type tag bytes.
685 maxMsgPacketPayloadSize = 1024
686 maxMsgPacketOverheadSize = 10 // It's actually lower but good enough
687 maxMsgPacketTotalSize = maxMsgPacketPayloadSize + maxMsgPacketOverheadSize
// Tag bytes written before each packet so the receiver can dispatch on type.
688 packetTypePing = byte(0x01)
689 packetTypePong = byte(0x02)
690 packetTypeMsg = byte(0x03)
693 // Messages in channels are chopped into smaller msgPackets for multiplexing.
694 type msgPacket struct {
696 EOF byte // 1 means message ends here.
// String renders the packet as MsgPacket{channelID:payload T:eof}, all in hex.
700 func (p msgPacket) String() string {
701 return fmt.Sprintf("MsgPacket{%X:%X T:%X}", p.ChannelID, p.Bytes, p.EOF)