/*
 * Copyright 2014 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package transport

import (
	"bytes"
	"io"
	"math"
	"net"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/stats"
	"google.golang.org/grpc/status"
)
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
	cancel context.CancelFunc

	conn net.Conn // underlying communication channel

	authInfo credentials.AuthInfo // auth info about the connection
	nextID   uint32               // the next stream ID to be used

	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
	// that the server sent GoAway on this transport.
	goAway chan struct{}
	// awakenKeepalive is used to wake up keepalive after it has gone dormant.
	awakenKeepalive chan struct{}

	hBuf *bytes.Buffer  // the buffer for HPACK encoding
	hEnc *hpack.Encoder // HPACK encoder

	// controlBuf delivers all the control-related tasks (e.g., window
	// updates, reset streams, and various settings) to the controller.
	controlBuf *controlBuffer

	// sendQuotaPool provides flow control for outbound messages.
	sendQuotaPool *quotaPool
	// streamsQuota limits the max number of concurrent streams.
	streamsQuota *quotaPool

	// The scheme used: https if TLS is on, http otherwise.
	scheme string

	creds []credentials.PerRPCCredentials

	// Boolean to keep track of reading activity on the transport.
	// 1 is true and 0 is false.
	activity uint32 // Accessed atomically.
	kp       keepalive.ClientParameters

	statsHandler stats.Handler

	initialWindowSize int32

	outQuotaVersion uint32

	mu            sync.Mutex     // guards the following variables
	state         transportState // the state of the underlying connection
	activeStreams map[uint32]*Stream
	// maxStreams is the max number of concurrent streams.
	maxStreams int
	// streamSendQuota is the per-stream outbound flow control window size set by the peer.
	streamSendQuota uint32
	// prevGoAwayID records the Last-Stream-ID in the previous GoAway frame.
	prevGoAwayID uint32
	// goAwayReason records the http2.ErrCode and debug data received with the
	// GoAway frame from the server.
	goAwayReason GoAwayReason
}
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
	return dialContext(ctx, "tcp", addr)
func isTemporary(err error) bool {
	switch err {
	// Connection closures may be resolved upon retry, and are thus
	// treated as temporary.
	case context.DeadlineExceeded:
		// In Go 1.7, context.DeadlineExceeded implements Timeout(), and this
		// special case is not needed. Until then, we need to keep this
		// check.
		return true
	}
	switch err := err.(type) {
	case interface{ Temporary() bool }:
		return err.Temporary()
	}
	// Timeouts may be resolved upon retry, and are thus treated as
	// temporary.
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. A non-nil error is returned if
// construction fails.
func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) {
	ctx, cancel := context.WithCancel(ctx)
	connectCtx, connectCancel := context.WithTimeout(ctx, timeout)
	// Don't call connectCancel in the success path due to a race in Go 1.6:
	// https://github.com/golang/go/issues/15078.
	conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
	if err != nil {
		if opts.FailOnNonTempDialError {
			return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
		}
		return nil, connectionErrorf(true, err, "transport: error while dialing: %v", err)
	}
	// Any further errors will close the underlying connection.
	defer func(conn net.Conn) {
	var (
		authInfo credentials.AuthInfo
	)
	if creds := opts.TransportCredentials; creds != nil {
		conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn)
		if err != nil {
			// Credentials handshake errors are typically considered permanent
			// to avoid retrying on e.g. bad certificates.
			temp := isTemporary(err)
			return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err)
		}
	}
	kp := opts.KeepaliveParams
	// Validate keepalive parameters.
	if kp.Time == 0 {
		kp.Time = defaultClientKeepaliveTime
	}
	if kp.Timeout == 0 {
		kp.Timeout = defaultClientKeepaliveTimeout
	}
	dynamicWindow := true
	icwz := int32(initialWindowSize)
	if opts.InitialConnWindowSize >= defaultWindowSize {
		icwz = opts.InitialConnWindowSize
		dynamicWindow = false
	}
	writeBufSize := defaultWriteBufSize
	if opts.WriteBufferSize > 0 {
		writeBufSize = opts.WriteBufferSize
	}
	readBufSize := defaultReadBufSize
	if opts.ReadBufferSize > 0 {
		readBufSize = opts.ReadBufferSize
	}
	t := &http2Client{
		userAgent:  opts.UserAgent,
		remoteAddr: conn.RemoteAddr(),
		localAddr:  conn.LocalAddr(),
		// The client-initiated stream IDs are odd, starting from 1.
		nextID:            1,
		goAway:            make(chan struct{}),
		awakenKeepalive:   make(chan struct{}, 1),
		hEnc:              hpack.NewEncoder(&buf),
		framer:            newFramer(conn, writeBufSize, readBufSize),
		controlBuf:        newControlBuffer(),
		fc:                &inFlow{limit: uint32(icwz)},
		sendQuotaPool:     newQuotaPool(defaultWindowSize),
		activeStreams:     make(map[uint32]*Stream),
		creds:             opts.PerRPCCredentials,
		maxStreams:        defaultMaxStreamsClient,
		streamsQuota:      newQuotaPool(defaultMaxStreamsClient),
		streamSendQuota:   defaultWindowSize,
		statsHandler:      opts.StatsHandler,
		initialWindowSize: initialWindowSize,
	}
	if opts.InitialWindowSize >= defaultWindowSize {
		t.initialWindowSize = opts.InitialWindowSize
		dynamicWindow = false
	}
	t.bdpEst = &bdpEstimator{
		bdp:               initialWindowSize,
		updateFlowControl: t.updateFlowControl,
	}
	// Make sure awakenKeepalive can't be written upon.
	// The keepalive routine will make it writable, if need be.
	t.awakenKeepalive <- struct{}{}
	if t.statsHandler != nil {
		t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
			RemoteAddr: t.remoteAddr,
			LocalAddr:  t.localAddr,
		})
		connBegin := &stats.ConnBegin{
			Client: true,
		}
		t.statsHandler.HandleConn(t.ctx, connBegin)
	}
	// Start the reader goroutine for incoming messages. Each transport has
	// a dedicated goroutine which reads HTTP2 frames from the network and
	// dispatches them to the corresponding stream entity.
	go t.reader()
	// Send the connection preface to the server.
	n, err := t.conn.Write(clientPreface)
	if err != nil {
		return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
	}
	if n != len(clientPreface) {
		return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
	}
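	// clientPreface (defined elsewhere in this package) holds the fixed HTTP/2
	// connection preface required by RFC 7540 §3.5; it is assumed here to match
	// http2.ClientPreface, i.e. the octets "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n".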
	if t.initialWindowSize != defaultWindowSize {
		err = t.framer.fr.WriteSettings(http2.Setting{
			ID:  http2.SettingInitialWindowSize,
			Val: uint32(t.initialWindowSize),
		})
	} else {
		err = t.framer.fr.WriteSettings()
	}
	if err != nil {
		return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
	}
	// Adjust the connection flow control window if needed.
	if delta := uint32(icwz - defaultWindowSize); delta > 0 {
		if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
			return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
		}
	}
	t.framer.writer.Flush()
	go loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
	if t.kp.Time != infinity {
		go t.keepalive()
	}
	return t, nil
}
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
	s := &Stream{
		done:           make(chan struct{}),
		goAway:         make(chan struct{}),
		method:         callHdr.Method,
		sendCompress:   callHdr.SendCompress,
		buf:            newRecvBuffer(),
		fc:             &inFlow{limit: uint32(t.initialWindowSize)},
		sendQuotaPool:  newQuotaPool(int(t.streamSendQuota)),
		localSendQuota: newQuotaPool(defaultLocalSendQuota),
		headerChan:     make(chan struct{}),
	}
	s.requestRead = func(n int) {
		t.adjustWindow(s, uint32(n))
	}
	// The client-side stream context should have exactly the same life cycle
	// as the user-provided context. That means s.ctx should be read-only, and
	// s.ctx is done iff ctx is done. So we use the original context here
	// instead of creating a copy.
	s.trReader = &transportReader{
		reader: &recvBufferReader{
		windowHandler: func(n int) {
			t.updateWindow(s, uint32(n))
		},
	}
// NewStream creates a stream and registers it into the transport as an
// "active" stream.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
	pr := &peer.Peer{Addr: t.remoteAddr}
	// Attach auth info if there is any.
	if t.authInfo != nil {
		pr.AuthInfo = t.authInfo
	}
	ctx = peer.NewContext(ctx, pr)
	authData = make(map[string]string)
	// Create an audience string only if needed.
	if len(t.creds) > 0 || callHdr.Creds != nil {
		// Construct the URI required to get the auth request metadata.
		// Omit the port if it is the default one.
		host := strings.TrimSuffix(callHdr.Host, ":443")
		pos := strings.LastIndex(callHdr.Method, "/")
		if pos == -1 {
			pos = len(callHdr.Method)
		}
		audience = "https://" + host + callHdr.Method[:pos]
	}
	for _, c := range t.creds {
		data, err := c.GetRequestMetadata(ctx, audience)
		if err != nil {
			return nil, streamErrorf(codes.Internal, "transport: %v", err)
		}
		for k, v := range data {
			// Capital header names are illegal in HTTP/2.
			k = strings.ToLower(k)
			authData[k] = v
		}
	}
	callAuthData := map[string]string{}
	// Check if credentials.PerRPCCredentials were provided via call options.
	// Note: if these credentials are provided both via dial options and call
	// options, then both sets of credentials will be applied.
	if callCreds := callHdr.Creds; callCreds != nil {
		if !t.isSecure && callCreds.RequireTransportSecurity() {
			return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
		}
		data, err := callCreds.GetRequestMetadata(ctx, audience)
		if err != nil {
			return nil, streamErrorf(codes.Internal, "transport: %v", err)
		}
		for k, v := range data {
			// Capital header names are illegal in HTTP/2.
			k = strings.ToLower(k)
			callAuthData[k] = v
		}
	}
	if t.activeStreams == nil {
		return nil, ErrConnClosing
	}
	if t.state == draining {
		return nil, ErrStreamDrain
	}
	if t.state != reachable {
		return nil, ErrConnClosing
	}
	sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire())
	if err != nil {
		return nil, err
	}
	// Return the quota balance back.
	t.streamsQuota.add(sq - 1)
	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
	// first and create a slice of that exact size.
	// Make the slice of a predictable size to reduce allocations made by append.
	hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
	hfLen += len(authData) + len(callAuthData)
	headerFields := make([]hpack.HeaderField, 0, hfLen)
	headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
	headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
	headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
	headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
	headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
	headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
	if callHdr.SendCompress != "" {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
	}
	if dl, ok := ctx.Deadline(); ok {
		// Send out the timeout regardless of its value. The server can detect a timeout context by itself.
		// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
		timeout := dl.Sub(time.Now())
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
	}
	for k, v := range authData {
		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
	}
	for k, v := range callAuthData {
		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
	}
	if b := stats.OutgoingTags(ctx); b != nil {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
	}
	if b := stats.OutgoingTrace(ctx); b != nil {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
	}
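	// encodeBinHeader and encodeMetadataHeader are defined elsewhere in this
	// package. As a rough sketch of the convention they implement (assumed
	// here, not shown in this file): metadata keys ending in "-bin" carry
	// binary values that are base64 encoded without padding on the wire, e.g.
	//
	//	func exampleEncodeBinHeader(v []byte) string {
	//		return base64.RawStdEncoding.EncodeToString(v)
	//	}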
	if md, ok := metadata.FromOutgoingContext(ctx); ok {
		for k, vv := range md {
			// HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers have been set.
			if isReservedHeader(k) {
				continue
			}
			for _, v := range vv {
				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
			}
		}
	}
	if md, ok := t.md.(*metadata.MD); ok {
		for k, vv := range *md {
			if isReservedHeader(k) {
				continue
			}
			for _, v := range vv {
				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
			}
		}
	}
	if t.state == draining {
		t.streamsQuota.add(1)
		return nil, ErrStreamDrain
	}
	if t.state != reachable {
		return nil, ErrConnClosing
	}
	s := t.newStream(ctx, callHdr)
	t.activeStreams[s.id] = s
	// If the number of active streams changes from 0 to 1, then check if keepalive
	// has gone dormant. If so, wake it up.
	if len(t.activeStreams) == 1 {
		select {
		case t.awakenKeepalive <- struct{}{}:
			t.controlBuf.put(&ping{data: [8]byte{}})
			// Fill the awakenKeepalive channel again as this channel must be
			// kept non-writable except at the point that the keepalive()
			// goroutine is waiting either to be awakened or shut down.
			t.awakenKeepalive <- struct{}{}
		default:
		}
	}
	t.controlBuf.put(&headerFrame{
	if t.statsHandler != nil {
		outHeader := &stats.OutHeader{
			FullMethod:  callHdr.Method,
			RemoteAddr:  t.remoteAddr,
			LocalAddr:   t.localAddr,
			Compression: callHdr.SendCompress,
		}
		t.statsHandler.HandleRPC(s.ctx, outHeader)
	}
	return s, nil
}
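// exampleEncodeTimeout is an illustrative sketch (not the encodeTimeout used
// by NewStream above) of the grpc-timeout header format defined by the
// gRPC-over-HTTP/2 spec: up to eight ASCII digits followed by a unit letter
// ("n", "u", "m", "S", "M", "H"). The real encoder also rounds up rather than
// truncating; strconv is assumed to be imported.
func exampleEncodeTimeout(d time.Duration) string {
	if d <= 0 {
		return "0n"
	}
	units := []struct {
		div  time.Duration
		unit string
	}{
		{time.Nanosecond, "n"},
		{time.Microsecond, "u"},
		{time.Millisecond, "m"},
		{time.Second, "S"},
		{time.Minute, "M"},
	}
	for _, u := range units {
		if v := int64(d / u.div); v < 100000000 { // must fit in 8 digits
			return strconv.FormatInt(v, 10) + u.unit
		}
	}
	return strconv.FormatInt(int64(d/time.Hour), 10) + "H"
}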
// CloseStream clears the footprint of a stream when the stream is not needed any more.
// This must not be executed in the reader's goroutine.
func (t *http2Client) CloseStream(s *Stream, err error) {
	if t.activeStreams == nil {
		return
	}
	if err != nil {
		// Notify in-flight streams before the deletion.
		s.write(recvMsg{err: err})
	}
	delete(t.activeStreams, s.id)
	if t.state == draining && len(t.activeStreams) == 0 {
		// The transport is draining and s is the last live stream on t.
		t.Close()
		return
	}
	// rstStream is true in case the stream is being closed at the client side
	// and the server needs to be informed about it by sending a RST_STREAM
	// frame.
	// To make sure this frame is written to the wire before the headers of the
	// next stream waiting for streamsQuota, we add to the streamsQuota pool only
	// after having acquired the writableChan to send RST_STREAM out (look at
	// the controller() routine).
	var rstStream bool
	var rstError http2.ErrCode
	defer func() {
		if !rstStream {
			// In case the client doesn't have to send RST_STREAM to the server,
			// we can safely add back to the streamsQuota pool now.
			t.streamsQuota.add(1)
			return
		}
		t.controlBuf.put(&resetStream{s.id, rstError})
	}()
	rstStream = s.rstStream
	rstError = s.rstError
	if s.state == streamDone {
		return
	}
	if _, ok := err.(StreamError); ok {
		rstStream = true
		rstError = http2.ErrCodeCancel
	}
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
// accessed anymore.
func (t *http2Client) Close() (err error) {
	if t.state == closing {
		return
	}
	streams := t.activeStreams
	t.activeStreams = nil
	// Notify all active streams.
	for _, s := range streams {
		s.write(recvMsg{err: ErrConnClosing})
	}
	if t.statsHandler != nil {
		connEnd := &stats.ConnEnd{
			Client: true,
		}
		t.statsHandler.HandleConn(t.ctx, connEnd)
	}
// GracefulClose sets the state to draining, which prevents new streams from
// being created and causes the transport to be closed when the last active
// stream is closed. If there are no active streams, the transport is closed
// immediately. This does nothing if the transport is already draining or
// closing.
func (t *http2Client) GracefulClose() error {
	switch t.state {
	case closing, draining:
		return nil
	}
	active := len(t.activeStreams)
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
	select {
	case <-s.ctx.Done():
		return ContextErr(s.ctx.Err())
	case <-t.ctx.Done():
		return ErrConnClosing
	default:
	}
	if hdr == nil && data == nil && opts.Last {
		// stream.CloseSend uses this to send an empty frame with endStream=true.
		t.controlBuf.put(&dataFrame{streamID: s.id, endStream: true, f: func() {}})
		return nil
	}
	// Add some data to the header frame so that we can equally distribute bytes across frames.
	emptyLen := http2MaxFrameLen - len(hdr)
	if emptyLen > len(data) {
		emptyLen = len(data)
	}
	hdr = append(hdr, data[:emptyLen]...)
	data = data[emptyLen:]
	var (
		streamQuota    int
		streamQuotaVer uint32
		localSendQuota int
		err            error
		sqChan         <-chan int
	)
	for idx, r := range [][]byte{hdr, data} {
		for len(r) > 0 {
			size := http2MaxFrameLen
			if streamQuota == 0 { // Used up all the locally cached stream quota.
				sqChan, streamQuotaVer = s.sendQuotaPool.acquireWithVersion()
				// Wait until the stream has some quota to send the data.
				streamQuota, err = wait(s.ctx, t.ctx, s.done, s.goAway, sqChan)
			}
			if localSendQuota <= 0 { // Being a soft limit, it can go negative.
				// Acquire local send quota to be able to write to the controlBuf.
				localSendQuota, err = wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire())
			}
			if size > streamQuota {
				size = streamQuota
			} // No need to do that for localSendQuota since that's only a soft limit.
			// Wait until the transport has some quota to send the data.
			tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire())
			if tq > size { // Overbooked transport quota. Return it back.
				t.sendQuotaPool.add(tq - size)
			}
			streamQuota -= size
			localSendQuota -= size
			p := r[:size]
			// See if this is the last frame to be written.
			var endStream bool
			if opts.Last && len(r)-size == 0 { // No more data in r after this iteration.
				if idx == 0 { // We're writing the data header.
					if len(data) == 0 { // There's no data to follow.
						endStream = true
					}
				} else { // We're writing data.
					endStream = true
				}
			}
			success := func() {
				t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(sz) }})
				r = r[size:]
			}
			failure := func() { // The stream quota version must have changed.
				// Our streamQuota cache is invalidated now, so give it back.
				s.sendQuotaPool.lockedAdd(streamQuota + size)
			}
			if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
				// Couldn't send this chunk out.
				t.sendQuotaPool.add(size)
				localSendQuota += size
			}
		}
	}
	if streamQuota > 0 { // Add the leftover quota back to the stream.
		s.sendQuotaPool.add(streamQuota)
	}
	if localSendQuota > 0 {
		s.localSendQuota.add(localSendQuota)
	}
	if s.state != streamDone {
		s.state = streamWriteDone
	}
	return nil
}
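// The quota bookkeeping in Write above is built on the quotaPool type defined
// elsewhere in this package (acquire/add, plus acquireWithVersion and
// compareAndExecute for the versioned stream quota). As a rough mental model
// only, and not the real implementation: add accumulates quota under a mutex
// and publishes any positive balance on a buffered channel, and a writer takes
// everything that is available, uses what it needs, and adds the remainder
// back, exactly as Write does with streamQuota and tq.
type exampleQuotaPool struct {
	mu    sync.Mutex
	quota int
	c     chan int // carries the currently published quota, capacity 1
}

func newExampleQuotaPool(q int) *exampleQuotaPool {
	p := &exampleQuotaPool{c: make(chan int, 1)}
	p.add(q)
	return p
}

// add credits n (possibly negative) and republishes any positive balance.
func (p *exampleQuotaPool) add(n int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	select {
	case q := <-p.c: // fold back an unconsumed publication
		p.quota += q
	default:
	}
	p.quota += n
	if p.quota <= 0 {
		return
	}
	select {
	case p.c <- p.quota:
		p.quota = 0
	default:
	}
}

// acquire exposes the channel from which a caller receives the published quota.
func (p *exampleQuotaPool) acquire() <-chan int { return p.c }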
func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
	s, ok := t.activeStreams[f.Header().StreamID]
	return s, ok
// adjustWindow sends out an extra window update over the initial window size
// of the stream if the application is requesting data larger in size than
// the window.
func (t *http2Client) adjustWindow(s *Stream, n uint32) {
	if s.state == streamDone {
		return
	}
	if w := s.fc.maybeAdjust(n); w > 0 {
		// Piggyback the connection's window update along.
		if cw := t.fc.resetPendingUpdate(); cw > 0 {
			t.controlBuf.put(&windowUpdate{0, cw})
		}
		t.controlBuf.put(&windowUpdate{s.id, w})
	}
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will be delivered to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
func (t *http2Client) updateWindow(s *Stream, n uint32) {
	if s.state == streamDone {
		return
	}
	if w := s.fc.onRead(n); w > 0 {
		if cw := t.fc.resetPendingUpdate(); cw > 0 {
			t.controlBuf.put(&windowUpdate{0, cw})
		}
		t.controlBuf.put(&windowUpdate{s.id, w})
	}
}
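// The "cumulative quota exceeds the corresponding threshold" logic lives in
// inFlow.onRead, defined elsewhere in this package. As a hedged sketch of the
// idea only (type and field names assumed): a WINDOW_UPDATE is emitted once a
// sizable fraction of the window has actually been consumed, e.g.
//
//	func (f *exampleInFlow) onRead(n uint32) uint32 {
//		f.pendingData -= n
//		f.pendingUpdate += n
//		if f.pendingUpdate >= f.limit/4 {
//			w := f.pendingUpdate
//			f.pendingUpdate = 0
//			return w // the caller sends a window update of w bytes
//		}
//		return 0
//	}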
// updateFlowControl updates the incoming flow control windows
// for the transport and the stream based on the current bdp
// estimation.
func (t *http2Client) updateFlowControl(n uint32) {
	for _, s := range t.activeStreams {
		s.fc.newLimit(n)
	}
	t.initialWindowSize = int32(n)
	t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
	t.controlBuf.put(&settings{
		ss: []http2.Setting{{
			ID:  http2.SettingInitialWindowSize,
			Val: uint32(n),
		}},
	})
func (t *http2Client) handleData(f *http2.DataFrame) {
	size := f.Header().Length
	var sendBDPPing bool
	if t.bdpEst != nil {
		sendBDPPing = t.bdpEst.add(uint32(size))
	}
	// Decouple connection's flow control from application's read.
	// An update on connection's flow control should not depend on
	// whether the user application has read the data or not. Such a
	// restriction is already imposed on the stream's flow control,
	// and therefore the sender will be blocked anyway.
	// Decoupling the connection flow control will prevent other
	// active (fast) streams from starving in the presence of slow or
	// inactive streams.
	//
	// Furthermore, if a bdpPing is being sent out we can piggyback
	// the connection's window update for the bytes we just received.
	if sendBDPPing {
		if size != 0 { // Could've been an empty data frame.
			t.controlBuf.put(&windowUpdate{0, uint32(size)})
		}
		t.controlBuf.put(bdpPing)
	} else {
		if err := t.fc.onData(uint32(size)); err != nil {
			t.notifyError(connectionErrorf(true, err, "%v", err))
			return
		}
		if w := t.fc.onRead(uint32(size)); w > 0 {
			t.controlBuf.put(&windowUpdate{0, w})
		}
	}
	// Select the right stream to dispatch.
	s, ok := t.getStream(f)
	if !ok {
		return
	}
	if size > 0 {
		if s.state == streamDone {
			return
		}
		if err := s.fc.onData(uint32(size)); err != nil {
			s.rstStream = true
			s.rstError = http2.ErrCodeFlowControl
			s.finish(status.New(codes.Internal, err.Error()))
			s.write(recvMsg{err: io.EOF})
			return
		}
		if f.Header().Flags.Has(http2.FlagDataPadded) {
			if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
				t.controlBuf.put(&windowUpdate{s.id, w})
			}
		}
		// TODO(bradfitz, zhaoq): A copy is required here because there is no
		// guarantee f.Data() is consumed before the arrival of the next frame.
		// Can this copy be eliminated?
		if len(f.Data()) > 0 {
			data := make([]byte, len(f.Data()))
			copy(data, f.Data())
			s.write(recvMsg{data: data})
		}
	}
	// The server has closed the stream without sending trailers. Record that
	// the read direction is closed, and set the status appropriately.
	if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
		if s.state == streamDone {
			return
		}
		s.finish(status.New(codes.Internal, "server closed the stream without sending trailers"))
		s.write(recvMsg{err: io.EOF})
	}
func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
	s, ok := t.getStream(f)
	if !ok {
		return
	}
	if s.state == streamDone {
		return
	}
	statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)]
	if !ok {
		warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
		statusCode = codes.Unknown
	}
	s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode))
	s.write(recvMsg{err: io.EOF})
func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
	if f.IsAck() {
		return
	}
	var ss []http2.Setting
	isMaxConcurrentStreamsMissing := true
	f.ForeachSetting(func(s http2.Setting) error {
		if s.ID == http2.SettingMaxConcurrentStreams {
			isMaxConcurrentStreamsMissing = false
		}
		ss = append(ss, s)
		return nil
	})
	if isFirst && isMaxConcurrentStreamsMissing {
		// This means the server is imposing no limit on the
		// maximum number of concurrent streams initiated by the client.
		// So we must remove our self-imposed limit.
		ss = append(ss, http2.Setting{
			ID:  http2.SettingMaxConcurrentStreams,
			Val: math.MaxUint32,
		})
	}
	// The settings will be applied once the ack is sent.
	t.controlBuf.put(&settings{ack: true, ss: ss})
func (t *http2Client) handlePing(f *http2.PingFrame) {
	if f.IsAck() {
		// Maybe it's a BDP ping.
		if t.bdpEst != nil {
			t.bdpEst.calculate(f.Data)
		}
		return
	}
	pingAck := &ping{ack: true}
	copy(pingAck.data[:], f.Data[:])
	t.controlBuf.put(pingAck)
func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
	if t.state != reachable && t.state != draining {
		return
	}
	if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
		infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
	}
	id := f.LastStreamID
	if id > 0 && id%2 != 1 {
		return
	}
	// A client can receive multiple GoAways from the server (see https://github.com/grpc/grpc-go/issues/1387).
	// The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent
	// after an RTT delay with the ID of the last stream the server will process.
	// Therefore, when we get the first GoAway we don't really close any streams. In case of a second GoAway we
	// close all streams created after the second GoAway's ID. This way streams that were in flight while the
	// GoAway from the server was being sent don't get killed.
	select {
	case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
		if id > t.prevGoAwayID {
			return
		}
	default:
	}
	// All streams with IDs greater than the GoAway ID
	// and smaller than the previous GoAway ID should be killed.
	upperLimit := t.prevGoAwayID
	if upperLimit == 0 { // This is the first GoAway frame.
		upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
	}
	for streamID, stream := range t.activeStreams {
		if streamID > id && streamID <= upperLimit {
			close(stream.goAway)
		}
	}
	active := len(t.activeStreams)
// setGoAwayReason sets the value of t.goAwayReason based
// on the GoAway frame received.
// It expects a lock on the transport's mutex to be held by
// the caller.
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
	t.goAwayReason = NoReason
	switch f.ErrCode {
	case http2.ErrCodeEnhanceYourCalm:
		if string(f.DebugData()) == "too_many_pings" {
			t.goAwayReason = TooManyPings
		}
	}
func (t *http2Client) GetGoAwayReason() GoAwayReason {
	return t.goAwayReason
func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
	id := f.Header().StreamID
	incr := f.Increment
	if id == 0 {
		t.sendQuotaPool.add(int(incr))
		return
	}
	if s, ok := t.getStream(f); ok {
		s.sendQuotaPool.add(int(incr))
	}
// operateHeaders takes action on the decoded headers.
func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
	s, ok := t.getStream(frame)
	if !ok {
		return
	}
	s.bytesReceived = true
	var state decodeState
	if err := state.decodeResponseHeader(frame); err != nil {
		s.write(recvMsg{err: err})
		// Something is wrong. Stop reading even when there is data remaining.
		return
	}
	endStream := frame.StreamEnded()
	if t.statsHandler != nil {
		if !endStream {
			inHeader := &stats.InHeader{
				Client:     true,
				WireLength: int(frame.Header().Length),
			}
			t.statsHandler.HandleRPC(s.ctx, inHeader)
		} else {
			inTrailer := &stats.InTrailer{
				Client:     true,
				WireLength: int(frame.Header().Length),
			}
			t.statsHandler.HandleRPC(s.ctx, inTrailer)
		}
	}
	s.recvCompress = state.encoding
	if !endStream && len(state.mdata) > 0 {
		s.header = state.mdata
	}
	if !endStream || s.state == streamDone {
		return
	}
	if len(state.mdata) > 0 {
		s.trailer = state.mdata
	}
	s.finish(state.status())
	s.write(recvMsg{err: io.EOF})
func handleMalformedHTTP2(s *Stream, err error) {
	s.write(recvMsg{err: err})
// reader runs as a separate goroutine in charge of reading data from the
// network connection.
//
// TODO(zhaoq): currently one reader per transport. Investigate whether this is
// optimal.
// TODO(zhaoq): Check the validity of the incoming frame sequence.
func (t *http2Client) reader() {
	// Check the validity of the server preface.
	frame, err := t.framer.fr.ReadFrame()
	if err != nil {
		t.notifyError(err)
		return
	}
	atomic.CompareAndSwapUint32(&t.activity, 0, 1)
	sf, ok := frame.(*http2.SettingsFrame)
	if !ok {
		t.notifyError(err)
		return
	}
	t.handleSettings(sf, true)

	// Loop to keep reading incoming messages on this transport.
	for {
		frame, err := t.framer.fr.ReadFrame()
		atomic.CompareAndSwapUint32(&t.activity, 0, 1)
		if err != nil {
			// Abort an active stream if the http2.Framer returns a
			// http2.StreamError. This can happen only if the server's response
			// is malformed http2.
			if se, ok := err.(http2.StreamError); ok {
				s := t.activeStreams[se.StreamID]
				if s != nil {
					// Use the error detail to provide a better error message.
					handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail()))
				}
				continue
			}
			t.notifyError(err)
			return
		}
		switch frame := frame.(type) {
		case *http2.MetaHeadersFrame:
			t.operateHeaders(frame)
		case *http2.DataFrame:
			t.handleData(frame)
		case *http2.RSTStreamFrame:
			t.handleRSTStream(frame)
		case *http2.SettingsFrame:
			t.handleSettings(frame, false)
		case *http2.PingFrame:
			t.handlePing(frame)
		case *http2.GoAwayFrame:
			t.handleGoAway(frame)
		case *http2.WindowUpdateFrame:
			t.handleWindowUpdate(frame)
		default:
			errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
		}
	}
func (t *http2Client) applySettings(ss []http2.Setting) {
	for _, s := range ss {
		switch s.ID {
		case http2.SettingMaxConcurrentStreams:
			// TODO(zhaoq): This is a hack to avoid significant refactoring of the
			// code to deal with the unrealistic int32 overflow. Probably will try
			// to find a better way to handle this later.
			if s.Val > math.MaxInt32 {
				s.Val = math.MaxInt32
			}
			ms := t.maxStreams
			t.maxStreams = int(s.Val)
			t.streamsQuota.add(int(s.Val) - ms)
		case http2.SettingInitialWindowSize:
			for _, stream := range t.activeStreams {
				// Adjust the sending quota for each stream.
				stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota))
			}
			t.streamSendQuota = s.Val
		}
	}
// TODO(mmukhi): A lot of this code (and code in other places in the transport layer)
// is duplicated between the client and the server.
// The transport layer needs to be refactored to take care of this.
func (t *http2Client) itemHandler(i item) error {
	var err error
	switch i := i.(type) {
	case *dataFrame:
		err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d)
	case *headerFrame:
		t.hBuf.Reset()
		for _, f := range i.hf {
			t.hEnc.WriteField(f)
		}
		endHeaders := false
		first := true
		for !endHeaders {
			size := t.hBuf.Len()
			if size > http2MaxFrameLen {
				size = http2MaxFrameLen
			} else {
				endHeaders = true
			}
			if first {
				first = false
				err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{
					StreamID:      i.streamID,
					BlockFragment: t.hBuf.Next(size),
					EndStream:     i.endStream,
					EndHeaders:    endHeaders,
				})
			} else {
				err = t.framer.fr.WriteContinuation(i.streamID, endHeaders, t.hBuf.Next(size))
			}
		}
	case *windowUpdate:
		err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
	case *settings:
		if i.ack {
			t.applySettings(i.ss)
			err = t.framer.fr.WriteSettingsAck()
		} else {
			err = t.framer.fr.WriteSettings(i.ss...)
		}
	case *resetStream:
		// If the server needs to be informed about the stream closing,
		// then we need to make sure the RST_STREAM frame is written to
		// the wire before the headers of the next stream waiting on
		// streamsQuota. We ensure this by adding to the streamsQuota pool
		// only after having acquired the writableChan to send RST_STREAM.
		err = t.framer.fr.WriteRSTStream(i.streamID, i.code)
		t.streamsQuota.add(1)
	case *flushIO:
		err = t.framer.writer.Flush()
	case *ping:
		if !i.ack {
			t.bdpEst.timesnap(i.data)
		}
		err = t.framer.fr.WritePing(i.ack, i.data)
	default:
		errorf("transport: http2Client.controller got unexpected item type %v", i)
	}
	return err
// keepalive runs in a separate goroutine and makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
	p := &ping{data: [8]byte{}}
	timer := time.NewTimer(t.kp.Time)
	for {
		select {
		case <-timer.C:
			if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
				timer.Reset(t.kp.Time)
				continue
			}
			// Check if keepalive should go dormant.
			if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
				// Make awakenKeepalive writable.
				select {
				case <-t.awakenKeepalive:
					// If control gets here a ping has been sent and we
					// need to reset the timer with keepalive.Timeout.
				case <-t.ctx.Done():
					return
				}
			}
			// By the time control gets here a ping has been sent one way or the other.
			timer.Reset(t.kp.Timeout)
			select {
			case <-timer.C:
				if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
					timer.Reset(t.kp.Time)
					continue
				}
			case <-t.ctx.Done():
				return
			}
		case <-t.ctx.Done():
			return
		}
	}
}
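// For context, a hedged sketch of how these keepalive parameters are usually
// supplied by user code (this lives at the grpc.Dial call site, not in this
// package; WithKeepaliveParams is the DialOption that ends up in
// opts.KeepaliveParams in newHTTP2Client above):
//
//	kp := keepalive.ClientParameters{
//		Time:                30 * time.Second, // ping after 30s of inactivity
//		Timeout:             10 * time.Second, // wait 10s for the ping ack
//		PermitWithoutStream: true,             // keep pinging with no active streams
//	}
//	conn, err := grpc.Dial(target, grpc.WithInsecure(), grpc.WithKeepaliveParams(kp))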
func (t *http2Client) Error() <-chan struct{} {

func (t *http2Client) GoAway() <-chan struct{} {