aboutsummaryrefslogtreecommitdiffhomepage
path: root/vendor/golang.org/x/net/http2
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/golang.org/x/net/http2')
-rw-r--r--vendor/golang.org/x/net/http2/.gitignore2
-rw-r--r--vendor/golang.org/x/net/http2/Dockerfile51
-rw-r--r--vendor/golang.org/x/net/http2/Makefile3
-rw-r--r--vendor/golang.org/x/net/http2/README20
-rw-r--r--vendor/golang.org/x/net/http2/ciphers.go641
-rw-r--r--vendor/golang.org/x/net/http2/ciphers_test.go309
-rw-r--r--vendor/golang.org/x/net/http2/client_conn_pool.go256
-rw-r--r--vendor/golang.org/x/net/http2/configure_transport.go80
-rw-r--r--vendor/golang.org/x/net/http2/databuffer.go146
-rw-r--r--vendor/golang.org/x/net/http2/databuffer_test.go157
-rw-r--r--vendor/golang.org/x/net/http2/errors.go133
-rw-r--r--vendor/golang.org/x/net/http2/errors_test.go24
-rw-r--r--vendor/golang.org/x/net/http2/flow.go50
-rw-r--r--vendor/golang.org/x/net/http2/flow_test.go53
-rw-r--r--vendor/golang.org/x/net/http2/frame.go1579
-rw-r--r--vendor/golang.org/x/net/http2/frame_test.go1191
-rw-r--r--vendor/golang.org/x/net/http2/go16.go16
-rw-r--r--vendor/golang.org/x/net/http2/go17.go106
-rw-r--r--vendor/golang.org/x/net/http2/go17_not18.go36
-rw-r--r--vendor/golang.org/x/net/http2/go18.go56
-rw-r--r--vendor/golang.org/x/net/http2/go18_test.go79
-rw-r--r--vendor/golang.org/x/net/http2/go19.go16
-rw-r--r--vendor/golang.org/x/net/http2/go19_test.go60
-rw-r--r--vendor/golang.org/x/net/http2/gotrack.go170
-rw-r--r--vendor/golang.org/x/net/http2/gotrack_test.go33
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/.gitignore5
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/Makefile8
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/README16
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/h2demo.go538
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/launch.go302
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/rootCA.key27
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/rootCA.pem26
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/rootCA.srl1
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/server.crt20
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/server.key27
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/tmpl.go1991
-rw-r--r--vendor/golang.org/x/net/http2/h2i/README.md97
-rw-r--r--vendor/golang.org/x/net/http2/h2i/h2i.go522
-rw-r--r--vendor/golang.org/x/net/http2/headermap.go78
-rw-r--r--vendor/golang.org/x/net/http2/hpack/encode.go240
-rw-r--r--vendor/golang.org/x/net/http2/hpack/encode_test.go386
-rw-r--r--vendor/golang.org/x/net/http2/hpack/hpack.go490
-rw-r--r--vendor/golang.org/x/net/http2/hpack/hpack_test.go722
-rw-r--r--vendor/golang.org/x/net/http2/hpack/huffman.go212
-rw-r--r--vendor/golang.org/x/net/http2/hpack/tables.go479
-rw-r--r--vendor/golang.org/x/net/http2/hpack/tables_test.go214
-rw-r--r--vendor/golang.org/x/net/http2/http2.go391
-rw-r--r--vendor/golang.org/x/net/http2/http2_test.go199
-rw-r--r--vendor/golang.org/x/net/http2/not_go16.go21
-rw-r--r--vendor/golang.org/x/net/http2/not_go17.go87
-rw-r--r--vendor/golang.org/x/net/http2/not_go18.go29
-rw-r--r--vendor/golang.org/x/net/http2/not_go19.go16
-rw-r--r--vendor/golang.org/x/net/http2/pipe.go163
-rw-r--r--vendor/golang.org/x/net/http2/pipe_test.go130
-rw-r--r--vendor/golang.org/x/net/http2/server.go2866
-rw-r--r--vendor/golang.org/x/net/http2/server_push_test.go521
-rw-r--r--vendor/golang.org/x/net/http2/server_test.go3728
-rw-r--r--vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml5021
-rw-r--r--vendor/golang.org/x/net/http2/transport.go2284
-rw-r--r--vendor/golang.org/x/net/http2/transport_test.go3796
-rw-r--r--vendor/golang.org/x/net/http2/write.go365
-rw-r--r--vendor/golang.org/x/net/http2/writesched.go242
-rw-r--r--vendor/golang.org/x/net/http2/writesched_priority.go452
-rw-r--r--vendor/golang.org/x/net/http2/writesched_priority_test.go541
-rw-r--r--vendor/golang.org/x/net/http2/writesched_random.go72
-rw-r--r--vendor/golang.org/x/net/http2/writesched_random_test.go44
-rw-r--r--vendor/golang.org/x/net/http2/writesched_test.go125
-rw-r--r--vendor/golang.org/x/net/http2/z_spec_test.go356
68 files changed, 33117 insertions, 0 deletions
diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore
new file mode 100644
index 0000000..190f122
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/.gitignore
@@ -0,0 +1,2 @@
+*~
+h2i/h2i
diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile
new file mode 100644
index 0000000..53fc525
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Dockerfile
@@ -0,0 +1,51 @@
+#
+# This Dockerfile builds a recent curl with HTTP/2 client support, using
+# a recent nghttp2 build.
+#
+# See the Makefile for how to tag it. If Docker and that image is found, the
+# Go tests use this curl binary for integration tests.
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+ apt-get upgrade -y && \
+ apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+ autotools-dev libtool pkg-config zlib1g-dev \
+ libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+ automake autoconf
+
+# The list of packages nghttp2 recommends for h2load:
+RUN apt-get install -y --no-install-recommends make binutils \
+ autoconf automake autotools-dev \
+ libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
+ libev-dev libevent-dev libjansson-dev libjemalloc-dev \
+ cython python3.4-dev python-setuptools
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER 895da9a
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
+RUN tar -zxvf curl-7.45.0.tar.gz
+WORKDIR /root/curl-7.45.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+
diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile
new file mode 100644
index 0000000..55fd826
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Makefile
@@ -0,0 +1,3 @@
+curlimage:
+ docker build -t gohttp2/curl .
+
diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README
new file mode 100644
index 0000000..360d5aa
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/README
@@ -0,0 +1,20 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use. It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+ but are being worked on.
+* The client work has just started but shares a lot of code
+ is coming along much quicker.
+
+Docs are at https://godoc.org/golang.org/x/net/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome!
+
+Contributing: https://golang.org/doc/contribute.html
+Bugs: https://golang.org/issue/new?title=x/net/http2:+
diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go
new file mode 100644
index 0000000..698860b
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/ciphers.go
@@ -0,0 +1,641 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+// A list of the possible cipher suite ids. Taken from
+// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt
+
+const (
+ cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
+ cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
+ cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
+ cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
+ cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
+ cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
+ cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
+ cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
+ cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
+ cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
+ cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
+ cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
+ cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
+ cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
+ cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
+ cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
+ cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
+ cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
+ cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
+ cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
+ cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
+ cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
+ cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
+ cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
+ cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
+ cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
+ cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
+ cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
+ // Reserved uint16 = 0x001C-1D
+ cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
+ cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
+ cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
+ cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
+ cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
+ cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
+ cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
+ cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
+ // Reserved uint16 = 0x0047-4F
+ // Reserved uint16 = 0x0050-58
+ // Reserved uint16 = 0x0059-5C
+ // Unassigned uint16 = 0x005D-5F
+ // Reserved uint16 = 0x0060-66
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
+ // Unassigned uint16 = 0x006E-83
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
+ cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
+ cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
+ cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
+ cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
+ cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
+ cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
+ cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
+ cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
+ cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
+ cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
+ cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
+ cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
+ cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
+ cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
+ cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
+ cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
+ cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
+ cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
+ cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
+ cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
+ cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
+ cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
+ cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
+ cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
+ cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
+ cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
+ cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
+ cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
+ cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
+ cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
+ cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
+ cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
+ // Unassigned uint16 = 0x00C6-FE
+ cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
+ // Unassigned uint16 = 0x01-55,*
+ cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
+ // Unassigned uint16 = 0x5601 - 0xC000
+ cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
+ cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
+ cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
+ cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
+ cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
+ cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
+ cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
+ cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
+ cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
+ cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
+ cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
+ cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
+ cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
+ cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
+ cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
+ cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
+ cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
+ cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
+ cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
+ cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
+ cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
+ cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
+ cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
+ cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
+ cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
+ cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
+ cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
+ cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
+ cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
+ cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
+ cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
+ cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
+ cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
+ cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
+ cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
+ cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
+ cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
+ cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
+ cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
+ cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
+ cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
+ cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
+ cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
+ cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
+ cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
+ cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
+ cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
+ cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
+ cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
+ cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
+ cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
+ cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
+ cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
+ cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
+ cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
+ cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
+ cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
+ cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
+ cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
+ cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
+ cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
+ cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
+ cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
+ cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
+ cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
+ cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
+ cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
+ cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
+ cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
+ cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
+ cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
+ cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
+ cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
+ cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
+ cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
+ cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
+ cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
+ cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
+ cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
+ cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
+ // Unassigned uint16 = 0xC0B0-FF
+ // Unassigned uint16 = 0xC1-CB,*
+ // Unassigned uint16 = 0xCC00-A7
+ cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
+ cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
+ cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
+ cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
+ cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
+ cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
+ cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
+)
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+// References:
+// https://tools.ietf.org/html/rfc7540#appendix-A
+// Reject cipher suites from Appendix A.
+// "This list includes those cipher suites that do not
+// offer an ephemeral key exchange and those that are
+// based on the TLS null, stream or block cipher type"
+func isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case cipher_TLS_NULL_WITH_NULL_NULL,
+ cipher_TLS_RSA_WITH_NULL_MD5,
+ cipher_TLS_RSA_WITH_NULL_SHA,
+ cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
+ cipher_TLS_RSA_WITH_RC4_128_MD5,
+ cipher_TLS_RSA_WITH_RC4_128_SHA,
+ cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
+ cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
+ cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_RSA_WITH_DES_CBC_SHA,
+ cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
+ cipher_TLS_DH_anon_WITH_RC4_128_MD5,
+ cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_KRB5_WITH_DES_CBC_SHA,
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_KRB5_WITH_RC4_128_SHA,
+ cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
+ cipher_TLS_KRB5_WITH_DES_CBC_MD5,
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
+ cipher_TLS_KRB5_WITH_RC4_128_MD5,
+ cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
+ cipher_TLS_PSK_WITH_NULL_SHA,
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA,
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA,
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
+ cipher_TLS_RSA_WITH_NULL_SHA256,
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_RSA_WITH_SEED_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
+ cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_PSK_WITH_NULL_SHA256,
+ cipher_TLS_PSK_WITH_NULL_SHA384,
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
+ cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
+ cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDH_anon_WITH_NULL_SHA,
+ cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
+ cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
+ cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
+ cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
+ cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_RSA_WITH_AES_128_CCM,
+ cipher_TLS_RSA_WITH_AES_256_CCM,
+ cipher_TLS_RSA_WITH_AES_128_CCM_8,
+ cipher_TLS_RSA_WITH_AES_256_CCM_8,
+ cipher_TLS_PSK_WITH_AES_128_CCM,
+ cipher_TLS_PSK_WITH_AES_256_CCM,
+ cipher_TLS_PSK_WITH_AES_128_CCM_8,
+ cipher_TLS_PSK_WITH_AES_256_CCM_8:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/ciphers_test.go b/vendor/golang.org/x/net/http2/ciphers_test.go
new file mode 100644
index 0000000..764bbc8
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/ciphers_test.go
@@ -0,0 +1,309 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "testing"
+
+func TestIsBadCipherBad(t *testing.T) {
+ for _, c := range badCiphers {
+ if !isBadCipher(c) {
+ t.Errorf("Wrong result for isBadCipher(%d), want true", c)
+ }
+ }
+}
+
+// verify we don't give false positives on ciphers not on blacklist
+func TestIsBadCipherGood(t *testing.T) {
+ goodCiphers := map[uint16]string{
+ cipher_TLS_DHE_RSA_WITH_AES_256_CCM: "cipher_TLS_DHE_RSA_WITH_AES_256_CCM",
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM: "cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM",
+ cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256: "cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256",
+ }
+ for c, name := range goodCiphers {
+ if isBadCipher(c) {
+ t.Errorf("Wrong result for isBadCipher(%d) %s, want false", c, name)
+ }
+ }
+}
+
+// copied from https://http2.github.io/http2-spec/#BadCipherSuites,
+var badCiphers = []uint16{
+ cipher_TLS_NULL_WITH_NULL_NULL,
+ cipher_TLS_RSA_WITH_NULL_MD5,
+ cipher_TLS_RSA_WITH_NULL_SHA,
+ cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
+ cipher_TLS_RSA_WITH_RC4_128_MD5,
+ cipher_TLS_RSA_WITH_RC4_128_SHA,
+ cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
+ cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
+ cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_RSA_WITH_DES_CBC_SHA,
+ cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
+ cipher_TLS_DH_anon_WITH_RC4_128_MD5,
+ cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_KRB5_WITH_DES_CBC_SHA,
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_KRB5_WITH_RC4_128_SHA,
+ cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
+ cipher_TLS_KRB5_WITH_DES_CBC_MD5,
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
+ cipher_TLS_KRB5_WITH_RC4_128_MD5,
+ cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
+ cipher_TLS_PSK_WITH_NULL_SHA,
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA,
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA,
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
+ cipher_TLS_RSA_WITH_NULL_SHA256,
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_RSA_WITH_SEED_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
+ cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_PSK_WITH_NULL_SHA256,
+ cipher_TLS_PSK_WITH_NULL_SHA384,
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
+ cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
+ cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDH_anon_WITH_NULL_SHA,
+ cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
+ cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
+ cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
+ cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
+ cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_RSA_WITH_AES_128_CCM,
+ cipher_TLS_RSA_WITH_AES_256_CCM,
+ cipher_TLS_RSA_WITH_AES_128_CCM_8,
+ cipher_TLS_RSA_WITH_AES_256_CCM_8,
+ cipher_TLS_PSK_WITH_AES_128_CCM,
+ cipher_TLS_PSK_WITH_AES_256_CCM,
+ cipher_TLS_PSK_WITH_AES_128_CCM_8,
+ cipher_TLS_PSK_WITH_AES_256_CCM_8,
+}
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
new file mode 100644
index 0000000..bdf5652
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -0,0 +1,256 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code's client connection pooling.
+
+package http2
+
+import (
+ "crypto/tls"
+ "net/http"
+ "sync"
+)
+
+// ClientConnPool manages a pool of HTTP/2 client connections.
+type ClientConnPool interface {
+ GetClientConn(req *http.Request, addr string) (*ClientConn, error)
+ MarkDead(*ClientConn)
+}
+
+// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
+// implementations which can close their idle connections.
+type clientConnPoolIdleCloser interface {
+ ClientConnPool
+ closeIdleConnections()
+}
+
+var (
+ _ clientConnPoolIdleCloser = (*clientConnPool)(nil)
+ _ clientConnPoolIdleCloser = noDialClientConnPool{}
+)
+
+// TODO: use singleflight for dialing and addConnCalls?
+type clientConnPool struct {
+ t *Transport
+
+ mu sync.Mutex // TODO: maybe switch to RWMutex
+ // TODO: add support for sharing conns based on cert names
+ // (e.g. share conn for googleapis.com and appspot.com)
+ conns map[string][]*ClientConn // key is host:port
+ dialing map[string]*dialCall // currently in-flight dials
+ keys map[*ClientConn][]string
+ addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls
+}
+
+func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, dialOnMiss)
+}
+
+const (
+ dialOnMiss = true
+ noDialOnMiss = false
+)
+
+func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
+ if isConnectionCloseRequest(req) && dialOnMiss {
+ // It gets its own connection.
+ const singleUse = true
+ cc, err := p.t.dialClientConn(addr, singleUse)
+ if err != nil {
+ return nil, err
+ }
+ return cc, nil
+ }
+ p.mu.Lock()
+ for _, cc := range p.conns[addr] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return cc, nil
+ }
+ }
+ if !dialOnMiss {
+ p.mu.Unlock()
+ return nil, ErrNoCachedConn
+ }
+ call := p.getStartDialLocked(addr)
+ p.mu.Unlock()
+ <-call.done
+ return call.res, call.err
+}
+
+// dialCall is an in-flight Transport dial call to a host.
+type dialCall struct {
+ p *clientConnPool
+ done chan struct{} // closed when done
+ res *ClientConn // valid after done is closed
+ err error // valid after done is closed
+}
+
+// requires p.mu is held.
+func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
+ if call, ok := p.dialing[addr]; ok {
+ // A dial is already in-flight. Don't start another.
+ return call
+ }
+ call := &dialCall{p: p, done: make(chan struct{})}
+ if p.dialing == nil {
+ p.dialing = make(map[string]*dialCall)
+ }
+ p.dialing[addr] = call
+ go call.dial(addr)
+ return call
+}
+
+// run in its own goroutine.
+func (c *dialCall) dial(addr string) {
+ const singleUse = false // shared conn
+ c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
+ close(c.done)
+
+ c.p.mu.Lock()
+ delete(c.p.dialing, addr)
+ if c.err == nil {
+ c.p.addConnLocked(addr, c.res)
+ }
+ c.p.mu.Unlock()
+}
+
+// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
+// already exist. It coalesces concurrent calls with the same key.
+// This is used by the http1 Transport code when it creates a new connection. Because
+// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
+// the protocol), it can get into a situation where it has multiple TLS connections.
+// This code decides which ones live or die.
+// The return value used is whether c was used.
+// c is never closed.
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
+ p.mu.Lock()
+ for _, cc := range p.conns[key] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return false, nil
+ }
+ }
+ call, dup := p.addConnCalls[key]
+ if !dup {
+ if p.addConnCalls == nil {
+ p.addConnCalls = make(map[string]*addConnCall)
+ }
+ call = &addConnCall{
+ p: p,
+ done: make(chan struct{}),
+ }
+ p.addConnCalls[key] = call
+ go call.run(t, key, c)
+ }
+ p.mu.Unlock()
+
+ <-call.done
+ if call.err != nil {
+ return false, call.err
+ }
+ return !dup, nil
+}
+
+type addConnCall struct {
+ p *clientConnPool
+ done chan struct{} // closed when done
+ err error
+}
+
+func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
+ cc, err := t.NewClientConn(tc)
+
+ p := c.p
+ p.mu.Lock()
+ if err != nil {
+ c.err = err
+ } else {
+ p.addConnLocked(key, cc)
+ }
+ delete(p.addConnCalls, key)
+ p.mu.Unlock()
+ close(c.done)
+}
+
+func (p *clientConnPool) addConn(key string, cc *ClientConn) {
+ p.mu.Lock()
+ p.addConnLocked(key, cc)
+ p.mu.Unlock()
+}
+
+// p.mu must be held
+func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
+ for _, v := range p.conns[key] {
+ if v == cc {
+ return
+ }
+ }
+ if p.conns == nil {
+ p.conns = make(map[string][]*ClientConn)
+ }
+ if p.keys == nil {
+ p.keys = make(map[*ClientConn][]string)
+ }
+ p.conns[key] = append(p.conns[key], cc)
+ p.keys[cc] = append(p.keys[cc], key)
+}
+
+func (p *clientConnPool) MarkDead(cc *ClientConn) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ for _, key := range p.keys[cc] {
+ vv, ok := p.conns[key]
+ if !ok {
+ continue
+ }
+ newList := filterOutClientConn(vv, cc)
+ if len(newList) > 0 {
+ p.conns[key] = newList
+ } else {
+ delete(p.conns, key)
+ }
+ }
+ delete(p.keys, cc)
+}
+
+func (p *clientConnPool) closeIdleConnections() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // TODO: don't close a cc if it was just added to the pool
+ // milliseconds ago and has never been used. There's currently
+ // a small race window with the HTTP/1 Transport's integration
+ // where it can add an idle conn just before using it, and
+ // somebody else can concurrently call CloseIdleConns and
+ // break some caller's RoundTrip.
+ for _, vv := range p.conns {
+ for _, cc := range vv {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
+ out := in[:0]
+ for _, v := range in {
+ if v != exclude {
+ out = append(out, v)
+ }
+ }
+ // If we filtered it out, zero out the last item to prevent
+ // the GC from seeing it.
+ if len(in) != len(out) {
+ in[len(in)-1] = nil
+ }
+ return out
+}
+
+// noDialClientConnPool is an implementation of http2.ClientConnPool
+// which never dials. We let the HTTP/1.1 client dial and use its TLS
+// connection instead.
+type noDialClientConnPool struct{ *clientConnPool }
+
+func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, noDialOnMiss)
+}
diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go
new file mode 100644
index 0000000..b65fc6d
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/configure_transport.go
@@ -0,0 +1,80 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ connPool := new(clientConnPool)
+ t2 := &Transport{
+ ConnPool: noDialClientConnPool{connPool},
+ t1: t1,
+ }
+ connPool.t = t2
+ if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
+ return nil, err
+ }
+ if t1.TLSClientConfig == nil {
+ t1.TLSClientConfig = new(tls.Config)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
+ t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
+ t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
+ }
+ upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
+ addr := authorityAddr("https", authority)
+ if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
+ go c.Close()
+ return erringRoundTripper{err}
+ } else if !used {
+ // Turns out we don't need this c.
+ // For example, two goroutines made requests to the same host
+ // at the same time, both kicking off TCP dials. (since protocol
+ // was unknown)
+ go c.Close()
+ }
+ return t2
+ }
+ if m := t1.TLSNextProto; len(m) == 0 {
+ t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
+ "h2": upgradeFn,
+ }
+ } else {
+ m["h2"] = upgradeFn
+ }
+ return t2, nil
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol but
+// converting panics into errors.
+func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("%v", e)
+ }
+ }()
+ t.RegisterProtocol("https", rt)
+ return nil
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there's already has a cached connection to the host.
+type noDialH2RoundTripper struct{ t *Transport }
+
+func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ res, err := rt.t.RoundTrip(req)
+ if err == ErrNoCachedConn {
+ return nil, http.ErrSkipAltProtocol
+ }
+ return res, err
+}
diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go
new file mode 100644
index 0000000..a3067f8
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/databuffer.go
@@ -0,0 +1,146 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+)
+
+// Buffer chunks are allocated from a pool to reduce pressure on GC.
+// The maximum wasted space per dataBuffer is 2x the largest size class,
+// which happens when the dataBuffer has multiple chunks and there is
+// one unread byte in both the first and last chunks. We use a few size
+// classes to minimize overheads for servers that typically receive very
+// small request bodies.
+//
+// TODO: Benchmark to determine if the pools are necessary. The GC may have
+// improved enough that we can instead allocate chunks like this:
+// make([]byte, max(16<<10, expectedBytesRemaining))
+var (
+ dataChunkSizeClasses = []int{
+ 1 << 10,
+ 2 << 10,
+ 4 << 10,
+ 8 << 10,
+ 16 << 10,
+ }
+ dataChunkPools = [...]sync.Pool{
+ {New: func() interface{} { return make([]byte, 1<<10) }},
+ {New: func() interface{} { return make([]byte, 2<<10) }},
+ {New: func() interface{} { return make([]byte, 4<<10) }},
+ {New: func() interface{} { return make([]byte, 8<<10) }},
+ {New: func() interface{} { return make([]byte, 16<<10) }},
+ }
+)
+
+func getDataBufferChunk(size int64) []byte {
+ i := 0
+ for ; i < len(dataChunkSizeClasses)-1; i++ {
+ if size <= int64(dataChunkSizeClasses[i]) {
+ break
+ }
+ }
+ return dataChunkPools[i].Get().([]byte)
+}
+
+func putDataBufferChunk(p []byte) {
+ for i, n := range dataChunkSizeClasses {
+ if len(p) == n {
+ dataChunkPools[i].Put(p)
+ return
+ }
+ }
+ panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
+}
+
+// dataBuffer is an io.ReadWriter backed by a list of data chunks.
+// Each dataBuffer is used to read DATA frames on a single stream.
+// The buffer is divided into chunks so the server can limit the
+// total memory used by a single connection without limiting the
+// request body size on any single stream.
+type dataBuffer struct {
+ chunks [][]byte
+ r int // next byte to read is chunks[0][r]
+ w int // next byte to write is chunks[len(chunks)-1][w]
+ size int // total buffered bytes
+ expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
+}
+
+var errReadEmpty = errors.New("read from empty dataBuffer")
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *dataBuffer) Read(p []byte) (int, error) {
+ if b.size == 0 {
+ return 0, errReadEmpty
+ }
+ var ntotal int
+ for len(p) > 0 && b.size > 0 {
+ readFrom := b.bytesFromFirstChunk()
+ n := copy(p, readFrom)
+ p = p[n:]
+ ntotal += n
+ b.r += n
+ b.size -= n
+ // If the first chunk has been consumed, advance to the next chunk.
+ if b.r == len(b.chunks[0]) {
+ putDataBufferChunk(b.chunks[0])
+ end := len(b.chunks) - 1
+ copy(b.chunks[:end], b.chunks[1:])
+ b.chunks[end] = nil
+ b.chunks = b.chunks[:end]
+ b.r = 0
+ }
+ }
+ return ntotal, nil
+}
+
+func (b *dataBuffer) bytesFromFirstChunk() []byte {
+ if len(b.chunks) == 1 {
+ return b.chunks[0][b.r:b.w]
+ }
+ return b.chunks[0][b.r:]
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *dataBuffer) Len() int {
+ return b.size
+}
+
+// Write appends p to the buffer.
+func (b *dataBuffer) Write(p []byte) (int, error) {
+ ntotal := len(p)
+ for len(p) > 0 {
+ // If the last chunk is empty, allocate a new chunk. Try to allocate
+ // enough to fully copy p plus any additional bytes we expect to
+ // receive. However, this may allocate less than len(p).
+ want := int64(len(p))
+ if b.expected > want {
+ want = b.expected
+ }
+ chunk := b.lastChunkOrAlloc(want)
+ n := copy(chunk[b.w:], p)
+ p = p[n:]
+ b.w += n
+ b.size += n
+ b.expected -= int64(n)
+ }
+ return ntotal, nil
+}
+
+func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
+ if len(b.chunks) != 0 {
+ last := b.chunks[len(b.chunks)-1]
+ if b.w < len(last) {
+ return last
+ }
+ }
+ chunk := getDataBufferChunk(want)
+ b.chunks = append(b.chunks, chunk)
+ b.w = 0
+ return chunk
+}
diff --git a/vendor/golang.org/x/net/http2/databuffer_test.go b/vendor/golang.org/x/net/http2/databuffer_test.go
new file mode 100644
index 0000000..028e12e
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/databuffer_test.go
@@ -0,0 +1,157 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+func fmtDataChunk(chunk []byte) string {
+ out := ""
+ var last byte
+ var count int
+ for _, c := range chunk {
+ if c != last {
+ if count > 0 {
+ out += fmt.Sprintf(" x %d ", count)
+ count = 0
+ }
+ out += string([]byte{c})
+ last = c
+ }
+ count++
+ }
+ if count > 0 {
+ out += fmt.Sprintf(" x %d", count)
+ }
+ return out
+}
+
+func fmtDataChunks(chunks [][]byte) string {
+ var out string
+ for _, chunk := range chunks {
+ out += fmt.Sprintf("{%q}", fmtDataChunk(chunk))
+ }
+ return out
+}
+
+func testDataBuffer(t *testing.T, wantBytes []byte, setup func(t *testing.T) *dataBuffer) {
+ // Run setup, then read the remaining bytes from the dataBuffer and check
+ // that they match wantBytes. We use different read sizes to check corner
+ // cases in Read.
+ for _, readSize := range []int{1, 2, 1 * 1024, 32 * 1024} {
+ t.Run(fmt.Sprintf("ReadSize=%d", readSize), func(t *testing.T) {
+ b := setup(t)
+ buf := make([]byte, readSize)
+ var gotRead bytes.Buffer
+ for {
+ n, err := b.Read(buf)
+ gotRead.Write(buf[:n])
+ if err == errReadEmpty {
+ break
+ }
+ if err != nil {
+ t.Fatalf("error after %v bytes: %v", gotRead.Len(), err)
+ }
+ }
+ if got, want := gotRead.Bytes(), wantBytes; !bytes.Equal(got, want) {
+ t.Errorf("FinalRead=%q, want %q", fmtDataChunk(got), fmtDataChunk(want))
+ }
+ })
+ }
+}
+
+func TestDataBufferAllocation(t *testing.T) {
+ writes := [][]byte{
+ bytes.Repeat([]byte("a"), 1*1024-1),
+ []byte("a"),
+ bytes.Repeat([]byte("b"), 4*1024-1),
+ []byte("b"),
+ bytes.Repeat([]byte("c"), 8*1024-1),
+ []byte("c"),
+ bytes.Repeat([]byte("d"), 16*1024-1),
+ []byte("d"),
+ bytes.Repeat([]byte("e"), 32*1024),
+ }
+ var wantRead bytes.Buffer
+ for _, p := range writes {
+ wantRead.Write(p)
+ }
+
+ testDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer {
+ b := &dataBuffer{}
+ for _, p := range writes {
+ if n, err := b.Write(p); n != len(p) || err != nil {
+ t.Fatalf("Write(%q x %d)=%v,%v want %v,nil", p[:1], len(p), n, err, len(p))
+ }
+ }
+ want := [][]byte{
+ bytes.Repeat([]byte("a"), 1*1024),
+ bytes.Repeat([]byte("b"), 4*1024),
+ bytes.Repeat([]byte("c"), 8*1024),
+ bytes.Repeat([]byte("d"), 16*1024),
+ bytes.Repeat([]byte("e"), 16*1024),
+ bytes.Repeat([]byte("e"), 16*1024),
+ }
+ if !reflect.DeepEqual(b.chunks, want) {
+ t.Errorf("dataBuffer.chunks\ngot: %s\nwant: %s", fmtDataChunks(b.chunks), fmtDataChunks(want))
+ }
+ return b
+ })
+}
+
+func TestDataBufferAllocationWithExpected(t *testing.T) {
+ writes := [][]byte{
+ bytes.Repeat([]byte("a"), 1*1024), // allocates 16KB
+ bytes.Repeat([]byte("b"), 14*1024),
+ bytes.Repeat([]byte("c"), 15*1024), // allocates 16KB more
+ bytes.Repeat([]byte("d"), 2*1024),
+ bytes.Repeat([]byte("e"), 1*1024), // overflows 32KB expectation, allocates just 1KB
+ }
+ var wantRead bytes.Buffer
+ for _, p := range writes {
+ wantRead.Write(p)
+ }
+
+ testDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer {
+ b := &dataBuffer{expected: 32 * 1024}
+ for _, p := range writes {
+ if n, err := b.Write(p); n != len(p) || err != nil {
+ t.Fatalf("Write(%q x %d)=%v,%v want %v,nil", p[:1], len(p), n, err, len(p))
+ }
+ }
+ want := [][]byte{
+ append(bytes.Repeat([]byte("a"), 1*1024), append(bytes.Repeat([]byte("b"), 14*1024), bytes.Repeat([]byte("c"), 1*1024)...)...),
+ append(bytes.Repeat([]byte("c"), 14*1024), bytes.Repeat([]byte("d"), 2*1024)...),
+ bytes.Repeat([]byte("e"), 1*1024),
+ }
+ if !reflect.DeepEqual(b.chunks, want) {
+ t.Errorf("dataBuffer.chunks\ngot: %s\nwant: %s", fmtDataChunks(b.chunks), fmtDataChunks(want))
+ }
+ return b
+ })
+}
+
+func TestDataBufferWriteAfterPartialRead(t *testing.T) {
+ testDataBuffer(t, []byte("cdxyz"), func(t *testing.T) *dataBuffer {
+ b := &dataBuffer{}
+ if n, err := b.Write([]byte("abcd")); n != 4 || err != nil {
+ t.Fatalf("Write(\"abcd\")=%v,%v want 4,nil", n, err)
+ }
+ p := make([]byte, 2)
+ if n, err := b.Read(p); n != 2 || err != nil || !bytes.Equal(p, []byte("ab")) {
+ t.Fatalf("Read()=%q,%v,%v want \"ab\",2,nil", p, n, err)
+ }
+ if n, err := b.Write([]byte("xyz")); n != 3 || err != nil {
+ t.Fatalf("Write(\"xyz\")=%v,%v want 3,nil", n, err)
+ }
+ return b
+ })
+}
diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go
new file mode 100644
index 0000000..71f2c46
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/errors.go
@@ -0,0 +1,133 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "fmt"
+)
+
+// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
+type ErrCode uint32
+
+const (
+ ErrCodeNo ErrCode = 0x0
+ ErrCodeProtocol ErrCode = 0x1
+ ErrCodeInternal ErrCode = 0x2
+ ErrCodeFlowControl ErrCode = 0x3
+ ErrCodeSettingsTimeout ErrCode = 0x4
+ ErrCodeStreamClosed ErrCode = 0x5
+ ErrCodeFrameSize ErrCode = 0x6
+ ErrCodeRefusedStream ErrCode = 0x7
+ ErrCodeCancel ErrCode = 0x8
+ ErrCodeCompression ErrCode = 0x9
+ ErrCodeConnect ErrCode = 0xa
+ ErrCodeEnhanceYourCalm ErrCode = 0xb
+ ErrCodeInadequateSecurity ErrCode = 0xc
+ ErrCodeHTTP11Required ErrCode = 0xd
+)
+
+var errCodeName = map[ErrCode]string{
+ ErrCodeNo: "NO_ERROR",
+ ErrCodeProtocol: "PROTOCOL_ERROR",
+ ErrCodeInternal: "INTERNAL_ERROR",
+ ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
+ ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
+ ErrCodeStreamClosed: "STREAM_CLOSED",
+ ErrCodeFrameSize: "FRAME_SIZE_ERROR",
+ ErrCodeRefusedStream: "REFUSED_STREAM",
+ ErrCodeCancel: "CANCEL",
+ ErrCodeCompression: "COMPRESSION_ERROR",
+ ErrCodeConnect: "CONNECT_ERROR",
+ ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
+ ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
+ ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
+}
+
+func (e ErrCode) String() string {
+ if s, ok := errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("unknown error code 0x%x", uint32(e))
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection.
+type ConnectionError ErrCode
+
+func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
+
+// StreamError is an error that only affects one stream within an
+// HTTP/2 connection.
+type StreamError struct {
+ StreamID uint32
+ Code ErrCode
+ Cause error // optional additional detail
+}
+
+func streamError(id uint32, code ErrCode) StreamError {
+ return StreamError{StreamID: id, Code: code}
+}
+
+func (e StreamError) Error() string {
+ if e.Cause != nil {
+ return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
+ }
+ return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
+}
+
+// 6.9.1 The Flow Control Window
+// "If a sender receives a WINDOW_UPDATE that causes a flow control
+// window to exceed this maximum it MUST terminate either the stream
+// or the connection, as appropriate. For streams, [...]; for the
+// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
+type goAwayFlowError struct{}
+
+func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
+
+// connError represents an HTTP/2 ConnectionError error code, along
+// with a string (for debugging) explaining why.
+//
+// Errors of this type are only returned by the frame parser functions
+// and converted into ConnectionError(Code), after stashing away
+// the Reason into the Framer's errDetail field, accessible via
+// the (*Framer).ErrorDetail method.
+type connError struct {
+ Code ErrCode // the ConnectionError error code
+ Reason string // additional reason
+}
+
+func (e connError) Error() string {
+ return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
+}
+
+type pseudoHeaderError string
+
+func (e pseudoHeaderError) Error() string {
+ return fmt.Sprintf("invalid pseudo-header %q", string(e))
+}
+
+type duplicatePseudoHeaderError string
+
+func (e duplicatePseudoHeaderError) Error() string {
+ return fmt.Sprintf("duplicate pseudo-header %q", string(e))
+}
+
+type headerFieldNameError string
+
+func (e headerFieldNameError) Error() string {
+ return fmt.Sprintf("invalid header field name %q", string(e))
+}
+
+type headerFieldValueError string
+
+func (e headerFieldValueError) Error() string {
+ return fmt.Sprintf("invalid header field value %q", string(e))
+}
+
+var (
+ errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
+ errPseudoAfterRegular = errors.New("pseudo header field after regular")
+)
diff --git a/vendor/golang.org/x/net/http2/errors_test.go b/vendor/golang.org/x/net/http2/errors_test.go
new file mode 100644
index 0000000..da5c58c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/errors_test.go
@@ -0,0 +1,24 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "testing"
+
+func TestErrCodeString(t *testing.T) {
+ tests := []struct {
+ err ErrCode
+ want string
+ }{
+ {ErrCodeProtocol, "PROTOCOL_ERROR"},
+ {0xd, "HTTP_1_1_REQUIRED"},
+ {0xf, "unknown error code 0xf"},
+ }
+ for i, tt := range tests {
+ got := tt.err.String()
+ if got != tt.want {
+ t.Errorf("%d. Error = %q; want %q", i, got, tt.want)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go
new file mode 100644
index 0000000..957de25
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/flow.go
@@ -0,0 +1,50 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Flow control
+
+package http2
+
+// flow is the flow control window's size.
+type flow struct {
+ // n is the number of DATA bytes we're allowed to send.
+ // A flow is kept both on a conn and a per-stream.
+ n int32
+
+ // conn points to the shared connection-level flow that is
+ // shared by all streams on that conn. It is nil for the flow
+ // that's on the conn directly.
+ conn *flow
+}
+
+func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
+
+func (f *flow) available() int32 {
+ n := f.n
+ if f.conn != nil && f.conn.n < n {
+ n = f.conn.n
+ }
+ return n
+}
+
+func (f *flow) take(n int32) {
+ if n > f.available() {
+ panic("internal error: took too much")
+ }
+ f.n -= n
+ if f.conn != nil {
+ f.conn.n -= n
+ }
+}
+
+// add adds n bytes (positive or negative) to the flow control window.
+// It returns false if the sum would exceed 2^31-1.
+func (f *flow) add(n int32) bool {
+ remain := (1<<31 - 1) - f.n
+ if n > remain {
+ return false
+ }
+ f.n += n
+ return true
+}
diff --git a/vendor/golang.org/x/net/http2/flow_test.go b/vendor/golang.org/x/net/http2/flow_test.go
new file mode 100644
index 0000000..859adf5
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/flow_test.go
@@ -0,0 +1,53 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "testing"
+
+func TestFlow(t *testing.T) {
+ var st flow
+ var conn flow
+ st.add(3)
+ conn.add(2)
+
+ if got, want := st.available(), int32(3); got != want {
+ t.Errorf("available = %d; want %d", got, want)
+ }
+ st.setConnFlow(&conn)
+ if got, want := st.available(), int32(2); got != want {
+ t.Errorf("after parent setup, available = %d; want %d", got, want)
+ }
+
+ st.take(2)
+ if got, want := conn.available(), int32(0); got != want {
+ t.Errorf("after taking 2, conn = %d; want %d", got, want)
+ }
+ if got, want := st.available(), int32(0); got != want {
+ t.Errorf("after taking 2, stream = %d; want %d", got, want)
+ }
+}
+
+func TestFlowAdd(t *testing.T) {
+ var f flow
+ if !f.add(1) {
+ t.Fatal("failed to add 1")
+ }
+ if !f.add(-1) {
+ t.Fatal("failed to add -1")
+ }
+ if got, want := f.available(), int32(0); got != want {
+ t.Fatalf("size = %d; want %d", got, want)
+ }
+ if !f.add(1<<31 - 1) {
+ t.Fatal("failed to add 2^31-1")
+ }
+ if got, want := f.available(), int32(1<<31-1); got != want {
+ t.Fatalf("size = %d; want %d", got, want)
+ }
+ if f.add(1) {
+ t.Fatal("adding 1 to max shouldn't be allowed")
+ }
+
+}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
new file mode 100644
index 0000000..3b14890
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -0,0 +1,1579 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
+)
+
+const frameHeaderLen = 9
+
+var padZeros = make([]byte, 255) // zeros for padding
+
+// A FrameType is a registered frame type as defined in
+// http://http2.github.io/http2-spec/#rfc.section.11.2
+type FrameType uint8
+
+const (
+ FrameData FrameType = 0x0
+ FrameHeaders FrameType = 0x1
+ FramePriority FrameType = 0x2
+ FrameRSTStream FrameType = 0x3
+ FrameSettings FrameType = 0x4
+ FramePushPromise FrameType = 0x5
+ FramePing FrameType = 0x6
+ FrameGoAway FrameType = 0x7
+ FrameWindowUpdate FrameType = 0x8
+ FrameContinuation FrameType = 0x9
+)
+
+var frameName = map[FrameType]string{
+ FrameData: "DATA",
+ FrameHeaders: "HEADERS",
+ FramePriority: "PRIORITY",
+ FrameRSTStream: "RST_STREAM",
+ FrameSettings: "SETTINGS",
+ FramePushPromise: "PUSH_PROMISE",
+ FramePing: "PING",
+ FrameGoAway: "GOAWAY",
+ FrameWindowUpdate: "WINDOW_UPDATE",
+ FrameContinuation: "CONTINUATION",
+}
+
+func (t FrameType) String() string {
+ if s, ok := frameName[t]; ok {
+ return s
+ }
+ return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+}
+
+// Flags is a bitmask of HTTP/2 flags.
+// The meaning of flags varies depending on the frame type.
+type Flags uint8
+
+// Has reports whether f contains all (0 or more) flags in v.
+func (f Flags) Has(v Flags) bool {
+ return (f & v) == v
+}
+
+// Frame-specific FrameHeader flag bits.
+const (
+ // Data Frame
+ FlagDataEndStream Flags = 0x1
+ FlagDataPadded Flags = 0x8
+
+ // Headers Frame
+ FlagHeadersEndStream Flags = 0x1
+ FlagHeadersEndHeaders Flags = 0x4
+ FlagHeadersPadded Flags = 0x8
+ FlagHeadersPriority Flags = 0x20
+
+ // Settings Frame
+ FlagSettingsAck Flags = 0x1
+
+ // Ping Frame
+ FlagPingAck Flags = 0x1
+
+ // Continuation Frame
+ FlagContinuationEndHeaders Flags = 0x4
+
+ FlagPushPromiseEndHeaders Flags = 0x4
+ FlagPushPromisePadded Flags = 0x8
+)
+
+var flagName = map[FrameType]map[Flags]string{
+ FrameData: {
+ FlagDataEndStream: "END_STREAM",
+ FlagDataPadded: "PADDED",
+ },
+ FrameHeaders: {
+ FlagHeadersEndStream: "END_STREAM",
+ FlagHeadersEndHeaders: "END_HEADERS",
+ FlagHeadersPadded: "PADDED",
+ FlagHeadersPriority: "PRIORITY",
+ },
+ FrameSettings: {
+ FlagSettingsAck: "ACK",
+ },
+ FramePing: {
+ FlagPingAck: "ACK",
+ },
+ FrameContinuation: {
+ FlagContinuationEndHeaders: "END_HEADERS",
+ },
+ FramePushPromise: {
+ FlagPushPromiseEndHeaders: "END_HEADERS",
+ FlagPushPromisePadded: "PADDED",
+ },
+}
+
+// a frameParser parses a frame given its FrameHeader and payload
+// bytes. The length of payload will always equal fh.Length (which
+// might be 0).
+type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)
+
+var frameParsers = map[FrameType]frameParser{
+ FrameData: parseDataFrame,
+ FrameHeaders: parseHeadersFrame,
+ FramePriority: parsePriorityFrame,
+ FrameRSTStream: parseRSTStreamFrame,
+ FrameSettings: parseSettingsFrame,
+ FramePushPromise: parsePushPromise,
+ FramePing: parsePingFrame,
+ FrameGoAway: parseGoAwayFrame,
+ FrameWindowUpdate: parseWindowUpdateFrame,
+ FrameContinuation: parseContinuationFrame,
+}
+
+func typeFrameParser(t FrameType) frameParser {
+ if f := frameParsers[t]; f != nil {
+ return f
+ }
+ return parseUnknownFrame
+}
+
+// A FrameHeader is the 9 byte header of all HTTP/2 frames.
+//
+// See http://http2.github.io/http2-spec/#FrameHeader
+type FrameHeader struct {
+ valid bool // caller can access []byte fields in the Frame
+
+ // Type is the 1 byte frame type. There are ten standard frame
+ // types, but extension frame types may be written by WriteRawFrame
+ // and will be returned by ReadFrame (as UnknownFrame).
+ Type FrameType
+
+ // Flags are the 1 byte of 8 potential bit flags per frame.
+ // They are specific to the frame type.
+ Flags Flags
+
+ // Length is the length of the frame, not including the 9 byte header.
+ // The maximum size is one byte less than 16MB (uint24), but only
+ // frames up to 16KB are allowed without peer agreement.
+ Length uint32
+
+ // StreamID is which stream this frame is for. Certain frames
+ // are not stream-specific, in which case this field is 0.
+ StreamID uint32
+}
+
+// Header returns h. It exists so FrameHeaders can be embedded in other
+// specific frame types and implement the Frame interface.
+func (h FrameHeader) Header() FrameHeader { return h }
+
+func (h FrameHeader) String() string {
+ var buf bytes.Buffer
+ buf.WriteString("[FrameHeader ")
+ h.writeDebug(&buf)
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
+ buf.WriteString(h.Type.String())
+ if h.Flags != 0 {
+ buf.WriteString(" flags=")
+ set := 0
+ for i := uint8(0); i < 8; i++ {
+ if h.Flags&(1<<i) == 0 {
+ continue
+ }
+ set++
+ if set > 1 {
+ buf.WriteByte('|')
+ }
+ name := flagName[h.Type][Flags(1<<i)]
+ if name != "" {
+ buf.WriteString(name)
+ } else {
+ fmt.Fprintf(buf, "0x%x", 1<<i)
+ }
+ }
+ }
+ if h.StreamID != 0 {
+ fmt.Fprintf(buf, " stream=%d", h.StreamID)
+ }
+ fmt.Fprintf(buf, " len=%d", h.Length)
+}
+
+func (h *FrameHeader) checkValid() {
+ if !h.valid {
+ panic("Frame accessor called on non-owned Frame")
+ }
+}
+
+func (h *FrameHeader) invalidate() { h.valid = false }
+
+// frame header bytes.
+// Used only by ReadFrameHeader.
+var fhBytes = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, frameHeaderLen)
+ return &buf
+ },
+}
+
+// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
+// Most users should use Framer.ReadFrame instead.
+func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
+ bufp := fhBytes.Get().(*[]byte)
+ defer fhBytes.Put(bufp)
+ return readFrameHeader(*bufp, r)
+}
+
+func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {
+ _, err := io.ReadFull(r, buf[:frameHeaderLen])
+ if err != nil {
+ return FrameHeader{}, err
+ }
+ return FrameHeader{
+ Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
+ Type: FrameType(buf[3]),
+ Flags: Flags(buf[4]),
+ StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
+ valid: true,
+ }, nil
+}
+
+// A Frame is the base interface implemented by all frame types.
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type Frame interface {
+ Header() FrameHeader
+
+ // invalidate is called by Framer.ReadFrame to make this
+ // frame's buffers as being invalid, since the subsequent
+ // frame will reuse them.
+ invalidate()
+}
+
+// A Framer reads and writes Frames.
+type Framer struct {
+ r io.Reader
+ lastFrame Frame
+ errDetail error
+
+ // lastHeaderStream is non-zero if the last frame was an
+ // unfinished HEADERS/CONTINUATION.
+ lastHeaderStream uint32
+
+ maxReadSize uint32
+ headerBuf [frameHeaderLen]byte
+
+ // TODO: let getReadBuf be configurable, and use a less memory-pinning
+ // allocator in server.go to minimize memory pinned for many idle conns.
+ // Will probably also need to make frame invalidation have a hook too.
+ getReadBuf func(size uint32) []byte
+ readBuf []byte // cache for default getReadBuf
+
+ maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+ w io.Writer
+ wbuf []byte
+
+ // AllowIllegalWrites permits the Framer's Write methods to
+ // write frames that do not conform to the HTTP/2 spec. This
+ // permits using the Framer to test other HTTP/2
+ // implementations' conformance to the spec.
+ // If false, the Write methods will prefer to return an error
+ // rather than comply.
+ AllowIllegalWrites bool
+
+ // AllowIllegalReads permits the Framer's ReadFrame method
+ // to return non-compliant frames or frame orders.
+ // This is for testing and permits using the Framer to test
+ // other HTTP/2 implementations' conformance to the spec.
+ // It is not compatible with ReadMetaHeaders.
+ AllowIllegalReads bool
+
+ // ReadMetaHeaders if non-nil causes ReadFrame to merge
+ // HEADERS and CONTINUATION frames together and return
+ // MetaHeadersFrame instead.
+ ReadMetaHeaders *hpack.Decoder
+
+ // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
+ // It's used only if ReadMetaHeaders is set; 0 means a sane default
+ // (currently 16MB)
+ // If the limit is hit, MetaHeadersFrame.Truncated is set true.
+ MaxHeaderListSize uint32
+
+ // TODO: track which type of frame & with which flags was sent
+ // last. Then return an error (unless AllowIllegalWrites) if
+ // we're in the middle of a header block and a
+ // non-Continuation or Continuation on a different stream is
+ // attempted to be written.
+
+ logReads, logWrites bool
+
+ debugFramer *Framer // only use for logging written writes
+ debugFramerBuf *bytes.Buffer
+ debugReadLoggerf func(string, ...interface{})
+ debugWriteLoggerf func(string, ...interface{})
+
+ frameCache *frameCache // nil if frames aren't reused (default)
+}
+
+func (fr *Framer) maxHeaderListSize() uint32 {
+ if fr.MaxHeaderListSize == 0 {
+ return 16 << 20 // sane default, per docs
+ }
+ return fr.MaxHeaderListSize
+}
+
+func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
+ // Write the FrameHeader.
+ f.wbuf = append(f.wbuf[:0],
+ 0, // 3 bytes of length, filled in in endWrite
+ 0,
+ 0,
+ byte(ftype),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+}
+
+func (f *Framer) endWrite() error {
+ // Now that we know the final size, fill in the FrameHeader in
+ // the space previously reserved for it. Abuse append.
+ length := len(f.wbuf) - frameHeaderLen
+ if length >= (1 << 24) {
+ return ErrFrameTooLarge
+ }
+ _ = append(f.wbuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length))
+ if f.logWrites {
+ f.logWrite()
+ }
+
+ n, err := f.w.Write(f.wbuf)
+ if err == nil && n != len(f.wbuf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
+
+func (f *Framer) logWrite() {
+ if f.debugFramer == nil {
+ f.debugFramerBuf = new(bytes.Buffer)
+ f.debugFramer = NewFramer(nil, f.debugFramerBuf)
+ f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
+ // Let us read anything, even if we accidentally wrote it
+ // in the wrong order:
+ f.debugFramer.AllowIllegalReads = true
+ }
+ f.debugFramerBuf.Write(f.wbuf)
+ fr, err := f.debugFramer.ReadFrame()
+ if err != nil {
+ f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
+ return
+ }
+ f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
+}
+
+func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
+func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
+func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+func (f *Framer) writeUint32(v uint32) {
+ f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+ minMaxFrameSize = 1 << 14
+ maxFrameSize = 1<<24 - 1
+)
+
+// SetReuseFrames allows the Framer to reuse Frames.
+// If called on a Framer, Frames returned by calls to ReadFrame are only
+// valid until the next call to ReadFrame.
+func (fr *Framer) SetReuseFrames() {
+ if fr.frameCache != nil {
+ return
+ }
+ fr.frameCache = &frameCache{}
+}
+
+type frameCache struct {
+ dataFrame DataFrame
+}
+
+func (fc *frameCache) getDataFrame() *DataFrame {
+ if fc == nil {
+ return &DataFrame{}
+ }
+ return &fc.dataFrame
+}
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func NewFramer(w io.Writer, r io.Reader) *Framer {
+ fr := &Framer{
+ w: w,
+ r: r,
+ logReads: logFrameReads,
+ logWrites: logFrameWrites,
+ debugReadLoggerf: log.Printf,
+ debugWriteLoggerf: log.Printf,
+ }
+ fr.getReadBuf = func(size uint32) []byte {
+ if cap(fr.readBuf) >= int(size) {
+ return fr.readBuf[:size]
+ }
+ fr.readBuf = make([]byte, size)
+ return fr.readBuf
+ }
+ fr.SetMaxReadFrameSize(maxFrameSize)
+ return fr
+}
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *Framer) SetMaxReadFrameSize(v uint32) {
+ if v > maxFrameSize {
+ v = maxFrameSize
+ }
+ fr.maxReadSize = v
+}
+
+// ErrorDetail returns a more detailed error of the last error
+// returned by Framer.ReadFrame. For instance, if ReadFrame
+// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
+// will say exactly what was invalid. ErrorDetail is not guaranteed
+// to return a non-nil value and like the rest of the http2 package,
+// its return value is not protected by an API compatibility promise.
+// ErrorDetail is reset after the next call to ReadFrame.
+func (fr *Framer) ErrorDetail() error {
+ return fr.errDetail
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// terminalReadFrameError reports whether err is an unrecoverable
+// error from ReadFrame and no other frames should be read.
+func terminalReadFrameError(err error) bool {
+ if _, ok := err.(StreamError); ok {
+ return false
+ }
+ return err != nil
+}
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+func (fr *Framer) ReadFrame() (Frame, error) {
+ fr.errDetail = nil
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
+ }
+ fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
+ if err != nil {
+ return nil, err
+ }
+ if fh.Length > fr.maxReadSize {
+ return nil, ErrFrameTooLarge
+ }
+ payload := fr.getReadBuf(fh.Length)
+ if _, err := io.ReadFull(fr.r, payload); err != nil {
+ return nil, err
+ }
+ f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
+ if err != nil {
+ if ce, ok := err.(connError); ok {
+ return nil, fr.connError(ce.Code, ce.Reason)
+ }
+ return nil, err
+ }
+ if err := fr.checkFrameOrder(f); err != nil {
+ return nil, err
+ }
+ if fr.logReads {
+ fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
+ }
+ if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
+ return fr.readMetaFrame(f.(*HeadersFrame))
+ }
+ return f, nil
+}
+
// connError returns ConnectionError(code) but first
// stashes away a public reason so the caller can optionally relay it
// to the peer before hanging up on them. This might help others debug
// their implementations.
func (fr *Framer) connError(code ErrCode, reason string) error {
	fr.errDetail = errors.New(reason)
	return ConnectionError(code)
}
+
// checkFrameOrder reports an error if f is an invalid frame to return
// next from ReadFrame. Mostly it checks whether HEADERS and
// CONTINUATION frames are contiguous.
func (fr *Framer) checkFrameOrder(f Frame) error {
	// last is retained only for the error message below; fr.lastFrame is
	// updated unconditionally so ReadFrame can invalidate it next time.
	last := fr.lastFrame
	fr.lastFrame = f
	if fr.AllowIllegalReads {
		return nil
	}

	fh := f.Header()
	// A non-zero lastHeaderStream means we are mid header block: only a
	// CONTINUATION for that same stream is legal.
	if fr.lastHeaderStream != 0 {
		if fh.Type != FrameContinuation {
			return fr.connError(ErrCodeProtocol,
				fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
					fh.Type, fh.StreamID,
					last.Header().Type, fr.lastHeaderStream))
		}
		if fh.StreamID != fr.lastHeaderStream {
			return fr.connError(ErrCodeProtocol,
				fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
					fh.StreamID, fr.lastHeaderStream))
		}
	} else if fh.Type == FrameContinuation {
		// CONTINUATION outside a header block is a protocol error.
		return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
	}

	switch fh.Type {
	case FrameHeaders, FrameContinuation:
		// NOTE(review): this uses FlagHeadersEndHeaders for CONTINUATION
		// frames too — it relies on FlagContinuationEndHeaders sharing the
		// same bit value; confirm against the flag definitions.
		if fh.Flags.Has(FlagHeadersEndHeaders) {
			fr.lastHeaderStream = 0
		} else {
			fr.lastHeaderStream = fh.StreamID
		}
	}

	return nil
}
+
// A DataFrame conveys arbitrary, variable-length sequences of octets
// associated with a stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.1
type DataFrame struct {
	FrameHeader
	data []byte // payload minus pad-length byte and padding; owned by the Framer
}

// StreamEnded reports whether the DATA frame carries the END_STREAM flag.
func (f *DataFrame) StreamEnded() bool {
	return f.FrameHeader.Flags.Has(FlagDataEndStream)
}

// Data returns the frame's data octets, not including any padding
// size byte or padding suffix bytes.
// The caller must not retain the returned memory past the next
// call to ReadFrame.
func (f *DataFrame) Data() []byte {
	f.checkValid()
	return f.data
}

// parseDataFrame parses a DATA frame payload, stripping the optional
// pad-length prefix and trailing padding.
func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
	if fh.StreamID == 0 {
		// DATA frames MUST be associated with a stream. If a
		// DATA frame is received whose stream identifier
		// field is 0x0, the recipient MUST respond with a
		// connection error (Section 5.4.1) of type
		// PROTOCOL_ERROR.
		return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
	}
	// Reuse a cached DataFrame allocation when available.
	f := fc.getDataFrame()
	f.FrameHeader = fh

	var padSize byte
	if fh.Flags.Has(FlagDataPadded) {
		// First payload byte is the pad length.
		var err error
		payload, padSize, err = readByte(payload)
		if err != nil {
			return nil, err
		}
	}
	if int(padSize) > len(payload) {
		// If the length of the padding is greater than the
		// length of the frame payload, the recipient MUST
		// treat this as a connection error.
		// Filed: https://github.com/http2/http2-spec/issues/610
		return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
	}
	// Trim the padding suffix; data aliases the Framer's read buffer.
	f.data = payload[:len(payload)-int(padSize)]
	return f, nil
}
+
// Errors returned by the Framer's Write* methods when given invalid
// arguments (some are suppressed when AllowIllegalWrites is set).
var (
	errStreamID    = errors.New("invalid stream ID")
	errDepStreamID = errors.New("invalid dependent stream ID")
	errPadLength   = errors.New("pad length too large")
	errPadBytes    = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
)
+
// validStreamIDOrZero reports whether streamID fits in 31 bits.
// Zero (the connection-level pseudo-stream) is permitted.
func validStreamIDOrZero(streamID uint32) bool {
	return streamID>>31 == 0
}
+
// validStreamID reports whether streamID is a legal, non-zero,
// 31-bit stream identifier.
func validStreamID(streamID uint32) bool {
	if streamID == 0 {
		return false
	}
	return streamID>>31 == 0
}
+
// WriteData writes a DATA frame.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
// and to not call other Write methods concurrently.
func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
	// Delegates to WriteDataPadded with a nil pad, so no PADDED flag is sent.
	return f.WriteDataPadded(streamID, endStream, data, nil)
}
+
// WriteDataPadded writes a DATA frame with optional padding.
//
// If pad is nil, the padding bit is not sent.
// The length of pad must not exceed 255 bytes.
// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
// and to not call other Write methods concurrently.
func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
	if !validStreamID(streamID) && !f.AllowIllegalWrites {
		return errStreamID
	}
	if len(pad) > 0 {
		// The pad-length field on the wire is a single byte.
		if len(pad) > 255 {
			return errPadLength
		}
		if !f.AllowIllegalWrites {
			for _, b := range pad {
				if b != 0 {
					// "Padding octets MUST be set to zero when sending."
					return errPadBytes
				}
			}
		}
	}
	var flags Flags
	if endStream {
		flags |= FlagDataEndStream
	}
	// A non-nil pad — even an empty one — turns on the PADDED flag and
	// the pad-length prefix byte.
	if pad != nil {
		flags |= FlagDataPadded
	}
	f.startWrite(FrameData, flags, streamID)
	if pad != nil {
		f.wbuf = append(f.wbuf, byte(len(pad)))
	}
	f.wbuf = append(f.wbuf, data...)
	f.wbuf = append(f.wbuf, pad...)
	return f.endWrite()
}
+
// A SettingsFrame conveys configuration parameters that affect how
// endpoints communicate, such as preferences and constraints on peer
// behavior.
//
// See http://http2.github.io/http2-spec/#SETTINGS
type SettingsFrame struct {
	FrameHeader
	p []byte // raw payload: a sequence of 6-byte (ID, value) records
}

// parseSettingsFrame validates a SETTINGS payload per RFC 7540 §6.5
// and retains it unparsed for lazy access via Value/ForeachSetting.
func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
	if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
		// When this (ACK 0x1) bit is set, the payload of the
		// SETTINGS frame MUST be empty. Receipt of a
		// SETTINGS frame with the ACK flag set and a length
		// field value other than 0 MUST be treated as a
		// connection error (Section 5.4.1) of type
		// FRAME_SIZE_ERROR.
		return nil, ConnectionError(ErrCodeFrameSize)
	}
	if fh.StreamID != 0 {
		// SETTINGS frames always apply to a connection,
		// never a single stream. The stream identifier for a
		// SETTINGS frame MUST be zero (0x0). If an endpoint
		// receives a SETTINGS frame whose stream identifier
		// field is anything other than 0x0, the endpoint MUST
		// respond with a connection error (Section 5.4.1) of
		// type PROTOCOL_ERROR.
		return nil, ConnectionError(ErrCodeProtocol)
	}
	if len(p)%6 != 0 {
		// Expecting even number of 6 byte settings.
		return nil, ConnectionError(ErrCodeFrameSize)
	}
	f := &SettingsFrame{FrameHeader: fh, p: p}
	if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
		// Values above the maximum flow control window size of 2^31 - 1 MUST
		// be treated as a connection error (Section 5.4.1) of type
		// FLOW_CONTROL_ERROR.
		return nil, ConnectionError(ErrCodeFlowControl)
	}
	return f, nil
}
+
+func (f *SettingsFrame) IsAck() bool {
+ return f.FrameHeader.Flags.Has(FlagSettingsAck)
+}
+
+func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
+ f.checkValid()
+ buf := f.p
+ for len(buf) > 0 {
+ settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
+ if settingID == s {
+ return binary.BigEndian.Uint32(buf[2:6]), true
+ }
+ buf = buf[6:]
+ }
+ return 0, false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
+func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
+ f.checkValid()
+ buf := f.p
+ for len(buf) > 0 {
+ if err := fn(Setting{
+ SettingID(binary.BigEndian.Uint16(buf[:2])),
+ binary.BigEndian.Uint32(buf[2:6]),
+ }); err != nil {
+ return err
+ }
+ buf = buf[6:]
+ }
+ return nil
+}
+
// WriteSettings writes a SETTINGS frame with zero or more settings
// specified and the ACK bit not set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *Framer) WriteSettings(settings ...Setting) error {
	f.startWrite(FrameSettings, 0, 0)
	// Each setting is 6 bytes on the wire: 2-byte ID, 4-byte value.
	for _, s := range settings {
		f.writeUint16(uint16(s.ID))
		f.writeUint32(s.Val)
	}
	return f.endWrite()
}

// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *Framer) WriteSettingsAck() error {
	f.startWrite(FrameSettings, FlagSettingsAck, 0)
	return f.endWrite()
}
+
// A PingFrame is a mechanism for measuring a minimal round trip time
// from the sender, as well as determining whether an idle connection
// is still functional.
// See http://http2.github.io/http2-spec/#rfc.section.6.7
type PingFrame struct {
	FrameHeader
	Data [8]byte // opaque payload, echoed back in the ACK
}

// IsAck reports whether the ACK flag is set on this PING frame.
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }

// parsePingFrame parses a PING frame: exactly 8 opaque bytes on stream 0.
func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
	if len(payload) != 8 {
		return nil, ConnectionError(ErrCodeFrameSize)
	}
	if fh.StreamID != 0 {
		return nil, ConnectionError(ErrCodeProtocol)
	}
	f := &PingFrame{FrameHeader: fh}
	// Copy out of the Framer-owned read buffer so Data survives the
	// next ReadFrame.
	copy(f.Data[:], payload)
	return f, nil
}

// WritePing writes a PING frame with the given 8-byte payload,
// setting the ACK flag if ack is true.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *Framer) WritePing(ack bool, data [8]byte) error {
	var flags Flags
	if ack {
		flags = FlagPingAck
	}
	f.startWrite(FramePing, flags, 0)
	f.writeBytes(data[:])
	return f.endWrite()
}
+
// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
// See http://http2.github.io/http2-spec/#rfc.section.6.8
type GoAwayFrame struct {
	FrameHeader
	LastStreamID uint32 // highest stream ID the sender may have processed
	ErrCode      ErrCode
	debugData    []byte // opaque debug payload; owned by the Framer
}

// DebugData returns any debug data in the GOAWAY frame. Its contents
// are not defined.
// The caller must not retain the returned memory past the next
// call to ReadFrame.
func (f *GoAwayFrame) DebugData() []byte {
	f.checkValid()
	return f.debugData
}

// parseGoAwayFrame parses a GOAWAY frame: a 31-bit last-stream ID, a
// 4-byte error code, and optional trailing debug data, on stream 0.
func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
	if fh.StreamID != 0 {
		return nil, ConnectionError(ErrCodeProtocol)
	}
	if len(p) < 8 {
		return nil, ConnectionError(ErrCodeFrameSize)
	}
	return &GoAwayFrame{
		FrameHeader:  fh,
		LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), // drop reserved bit
		ErrCode:      ErrCode(binary.BigEndian.Uint32(p[4:8])),
		debugData:    p[8:],
	}, nil
}

// WriteGoAway writes a GOAWAY frame. The high (reserved) bit of
// maxStreamID is masked off before sending.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
	f.startWrite(FrameGoAway, 0, 0)
	f.writeUint32(maxStreamID & (1<<31 - 1))
	f.writeUint32(uint32(code))
	f.writeBytes(debugData)
	return f.endWrite()
}
+
// An UnknownFrame is the frame type returned when the frame type is unknown
// or no specific frame type parser exists.
type UnknownFrame struct {
	FrameHeader
	p []byte // raw payload; owned by the Framer
}

// Payload returns the frame's payload (after the header). It is not
// valid to call this method after a subsequent call to
// Framer.ReadFrame, nor is it valid to retain the returned slice.
// The memory is owned by the Framer and is invalidated when the next
// frame is read.
func (f *UnknownFrame) Payload() []byte {
	f.checkValid()
	return f.p
}

// parseUnknownFrame wraps an unrecognized frame type's header and raw
// payload without interpreting them.
func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
	return &UnknownFrame{fh, p}, nil
}
+
// A WindowUpdateFrame is used to implement flow control.
// See http://http2.github.io/http2-spec/#rfc.section.6.9
type WindowUpdateFrame struct {
	FrameHeader
	Increment uint32 // never read with high bit set
}
+
+func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+ if len(p) != 4 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+ if inc == 0 {
+ // A receiver MUST treat the receipt of a
+ // WINDOW_UPDATE frame with an flow control window
+ // increment of 0 as a stream error (Section 5.4.2) of
+ // type PROTOCOL_ERROR; errors on the connection flow
+ // control window MUST be treated as a connection
+ // error (Section 5.4.1).
+ if fh.StreamID == 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ return nil, streamError(fh.StreamID, ErrCodeProtocol)
+ }
+ return &WindowUpdateFrame{
+ FrameHeader: fh,
+ Increment: inc,
+ }, nil
+}
+
// WriteWindowUpdate writes a WINDOW_UPDATE frame.
// The increment value must be between 1 and 2,147,483,647, inclusive.
// If the Stream ID is zero, the window update applies to the
// connection as a whole.
func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
	// "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
	if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
		return errors.New("illegal window increment value")
	}
	f.startWrite(FrameWindowUpdate, 0, streamID)
	f.writeUint32(incr)
	return f.endWrite()
}
+
// A HeadersFrame is used to open a stream and additionally carries a
// header block fragment.
type HeadersFrame struct {
	FrameHeader

	// Priority is set if FlagHeadersPriority is set in the FrameHeader.
	Priority PriorityParam

	headerFragBuf []byte // not owned
}

// HeaderBlockFragment returns the HPACK-encoded header block fragment.
// The slice aliases the Framer's read buffer and must not be retained
// past the next call to ReadFrame.
func (f *HeadersFrame) HeaderBlockFragment() []byte {
	f.checkValid()
	return f.headerFragBuf
}

// HeadersEnded reports whether the END_HEADERS flag is set, i.e. no
// CONTINUATION frames follow.
func (f *HeadersFrame) HeadersEnded() bool {
	return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)
}

// StreamEnded reports whether the END_STREAM flag is set.
func (f *HeadersFrame) StreamEnded() bool {
	return f.FrameHeader.Flags.Has(FlagHeadersEndStream)
}

// HasPriority reports whether the frame carries priority information.
func (f *HeadersFrame) HasPriority() bool {
	return f.FrameHeader.Flags.Has(FlagHeadersPriority)
}
+
// parseHeadersFrame parses a HEADERS payload: an optional pad-length
// byte, an optional 5-byte priority section, the header block
// fragment, and trailing padding — in that wire order.
func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
	hf := &HeadersFrame{
		FrameHeader: fh,
	}
	if fh.StreamID == 0 {
		// HEADERS frames MUST be associated with a stream. If a HEADERS frame
		// is received whose stream identifier field is 0x0, the recipient MUST
		// respond with a connection error (Section 5.4.1) of type
		// PROTOCOL_ERROR.
		return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
	}
	var padLength uint8
	if fh.Flags.Has(FlagHeadersPadded) {
		if p, padLength, err = readByte(p); err != nil {
			return
		}
	}
	if fh.Flags.Has(FlagHeadersPriority) {
		// 4-byte dependency word: exclusive bit + 31-bit stream dep,
		// then a 1-byte weight.
		var v uint32
		p, v, err = readUint32(p)
		if err != nil {
			return nil, err
		}
		hf.Priority.StreamDep = v & 0x7fffffff
		hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
		p, hf.Priority.Weight, err = readByte(p)
		if err != nil {
			return nil, err
		}
	}
	// Reject frames whose padding consumes the entire (or more than the)
	// remaining payload, leaving no header block fragment.
	if len(p)-int(padLength) <= 0 {
		return nil, streamError(fh.StreamID, ErrCodeProtocol)
	}
	hf.headerFragBuf = p[:len(p)-int(padLength)]
	return hf, nil
}
+
// HeadersFrameParam are the parameters for writing a HEADERS frame.
type HeadersFrameParam struct {
	// StreamID is the required Stream ID to initiate.
	StreamID uint32
	// BlockFragment is part (or all) of a Header Block.
	BlockFragment []byte

	// EndStream indicates that the header block is the last that
	// the endpoint will send for the identified stream. Setting
	// this flag causes the stream to enter one of "half closed"
	// states.
	EndStream bool

	// EndHeaders indicates that this frame contains an entire
	// header block and is not followed by any
	// CONTINUATION frames.
	EndHeaders bool

	// PadLength is the optional number of bytes of zeros to add
	// to this frame.
	PadLength uint8

	// Priority, if non-zero, includes stream priority information
	// in the HEADERS frame.
	Priority PriorityParam
}
+
// WriteHeaders writes a single HEADERS frame.
//
// This is a low-level header writing method. Encoding headers and
// splitting them into any necessary CONTINUATION frames is handled
// elsewhere.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
		return errStreamID
	}
	var flags Flags
	if p.PadLength != 0 {
		flags |= FlagHeadersPadded
	}
	if p.EndStream {
		flags |= FlagHeadersEndStream
	}
	if p.EndHeaders {
		flags |= FlagHeadersEndHeaders
	}
	if !p.Priority.IsZero() {
		flags |= FlagHeadersPriority
	}
	f.startWrite(FrameHeaders, flags, p.StreamID)
	if p.PadLength != 0 {
		f.writeByte(p.PadLength)
	}
	if !p.Priority.IsZero() {
		v := p.Priority.StreamDep
		// NOTE(review): this validation happens after startWrite has
		// begun staging the frame; presumably the next startWrite resets
		// the write buffer — confirm before relying on it.
		if !validStreamIDOrZero(v) && !f.AllowIllegalWrites {
			return errDepStreamID
		}
		if p.Priority.Exclusive {
			v |= 1 << 31 // exclusive flag rides the high bit of the dep word
		}
		f.writeUint32(v)
		f.writeByte(p.Priority.Weight)
	}
	f.wbuf = append(f.wbuf, p.BlockFragment...)
	// Padding is all zeros, as required when AllowIllegalWrites is unset.
	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
	return f.endWrite()
}
+
// A PriorityFrame specifies the sender-advised priority of a stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.3
type PriorityFrame struct {
	FrameHeader
	PriorityParam
}

// PriorityParam are the stream prioritization parameters.
type PriorityParam struct {
	// StreamDep is a 31-bit stream identifier for the
	// stream that this stream depends on. Zero means no
	// dependency.
	StreamDep uint32

	// Exclusive is whether the dependency is exclusive.
	Exclusive bool

	// Weight is the stream's zero-indexed weight. It should be
	// set together with StreamDep, or neither should be set. Per
	// the spec, "Add one to the value to obtain a weight between
	// 1 and 256."
	Weight uint8
}

// IsZero reports whether p carries no priority information at all.
func (p PriorityParam) IsZero() bool {
	return p == PriorityParam{}
}
+
+func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
+ }
+ if len(payload) != 5 {
+ return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
+ }
+ v := binary.BigEndian.Uint32(payload[:4])
+ streamID := v & 0x7fffffff // mask off high bit
+ return &PriorityFrame{
+ FrameHeader: fh,
+ PriorityParam: PriorityParam{
+ Weight: payload[4],
+ StreamDep: streamID,
+ Exclusive: streamID != v, // was high bit set?
+ },
+ }, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ if !validStreamIDOrZero(p.StreamDep) {
+ return errDepStreamID
+ }
+ f.startWrite(FramePriority, 0, streamID)
+ v := p.StreamDep
+ if p.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Weight)
+ return f.endWrite()
+}
+
// A RSTStreamFrame allows for abnormal termination of a stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.4
type RSTStreamFrame struct {
	FrameHeader
	ErrCode ErrCode
}

// parseRSTStreamFrame parses a RST_STREAM frame: a 4-byte error code
// on a non-zero stream.
func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
	if len(p) != 4 {
		return nil, ConnectionError(ErrCodeFrameSize)
	}
	if fh.StreamID == 0 {
		return nil, ConnectionError(ErrCodeProtocol)
	}
	return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
}

// WriteRSTStream writes a RST_STREAM frame.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
	if !validStreamID(streamID) && !f.AllowIllegalWrites {
		return errStreamID
	}
	f.startWrite(FrameRSTStream, 0, streamID)
	f.writeUint32(uint32(code))
	return f.endWrite()
}
+
// A ContinuationFrame is used to continue a sequence of header block fragments.
// See http://http2.github.io/http2-spec/#rfc.section.6.10
type ContinuationFrame struct {
	FrameHeader
	headerFragBuf []byte // aliases the Framer's read buffer; not owned
}

// parseContinuationFrame wraps a CONTINUATION payload; ordering rules
// relative to HEADERS are enforced separately in checkFrameOrder.
func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
	if fh.StreamID == 0 {
		return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
	}
	return &ContinuationFrame{fh, p}, nil
}

// HeaderBlockFragment returns the HPACK-encoded header block fragment.
// The slice must not be retained past the next call to ReadFrame.
func (f *ContinuationFrame) HeaderBlockFragment() []byte {
	f.checkValid()
	return f.headerFragBuf
}

// HeadersEnded reports whether the END_HEADERS flag is set, i.e. this
// is the last frame of the header block.
func (f *ContinuationFrame) HeadersEnded() bool {
	return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)
}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if endHeaders {
+ flags |= FlagContinuationEndHeaders
+ }
+ f.startWrite(FrameContinuation, flags, streamID)
+ f.wbuf = append(f.wbuf, headerBlockFragment...)
+ return f.endWrite()
+}
+
// A PushPromiseFrame is used to initiate a server stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.6
type PushPromiseFrame struct {
	FrameHeader
	PromiseID     uint32
	headerFragBuf []byte // not owned
}

// HeaderBlockFragment returns the HPACK-encoded header block fragment.
// The slice must not be retained past the next call to ReadFrame.
func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
	f.checkValid()
	return f.headerFragBuf
}

// HeadersEnded reports whether the END_HEADERS flag is set, i.e. no
// CONTINUATION frames follow.
func (f *PushPromiseFrame) HeadersEnded() bool {
	return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
}
+
// parsePushPromise parses a PUSH_PROMISE payload: an optional
// pad-length byte, a 31-bit promised stream ID, the header block
// fragment, and trailing padding.
func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
	pp := &PushPromiseFrame{
		FrameHeader: fh,
	}
	if pp.StreamID == 0 {
		// PUSH_PROMISE frames MUST be associated with an existing,
		// peer-initiated stream. The stream identifier of a
		// PUSH_PROMISE frame indicates the stream it is associated
		// with. If the stream identifier field specifies the value
		// 0x0, a recipient MUST respond with a connection error
		// (Section 5.4.1) of type PROTOCOL_ERROR.
		return nil, ConnectionError(ErrCodeProtocol)
	}
	// The PUSH_PROMISE frame includes optional padding.
	// Padding fields and flags are identical to those defined for DATA frames
	var padLength uint8
	if fh.Flags.Has(FlagPushPromisePadded) {
		if p, padLength, err = readByte(p); err != nil {
			return
		}
	}

	p, pp.PromiseID, err = readUint32(p)
	if err != nil {
		return
	}
	// Mask off the reserved high bit of the promised stream ID.
	pp.PromiseID = pp.PromiseID & (1<<31 - 1)

	if int(padLength) > len(p) {
		// like the DATA frame, error out if padding is longer than the body.
		return nil, ConnectionError(ErrCodeProtocol)
	}
	pp.headerFragBuf = p[:len(p)-int(padLength)]
	return pp, nil
}
+
// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
type PushPromiseParam struct {
	// StreamID is the required Stream ID to initiate.
	StreamID uint32

	// PromiseID is the required Stream ID that this
	// PUSH_PROMISE frame promises (the reserved stream).
	PromiseID uint32

	// BlockFragment is part (or all) of a Header Block.
	BlockFragment []byte

	// EndHeaders indicates that this frame contains an entire
	// header block and is not followed by any
	// CONTINUATION frames.
	EndHeaders bool

	// PadLength is the optional number of bytes of zeros to add
	// to this frame.
	PadLength uint8
}
+
+// WritePushPromise writes a single PushPromise Frame.
+//
+// As with Header Frames, This is the low level call for writing
+// individual frames. Continuation frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePushPromise(p PushPromiseParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagPushPromisePadded
+ }
+ if p.EndHeaders {
+ flags |= FlagPushPromiseEndHeaders
+ }
+ f.startWrite(FramePushPromise, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.writeUint32(p.PromiseID)
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
+
// WriteRawFrame writes a raw frame. This can be used to write
// extension frames unknown to this package.
// No validation is performed on the type, flags, stream ID, or payload.
func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {
	f.startWrite(t, flags, streamID)
	f.writeBytes(payload)
	return f.endWrite()
}
+
// readByte consumes the first byte of p, returning the remainder of p
// and the byte read, or io.ErrUnexpectedEOF if p is empty.
func readByte(p []byte) (remain []byte, b byte, err error) {
	if len(p) < 1 {
		return nil, 0, io.ErrUnexpectedEOF
	}
	b = p[0]
	return p[1:], b, nil
}
+
// readUint32 consumes the first four bytes of p as a big-endian
// uint32, returning the remainder of p and the decoded value, or
// io.ErrUnexpectedEOF if fewer than four bytes remain.
func readUint32(p []byte) (remain []byte, v uint32, err error) {
	if len(p) < 4 {
		return nil, 0, io.ErrUnexpectedEOF
	}
	v = binary.BigEndian.Uint32(p)
	return p[4:], v, nil
}
+
// streamEnder is implemented by frames that can carry END_STREAM
// (DATA, HEADERS).
type streamEnder interface {
	StreamEnded() bool
}

// headersEnder is implemented by frames that can carry END_HEADERS
// (HEADERS, CONTINUATION, PUSH_PROMISE).
type headersEnder interface {
	HeadersEnded() bool
}

// headersOrContinuation abstracts over HEADERS and CONTINUATION frames
// for readMetaFrame's header-block reassembly loop.
type headersOrContinuation interface {
	headersEnder
	HeaderBlockFragment() []byte
}
+
// A MetaHeadersFrame is the representation of one HEADERS frame and
// zero or more contiguous CONTINUATION frames and the decoding of
// their HPACK-encoded contents.
//
// This type of frame does not appear on the wire and is only returned
// by the Framer when Framer.ReadMetaHeaders is set.
type MetaHeadersFrame struct {
	*HeadersFrame

	// Fields are the fields contained in the HEADERS and
	// CONTINUATION frames. The underlying slice is owned by the
	// Framer and must not be retained after the next call to
	// ReadFrame.
	//
	// Fields are guaranteed to be in the correct http2 order and
	// not have unknown pseudo header fields or invalid header
	// field names or values. Required pseudo header fields may be
	// missing, however. Use the MetaHeadersFrame.Pseudo accessor
	// method to access pseudo headers.
	Fields []hpack.HeaderField

	// Truncated is whether the max header list size limit was hit
	// and Fields is incomplete. The hpack decoder state is still
	// valid, however.
	Truncated bool
}
+
+// PseudoValue returns the given pseudo header field's value.
+// The provided pseudo field should not contain the leading colon.
+func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string {
+ for _, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return ""
+ }
+ if hf.Name[1:] == pseudo {
+ return hf.Value
+ }
+ }
+ return ""
+}
+
+// RegularFields returns the regular (non-pseudo) header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[i:]
+ }
+ }
+ return nil
+}
+
+// PseudoFields returns the pseudo header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[:i]
+ }
+ }
+ return mh.Fields
+}
+
// checkPseudos validates mh's pseudo header fields: only the known
// request/response pseudo headers are allowed, none may be duplicated,
// and request and response pseudo headers must not be mixed.
func (mh *MetaHeadersFrame) checkPseudos() error {
	var isRequest, isResponse bool
	pf := mh.PseudoFields()
	for i, hf := range pf {
		switch hf.Name {
		case ":method", ":path", ":scheme", ":authority":
			isRequest = true
		case ":status":
			isResponse = true
		default:
			return pseudoHeaderError(hf.Name)
		}
		// Check for duplicates.
		// This would be a bad algorithm, but N is 4.
		// And this doesn't allocate.
		for _, hf2 := range pf[:i] {
			if hf.Name == hf2.Name {
				return duplicatePseudoHeaderError(hf.Name)
			}
		}
	}
	if isRequest && isResponse {
		return errMixPseudoHeaderTypes
	}
	return nil
}
+
// maxHeaderStringLen returns the per-string length limit to give the
// hpack decoder, or 0 for unlimited.
func (fr *Framer) maxHeaderStringLen() int {
	v := fr.maxHeaderListSize()
	// Round-trip check detects uint32 values that don't survive
	// conversion to int on this platform.
	// NOTE(review): on a 32-bit int, values >= 2^31 round-trip through a
	// negative int and would pass this check — confirm intent.
	if uint32(int(v)) == v {
		return int(v)
	}
	// They had a crazy big number for MaxHeaderBytes anyway,
	// so give them unlimited header lengths:
	return 0
}
+
// readMetaFrame reads 0 or more CONTINUATION frames from fr, merges
// them into the provided hf, and returns a MetaHeadersFrame
// with the decoded hpack values.
func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
	if fr.AllowIllegalReads {
		return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
	}
	mh := &MetaHeadersFrame{
		HeadersFrame: hf,
	}
	// remainSize budgets the total decoded header list size; once
	// exhausted, further fields are dropped and mh.Truncated is set.
	var remainSize = fr.maxHeaderListSize()
	var sawRegular bool

	var invalid error // pseudo header field errors
	hdec := fr.ReadMetaHeaders
	hdec.SetEmitEnabled(true)
	hdec.SetMaxStringLength(fr.maxHeaderStringLen())
	// The emit callback runs once per decoded field; it validates the
	// field and accumulates it into mh.Fields.
	hdec.SetEmitFunc(func(hf hpack.HeaderField) {
		if VerboseLogs && fr.logReads {
			fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
		}
		if !httplex.ValidHeaderFieldValue(hf.Value) {
			invalid = headerFieldValueError(hf.Value)
		}
		isPseudo := strings.HasPrefix(hf.Name, ":")
		if isPseudo {
			// Pseudo headers must all precede regular headers.
			if sawRegular {
				invalid = errPseudoAfterRegular
			}
		} else {
			sawRegular = true
			if !validWireHeaderFieldName(hf.Name) {
				invalid = headerFieldNameError(hf.Name)
			}
		}

		// Once any field is invalid, stop emitting but keep decoding so
		// the hpack state stays consistent.
		if invalid != nil {
			hdec.SetEmitEnabled(false)
			return
		}

		size := hf.Size()
		if size > remainSize {
			hdec.SetEmitEnabled(false)
			mh.Truncated = true
			return
		}
		remainSize -= size

		mh.Fields = append(mh.Fields, hf)
	})
	// Lose reference to MetaHeadersFrame:
	defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})

	// Feed the HEADERS fragment, then each CONTINUATION fragment, into
	// the decoder until END_HEADERS.
	var hc headersOrContinuation = hf
	for {
		frag := hc.HeaderBlockFragment()
		if _, err := hdec.Write(frag); err != nil {
			return nil, ConnectionError(ErrCodeCompression)
		}

		if hc.HeadersEnded() {
			break
		}
		if f, err := fr.ReadFrame(); err != nil {
			return nil, err
		} else {
			hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder
		}
	}

	// The raw fragment aliases a buffer that is about to be reused;
	// drop it and mark the inner frame invalid.
	mh.HeadersFrame.headerFragBuf = nil
	mh.HeadersFrame.invalidate()

	if err := hdec.Close(); err != nil {
		return nil, ConnectionError(ErrCodeCompression)
	}
	if invalid != nil {
		fr.errDetail = invalid
		if VerboseLogs {
			log.Printf("http2: invalid header: %v", invalid)
		}
		return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}
	}
	if err := mh.checkPseudos(); err != nil {
		fr.errDetail = err
		if VerboseLogs {
			log.Printf("http2: invalid pseudo headers: %v", err)
		}
		return nil, StreamError{mh.StreamID, ErrCodeProtocol, err}
	}
	return mh, nil
}
+
// summarizeFrame returns a one-line, human-readable description of f
// for debug logging (see Framer.debugReadLoggerf callers).
func summarizeFrame(f Frame) string {
	var buf bytes.Buffer
	f.Header().writeDebug(&buf)
	switch f := f.(type) {
	case *SettingsFrame:
		n := 0
		f.ForeachSetting(func(s Setting) error {
			n++
			if n == 1 {
				buf.WriteString(", settings:")
			}
			fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
			return nil
		})
		if n > 0 {
			buf.Truncate(buf.Len() - 1) // remove trailing comma
		}
	case *DataFrame:
		// Show at most 256 bytes of the payload, noting how much was cut.
		data := f.Data()
		const max = 256
		if len(data) > max {
			data = data[:max]
		}
		fmt.Fprintf(&buf, " data=%q", data)
		if len(f.Data()) > max {
			fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
		}
	case *WindowUpdateFrame:
		if f.StreamID == 0 {
			buf.WriteString(" (conn)")
		}
		fmt.Fprintf(&buf, " incr=%v", f.Increment)
	case *PingFrame:
		fmt.Fprintf(&buf, " ping=%q", f.Data[:])
	case *GoAwayFrame:
		fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
			f.LastStreamID, f.ErrCode, f.debugData)
	case *RSTStreamFrame:
		fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
	}
	return buf.String()
}
diff --git a/vendor/golang.org/x/net/http2/frame_test.go b/vendor/golang.org/x/net/http2/frame_test.go
new file mode 100644
index 0000000..37266bc
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/frame_test.go
@@ -0,0 +1,1191 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "testing"
+ "unsafe"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+func testFramer() (*Framer, *bytes.Buffer) {
+ buf := new(bytes.Buffer)
+ return NewFramer(buf, buf), buf
+}
+
+func TestFrameSizes(t *testing.T) {
+ // Catch people rearranging the FrameHeader fields.
+ if got, want := int(unsafe.Sizeof(FrameHeader{})), 12; got != want {
+ t.Errorf("FrameHeader size = %d; want %d", got, want)
+ }
+}
+
+func TestFrameTypeString(t *testing.T) {
+ tests := []struct {
+ ft FrameType
+ want string
+ }{
+ {FrameData, "DATA"},
+ {FramePing, "PING"},
+ {FrameGoAway, "GOAWAY"},
+ {0xf, "UNKNOWN_FRAME_TYPE_15"},
+ }
+
+ for i, tt := range tests {
+ got := tt.ft.String()
+ if got != tt.want {
+ t.Errorf("%d. String(FrameType %d) = %q; want %q", i, int(tt.ft), got, tt.want)
+ }
+ }
+}
+
+func TestWriteRST(t *testing.T) {
+ fr, buf := testFramer()
+ var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4
+ var errCode uint32 = 7<<24 + 6<<16 + 5<<8 + 4
+ fr.WriteRSTStream(streamID, ErrCode(errCode))
+ const wantEnc = "\x00\x00\x04\x03\x00\x01\x02\x03\x04\x07\x06\x05\x04"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &RSTStreamFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ Type: 0x3,
+ Flags: 0x0,
+ Length: 0x4,
+ StreamID: 0x1020304,
+ },
+ ErrCode: 0x7060504,
+ }
+ if !reflect.DeepEqual(f, want) {
+ t.Errorf("parsed back %#v; want %#v", f, want)
+ }
+}
+
+func TestWriteData(t *testing.T) {
+ fr, buf := testFramer()
+ var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4
+ data := []byte("ABC")
+ fr.WriteData(streamID, true, data)
+ const wantEnc = "\x00\x00\x03\x00\x01\x01\x02\x03\x04ABC"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ df, ok := f.(*DataFrame)
+ if !ok {
+ t.Fatalf("got %T; want *DataFrame", f)
+ }
+ if !bytes.Equal(df.Data(), data) {
+ t.Errorf("got %q; want %q", df.Data(), data)
+ }
+ if f.Header().Flags&1 == 0 {
+ t.Errorf("didn't see END_STREAM flag")
+ }
+}
+
+func TestWriteDataPadded(t *testing.T) {
+ tests := [...]struct {
+ streamID uint32
+ endStream bool
+ data []byte
+ pad []byte
+ wantHeader FrameHeader
+ }{
+ // Unpadded:
+ 0: {
+ streamID: 1,
+ endStream: true,
+ data: []byte("foo"),
+ pad: nil,
+ wantHeader: FrameHeader{
+ Type: FrameData,
+ Flags: FlagDataEndStream,
+ Length: 3,
+ StreamID: 1,
+ },
+ },
+
+ // Padded bit set, but no padding:
+ 1: {
+ streamID: 1,
+ endStream: true,
+ data: []byte("foo"),
+ pad: []byte{},
+ wantHeader: FrameHeader{
+ Type: FrameData,
+ Flags: FlagDataEndStream | FlagDataPadded,
+ Length: 4,
+ StreamID: 1,
+ },
+ },
+
+ // Padded bit set, with padding:
+ 2: {
+ streamID: 1,
+ endStream: false,
+ data: []byte("foo"),
+ pad: []byte{0, 0, 0},
+ wantHeader: FrameHeader{
+ Type: FrameData,
+ Flags: FlagDataPadded,
+ Length: 7,
+ StreamID: 1,
+ },
+ },
+ }
+ for i, tt := range tests {
+ fr, _ := testFramer()
+ fr.WriteDataPadded(tt.streamID, tt.endStream, tt.data, tt.pad)
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Errorf("%d. ReadFrame: %v", i, err)
+ continue
+ }
+ got := f.Header()
+ tt.wantHeader.valid = true
+ if got != tt.wantHeader {
+ t.Errorf("%d. read %+v; want %+v", i, got, tt.wantHeader)
+ continue
+ }
+ df := f.(*DataFrame)
+ if !bytes.Equal(df.Data(), tt.data) {
+ t.Errorf("%d. got %q; want %q", i, df.Data(), tt.data)
+ }
+ }
+}
+
+func TestWriteHeaders(t *testing.T) {
+ tests := []struct {
+ name string
+ p HeadersFrameParam
+ wantEnc string
+ wantFrame *HeadersFrame
+ }{
+ {
+ "basic",
+ HeadersFrameParam{
+ StreamID: 42,
+ BlockFragment: []byte("abc"),
+ Priority: PriorityParam{},
+ },
+ "\x00\x00\x03\x01\x00\x00\x00\x00*abc",
+ &HeadersFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: 42,
+ Type: FrameHeaders,
+ Length: uint32(len("abc")),
+ },
+ Priority: PriorityParam{},
+ headerFragBuf: []byte("abc"),
+ },
+ },
+ {
+ "basic + end flags",
+ HeadersFrameParam{
+ StreamID: 42,
+ BlockFragment: []byte("abc"),
+ EndStream: true,
+ EndHeaders: true,
+ Priority: PriorityParam{},
+ },
+ "\x00\x00\x03\x01\x05\x00\x00\x00*abc",
+ &HeadersFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: 42,
+ Type: FrameHeaders,
+ Flags: FlagHeadersEndStream | FlagHeadersEndHeaders,
+ Length: uint32(len("abc")),
+ },
+ Priority: PriorityParam{},
+ headerFragBuf: []byte("abc"),
+ },
+ },
+ {
+ "with padding",
+ HeadersFrameParam{
+ StreamID: 42,
+ BlockFragment: []byte("abc"),
+ EndStream: true,
+ EndHeaders: true,
+ PadLength: 5,
+ Priority: PriorityParam{},
+ },
+ "\x00\x00\t\x01\r\x00\x00\x00*\x05abc\x00\x00\x00\x00\x00",
+ &HeadersFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: 42,
+ Type: FrameHeaders,
+ Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded,
+ Length: uint32(1 + len("abc") + 5), // pad length + contents + padding
+ },
+ Priority: PriorityParam{},
+ headerFragBuf: []byte("abc"),
+ },
+ },
+ {
+ "with priority",
+ HeadersFrameParam{
+ StreamID: 42,
+ BlockFragment: []byte("abc"),
+ EndStream: true,
+ EndHeaders: true,
+ PadLength: 2,
+ Priority: PriorityParam{
+ StreamDep: 15,
+ Exclusive: true,
+ Weight: 127,
+ },
+ },
+ "\x00\x00\v\x01-\x00\x00\x00*\x02\x80\x00\x00\x0f\u007fabc\x00\x00",
+ &HeadersFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: 42,
+ Type: FrameHeaders,
+ Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded | FlagHeadersPriority,
+ Length: uint32(1 + 5 + len("abc") + 2), // pad length + priority + contents + padding
+ },
+ Priority: PriorityParam{
+ StreamDep: 15,
+ Exclusive: true,
+ Weight: 127,
+ },
+ headerFragBuf: []byte("abc"),
+ },
+ },
+ {
+ "with priority stream dep zero", // golang.org/issue/15444
+ HeadersFrameParam{
+ StreamID: 42,
+ BlockFragment: []byte("abc"),
+ EndStream: true,
+ EndHeaders: true,
+ PadLength: 2,
+ Priority: PriorityParam{
+ StreamDep: 0,
+ Exclusive: true,
+ Weight: 127,
+ },
+ },
+ "\x00\x00\v\x01-\x00\x00\x00*\x02\x80\x00\x00\x00\u007fabc\x00\x00",
+ &HeadersFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: 42,
+ Type: FrameHeaders,
+ Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded | FlagHeadersPriority,
+ Length: uint32(1 + 5 + len("abc") + 2), // pad length + priority + contents + padding
+ },
+ Priority: PriorityParam{
+ StreamDep: 0,
+ Exclusive: true,
+ Weight: 127,
+ },
+ headerFragBuf: []byte("abc"),
+ },
+ },
+ }
+ for _, tt := range tests {
+ fr, buf := testFramer()
+ if err := fr.WriteHeaders(tt.p); err != nil {
+ t.Errorf("test %q: %v", tt.name, err)
+ continue
+ }
+ if buf.String() != tt.wantEnc {
+ t.Errorf("test %q: encoded %q; want %q", tt.name, buf.Bytes(), tt.wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
+ continue
+ }
+ if !reflect.DeepEqual(f, tt.wantFrame) {
+ t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
+ }
+ }
+}
+
+func TestWriteInvalidStreamDep(t *testing.T) {
+ fr, _ := testFramer()
+ err := fr.WriteHeaders(HeadersFrameParam{
+ StreamID: 42,
+ Priority: PriorityParam{
+ StreamDep: 1 << 31,
+ },
+ })
+ if err != errDepStreamID {
+ t.Errorf("header error = %v; want %q", err, errDepStreamID)
+ }
+
+ err = fr.WritePriority(2, PriorityParam{StreamDep: 1 << 31})
+ if err != errDepStreamID {
+ t.Errorf("priority error = %v; want %q", err, errDepStreamID)
+ }
+}
+
+func TestWriteContinuation(t *testing.T) {
+ const streamID = 42
+ tests := []struct {
+ name string
+ end bool
+ frag []byte
+
+ wantFrame *ContinuationFrame
+ }{
+ {
+ "not end",
+ false,
+ []byte("abc"),
+ &ContinuationFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: streamID,
+ Type: FrameContinuation,
+ Length: uint32(len("abc")),
+ },
+ headerFragBuf: []byte("abc"),
+ },
+ },
+ {
+ "end",
+ true,
+ []byte("def"),
+ &ContinuationFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: streamID,
+ Type: FrameContinuation,
+ Flags: FlagContinuationEndHeaders,
+ Length: uint32(len("def")),
+ },
+ headerFragBuf: []byte("def"),
+ },
+ },
+ }
+ for _, tt := range tests {
+ fr, _ := testFramer()
+ if err := fr.WriteContinuation(streamID, tt.end, tt.frag); err != nil {
+ t.Errorf("test %q: %v", tt.name, err)
+ continue
+ }
+ fr.AllowIllegalReads = true
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
+ continue
+ }
+ if !reflect.DeepEqual(f, tt.wantFrame) {
+ t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
+ }
+ }
+}
+
+func TestWritePriority(t *testing.T) {
+ const streamID = 42
+ tests := []struct {
+ name string
+ priority PriorityParam
+ wantFrame *PriorityFrame
+ }{
+ {
+ "not exclusive",
+ PriorityParam{
+ StreamDep: 2,
+ Exclusive: false,
+ Weight: 127,
+ },
+ &PriorityFrame{
+ FrameHeader{
+ valid: true,
+ StreamID: streamID,
+ Type: FramePriority,
+ Length: 5,
+ },
+ PriorityParam{
+ StreamDep: 2,
+ Exclusive: false,
+ Weight: 127,
+ },
+ },
+ },
+
+ {
+ "exclusive",
+ PriorityParam{
+ StreamDep: 3,
+ Exclusive: true,
+ Weight: 77,
+ },
+ &PriorityFrame{
+ FrameHeader{
+ valid: true,
+ StreamID: streamID,
+ Type: FramePriority,
+ Length: 5,
+ },
+ PriorityParam{
+ StreamDep: 3,
+ Exclusive: true,
+ Weight: 77,
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ fr, _ := testFramer()
+ if err := fr.WritePriority(streamID, tt.priority); err != nil {
+ t.Errorf("test %q: %v", tt.name, err)
+ continue
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
+ continue
+ }
+ if !reflect.DeepEqual(f, tt.wantFrame) {
+ t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
+ }
+ }
+}
+
+func TestWriteSettings(t *testing.T) {
+ fr, buf := testFramer()
+ settings := []Setting{{1, 2}, {3, 4}}
+ fr.WriteSettings(settings...)
+ const wantEnc = "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x03\x00\x00\x00\x04"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ sf, ok := f.(*SettingsFrame)
+ if !ok {
+ t.Fatalf("Got a %T; want a SettingsFrame", f)
+ }
+ var got []Setting
+ sf.ForeachSetting(func(s Setting) error {
+ got = append(got, s)
+ valBack, ok := sf.Value(s.ID)
+ if !ok || valBack != s.Val {
+ t.Errorf("Value(%d) = %v, %v; want %v, true", s.ID, valBack, ok, s.Val)
+ }
+ return nil
+ })
+ if !reflect.DeepEqual(settings, got) {
+ t.Errorf("Read settings %+v != written settings %+v", got, settings)
+ }
+}
+
+func TestWriteSettingsAck(t *testing.T) {
+ fr, buf := testFramer()
+ fr.WriteSettingsAck()
+ const wantEnc = "\x00\x00\x00\x04\x01\x00\x00\x00\x00"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+}
+
+func TestWriteWindowUpdate(t *testing.T) {
+ fr, buf := testFramer()
+ const streamID = 1<<24 + 2<<16 + 3<<8 + 4
+ const incr = 7<<24 + 6<<16 + 5<<8 + 4
+ if err := fr.WriteWindowUpdate(streamID, incr); err != nil {
+ t.Fatal(err)
+ }
+ const wantEnc = "\x00\x00\x04\x08\x00\x01\x02\x03\x04\x07\x06\x05\x04"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &WindowUpdateFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ Type: 0x8,
+ Flags: 0x0,
+ Length: 0x4,
+ StreamID: 0x1020304,
+ },
+ Increment: 0x7060504,
+ }
+ if !reflect.DeepEqual(f, want) {
+ t.Errorf("parsed back %#v; want %#v", f, want)
+ }
+}
+
+func TestWritePing(t *testing.T) { testWritePing(t, false) }
+func TestWritePingAck(t *testing.T) { testWritePing(t, true) }
+
+func testWritePing(t *testing.T, ack bool) {
+ fr, buf := testFramer()
+ if err := fr.WritePing(ack, [8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil {
+ t.Fatal(err)
+ }
+ var wantFlags Flags
+ if ack {
+ wantFlags = FlagPingAck
+ }
+ var wantEnc = "\x00\x00\x08\x06" + string(wantFlags) + "\x00\x00\x00\x00" + "\x01\x02\x03\x04\x05\x06\x07\x08"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &PingFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ Type: 0x6,
+ Flags: wantFlags,
+ Length: 0x8,
+ StreamID: 0,
+ },
+ Data: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
+ }
+ if !reflect.DeepEqual(f, want) {
+ t.Errorf("parsed back %#v; want %#v", f, want)
+ }
+}
+
+func TestReadFrameHeader(t *testing.T) {
+ tests := []struct {
+ in string
+ want FrameHeader
+ }{
+ {in: "\x00\x00\x00" + "\x00" + "\x00" + "\x00\x00\x00\x00", want: FrameHeader{}},
+ {in: "\x01\x02\x03" + "\x04" + "\x05" + "\x06\x07\x08\x09", want: FrameHeader{
+ Length: 66051, Type: 4, Flags: 5, StreamID: 101124105,
+ }},
+ // Ignore high bit:
+ {in: "\xff\xff\xff" + "\xff" + "\xff" + "\xff\xff\xff\xff", want: FrameHeader{
+ Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}},
+ {in: "\xff\xff\xff" + "\xff" + "\xff" + "\x7f\xff\xff\xff", want: FrameHeader{
+ Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}},
+ }
+ for i, tt := range tests {
+ got, err := readFrameHeader(make([]byte, 9), strings.NewReader(tt.in))
+ if err != nil {
+ t.Errorf("%d. readFrameHeader(%q) = %v", i, tt.in, err)
+ continue
+ }
+ tt.want.valid = true
+ if got != tt.want {
+ t.Errorf("%d. readFrameHeader(%q) = %+v; want %+v", i, tt.in, got, tt.want)
+ }
+ }
+}
+
+func TestReadWriteFrameHeader(t *testing.T) {
+ tests := []struct {
+ len uint32
+ typ FrameType
+ flags Flags
+ streamID uint32
+ }{
+ {len: 0, typ: 255, flags: 1, streamID: 0},
+ {len: 0, typ: 255, flags: 1, streamID: 1},
+ {len: 0, typ: 255, flags: 1, streamID: 255},
+ {len: 0, typ: 255, flags: 1, streamID: 256},
+ {len: 0, typ: 255, flags: 1, streamID: 65535},
+ {len: 0, typ: 255, flags: 1, streamID: 65536},
+
+ {len: 0, typ: 1, flags: 255, streamID: 1},
+ {len: 255, typ: 1, flags: 255, streamID: 1},
+ {len: 256, typ: 1, flags: 255, streamID: 1},
+ {len: 65535, typ: 1, flags: 255, streamID: 1},
+ {len: 65536, typ: 1, flags: 255, streamID: 1},
+ {len: 16777215, typ: 1, flags: 255, streamID: 1},
+ }
+ for _, tt := range tests {
+ fr, buf := testFramer()
+ fr.startWrite(tt.typ, tt.flags, tt.streamID)
+ fr.writeBytes(make([]byte, tt.len))
+ fr.endWrite()
+ fh, err := ReadFrameHeader(buf)
+ if err != nil {
+ t.Errorf("ReadFrameHeader(%+v) = %v", tt, err)
+ continue
+ }
+ if fh.Type != tt.typ || fh.Flags != tt.flags || fh.Length != tt.len || fh.StreamID != tt.streamID {
+ t.Errorf("ReadFrameHeader(%+v) = %+v; mismatch", tt, fh)
+ }
+ }
+
+}
+
+func TestWriteTooLargeFrame(t *testing.T) {
+ fr, _ := testFramer()
+ fr.startWrite(0, 1, 1)
+ fr.writeBytes(make([]byte, 1<<24))
+ err := fr.endWrite()
+ if err != ErrFrameTooLarge {
+ t.Errorf("endWrite = %v; want errFrameTooLarge", err)
+ }
+}
+
+func TestWriteGoAway(t *testing.T) {
+ const debug = "foo"
+ fr, buf := testFramer()
+ if err := fr.WriteGoAway(0x01020304, 0x05060708, []byte(debug)); err != nil {
+ t.Fatal(err)
+ }
+ const wantEnc = "\x00\x00\v\a\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08" + debug
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &GoAwayFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ Type: 0x7,
+ Flags: 0,
+ Length: uint32(4 + 4 + len(debug)),
+ StreamID: 0,
+ },
+ LastStreamID: 0x01020304,
+ ErrCode: 0x05060708,
+ debugData: []byte(debug),
+ }
+ if !reflect.DeepEqual(f, want) {
+ t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want)
+ }
+ if got := string(f.(*GoAwayFrame).DebugData()); got != debug {
+ t.Errorf("debug data = %q; want %q", got, debug)
+ }
+}
+
+func TestWritePushPromise(t *testing.T) {
+ pp := PushPromiseParam{
+ StreamID: 42,
+ PromiseID: 42,
+ BlockFragment: []byte("abc"),
+ }
+ fr, buf := testFramer()
+ if err := fr.WritePushPromise(pp); err != nil {
+ t.Fatal(err)
+ }
+ const wantEnc = "\x00\x00\x07\x05\x00\x00\x00\x00*\x00\x00\x00*abc"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, ok := f.(*PushPromiseFrame)
+ if !ok {
+ t.Fatalf("got %T; want *PushPromiseFrame", f)
+ }
+ want := &PushPromiseFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ Type: 0x5,
+ Flags: 0x0,
+ Length: 0x7,
+ StreamID: 42,
+ },
+ PromiseID: 42,
+ headerFragBuf: []byte("abc"),
+ }
+ if !reflect.DeepEqual(f, want) {
+ t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want)
+ }
+}
+
+// test checkFrameOrder and that HEADERS and CONTINUATION frames can't be intermingled.
+func TestReadFrameOrder(t *testing.T) {
+ head := func(f *Framer, id uint32, end bool) {
+ f.WriteHeaders(HeadersFrameParam{
+ StreamID: id,
+ BlockFragment: []byte("foo"), // unused, but non-empty
+ EndHeaders: end,
+ })
+ }
+ cont := func(f *Framer, id uint32, end bool) {
+ f.WriteContinuation(id, end, []byte("foo"))
+ }
+
+ tests := [...]struct {
+ name string
+ w func(*Framer)
+ atLeast int
+ wantErr string
+ }{
+ 0: {
+ w: func(f *Framer) {
+ head(f, 1, true)
+ },
+ },
+ 1: {
+ w: func(f *Framer) {
+ head(f, 1, true)
+ head(f, 2, true)
+ },
+ },
+ 2: {
+ wantErr: "got HEADERS for stream 2; expected CONTINUATION following HEADERS for stream 1",
+ w: func(f *Framer) {
+ head(f, 1, false)
+ head(f, 2, true)
+ },
+ },
+ 3: {
+ wantErr: "got DATA for stream 1; expected CONTINUATION following HEADERS for stream 1",
+ w: func(f *Framer) {
+ head(f, 1, false)
+ },
+ },
+ 4: {
+ w: func(f *Framer) {
+ head(f, 1, false)
+ cont(f, 1, true)
+ head(f, 2, true)
+ },
+ },
+ 5: {
+ wantErr: "got CONTINUATION for stream 2; expected stream 1",
+ w: func(f *Framer) {
+ head(f, 1, false)
+ cont(f, 2, true)
+ head(f, 2, true)
+ },
+ },
+ 6: {
+ wantErr: "unexpected CONTINUATION for stream 1",
+ w: func(f *Framer) {
+ cont(f, 1, true)
+ },
+ },
+ 7: {
+ wantErr: "unexpected CONTINUATION for stream 1",
+ w: func(f *Framer) {
+ cont(f, 1, false)
+ },
+ },
+ 8: {
+ wantErr: "HEADERS frame with stream ID 0",
+ w: func(f *Framer) {
+ head(f, 0, true)
+ },
+ },
+ 9: {
+ wantErr: "CONTINUATION frame with stream ID 0",
+ w: func(f *Framer) {
+ cont(f, 0, true)
+ },
+ },
+ 10: {
+ wantErr: "unexpected CONTINUATION for stream 1",
+ atLeast: 5,
+ w: func(f *Framer) {
+ head(f, 1, false)
+ cont(f, 1, false)
+ cont(f, 1, false)
+ cont(f, 1, false)
+ cont(f, 1, true)
+ cont(f, 1, false)
+ },
+ },
+ }
+ for i, tt := range tests {
+ buf := new(bytes.Buffer)
+ f := NewFramer(buf, buf)
+ f.AllowIllegalWrites = true
+ tt.w(f)
+ f.WriteData(1, true, nil) // to test transition away from last step
+
+ var err error
+ n := 0
+ var log bytes.Buffer
+ for {
+ var got Frame
+ got, err = f.ReadFrame()
+ fmt.Fprintf(&log, " read %v, %v\n", got, err)
+ if err != nil {
+ break
+ }
+ n++
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ ok := tt.wantErr == ""
+ if ok && err != nil {
+ t.Errorf("%d. after %d good frames, ReadFrame = %v; want success\n%s", i, n, err, log.Bytes())
+ continue
+ }
+ if !ok && err != ConnectionError(ErrCodeProtocol) {
+ t.Errorf("%d. after %d good frames, ReadFrame = %v; want ConnectionError(ErrCodeProtocol)\n%s", i, n, err, log.Bytes())
+ continue
+ }
+ if !((f.errDetail == nil && tt.wantErr == "") || (fmt.Sprint(f.errDetail) == tt.wantErr)) {
+ t.Errorf("%d. framer eror = %q; want %q\n%s", i, f.errDetail, tt.wantErr, log.Bytes())
+ }
+ if n < tt.atLeast {
+ t.Errorf("%d. framer only read %d frames; want at least %d\n%s", i, n, tt.atLeast, log.Bytes())
+ }
+ }
+}
+
+func TestMetaFrameHeader(t *testing.T) {
+ write := func(f *Framer, frags ...[]byte) {
+ for i, frag := range frags {
+ end := (i == len(frags)-1)
+ if i == 0 {
+ f.WriteHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: frag,
+ EndHeaders: end,
+ })
+ } else {
+ f.WriteContinuation(1, end, frag)
+ }
+ }
+ }
+
+ want := func(flags Flags, length uint32, pairs ...string) *MetaHeadersFrame {
+ mh := &MetaHeadersFrame{
+ HeadersFrame: &HeadersFrame{
+ FrameHeader: FrameHeader{
+ Type: FrameHeaders,
+ Flags: flags,
+ Length: length,
+ StreamID: 1,
+ },
+ },
+ Fields: []hpack.HeaderField(nil),
+ }
+ for len(pairs) > 0 {
+ mh.Fields = append(mh.Fields, hpack.HeaderField{
+ Name: pairs[0],
+ Value: pairs[1],
+ })
+ pairs = pairs[2:]
+ }
+ return mh
+ }
+ truncated := func(mh *MetaHeadersFrame) *MetaHeadersFrame {
+ mh.Truncated = true
+ return mh
+ }
+
+ const noFlags Flags = 0
+
+ oneKBString := strings.Repeat("a", 1<<10)
+
+ tests := [...]struct {
+ name string
+ w func(*Framer)
+ want interface{} // *MetaHeaderFrame or error
+ wantErrReason string
+ maxHeaderListSize uint32
+ }{
+ 0: {
+ name: "single_headers",
+ w: func(f *Framer) {
+ var he hpackEncoder
+ all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/")
+ write(f, all)
+ },
+ want: want(FlagHeadersEndHeaders, 2, ":method", "GET", ":path", "/"),
+ },
+ 1: {
+ name: "with_continuation",
+ w: func(f *Framer) {
+ var he hpackEncoder
+ all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", "bar")
+ write(f, all[:1], all[1:])
+ },
+ want: want(noFlags, 1, ":method", "GET", ":path", "/", "foo", "bar"),
+ },
+ 2: {
+ name: "with_two_continuation",
+ w: func(f *Framer) {
+ var he hpackEncoder
+ all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", "bar")
+ write(f, all[:2], all[2:4], all[4:])
+ },
+ want: want(noFlags, 2, ":method", "GET", ":path", "/", "foo", "bar"),
+ },
+ 3: {
+ name: "big_string_okay",
+ w: func(f *Framer) {
+ var he hpackEncoder
+ all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", oneKBString)
+ write(f, all[:2], all[2:])
+ },
+ want: want(noFlags, 2, ":method", "GET", ":path", "/", "foo", oneKBString),
+ },
+ 4: {
+ name: "big_string_error",
+ w: func(f *Framer) {
+ var he hpackEncoder
+ all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", oneKBString)
+ write(f, all[:2], all[2:])
+ },
+ maxHeaderListSize: (1 << 10) / 2,
+ want: ConnectionError(ErrCodeCompression),
+ },
+ 5: {
+ name: "max_header_list_truncated",
+ w: func(f *Framer) {
+ var he hpackEncoder
+ var pairs = []string{":method", "GET", ":path", "/"}
+ for i := 0; i < 100; i++ {
+ pairs = append(pairs, "foo", "bar")
+ }
+ all := he.encodeHeaderRaw(t, pairs...)
+ write(f, all[:2], all[2:])
+ },
+ maxHeaderListSize: (1 << 10) / 2,
+ want: truncated(want(noFlags, 2,
+ ":method", "GET",
+ ":path", "/",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar", // 11
+ )),
+ },
+ 6: {
+ name: "pseudo_order",
+ w: func(f *Framer) {
+ write(f, encodeHeaderRaw(t,
+ ":method", "GET",
+ "foo", "bar",
+ ":path", "/", // bogus
+ ))
+ },
+ want: streamError(1, ErrCodeProtocol),
+ wantErrReason: "pseudo header field after regular",
+ },
+ 7: {
+ name: "pseudo_unknown",
+ w: func(f *Framer) {
+ write(f, encodeHeaderRaw(t,
+ ":unknown", "foo", // bogus
+ "foo", "bar",
+ ))
+ },
+ want: streamError(1, ErrCodeProtocol),
+ wantErrReason: "invalid pseudo-header \":unknown\"",
+ },
+ 8: {
+ name: "pseudo_mix_request_response",
+ w: func(f *Framer) {
+ write(f, encodeHeaderRaw(t,
+ ":method", "GET",
+ ":status", "100",
+ ))
+ },
+ want: streamError(1, ErrCodeProtocol),
+ wantErrReason: "mix of request and response pseudo headers",
+ },
+ 9: {
+ name: "pseudo_dup",
+ w: func(f *Framer) {
+ write(f, encodeHeaderRaw(t,
+ ":method", "GET",
+ ":method", "POST",
+ ))
+ },
+ want: streamError(1, ErrCodeProtocol),
+ wantErrReason: "duplicate pseudo-header \":method\"",
+ },
+ 10: {
+ name: "trailer_okay_no_pseudo",
+ w: func(f *Framer) { write(f, encodeHeaderRaw(t, "foo", "bar")) },
+ want: want(FlagHeadersEndHeaders, 8, "foo", "bar"),
+ },
+ 11: {
+ name: "invalid_field_name",
+ w: func(f *Framer) { write(f, encodeHeaderRaw(t, "CapitalBad", "x")) },
+ want: streamError(1, ErrCodeProtocol),
+ wantErrReason: "invalid header field name \"CapitalBad\"",
+ },
+ 12: {
+ name: "invalid_field_value",
+ w: func(f *Framer) { write(f, encodeHeaderRaw(t, "key", "bad_null\x00")) },
+ want: streamError(1, ErrCodeProtocol),
+ wantErrReason: "invalid header field value \"bad_null\\x00\"",
+ },
+ }
+ for i, tt := range tests {
+ buf := new(bytes.Buffer)
+ f := NewFramer(buf, buf)
+ f.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ f.MaxHeaderListSize = tt.maxHeaderListSize
+ tt.w(f)
+
+ name := tt.name
+ if name == "" {
+ name = fmt.Sprintf("test index %d", i)
+ }
+
+ var got interface{}
+ var err error
+ got, err = f.ReadFrame()
+ if err != nil {
+ got = err
+
+ // Ignore the StreamError.Cause field, if it matches the wantErrReason.
+ // The test table above predates the Cause field.
+ if se, ok := err.(StreamError); ok && se.Cause != nil && se.Cause.Error() == tt.wantErrReason {
+ se.Cause = nil
+ got = se
+ }
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ if mhg, ok := got.(*MetaHeadersFrame); ok {
+ if mhw, ok := tt.want.(*MetaHeadersFrame); ok {
+ hg := mhg.HeadersFrame
+ hw := mhw.HeadersFrame
+ if hg != nil && hw != nil && !reflect.DeepEqual(*hg, *hw) {
+ t.Errorf("%s: headers differ:\n got: %+v\nwant: %+v\n", name, *hg, *hw)
+ }
+ }
+ }
+ str := func(v interface{}) string {
+ if _, ok := v.(error); ok {
+ return fmt.Sprintf("error %v", v)
+ } else {
+ return fmt.Sprintf("value %#v", v)
+ }
+ }
+ t.Errorf("%s:\n got: %v\nwant: %s", name, str(got), str(tt.want))
+ }
+ if tt.wantErrReason != "" && tt.wantErrReason != fmt.Sprint(f.errDetail) {
+ t.Errorf("%s: got error reason %q; want %q", name, f.errDetail, tt.wantErrReason)
+ }
+ }
+}
+
+func TestSetReuseFrames(t *testing.T) {
+ fr, buf := testFramer()
+ fr.SetReuseFrames()
+
+ // Check that DataFrames are reused. Note that
+ // SetReuseFrames only currently implements reuse of DataFrames.
+ firstDf := readAndVerifyDataFrame("ABC", 3, fr, buf, t)
+
+ for i := 0; i < 10; i++ {
+ df := readAndVerifyDataFrame("XYZ", 3, fr, buf, t)
+ if df != firstDf {
+ t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf)
+ }
+ }
+
+ for i := 0; i < 10; i++ {
+ df := readAndVerifyDataFrame("", 0, fr, buf, t)
+ if df != firstDf {
+ t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf)
+ }
+ }
+
+ for i := 0; i < 10; i++ {
+ df := readAndVerifyDataFrame("HHH", 3, fr, buf, t)
+ if df != firstDf {
+ t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf)
+ }
+ }
+}
+
+func TestSetReuseFramesMoreThanOnce(t *testing.T) {
+ fr, buf := testFramer()
+ fr.SetReuseFrames()
+
+ firstDf := readAndVerifyDataFrame("ABC", 3, fr, buf, t)
+ fr.SetReuseFrames()
+
+ for i := 0; i < 10; i++ {
+ df := readAndVerifyDataFrame("XYZ", 3, fr, buf, t)
+ // SetReuseFrames should be idempotent
+ fr.SetReuseFrames()
+ if df != firstDf {
+ t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf)
+ }
+ }
+}
+
+func TestNoSetReuseFrames(t *testing.T) {
+ fr, buf := testFramer()
+ const numNewDataFrames = 10
+ dfSoFar := make([]interface{}, numNewDataFrames)
+
+ // Check that DataFrames are not reused if SetReuseFrames wasn't called.
+ // SetReuseFrames only currently implements reuse of DataFrames.
+ for i := 0; i < numNewDataFrames; i++ {
+ df := readAndVerifyDataFrame("XYZ", 3, fr, buf, t)
+ for _, item := range dfSoFar {
+ if df == item {
+ t.Errorf("Expected Framer to return new DataFrames since SetNoReuseFrames not set.")
+ }
+ }
+ dfSoFar[i] = df
+ }
+}
+
+func readAndVerifyDataFrame(data string, length byte, fr *Framer, buf *bytes.Buffer, t *testing.T) *DataFrame {
+ var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4
+ fr.WriteData(streamID, true, []byte(data))
+ wantEnc := "\x00\x00" + string(length) + "\x00\x01\x01\x02\x03\x04" + data
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ df, ok := f.(*DataFrame)
+ if !ok {
+ t.Fatalf("got %T; want *DataFrame", f)
+ }
+ if !bytes.Equal(df.Data(), []byte(data)) {
+ t.Errorf("got %q; want %q", df.Data(), []byte(data))
+ }
+ if f.Header().Flags&1 == 0 {
+ t.Errorf("didn't see END_STREAM flag")
+ }
+ return df
+}
+
+func encodeHeaderRaw(t *testing.T, pairs ...string) []byte {
+ var he hpackEncoder
+ return he.encodeHeaderRaw(t, pairs...)
+}
diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go
new file mode 100644
index 0000000..00b2e9e
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go16.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+ "net/http"
+ "time"
+)
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+ return t1.ExpectContinueTimeout
+}
diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go
new file mode 100644
index 0000000..47b7fae
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go17.go
@@ -0,0 +1,106 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package http2
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "time"
+)
+
+type contextContext interface {
+ context.Context
+}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
+ ctx, cancel = context.WithCancel(context.Background())
+ ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
+ if hs := opts.baseConfig(); hs != nil {
+ ctx = context.WithValue(ctx, http.ServerContextKey, hs)
+ }
+ return
+}
+
+func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
+ return context.WithCancel(ctx)
+}
+
+func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
+ return req.WithContext(ctx)
+}
+
+type clientTrace httptrace.ClientTrace
+
+func reqContext(r *http.Request) context.Context { return r.Context() }
+
+func (t *Transport) idleConnTimeout() time.Duration {
+ if t.t1 != nil {
+ return t.t1.IdleConnTimeout
+ }
+ return 0
+}
+
+func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
+
+func traceGotConn(req *http.Request, cc *ClientConn) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GotConn == nil {
+ return
+ }
+ ci := httptrace.GotConnInfo{Conn: cc.tconn}
+ cc.mu.Lock()
+ ci.Reused = cc.nextStreamID > 1
+ ci.WasIdle = len(cc.streams) == 0 && ci.Reused
+ if ci.WasIdle && !cc.lastActive.IsZero() {
+ ci.IdleTime = time.Now().Sub(cc.lastActive)
+ }
+ cc.mu.Unlock()
+
+ trace.GotConn(ci)
+}
+
+func traceWroteHeaders(trace *clientTrace) {
+ if trace != nil && trace.WroteHeaders != nil {
+ trace.WroteHeaders()
+ }
+}
+
+func traceGot100Continue(trace *clientTrace) {
+ if trace != nil && trace.Got100Continue != nil {
+ trace.Got100Continue()
+ }
+}
+
+func traceWait100Continue(trace *clientTrace) {
+ if trace != nil && trace.Wait100Continue != nil {
+ trace.Wait100Continue()
+ }
+}
+
+func traceWroteRequest(trace *clientTrace, err error) {
+ if trace != nil && trace.WroteRequest != nil {
+ trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+ }
+}
+
+func traceFirstResponseByte(trace *clientTrace) {
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ trace.GotFirstResponseByte()
+ }
+}
+
+func requestTrace(req *http.Request) *clientTrace {
+ trace := httptrace.ContextClientTrace(req.Context())
+ return (*clientTrace)(trace)
+}
+
+// Ping sends a PING frame to the server and waits for the ack.
+func (cc *ClientConn) Ping(ctx context.Context) error {
+ return cc.ping(ctx)
+}
diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go
new file mode 100644
index 0000000..b4c52ec
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go17_not18.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7,!go1.8
+
+package http2
+
+import "crypto/tls"
+
// cloneTLSConfig returns a shallow field-by-field copy of c.
// temporary copy of Go 1.7's private tls.Config.clone:
// Go 1.7 has no exported tls.Config.Clone, so every field known at that
// release must be listed here explicitly; do not prune this list.
func cloneTLSConfig(c *tls.Config) *tls.Config {
	return &tls.Config{
		Rand:                        c.Rand,
		Time:                        c.Time,
		Certificates:                c.Certificates,
		NameToCertificate:           c.NameToCertificate,
		GetCertificate:              c.GetCertificate,
		RootCAs:                     c.RootCAs,
		NextProtos:                  c.NextProtos,
		ServerName:                  c.ServerName,
		ClientAuth:                  c.ClientAuth,
		ClientCAs:                   c.ClientCAs,
		InsecureSkipVerify:          c.InsecureSkipVerify,
		CipherSuites:                c.CipherSuites,
		PreferServerCipherSuites:    c.PreferServerCipherSuites,
		SessionTicketsDisabled:      c.SessionTicketsDisabled,
		SessionTicketKey:            c.SessionTicketKey,
		ClientSessionCache:          c.ClientSessionCache,
		MinVersion:                  c.MinVersion,
		MaxVersion:                  c.MaxVersion,
		CurvePreferences:            c.CurvePreferences,
		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
		Renegotiation:               c.Renegotiation,
	}
}
diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go
new file mode 100644
index 0000000..4f30d22
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go18.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package http2
+
+import (
+ "crypto/tls"
+ "io"
+ "net/http"
+)
+
// cloneTLSConfig returns a shallow clone of c using tls.Config.Clone
// (exported in Go 1.8). GetClientCertificate is re-copied explicitly
// because some releases' Clone omitted it; see golang.org/issue/19264.
func cloneTLSConfig(c *tls.Config) *tls.Config {
	c2 := c.Clone()
	c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
	return c2
}
+
+var _ http.Pusher = (*responseWriter)(nil)
+
+// Push implements http.Pusher.
+func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
+ internalOpts := pushOptions{}
+ if opts != nil {
+ internalOpts.Method = opts.Method
+ internalOpts.Header = opts.Header
+ }
+ return w.push(target, internalOpts)
+}
+
+func configureServer18(h1 *http.Server, h2 *Server) error {
+ if h2.IdleTimeout == 0 {
+ if h1.IdleTimeout != 0 {
+ h2.IdleTimeout = h1.IdleTimeout
+ } else {
+ h2.IdleTimeout = h1.ReadTimeout
+ }
+ }
+ return nil
+}
+
// shouldLogPanic reports whether a recovered handler panic value should
// be logged. A nil value (no panic) and the http.ErrAbortHandler
// sentinel (deliberate silent abort, Go 1.8+) are both suppressed.
func shouldLogPanic(panicValue interface{}) bool {
	if panicValue == nil {
		return false
	}
	return panicValue != http.ErrAbortHandler
}
+
// reqGetBody returns the request's GetBody func (Go 1.8+), used to
// replay the body when a request is retried; may be nil.
func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
	return req.GetBody
}

// reqBodyIsNoBody reports whether body is the http.NoBody sentinel (Go 1.8+).
func reqBodyIsNoBody(body io.ReadCloser) bool {
	return body == http.NoBody
}

func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only
diff --git a/vendor/golang.org/x/net/http2/go18_test.go b/vendor/golang.org/x/net/http2/go18_test.go
new file mode 100644
index 0000000..30e3b03
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go18_test.go
@@ -0,0 +1,79 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package http2
+
+import (
+ "crypto/tls"
+ "net/http"
+ "testing"
+ "time"
+)
+
// Tests that http2.Server.IdleTimeout is initialized from
// http.Server.{Idle,Read}Timeout. http.Server.IdleTimeout was
// added in Go 1.8.
func TestConfigureServerIdleTimeout_Go18(t *testing.T) {
	const timeout = 5 * time.Second
	const notThisOne = 1 * time.Second

	// With a zero http2.Server, verify that it copies IdleTimeout:
	{
		s1 := &http.Server{
			IdleTimeout: timeout,
			ReadTimeout: notThisOne, // IdleTimeout takes precedence when both are set
		}
		s2 := &Server{}
		if err := ConfigureServer(s1, s2); err != nil {
			t.Fatal(err)
		}
		if s2.IdleTimeout != timeout {
			t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout)
		}
	}

	// And that it falls back to ReadTimeout:
	{
		s1 := &http.Server{
			ReadTimeout: timeout,
		}
		s2 := &Server{}
		if err := ConfigureServer(s1, s2); err != nil {
			t.Fatal(err)
		}
		if s2.IdleTimeout != timeout {
			t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout)
		}
	}

	// Verify that s1's IdleTimeout doesn't overwrite an existing setting:
	{
		s1 := &http.Server{
			IdleTimeout: notThisOne,
		}
		s2 := &Server{
			IdleTimeout: timeout,
		}
		if err := ConfigureServer(s1, s2); err != nil {
			t.Fatal(err)
		}
		if s2.IdleTimeout != timeout {
			t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout)
		}
	}
}
+
+func TestCertClone(t *testing.T) {
+ c := &tls.Config{
+ GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ panic("shouldn't be called")
+ },
+ }
+ c2 := cloneTLSConfig(c)
+ if c2.GetClientCertificate == nil {
+ t.Error("GetClientCertificate is nil")
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/go19.go b/vendor/golang.org/x/net/http2/go19.go
new file mode 100644
index 0000000..38124ba
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go19.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.9
+
+package http2
+
+import (
+ "net/http"
+)
+
// configureServer19 hooks the http2 graceful-shutdown path into the
// Go 1.9 http.Server.RegisterOnShutdown mechanism, so Server.Shutdown
// on the HTTP/1 server also drains HTTP/2 connections.
func configureServer19(s *http.Server, conf *Server) error {
	s.RegisterOnShutdown(conf.state.startGracefulShutdown)
	return nil
}
diff --git a/vendor/golang.org/x/net/http2/go19_test.go b/vendor/golang.org/x/net/http2/go19_test.go
new file mode 100644
index 0000000..1675d24
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go19_test.go
@@ -0,0 +1,60 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.9
+
+package http2
+
+import (
+ "context"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+)
+
// TestServerGracefulShutdown verifies that calling Shutdown while a
// handler is running sends a GOAWAY with ErrCodeNo / LastStreamID=1,
// lets the in-flight response complete, then closes the connection.
func TestServerGracefulShutdown(t *testing.T) {
	var st *serverTester
	handlerDone := make(chan struct{})
	st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		defer close(handlerDone)
		// Trigger shutdown from inside the handler so stream 1 is active.
		go st.ts.Config.Shutdown(context.Background())

		ga := st.wantGoAway()
		if ga.ErrCode != ErrCodeNo {
			t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode)
		}
		if ga.LastStreamID != 1 {
			t.Errorf("GOAWAY LastStreamID = %v; want 1", ga.LastStreamID)
		}

		w.Header().Set("x-foo", "bar")
	})
	defer st.Close()

	st.greet()
	st.bodylessReq1()

	select {
	case <-handlerDone:
	case <-time.After(5 * time.Second):
		t.Fatalf("server did not shutdown?")
	}
	// The in-flight response must still be delivered after GOAWAY.
	hf := st.wantHeaders()
	goth := st.decodeHeader(hf.HeaderBlockFragment())
	wanth := [][2]string{
		{":status", "200"},
		{"x-foo", "bar"},
		{"content-type", "text/plain; charset=utf-8"},
		{"content-length", "0"},
	}
	if !reflect.DeepEqual(goth, wanth) {
		t.Errorf("Got headers %v; want %v", goth, wanth)
	}

	// After shutdown completes the conn should be closed (EOF on read).
	n, err := st.cc.Read([]byte{0})
	if n != 0 || err == nil {
		t.Errorf("Read = %v, %v; want 0, non-nil", n, err)
	}
}
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
new file mode 100644
index 0000000..9933c9f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -0,0 +1,170 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Defensive debug-only utility to track that functions run on the
+// goroutine that they're supposed to.
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+)
+
// DebugGoroutines enables goroutine-ownership assertions when the
// DEBUG_HTTP2_GOROUTINES=1 environment variable is set at process start.
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"

// goroutineLock records the ID of the goroutine that created it (0 when
// debugging is off, making all checks no-ops).
type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+ if !DebugGoroutines {
+ return 0
+ }
+ return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() != uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+func (g goroutineLock) checkNotOn() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() == uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
// goroutineSpace is the prefix of the first line of a runtime.Stack dump.
var goroutineSpace = []byte("goroutine ")

// curGoroutineID extracts the current goroutine's numeric ID by parsing
// the first line of runtime.Stack output ("goroutine N [...]"). It is
// debug-only and panics if the stack header does not match that shape.
func curGoroutineID() uint64 {
	bp := littleBuf.Get().(*[]byte)
	defer littleBuf.Put(bp)
	b := *bp
	b = b[:runtime.Stack(b, false)]
	// Parse the 4707 out of "goroutine 4707 ["
	b = bytes.TrimPrefix(b, goroutineSpace)
	i := bytes.IndexByte(b, ' ')
	if i < 0 {
		panic(fmt.Sprintf("No space found in %q", b))
	}
	b = b[:i]
	n, err := parseUintBytes(b, 10, 64)
	if err != nil {
		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
	}
	return n
}

// littleBuf pools the small scratch buffers used by curGoroutineID;
// 64 bytes is enough for the "goroutine N [" header line.
var littleBuf = sync.Pool{
	New: func() interface{} {
		buf := make([]byte, 64)
		return &buf
	},
}
+
// parseUintBytes is like strconv.ParseUint, but using a []byte.
// It is a vendored copy kept to avoid a []byte -> string allocation on
// the curGoroutineID hot path; the goto-based error handling mirrors
// the strconv original and should not be restructured.
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
	var cutoff, maxVal uint64

	if bitSize == 0 {
		bitSize = int(strconv.IntSize)
	}

	s0 := s
	switch {
	case len(s) < 1:
		err = strconv.ErrSyntax
		goto Error

	case 2 <= base && base <= 36:
		// valid base; nothing to do

	case base == 0:
		// Look for octal, hex prefix.
		switch {
		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
			base = 16
			s = s[2:]
			if len(s) < 1 {
				err = strconv.ErrSyntax
				goto Error
			}
		case s[0] == '0':
			base = 8
		default:
			base = 10
		}

	default:
		err = errors.New("invalid base " + strconv.Itoa(base))
		goto Error
	}

	n = 0
	cutoff = cutoff64(base)
	maxVal = 1<<uint(bitSize) - 1

	for i := 0; i < len(s); i++ {
		var v byte
		d := s[i]
		switch {
		case '0' <= d && d <= '9':
			v = d - '0'
		case 'a' <= d && d <= 'z':
			v = d - 'a' + 10
		case 'A' <= d && d <= 'Z':
			v = d - 'A' + 10
		default:
			n = 0
			err = strconv.ErrSyntax
			goto Error
		}
		if int(v) >= base {
			n = 0
			err = strconv.ErrSyntax
			goto Error
		}

		if n >= cutoff {
			// n*base overflows
			n = 1<<64 - 1
			err = strconv.ErrRange
			goto Error
		}
		n *= uint64(base)

		n1 := n + uint64(v)
		if n1 < n || n1 > maxVal {
			// n+v overflows
			n = 1<<64 - 1
			err = strconv.ErrRange
			goto Error
		}
		n = n1
	}

	return n, nil

Error:
	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
}
+
// cutoff64 returns the first number n such that n*base >= 1<<64, i.e.
// the smallest accumulator value at which another multiply step would
// overflow a uint64. Bases below 2 are invalid and yield 0.
func cutoff64(base int) uint64 {
	if base < 2 {
		return 0
	}
	const maxUint64 = 1<<64 - 1
	return maxUint64/uint64(base) + 1
}
diff --git a/vendor/golang.org/x/net/http2/gotrack_test.go b/vendor/golang.org/x/net/http2/gotrack_test.go
new file mode 100644
index 0000000..06db612
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/gotrack_test.go
@@ -0,0 +1,33 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func TestGoroutineLock(t *testing.T) {
+ oldDebug := DebugGoroutines
+ DebugGoroutines = true
+ defer func() { DebugGoroutines = oldDebug }()
+
+ g := newGoroutineLock()
+ g.check()
+
+ sawPanic := make(chan interface{})
+ go func() {
+ defer func() { sawPanic <- recover() }()
+ g.check() // should panic
+ }()
+ e := <-sawPanic
+ if e == nil {
+ t.Fatal("did not see panic from check in other goroutine")
+ }
+ if !strings.Contains(fmt.Sprint(e), "wrong goroutine") {
+ t.Errorf("expected on see panic about running on the wrong goroutine; got %v", e)
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/h2demo/.gitignore b/vendor/golang.org/x/net/http2/h2demo/.gitignore
new file mode 100644
index 0000000..0de86dd
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/.gitignore
@@ -0,0 +1,5 @@
+h2demo
+h2demo.linux
+client-id.dat
+client-secret.dat
+token.dat
diff --git a/vendor/golang.org/x/net/http2/h2demo/Makefile b/vendor/golang.org/x/net/http2/h2demo/Makefile
new file mode 100644
index 0000000..f5c31ef
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/Makefile
@@ -0,0 +1,8 @@
+h2demo.linux: h2demo.go
+ GOOS=linux go build --tags=h2demo -o h2demo.linux .
+
+FORCE:
+
+upload: FORCE
+ go install golang.org/x/build/cmd/upload
+ upload --verbose --osarch=linux-amd64 --tags=h2demo --file=go:golang.org/x/net/http2/h2demo --public http2-demo-server-tls/h2demo
diff --git a/vendor/golang.org/x/net/http2/h2demo/README b/vendor/golang.org/x/net/http2/h2demo/README
new file mode 100644
index 0000000..212a96f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/README
@@ -0,0 +1,16 @@
+
+Client:
+ -- Firefox nightly with about:config network.http.spdy.enabled.http2draft set true
+ -- Chrome: go to chrome://flags/#enable-spdy4, save and restart (button at bottom)
+
+Make CA:
+$ openssl genrsa -out rootCA.key 2048
+$ openssl req -x509 -new -nodes -key rootCA.key -days 1024 -out rootCA.pem
+... install that to Firefox
+
+Make cert:
+$ openssl genrsa -out server.key 2048
+$ openssl req -new -key server.key -out server.csr
+$ openssl x509 -req -in server.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out server.crt -days 500
+
+
diff --git a/vendor/golang.org/x/net/http2/h2demo/h2demo.go b/vendor/golang.org/x/net/http2/h2demo/h2demo.go
new file mode 100644
index 0000000..9853107
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/h2demo.go
@@ -0,0 +1,538 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build h2demo
+
+package main
+
+import (
+ "bytes"
+ "crypto/tls"
+ "flag"
+ "fmt"
+ "hash/crc32"
+ "image"
+ "image/jpeg"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "path"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "go4.org/syncutil/singleflight"
+ "golang.org/x/crypto/acme/autocert"
+ "golang.org/x/net/http2"
+)
+
// Command-line flags controlling the demo server's listen addresses and
// the host names used when generating links back to itself.
var (
	prod = flag.Bool("prod", false, "Whether to configure itself to be the production http2.golang.org server.")

	httpsAddr = flag.String("https_addr", "localhost:4430", "TLS address to listen on ('host:port' or ':port'). Required.")
	httpAddr  = flag.String("http_addr", "", "Plain HTTP address to listen on ('host:port', or ':port'). Empty means no HTTP.")

	hostHTTP = flag.String("http_host", "", "Optional host or host:port to use for http:// links to this service. By default, this is implied from -http_addr.")
	// Usage text fixed: previously said "http:// links" (copy-paste from -http_host).
	hostHTTPS = flag.String("https_host", "", "Optional host or host:port to use for https:// links to this service. By default, this is implied from -https_addr.")
)
+
// homeOldHTTP serves the landing page shown to plain-HTTP/1.x clients,
// explaining how to connect with an HTTP/2-capable browser instead.
func homeOldHTTP(w http.ResponseWriter, r *http.Request) {
	io.WriteString(w, `<html>
<body>
<h1>Go + HTTP/2</h1>
<p>Welcome to <a href="https://golang.org/">the Go language</a>'s <a href="https://http2.github.io/">HTTP/2</a> demo & interop server.</p>
<p>Unfortunately, you're <b>not</b> using HTTP/2 right now. To do so:</p>
<ul>
 <li>Use Firefox Nightly or go to <b>about:config</b> and enable "network.http.spdy.enabled.http2draft"</li>
 <li>Use Google Chrome Canary and/or go to <b>chrome://flags/#enable-spdy4</b> to <i>Enable SPDY/4</i> (Chrome's name for HTTP/2)</li>
</ul>
<p>See code & instructions for connecting at <a href="https://github.com/golang/net/tree/master/http2">https://github.com/golang/net/tree/master/http2</a>.</p>

</body></html>`)
}

// home serves the HTTP/2 landing page at "/", listing the testing
// handlers this server exposes. Any other path 404s.
func home(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path != "/" {
		http.NotFound(w, r)
		return
	}
	io.WriteString(w, `<html>
<body>
<h1>Go + HTTP/2</h1>

<p>Welcome to <a href="https://golang.org/">the Go language</a>'s <a
href="https://http2.github.io/">HTTP/2</a> demo & interop server.</p>

<p>Congratulations, <b>you're using HTTP/2 right now</b>.</p>

<p>This server exists for others in the HTTP/2 community to test their HTTP/2 client implementations and point out flaws in our server.</p>

<p>
The code is at <a href="https://golang.org/x/net/http2">golang.org/x/net/http2</a> and
is used transparently by the Go standard library from Go 1.6 and later.
</p>

<p>Contact info: <i>bradfitz@golang.org</i>, or <a
href="https://golang.org/s/http2bug">file a bug</a>.</p>

<h2>Handlers for testing</h2>
<ul>
 <li>GET <a href="/reqinfo">/reqinfo</a> to dump the request + headers received</li>
 <li>GET <a href="/clockstream">/clockstream</a> streams the current time every second</li>
 <li>GET <a href="/gophertiles">/gophertiles</a> to see a page with a bunch of images</li>
 <li>GET <a href="/serverpush">/serverpush</a> to see a page with server push</li>
 <li>GET <a href="/file/gopher.png">/file/gopher.png</a> for a small file (does If-Modified-Since, Content-Range, etc)</li>
 <li>GET <a href="/file/go.src.tar.gz">/file/go.src.tar.gz</a> for a larger file (~10 MB)</li>
 <li>GET <a href="/redirect">/redirect</a> to redirect back to / (this page)</li>
 <li>GET <a href="/goroutines">/goroutines</a> to see all active goroutines in this server</li>
 <li>PUT something to <a href="/crc32">/crc32</a> to get a count of number of bytes and its CRC-32</li>
 <li>PUT something to <a href="/ECHO">/ECHO</a> and it will be streamed back to you capitalized</li>
</ul>

</body></html>`)
}

// reqInfoHandler dumps the request's method, URL, TLS state, and
// headers back to the client as plain text.
func reqInfoHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/plain")
	fmt.Fprintf(w, "Method: %s\n", r.Method)
	fmt.Fprintf(w, "Protocol: %s\n", r.Proto)
	fmt.Fprintf(w, "Host: %s\n", r.Host)
	fmt.Fprintf(w, "RemoteAddr: %s\n", r.RemoteAddr)
	fmt.Fprintf(w, "RequestURI: %q\n", r.RequestURI)
	fmt.Fprintf(w, "URL: %#v\n", r.URL)
	fmt.Fprintf(w, "Body.ContentLength: %d (-1 means unknown)\n", r.ContentLength)
	fmt.Fprintf(w, "Close: %v (relevant for HTTP/1 only)\n", r.Close)
	fmt.Fprintf(w, "TLS: %#v\n", r.TLS)
	fmt.Fprintf(w, "\nHeaders:\n")
	r.Header.Write(w)
}
+
+func crcHandler(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "PUT" {
+ http.Error(w, "PUT required.", 400)
+ return
+ }
+ crc := crc32.NewIEEE()
+ n, err := io.Copy(crc, r.Body)
+ if err == nil {
+ w.Header().Set("Content-Type", "text/plain")
+ fmt.Fprintf(w, "bytes=%d, CRC32=%x", n, crc.Sum(nil))
+ }
+}
+
// capitalizeReader wraps an io.Reader and upper-cases ASCII letters
// a-z in place as data streams through; all other bytes pass unchanged.
type capitalizeReader struct {
	r io.Reader
}

// Read fills p from the wrapped reader, rewrites lowercase ASCII, and
// returns the underlying reader's count and error untouched.
func (cr capitalizeReader) Read(p []byte) (n int, err error) {
	n, err = cr.r.Read(p)
	for i := 0; i < n; i++ {
		if c := p[i]; 'a' <= c && c <= 'z' {
			p[i] = c - 'a' + 'A'
		}
	}
	return n, err
}
+
// flushWriter wraps an io.Writer and flushes after every Write when the
// underlying writer is an http.Flusher, so streamed responses are not
// buffered.
type flushWriter struct {
	w io.Writer
}

// Write forwards to the wrapped writer, then flushes if possible.
func (fw flushWriter) Write(p []byte) (n int, err error) {
	n, err = fw.w.Write(p)
	if f, ok := fw.w.(http.Flusher); ok {
		f.Flush()
	}
	return
}
+
+func echoCapitalHandler(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "PUT" {
+ http.Error(w, "PUT required.", 400)
+ return
+ }
+ io.Copy(flushWriter{w}, capitalizeReader{r.Body})
+}
+
var (
	fsGrp   singleflight.Group // collapses concurrent first-fetches of the same URL
	fsMu    sync.Mutex         // guards fsCache
	fsCache = map[string]http.Handler{}
)

// fileServer returns a file-serving handler that proxies URL.
// It lazily fetches URL on the first access and caches its contents forever.
// latency, if non-zero, is an artificial delay added to every request.
func fileServer(url string, latency time.Duration) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if latency > 0 {
			time.Sleep(latency)
		}
		// singleflight ensures only one goroutine does the upstream fetch.
		hi, err := fsGrp.Do(url, func() (interface{}, error) {
			fsMu.Lock()
			if h, ok := fsCache[url]; ok {
				fsMu.Unlock()
				return h, nil
			}
			fsMu.Unlock()

			res, err := http.Get(url)
			if err != nil {
				return nil, err
			}
			defer res.Body.Close()
			slurp, err := ioutil.ReadAll(res.Body)
			if err != nil {
				return nil, err
			}

			// Serve the cached bytes with the fetch time as mod time so
			// If-Modified-Since and range requests work.
			modTime := time.Now()
			var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				http.ServeContent(w, r, path.Base(url), modTime, bytes.NewReader(slurp))
			})
			fsMu.Lock()
			fsCache[url] = h
			fsMu.Unlock()
			return h, nil
		})
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		hi.(http.Handler).ServeHTTP(w, r)
	})
}

// clockStreamHandler streams the current time once per second until the
// client disconnects (detected via http.CloseNotifier).
func clockStreamHandler(w http.ResponseWriter, r *http.Request) {
	clientGone := w.(http.CloseNotifier).CloseNotify()
	w.Header().Set("Content-Type", "text/plain")
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	fmt.Fprintf(w, "# ~1KB of junk to force browsers to start rendering immediately: \n")
	io.WriteString(w, strings.Repeat("# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n", 13))

	for {
		fmt.Fprintf(w, "%v\n", time.Now())
		w.(http.Flusher).Flush()
		select {
		case <-ticker.C:
		case <-clientGone:
			log.Printf("Client %v disconnected from the clock", r.RemoteAddr)
			return
		}
	}
}
+
// registerHandlers installs all demo routes. The outer mux dispatches
// by protocol: gophertiles and serverpush are open to HTTP/1.x and
// HTTP/2; everything else redirects non-TLS clients to HTTPS and shows
// HTTP/1.x clients the "old HTTP" landing page; HTTP/2 requests fall
// through to mux2.
func registerHandlers() {
	tiles := newGopherTilesHandler()
	push := newPushHandler()

	mux2 := http.NewServeMux()
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		switch {
		case r.URL.Path == "/gophertiles":
			tiles.ServeHTTP(w, r) // allow HTTP/2 + HTTP/1.x
			return
		case strings.HasPrefix(r.URL.Path, "/serverpush"):
			push.ServeHTTP(w, r) // allow HTTP/2 + HTTP/1.x
			return
		case r.TLS == nil: // do not allow HTTP/1.x for anything else
			http.Redirect(w, r, "https://"+httpsHost()+"/", http.StatusFound)
			return
		}
		if r.ProtoMajor == 1 {
			if r.URL.Path == "/reqinfo" {
				reqInfoHandler(w, r)
				return
			}
			homeOldHTTP(w, r)
			return
		}
		mux2.ServeHTTP(w, r)
	})
	mux2.HandleFunc("/", home)
	mux2.Handle("/file/gopher.png", fileServer("https://golang.org/doc/gopher/frontpage.png", 0))
	mux2.Handle("/file/go.src.tar.gz", fileServer("https://storage.googleapis.com/golang/go1.4.1.src.tar.gz", 0))
	mux2.HandleFunc("/reqinfo", reqInfoHandler)
	mux2.HandleFunc("/crc32", crcHandler)
	mux2.HandleFunc("/ECHO", echoCapitalHandler)
	mux2.HandleFunc("/clockstream", clockStreamHandler)
	mux2.Handle("/gophertiles", tiles)
	mux2.HandleFunc("/redirect", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, "/", http.StatusFound)
	})
	// /goroutines dumps all stacks with local home-directory paths scrubbed.
	stripHomedir := regexp.MustCompile(`/(Users|home)/\w+`)
	mux2.HandleFunc("/goroutines", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		buf := make([]byte, 2<<20)
		w.Write(stripHomedir.ReplaceAll(buf[:runtime.Stack(buf, true)], nil))
	})
}

// pushResources maps server-push asset paths to lazily-cached proxies;
// each is served with 100ms of artificial latency to make the benefit
// of push visible.
var pushResources = map[string]http.Handler{
	"/serverpush/static/jquery.min.js": fileServer("https://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js", 100*time.Millisecond),
	"/serverpush/static/godocs.js":     fileServer("https://golang.org/lib/godoc/godocs.js", 100*time.Millisecond),
	"/serverpush/static/playground.js": fileServer("https://golang.org/lib/godoc/playground.js", 100*time.Millisecond),
	"/serverpush/static/style.css":     fileServer("https://golang.org/lib/godoc/style.css", 100*time.Millisecond),
}

// newPushHandler serves the /serverpush demo page, pushing the static
// assets via http.Pusher (when available) before rendering the page.
func newPushHandler() http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Requests for the assets themselves go straight to the proxies.
		for path, handler := range pushResources {
			if r.URL.Path == path {
				handler.ServeHTTP(w, r)
				return
			}
		}

		// cacheBust defeats browser caching so push is always exercised.
		cacheBust := time.Now().UnixNano()
		if pusher, ok := w.(http.Pusher); ok {
			for path := range pushResources {
				url := fmt.Sprintf("%s?%d", path, cacheBust)
				if err := pusher.Push(url, nil); err != nil {
					log.Printf("Failed to push %v: %v", path, err)
				}
			}
		}
		time.Sleep(100 * time.Millisecond) // fake network latency + parsing time
		if err := pushTmpl.Execute(w, struct {
			CacheBust int64
			HTTPSHost string
			HTTPHost  string
		}{
			CacheBust: cacheBust,
			HTTPSHost: httpsHost(),
			HTTPHost:  httpHost(),
		}); err != nil {
			log.Printf("Executing server push template: %v", err)
		}
	})
}
+
// newGopherTilesHandler fetches the gopher image once at startup, slices
// it into 32x32 JPEG tiles held in memory, and returns a handler that
// either serves one tile (?x=&y=) or renders the comparison page linking
// the HTTP/1 and HTTP/2 variants at several artificial latencies.
// NOTE: it log.Fatals at startup if the image cannot be fetched/decoded.
func newGopherTilesHandler() http.Handler {
	const gopherURL = "https://blog.golang.org/go-programming-language-turns-two_gophers.jpg"
	res, err := http.Get(gopherURL)
	if err != nil {
		log.Fatal(err)
	}
	if res.StatusCode != 200 {
		log.Fatalf("Error fetching %s: %v", gopherURL, res.Status)
	}
	slurp, err := ioutil.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		log.Fatal(err)
	}
	im, err := jpeg.Decode(bytes.NewReader(slurp))
	if err != nil {
		if len(slurp) > 1024 {
			slurp = slurp[:1024]
		}
		log.Fatalf("Failed to decode gopher image: %v (got %q)", err, slurp)
	}

	type subImager interface {
		SubImage(image.Rectangle) image.Image
	}
	const tileSize = 32
	xt := im.Bounds().Max.X / tileSize
	yt := im.Bounds().Max.Y / tileSize
	var tile [][][]byte // y -> x -> jpeg bytes
	for yi := 0; yi < yt; yi++ {
		var row [][]byte
		for xi := 0; xi < xt; xi++ {
			si := im.(subImager).SubImage(image.Rectangle{
				Min: image.Point{xi * tileSize, yi * tileSize},
				Max: image.Point{(xi + 1) * tileSize, (yi + 1) * tileSize},
			})
			buf := new(bytes.Buffer)
			if err := jpeg.Encode(buf, si, &jpeg.Options{Quality: 90}); err != nil {
				log.Fatal(err)
			}
			row = append(row, buf.Bytes())
		}
		tile = append(tile, row)
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ms, _ := strconv.Atoi(r.FormValue("latency"))
		const nanosPerMilli = 1e6
		if r.FormValue("x") != "" {
			// Single-tile request: sleep the requested latency (capped at
			// 1000ms) and serve the pre-encoded JPEG.
			x, _ := strconv.Atoi(r.FormValue("x"))
			y, _ := strconv.Atoi(r.FormValue("y"))
			if ms <= 1000 {
				time.Sleep(time.Duration(ms) * nanosPerMilli)
			}
			if x >= 0 && x < xt && y >= 0 && y < yt {
				http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(tile[y][x]))
				return
			}
		}
		// Otherwise render the index page with the tile grid.
		io.WriteString(w, "<html><body onload='showtimes()'>")
		fmt.Fprintf(w, "A grid of %d tiled images is below. Compare:<p>", xt*yt)
		for _, ms := range []int{0, 30, 200, 1000} {
			d := time.Duration(ms) * nanosPerMilli
			fmt.Fprintf(w, "[<a href='https://%s/gophertiles?latency=%d'>HTTP/2, %v latency</a>] [<a href='http://%s/gophertiles?latency=%d'>HTTP/1, %v latency</a>]<br>\n",
				httpsHost(), ms, d,
				httpHost(), ms, d,
			)
		}
		io.WriteString(w, "<p>\n")
		cacheBust := time.Now().UnixNano()
		for y := 0; y < yt; y++ {
			for x := 0; x < xt; x++ {
				fmt.Fprintf(w, "<img width=%d height=%d src='/gophertiles?x=%d&y=%d&cachebust=%d&latency=%d'>",
					tileSize, tileSize, x, y, cacheBust, ms)
			}
			io.WriteString(w, "<br/>\n")
		}
		// NOTE(review): the trailing "&lt;&lt" below is missing its
		// semicolon; browsers render it anyway. Left byte-identical here.
		io.WriteString(w, `<p><div id='loadtimes'></div></p>
<script>
function showtimes() {
	var times = 'Times from connection start:<br>'
	times += 'DOM loaded: ' + (window.performance.timing.domContentLoadedEventEnd - window.performance.timing.connectStart) + 'ms<br>'
	times += 'DOM complete (images loaded): ' + (window.performance.timing.domComplete - window.performance.timing.connectStart) + 'ms<br>'
	document.getElementById('loadtimes').innerHTML = times
}
</script>
<hr><a href='/'>&lt;&lt Back to Go HTTP/2 demo server</a></body></html>`)
	})
}
+
+func httpsHost() string {
+ if *hostHTTPS != "" {
+ return *hostHTTPS
+ }
+ if v := *httpsAddr; strings.HasPrefix(v, ":") {
+ return "localhost" + v
+ } else {
+ return v
+ }
+}
+
+func httpHost() string {
+ if *hostHTTP != "" {
+ return *hostHTTP
+ }
+ if v := *httpAddr; strings.HasPrefix(v, ":") {
+ return "localhost" + v
+ } else {
+ return v
+ }
+}
+
// serveProdTLS runs the production HTTPS listener on :443, obtaining
// certificates automatically via autocert (Let's Encrypt) for
// http2.golang.org and enabling the priority write scheduler.
func serveProdTLS() error {
	const cacheDir = "/var/cache/autocert"
	if err := os.MkdirAll(cacheDir, 0700); err != nil {
		return err
	}
	m := autocert.Manager{
		Cache:      autocert.DirCache(cacheDir),
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist("http2.golang.org"),
	}
	srv := &http.Server{
		TLSConfig: &tls.Config{
			GetCertificate: m.GetCertificate,
		},
	}
	http2.ConfigureServer(srv, &http2.Server{
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(nil)
		},
	})
	ln, err := net.Listen("tcp", ":443")
	if err != nil {
		return err
	}
	return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig))
}

// tcpKeepAliveListener wraps a TCPListener so accepted connections get
// TCP keep-alives, mirroring what net/http does for ListenAndServe.
type tcpKeepAliveListener struct {
	*net.TCPListener
}

// Accept returns the next connection with keep-alive enabled at a
// 3-minute period. SetKeepAlive errors are deliberately ignored.
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
	tc, err := ln.AcceptTCP()
	if err != nil {
		return
	}
	tc.SetKeepAlive(true)
	tc.SetKeepAlivePeriod(3 * time.Minute)
	return tc, nil
}

// serveProd runs the plain-HTTP (:80) and TLS (:443) servers and
// returns the first error either produces.
func serveProd() error {
	errc := make(chan error, 2)
	go func() { errc <- http.ListenAndServe(":80", nil) }()
	go func() { errc <- serveProdTLS() }()
	return <-errc
}
+
// Connection-lifetime limits enforced by idleTimeoutHook.
const idleTimeout = 5 * time.Minute
const activeTimeout = 10 * time.Minute

// TODO: put this into the standard library and actually send
// PING frames and GOAWAY, etc: golang.org/issue/14204
//
// idleTimeoutHook returns an http.Server.ConnState callback that closes
// connections after idleTimeout of idleness or activeTimeout of
// continuous activity, tracking one timer per connection.
func idleTimeoutHook() func(net.Conn, http.ConnState) {
	var mu sync.Mutex
	m := map[net.Conn]*time.Timer{}
	return func(c net.Conn, cs http.ConnState) {
		mu.Lock()
		defer mu.Unlock()
		// Any state change cancels the previous timer for this conn.
		if t, ok := m[c]; ok {
			delete(m, c)
			t.Stop()
		}
		var d time.Duration
		switch cs {
		case http.StateNew, http.StateIdle:
			d = idleTimeout
		case http.StateActive:
			d = activeTimeout
		default:
			return // StateHijacked/StateClosed: no timer
		}
		m[c] = time.AfterFunc(d, func() {
			log.Printf("closing idle conn %v after %v", c.RemoteAddr(), d)
			go c.Close()
		})
	}
}

// main wires up flags and handlers, then serves either the production
// configuration (-prod) or a local TLS server using server.{crt,key}.
func main() {
	var srv http.Server
	flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.")
	flag.Parse()
	srv.Addr = *httpsAddr
	srv.ConnState = idleTimeoutHook()

	registerHandlers()

	if *prod {
		*hostHTTP = "http2.golang.org"
		*hostHTTPS = "http2.golang.org"
		log.Fatal(serveProd())
	}

	url := "https://" + httpsHost() + "/"
	log.Printf("Listening on " + url)
	http2.ConfigureServer(&srv, &http2.Server{})

	if *httpAddr != "" {
		go func() {
			log.Printf("Listening on http://" + httpHost() + "/ (for unencrypted HTTP/1)")
			log.Fatal(http.ListenAndServe(*httpAddr, nil))
		}()
	}

	go func() {
		log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
	}()
	select {}
}
diff --git a/vendor/golang.org/x/net/http2/h2demo/launch.go b/vendor/golang.org/x/net/http2/h2demo/launch.go
new file mode 100644
index 0000000..df0866a
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/launch.go
@@ -0,0 +1,302 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ compute "google.golang.org/api/compute/v1"
+)
+
+var (
+ proj = flag.String("project", "symbolic-datum-552", "name of Project")
+ zone = flag.String("zone", "us-central1-a", "GCE zone")
+ mach = flag.String("machinetype", "n1-standard-1", "Machine type")
+ instName = flag.String("instance_name", "http2-demo", "Name of VM instance.")
+ sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.")
+ staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.")
+
+ writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.")
+ publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.")
+)
+
+func readFile(v string) string {
+ slurp, err := ioutil.ReadFile(v)
+ if err != nil {
+ log.Fatalf("Error reading %s: %v", v, err)
+ }
+ return strings.TrimSpace(string(slurp))
+}
+
+var config = &oauth2.Config{
+ // The client-id and secret should be for an "Installed Application" when using
+ // the CLI. Later we'll use a web application with a callback.
+ ClientID: readFile("client-id.dat"),
+ ClientSecret: readFile("client-secret.dat"),
+ Endpoint: google.Endpoint,
+ Scopes: []string{
+ compute.DevstorageFullControlScope,
+ compute.ComputeScope,
+ "https://www.googleapis.com/auth/sqlservice",
+ "https://www.googleapis.com/auth/sqlservice.admin",
+ },
+ RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
+}
+
+const baseConfig = `#cloud-config
+coreos:
+ units:
+ - name: h2demo.service
+ command: start
+ content: |
+ [Unit]
+ Description=HTTP2 Demo
+
+ [Service]
+ ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo'
+ ExecStart=/opt/bin/h2demo --prod
+ RestartSec=5s
+ Restart=always
+ Type=simple
+
+ [Install]
+ WantedBy=multi-user.target
+`
+
+func main() {
+ flag.Parse()
+ if *proj == "" {
+ log.Fatalf("Missing --project flag")
+ }
+ prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
+ machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach
+
+ const tokenFileName = "token.dat"
+ tokenFile := tokenCacheFile(tokenFileName)
+ tokenSource := oauth2.ReuseTokenSource(nil, tokenFile)
+ token, err := tokenSource.Token()
+ if err != nil {
+ if *writeObject != "" {
+ log.Fatalf("Can't use --write_object without a valid token.dat file already cached.")
+ }
+ log.Printf("Error getting token from %s: %v", tokenFileName, err)
+ log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
+ fmt.Print("\nEnter auth code: ")
+ sc := bufio.NewScanner(os.Stdin)
+ sc.Scan()
+ authCode := strings.TrimSpace(sc.Text())
+ token, err = config.Exchange(oauth2.NoContext, authCode)
+ if err != nil {
+ log.Fatalf("Error exchanging auth code for a token: %v", err)
+ }
+ if err := tokenFile.WriteToken(token); err != nil {
+ log.Fatalf("Error writing to %s: %v", tokenFileName, err)
+ }
+ tokenSource = oauth2.ReuseTokenSource(token, nil)
+ }
+
+ oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
+
+ if *writeObject != "" {
+ writeCloudStorageObject(oauthClient)
+ return
+ }
+
+ computeService, _ := compute.New(oauthClient)
+
+ natIP := *staticIP
+ if natIP == "" {
+ // Try to find it by name.
+ aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
+ if err != nil {
+ log.Fatal(err)
+ }
+ // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList
+ IPLoop:
+ for _, asl := range aggAddrList.Items {
+ for _, addr := range asl.Addresses {
+ if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
+ natIP = addr.Address
+ break IPLoop
+ }
+ }
+ }
+ }
+
+ cloudConfig := baseConfig
+ if *sshPub != "" {
+ key := strings.TrimSpace(readFile(*sshPub))
+ cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key)
+ }
+ if os.Getenv("USER") == "bradfitz" {
+ cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com")
+ }
+ const maxCloudConfig = 32 << 10 // per compute API docs
+ if len(cloudConfig) > maxCloudConfig {
+ log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
+ }
+
+ instance := &compute.Instance{
+ Name: *instName,
+ Description: "Go Builder",
+ MachineType: machType,
+ Disks: []*compute.AttachedDisk{instanceDisk(computeService)},
+ Tags: &compute.Tags{
+ Items: []string{"http-server", "https-server"},
+ },
+ Metadata: &compute.Metadata{
+ Items: []*compute.MetadataItems{
+ {
+ Key: "user-data",
+ Value: &cloudConfig,
+ },
+ },
+ },
+ NetworkInterfaces: []*compute.NetworkInterface{
+ {
+ AccessConfigs: []*compute.AccessConfig{
+ {
+ Type: "ONE_TO_ONE_NAT",
+ Name: "External NAT",
+ NatIP: natIP,
+ },
+ },
+ Network: prefix + "/global/networks/default",
+ },
+ },
+ ServiceAccounts: []*compute.ServiceAccount{
+ {
+ Email: "default",
+ Scopes: []string{
+ compute.DevstorageFullControlScope,
+ compute.ComputeScope,
+ },
+ },
+ },
+ }
+
+ log.Printf("Creating instance...")
+ op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
+ if err != nil {
+ log.Fatalf("Failed to create instance: %v", err)
+ }
+ opName := op.Name
+ log.Printf("Created. Waiting on operation %v", opName)
+OpLoop:
+ for {
+ time.Sleep(2 * time.Second)
+ op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
+ if err != nil {
+ log.Fatalf("Failed to get op %s: %v", opName, err)
+ }
+ switch op.Status {
+ case "PENDING", "RUNNING":
+ log.Printf("Waiting on operation %v", opName)
+ continue
+ case "DONE":
+ if op.Error != nil {
+ for _, operr := range op.Error.Errors {
+ log.Printf("Error: %+v", operr)
+ }
+ log.Fatalf("Failed to start.")
+ }
+ log.Printf("Success. %+v", op)
+ break OpLoop
+ default:
+ log.Fatalf("Unknown status %q: %+v", op.Status, op)
+ }
+ }
+
+ inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
+ if err != nil {
+ log.Fatalf("Error getting instance after creation: %v", err)
+ }
+ ij, _ := json.MarshalIndent(inst, "", " ")
+ log.Printf("Instance: %s", ij)
+}
+
+func instanceDisk(svc *compute.Service) *compute.AttachedDisk {
+ const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016"
+ diskName := *instName + "-disk"
+
+ return &compute.AttachedDisk{
+ AutoDelete: true,
+ Boot: true,
+ Type: "PERSISTENT",
+ InitializeParams: &compute.AttachedDiskInitializeParams{
+ DiskName: diskName,
+ SourceImage: imageURL,
+ DiskSizeGb: 50,
+ },
+ }
+}
+
+func writeCloudStorageObject(httpClient *http.Client) {
+ content := os.Stdin
+ const maxSlurp = 1 << 20
+ var buf bytes.Buffer
+ n, err := io.CopyN(&buf, content, maxSlurp)
+ if err != nil && err != io.EOF {
+ log.Fatalf("Error reading from stdin: %v, %v", n, err)
+ }
+ contentType := http.DetectContentType(buf.Bytes())
+
+ req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content))
+ if err != nil {
+ log.Fatal(err)
+ }
+ req.Header.Set("x-goog-api-version", "2")
+ if *publicObject {
+ req.Header.Set("x-goog-acl", "public-read")
+ }
+ req.Header.Set("Content-Type", contentType)
+ res, err := httpClient.Do(req)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if res.StatusCode != 200 {
+ res.Write(os.Stderr)
+ log.Fatalf("Failed.")
+ }
+ log.Printf("Success.")
+ os.Exit(0)
+}
+
+type tokenCacheFile string
+
+func (f tokenCacheFile) Token() (*oauth2.Token, error) {
+ slurp, err := ioutil.ReadFile(string(f))
+ if err != nil {
+ return nil, err
+ }
+ t := new(oauth2.Token)
+ if err := json.Unmarshal(slurp, t); err != nil {
+ return nil, err
+ }
+ return t, nil
+}
+
+func (f tokenCacheFile) WriteToken(t *oauth2.Token) error {
+ jt, err := json.Marshal(t)
+ if err != nil {
+ return err
+ }
+ return ioutil.WriteFile(string(f), jt, 0600)
+}
diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.key b/vendor/golang.org/x/net/http2/h2demo/rootCA.key
new file mode 100644
index 0000000..a15a6ab
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q
+62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby
+XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV
+mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ
+JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ
+SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA
+nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e
+/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx
+qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser
+hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j
+NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E
+LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7
+8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c
+0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws
+K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd
+bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo
+QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt
+Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1
+nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy
+b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7
+gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev
+WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr
+C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj
+x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA
+hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem
new file mode 100644
index 0000000..3a323e7
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem
@@ -0,0 +1,26 @@
+-----BEGIN CERTIFICATE-----
+MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV
+BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG
+A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3
+DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0
+NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG
+cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv
+c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS
+R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT
+ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk
+JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3
+mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW
+caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G
+A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt
+hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB
+MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES
+MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv
+bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h
+U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao
+eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4
+UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD
+58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n
+sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF
+kPe6XoSbiLm/kxk32T0=
+-----END CERTIFICATE-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl
new file mode 100644
index 0000000..6db3891
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl
@@ -0,0 +1 @@
+E2CE26BF3285059C
diff --git a/vendor/golang.org/x/net/http2/h2demo/server.crt b/vendor/golang.org/x/net/http2/h2demo/server.crt
new file mode 100644
index 0000000..c59059b
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/server.crt
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV
+UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT
+C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW
+DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow
+RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE
+ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l
+gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2
+dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL
+A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws
+/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88
+F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB
+AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R
+rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD
+EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19
+KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI
+dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU
+90p6/CbU71bGbfpM2PHot2fm
+-----END CERTIFICATE-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/server.key b/vendor/golang.org/x/net/http2/h2demo/server.key
new file mode 100644
index 0000000..f329c14
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/server.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi
+fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm
+J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef
+b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55
+mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/
+fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p
+3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3
+qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4
+NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80
+LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN
+a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+
+Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL
+W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO
+gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm
+S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS
+Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp
+V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4
+KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4
+yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5
+drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e
+ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R
+48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5
+c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY
+nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl
+IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/tmpl.go b/vendor/golang.org/x/net/http2/h2demo/tmpl.go
new file mode 100644
index 0000000..504d6a7
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/tmpl.go
@@ -0,0 +1,1991 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build h2demo
+
+package main
+
+import "html/template"
+
+var pushTmpl = template.Must(template.New("serverpush").Parse(`
+
+<!DOCTYPE html>
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+<meta name="viewport" content="width=device-width, initial-scale=1">
+<meta name="theme-color" content="#375EAB">
+
+ <title>HTTP/2 Server Push Demo</title>
+
+<link type="text/css" rel="stylesheet" href="/serverpush/static/style.css?{{.CacheBust}}">
+<script>
+window.initFuncs = [];
+</script>
+
+<script>
+function showtimes() {
+ var times = 'DOM loaded: ' + (window.performance.timing.domContentLoadedEventEnd - window.performance.timing.navigationStart) + 'ms, '
+ times += 'DOM complete (all loaded): ' + (window.performance.timing.domComplete - window.performance.timing.navigationStart) + 'ms, '
+ times += 'Load event fired: ' + (window.performance.timing.loadEventStart - window.performance.timing.navigationStart) + 'ms'
+ document.getElementById('loadtimes').innerHTML = times
+}
+</script>
+
+</head>
+<body onload="showtimes()">
+
+<div style="background:#fff9a4;padding:10px">
+Note: This page exists for demonstration purposes. For the actual cmd/go docs, go to <a href="golang.org/cmd/go">golang.org/cmd/go</a>.
+</div>
+
+<div style="padding:20px">
+
+
+<a href="https://{{.HTTPSHost}}/serverpush">HTTP/2 with Server Push</a> | <a href="http://{{.HTTPHost}}/serverpush">HTTP only</a>
+<div id="loadtimes"></div>
+
+</div>
+
+<div id='lowframe' style="position: fixed; bottom: 0; left: 0; height: 0; width: 100%; border-top: thin solid grey; background-color: white; overflow: auto;">
+...
+</div><!-- #lowframe -->
+
+<div id="topbar" class="wide"><div class="container">
+<div class="top-heading" id="heading-wide"><a href="/">The Go Programming Language</a></div>
+<div class="top-heading" id="heading-narrow"><a href="/">Go</a></div>
+<a href="#" id="menu-button"><span id="menu-button-arrow">&#9661;</span></a>
+<form method="GET" action="/search">
+<div id="menu">
+<a href="/doc/">Documents</a>
+<a href="/pkg/">Packages</a>
+<a href="/project/">The Project</a>
+<a href="/help/">Help</a>
+<a href="/blog/">Blog</a>
+
+<a id="playgroundButton" href="http://play.golang.org/" title="Show Go Playground">Play</a>
+
+<input type="text" id="search" name="q" class="inactive" value="Search" placeholder="Search">
+</div>
+</form>
+
+</div></div>
+
+
+<div id="playground" class="play">
+ <div class="input"><textarea class="code" spellcheck="false">package main
+
+import "fmt"
+
+func main() {
+ fmt.Println("Hello, 世界")
+}</textarea></div>
+ <div class="output"></div>
+ <div class="buttons">
+ <a class="run" title="Run this code [shift-enter]">Run</a>
+ <a class="fmt" title="Format this code">Format</a>
+
+ <a class="share" title="Share this code">Share</a>
+
+ </div>
+</div>
+
+
+<div id="page" class="wide">
+<div class="container">
+
+
+ <h1>Command go</h1>
+
+
+
+
+<div id="nav"></div>
+
+
+<!--
+ Copyright 2009 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+-->
+<!--
+ Note: Static (i.e., not template-generated) href and id
+ attributes start with "pkg-" to make it impossible for
+ them to conflict with generated attributes (some of which
+ correspond to Go identifiers).
+-->
+
+ <script type='text/javascript'>
+ document.ANALYSIS_DATA = null;
+ document.CALLGRAPH = null;
+ </script>
+
+
+
+ <p>
+Go is a tool for managing Go source code.
+</p>
+<p>
+Usage:
+</p>
+<pre>go command [arguments]
+</pre>
+<p>
+The commands are:
+</p>
+<pre>build compile packages and dependencies
+clean remove object files
+doc show documentation for package or symbol
+env print Go environment information
+bug start a bug report
+fix run go tool fix on packages
+fmt run gofmt on package sources
+generate generate Go files by processing source
+get download and install packages and dependencies
+install compile and install packages and dependencies
+list list packages
+run compile and run Go program
+test test packages
+tool run specified go tool
+version print Go version
+vet run go tool vet on packages
+</pre>
+<p>
+Use &#34;go help [command]&#34; for more information about a command.
+</p>
+<p>
+Additional help topics:
+</p>
+<pre>c calling between Go and C
+buildmode description of build modes
+filetype file types
+gopath GOPATH environment variable
+environment environment variables
+importpath import path syntax
+packages description of package lists
+testflag description of testing flags
+testfunc description of testing functions
+</pre>
+<p>
+Use &#34;go help [topic]&#34; for more information about that topic.
+</p>
+<h3 id="hdr-Compile_packages_and_dependencies">Compile packages and dependencies</h3>
+<p>
+Usage:
+</p>
+<pre>go build [-o output] [-i] [build flags] [packages]
+</pre>
+<p>
+Build compiles the packages named by the import paths,
+along with their dependencies, but it does not install the results.
+</p>
+<p>
+If the arguments to build are a list of .go files, build treats
+them as a list of source files specifying a single package.
+</p>
+<p>
+When compiling a single main package, build writes
+the resulting executable to an output file named after
+the first source file (&#39;go build ed.go rx.go&#39; writes &#39;ed&#39; or &#39;ed.exe&#39;)
+or the source code directory (&#39;go build unix/sam&#39; writes &#39;sam&#39; or &#39;sam.exe&#39;).
+The &#39;.exe&#39; suffix is added when writing a Windows executable.
+</p>
+<p>
+When compiling multiple packages or a single non-main package,
+build compiles the packages but discards the resulting object,
+serving only as a check that the packages can be built.
+</p>
+<p>
+When compiling packages, build ignores files that end in &#39;_test.go&#39;.
+</p>
+<p>
+The -o flag, only allowed when compiling a single package,
+forces build to write the resulting executable or object
+to the named output file, instead of the default behavior described
+in the last two paragraphs.
+</p>
+<p>
+The -i flag installs the packages that are dependencies of the target.
+</p>
+<p>
+The build flags are shared by the build, clean, get, install, list, run,
+and test commands:
+</p>
+<pre>-a
+ force rebuilding of packages that are already up-to-date.
+-n
+ print the commands but do not run them.
+-p n
+ the number of programs, such as build commands or
+ test binaries, that can be run in parallel.
+ The default is the number of CPUs available.
+-race
+ enable data race detection.
+ Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64.
+-msan
+ enable interoperation with memory sanitizer.
+ Supported only on linux/amd64,
+ and only with Clang/LLVM as the host C compiler.
+-v
+ print the names of packages as they are compiled.
+-work
+ print the name of the temporary work directory and
+ do not delete it when exiting.
+-x
+ print the commands.
+
+-asmflags &#39;flag list&#39;
+ arguments to pass on each go tool asm invocation.
+-buildmode mode
+ build mode to use. See &#39;go help buildmode&#39; for more.
+-compiler name
+ name of compiler to use, as in runtime.Compiler (gccgo or gc).
+-gccgoflags &#39;arg list&#39;
+ arguments to pass on each gccgo compiler/linker invocation.
+-gcflags &#39;arg list&#39;
+ arguments to pass on each go tool compile invocation.
+-installsuffix suffix
+ a suffix to use in the name of the package installation directory,
+ in order to keep output separate from default builds.
+ If using the -race flag, the install suffix is automatically set to race
+ or, if set explicitly, has _race appended to it. Likewise for the -msan
+ flag. Using a -buildmode option that requires non-default compile flags
+ has a similar effect.
+-ldflags &#39;flag list&#39;
+ arguments to pass on each go tool link invocation.
+-linkshared
+ link against shared libraries previously created with
+ -buildmode=shared.
+-pkgdir dir
+ install and load all packages from dir instead of the usual locations.
+ For example, when building with a non-standard configuration,
+ use -pkgdir to keep generated packages in a separate location.
+-tags &#39;tag list&#39;
+ a list of build tags to consider satisfied during the build.
+ For more information about build tags, see the description of
+ build constraints in the documentation for the go/build package.
+-toolexec &#39;cmd args&#39;
+ a program to use to invoke toolchain programs like vet and asm.
+ For example, instead of running asm, the go command will run
+ &#39;cmd args /path/to/asm &lt;arguments for asm&gt;&#39;.
+</pre>
+<p>
+The list flags accept a space-separated list of strings. To embed spaces
+in an element in the list, surround it with either single or double quotes.
+</p>
+<p>
+For more about specifying packages, see &#39;go help packages&#39;.
+For more about where packages and binaries are installed,
+run &#39;go help gopath&#39;.
+For more about calling between Go and C/C++, run &#39;go help c&#39;.
+</p>
+<p>
+Note: Build adheres to certain conventions such as those described
+by &#39;go help gopath&#39;. Not all projects can follow these conventions,
+however. Installations that have their own conventions or that use
+a separate software build system may choose to use lower-level
+invocations such as &#39;go tool compile&#39; and &#39;go tool link&#39; to avoid
+some of the overheads and design decisions of the build tool.
+</p>
+<p>
+See also: go install, go get, go clean.
+</p>
+<h3 id="hdr-Remove_object_files">Remove object files</h3>
+<p>
+Usage:
+</p>
+<pre>go clean [-i] [-r] [-n] [-x] [build flags] [packages]
+</pre>
+<p>
+Clean removes object files from package source directories.
+The go command builds most objects in a temporary directory,
+so go clean is mainly concerned with object files left by other
+tools or by manual invocations of go build.
+</p>
+<p>
+Specifically, clean removes the following files from each of the
+source directories corresponding to the import paths:
+</p>
+<pre>_obj/ old object directory, left from Makefiles
+_test/ old test directory, left from Makefiles
+_testmain.go old gotest file, left from Makefiles
+test.out old test log, left from Makefiles
+build.out old test log, left from Makefiles
+*.[568ao] object files, left from Makefiles
+
+DIR(.exe) from go build
+DIR.test(.exe) from go test -c
+MAINFILE(.exe) from go build MAINFILE.go
+*.so from SWIG
+</pre>
+<p>
+In the list, DIR represents the final path element of the
+directory, and MAINFILE is the base name of any Go source
+file in the directory that is not included when building
+the package.
+</p>
+<p>
+The -i flag causes clean to remove the corresponding installed
+archive or binary (what &#39;go install&#39; would create).
+</p>
+<p>
+The -n flag causes clean to print the remove commands it would execute,
+but not run them.
+</p>
+<p>
+The -r flag causes clean to be applied recursively to all the
+dependencies of the packages named by the import paths.
+</p>
+<p>
+The -x flag causes clean to print remove commands as it executes them.
+</p>
+<p>
+For more about build flags, see &#39;go help build&#39;.
+</p>
+<p>
+For more about specifying packages, see &#39;go help packages&#39;.
+</p>
+<h3 id="hdr-Show_documentation_for_package_or_symbol">Show documentation for package or symbol</h3>
+<p>
+Usage:
+</p>
+<pre>go doc [-u] [-c] [package|[package.]symbol[.method]]
+</pre>
+<p>
+Doc prints the documentation comments associated with the item identified by its
+arguments (a package, const, func, type, var, or method) followed by a one-line
+summary of each of the first-level items &#34;under&#34; that item (package-level
+declarations for a package, methods for a type, etc.).
+</p>
+<p>
+Doc accepts zero, one, or two arguments.
+</p>
+<p>
+Given no arguments, that is, when run as
+</p>
+<pre>go doc
+</pre>
+<p>
+it prints the package documentation for the package in the current directory.
+If the package is a command (package main), the exported symbols of the package
+are elided from the presentation unless the -cmd flag is provided.
+</p>
+<p>
+When run with one argument, the argument is treated as a Go-syntax-like
+representation of the item to be documented. What the argument selects depends
+on what is installed in GOROOT and GOPATH, as well as the form of the argument,
+which is schematically one of these:
+</p>
+<pre>go doc &lt;pkg&gt;
+go doc &lt;sym&gt;[.&lt;method&gt;]
+go doc [&lt;pkg&gt;.]&lt;sym&gt;[.&lt;method&gt;]
+go doc [&lt;pkg&gt;.][&lt;sym&gt;.]&lt;method&gt;
+</pre>
+<p>
+The first item in this list matched by the argument is the one whose documentation
+is printed. (See the examples below.) However, if the argument starts with a capital
+letter it is assumed to identify a symbol or method in the current directory.
+</p>
+<p>
+For packages, the order of scanning is determined lexically in breadth-first order.
+That is, the package presented is the one that matches the search and is nearest
+the root and lexically first at its level of the hierarchy. The GOROOT tree is
+always scanned in its entirety before GOPATH.
+</p>
+<p>
+If there is no package specified or matched, the package in the current
+directory is selected, so &#34;go doc Foo&#34; shows the documentation for symbol Foo in
+the current package.
+</p>
+<p>
+The package path must be either a qualified path or a proper suffix of a
+path. The go tool&#39;s usual package mechanism does not apply: package path
+elements like . and ... are not implemented by go doc.
+</p>
+<p>
+When run with two arguments, the first must be a full package path (not just a
+suffix), and the second is a symbol or symbol and method; this is similar to the
+syntax accepted by godoc:
+</p>
+<pre>go doc &lt;pkg&gt; &lt;sym&gt;[.&lt;method&gt;]
+</pre>
+<p>
+In all forms, when matching symbols, lower-case letters in the argument match
+either case but upper-case letters match exactly. This means that there may be
+multiple matches of a lower-case argument in a package if different symbols have
+different cases. If this occurs, documentation for all matches is printed.
+</p>
+<p>
+Examples:
+</p>
+<pre>go doc
+ Show documentation for current package.
+go doc Foo
+ Show documentation for Foo in the current package.
+ (Foo starts with a capital letter so it cannot match
+ a package path.)
+go doc encoding/json
+ Show documentation for the encoding/json package.
+go doc json
+ Shorthand for encoding/json.
+go doc json.Number (or go doc json.number)
+ Show documentation and method summary for json.Number.
+go doc json.Number.Int64 (or go doc json.number.int64)
+ Show documentation for json.Number&#39;s Int64 method.
+go doc cmd/doc
+ Show package docs for the doc command.
+go doc -cmd cmd/doc
+ Show package docs and exported symbols within the doc command.
+go doc template.new
+ Show documentation for html/template&#39;s New function.
+ (html/template is lexically before text/template)
+go doc text/template.new # One argument
+ Show documentation for text/template&#39;s New function.
+go doc text/template new # Two arguments
+ Show documentation for text/template&#39;s New function.
+
+At least in the current tree, these invocations all print the
+documentation for json.Decoder&#39;s Decode method:
+
+go doc json.Decoder.Decode
+go doc json.decoder.decode
+go doc json.decode
+cd go/src/encoding/json; go doc decode
+</pre>
+<p>
+Flags:
+</p>
+<pre>-c
+ Respect case when matching symbols.
+-cmd
+ Treat a command (package main) like a regular package.
+ Otherwise package main&#39;s exported symbols are hidden
+ when showing the package&#39;s top-level documentation.
+-u
+ Show documentation for unexported as well as exported
+ symbols and methods.
+</pre>
+<h3 id="hdr-Print_Go_environment_information">Print Go environment information</h3>
+<p>
+Usage:
+</p>
+<pre>go env [var ...]
+</pre>
+<p>
+Env prints Go environment information.
+</p>
+<p>
+By default env prints information as a shell script
+(on Windows, a batch file). If one or more variable
+names are given as arguments, env prints the value of
+each named variable on its own line.
+</p>
+<h3 id="hdr-Start_a_bug_report">Start a bug report</h3>
+<p>
+Usage:
+</p>
+<pre>go bug
+</pre>
+<p>
+Bug opens the default browser and starts a new bug report.
+The report includes useful system information.
+</p>
+<h3 id="hdr-Run_go_tool_fix_on_packages">Run go tool fix on packages</h3>
+<p>
+Usage:
+</p>
+<pre>go fix [packages]
+</pre>
+<p>
+Fix runs the Go fix command on the packages named by the import paths.
+</p>
+<p>
+For more about fix, see &#39;go doc cmd/fix&#39;.
+For more about specifying packages, see &#39;go help packages&#39;.
+</p>
+<p>
+To run fix with specific options, run &#39;go tool fix&#39;.
+</p>
+<p>
+See also: go fmt, go vet.
+</p>
+<h3 id="hdr-Run_gofmt_on_package_sources">Run gofmt on package sources</h3>
+<p>
+Usage:
+</p>
+<pre>go fmt [-n] [-x] [packages]
+</pre>
+<p>
+Fmt runs the command &#39;gofmt -l -w&#39; on the packages named
+by the import paths. It prints the names of the files that are modified.
+</p>
+<p>
+For more about gofmt, see &#39;go doc cmd/gofmt&#39;.
+For more about specifying packages, see &#39;go help packages&#39;.
+</p>
+<p>
+The -n flag prints commands that would be executed.
+The -x flag prints commands as they are executed.
+</p>
+<p>
+To run gofmt with specific options, run gofmt itself.
+</p>
+<p>
+See also: go fix, go vet.
+</p>
+<h3 id="hdr-Generate_Go_files_by_processing_source">Generate Go files by processing source</h3>
+<p>
+Usage:
+</p>
+<pre>go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]
+</pre>
+<p>
+Generate runs commands described by directives within existing
+files. Those commands can run any process but the intent is to
+create or update Go source files.
+</p>
+<p>
+Go generate is never run automatically by go build, go get, go test,
+and so on. It must be run explicitly.
+</p>
+<p>
+Go generate scans the file for directives, which are lines of
+the form,
+</p>
+<pre>//go:generate command argument...
+</pre>
+<p>
+(note: no leading spaces and no space in &#34;//go&#34;) where command
+is the generator to be run, corresponding to an executable file
+that can be run locally. It must either be in the shell path
+(gofmt), a fully qualified path (/usr/you/bin/mytool), or a
+command alias, described below.
+</p>
+<p>
+Note that go generate does not parse the file, so lines that look
+like directives in comments or multiline strings will be treated
+as directives.
+</p>
+<p>
+The arguments to the directive are space-separated tokens or
+double-quoted strings passed to the generator as individual
+arguments when it is run.
+</p>
+<p>
+Quoted strings use Go syntax and are evaluated before execution; a
+quoted string appears as a single argument to the generator.
+</p>
+<p>
+Go generate sets several variables when it runs the generator:
+</p>
+<pre>$GOARCH
+ The execution architecture (arm, amd64, etc.)
+$GOOS
+ The execution operating system (linux, windows, etc.)
+$GOFILE
+ The base name of the file.
+$GOLINE
+ The line number of the directive in the source file.
+$GOPACKAGE
+ The name of the package of the file containing the directive.
+$DOLLAR
+ A dollar sign.
+</pre>
+<p>
+Other than variable substitution and quoted-string evaluation, no
+special processing such as &#34;globbing&#34; is performed on the command
+line.
+</p>
+<p>
+As a last step before running the command, any invocations of any
+environment variables with alphanumeric names, such as $GOFILE or
+$HOME, are expanded throughout the command line. The syntax for
+variable expansion is $NAME on all operating systems. Due to the
+order of evaluation, variables are expanded even inside quoted
+strings. If the variable NAME is not set, $NAME expands to the
+empty string.
+</p>
+<p>
+A directive of the form,
+</p>
+<pre>//go:generate -command xxx args...
+</pre>
+<p>
+specifies, for the remainder of this source file only, that the
+string xxx represents the command identified by the arguments. This
+can be used to create aliases or to handle multiword generators.
+For example,
+</p>
+<pre>//go:generate -command foo go tool foo
+</pre>
+<p>
+specifies that the command &#34;foo&#34; represents the generator
+&#34;go tool foo&#34;.
+</p>
+<p>
+Generate processes packages in the order given on the command line,
+one at a time. If the command line lists .go files, they are treated
+as a single package. Within a package, generate processes the
+source files in a package in file name order, one at a time. Within
+a source file, generate runs generators in the order they appear
+in the file, one at a time.
+</p>
+<p>
+If any generator returns an error exit status, &#34;go generate&#34; skips
+all further processing for that package.
+</p>
+<p>
+The generator is run in the package&#39;s source directory.
+</p>
+<p>
+Go generate accepts one specific flag:
+</p>
+<pre>-run=&#34;&#34;
+ if non-empty, specifies a regular expression to select
+ directives whose full original source text (excluding
+ any trailing spaces and final newline) matches the
+ expression.
+</pre>
+<p>
+It also accepts the standard build flags including -v, -n, and -x.
+The -v flag prints the names of packages and files as they are
+processed.
+The -n flag prints commands that would be executed.
+The -x flag prints commands as they are executed.
+</p>
+<p>
+For more about build flags, see &#39;go help build&#39;.
+</p>
+<p>
+For more about specifying packages, see &#39;go help packages&#39;.
+</p>
+<h3 id="hdr-Download_and_install_packages_and_dependencies">Download and install packages and dependencies</h3>
+<p>
+Usage:
+</p>
+<pre>go get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]
+</pre>
+<p>
+Get downloads the packages named by the import paths, along with their
+dependencies. It then installs the named packages, like &#39;go install&#39;.
+</p>
+<p>
+The -d flag instructs get to stop after downloading the packages; that is,
+it instructs get not to install the packages.
+</p>
+<p>
+The -f flag, valid only when -u is set, forces get -u not to verify that
+each package has been checked out from the source control repository
+implied by its import path. This can be useful if the source is a local fork
+of the original.
+</p>
+<p>
+The -fix flag instructs get to run the fix tool on the downloaded packages
+before resolving dependencies or building the code.
+</p>
+<p>
+The -insecure flag permits fetching from repositories and resolving
+custom domains using insecure schemes such as HTTP. Use with caution.
+</p>
+<p>
+The -t flag instructs get to also download the packages required to build
+the tests for the specified packages.
+</p>
+<p>
+The -u flag instructs get to use the network to update the named packages
+and their dependencies. By default, get uses the network to check out
+missing packages but does not use it to look for updates to existing packages.
+</p>
+<p>
+The -v flag enables verbose progress and debug output.
+</p>
+<p>
+Get also accepts build flags to control the installation. See &#39;go help build&#39;.
+</p>
+<p>
+When checking out a new package, get creates the target directory
+GOPATH/src/&lt;import-path&gt;. If the GOPATH contains multiple entries,
+get uses the first one. For more details see: &#39;go help gopath&#39;.
+</p>
+<p>
+When checking out or updating a package, get looks for a branch or tag
+that matches the locally installed version of Go. The most important
+rule is that if the local installation is running version &#34;go1&#34;, get
+searches for a branch or tag named &#34;go1&#34;. If no such version exists it
+retrieves the most recent version of the package.
+</p>
+<p>
+When go get checks out or updates a Git repository,
+it also updates any git submodules referenced by the repository.
+</p>
+<p>
+Get never checks out or updates code stored in vendor directories.
+</p>
+<p>
+For more about specifying packages, see &#39;go help packages&#39;.
+</p>
+<p>
+For more about how &#39;go get&#39; finds source code to
+download, see &#39;go help importpath&#39;.
+</p>
+<p>
+See also: go build, go install, go clean.
+</p>
+<h3 id="hdr-Compile_and_install_packages_and_dependencies">Compile and install packages and dependencies</h3>
+<p>
+Usage:
+</p>
+<pre>go install [build flags] [packages]
+</pre>
+<p>
+Install compiles and installs the packages named by the import paths,
+along with their dependencies.
+</p>
+<p>
+For more about the build flags, see &#39;go help build&#39;.
+For more about specifying packages, see &#39;go help packages&#39;.
+</p>
+<p>
+See also: go build, go get, go clean.
+</p>
+<h3 id="hdr-List_packages">List packages</h3>
+<p>
+Usage:
+</p>
+<pre>go list [-e] [-f format] [-json] [build flags] [packages]
+</pre>
+<p>
+List lists the packages named by the import paths, one per line.
+</p>
+<p>
+The default output shows the package import path:
+</p>
+<pre>bytes
+encoding/json
+github.com/gorilla/mux
+golang.org/x/net/html
+</pre>
+<p>
+The -f flag specifies an alternate format for the list, using the
+syntax of package template. The default output is equivalent to -f
+&#39;&#39;. The struct being passed to the template is:
+</p>
+<pre>type Package struct {
+ Dir string // directory containing package sources
+ ImportPath string // import path of package in dir
+ ImportComment string // path in import comment on package statement
+ Name string // package name
+ Doc string // package documentation string
+ Target string // install path
+ Shlib string // the shared library that contains this package (only set when -linkshared)
+ Goroot bool // is this package in the Go root?
+ Standard bool // is this package part of the standard Go library?
+ Stale bool // would &#39;go install&#39; do anything for this package?
+ StaleReason string // explanation for Stale==true
+ Root string // Go root or Go path dir containing this package
+ ConflictDir string // this directory shadows Dir in $GOPATH
+ BinaryOnly bool // binary-only package: cannot be recompiled from sources
+
+ // Source files
+ GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
+	CgoFiles       []string // .go source files that import &#34;C&#34;
+	IgnoredGoFiles []string // .go source files ignored due to build constraints
+ CFiles []string // .c source files
+ CXXFiles []string // .cc, .cxx and .cpp source files
+ MFiles []string // .m source files
+ HFiles []string // .h, .hh, .hpp and .hxx source files
+ FFiles []string // .f, .F, .for and .f90 Fortran source files
+ SFiles []string // .s source files
+ SwigFiles []string // .swig files
+ SwigCXXFiles []string // .swigcxx files
+ SysoFiles []string // .syso object files to add to archive
+ TestGoFiles []string // _test.go files in package
+ XTestGoFiles []string // _test.go files outside package
+
+ // Cgo directives
+ CgoCFLAGS []string // cgo: flags for C compiler
+ CgoCPPFLAGS []string // cgo: flags for C preprocessor
+ CgoCXXFLAGS []string // cgo: flags for C++ compiler
+ CgoFFLAGS []string // cgo: flags for Fortran compiler
+ CgoLDFLAGS []string // cgo: flags for linker
+ CgoPkgConfig []string // cgo: pkg-config names
+
+ // Dependency information
+ Imports []string // import paths used by this package
+ Deps []string // all (recursively) imported dependencies
+ TestImports []string // imports from TestGoFiles
+ XTestImports []string // imports from XTestGoFiles
+
+ // Error information
+ Incomplete bool // this package or a dependency has an error
+ Error *PackageError // error loading package
+ DepsErrors []*PackageError // errors loading dependencies
+}
+</pre>
+<p>
+Packages stored in vendor directories report an ImportPath that includes the
+path to the vendor directory (for example, &#34;d/vendor/p&#34; instead of &#34;p&#34;),
+so that the ImportPath uniquely identifies a given copy of a package.
+The Imports, Deps, TestImports, and XTestImports lists also contain these
+expanded import paths. See golang.org/s/go15vendor for more about vendoring.
+</p>
+<p>
+The error information, if any, is
+</p>
+<pre>type PackageError struct {
+ ImportStack []string // shortest path from package named on command line to this one
+ Pos string // position of error (if present, file:line:col)
+ Err string // the error itself
+}
+</pre>
+<p>
+The template function &#34;join&#34; calls strings.Join.
+</p>
+<p>
+The template function &#34;context&#34; returns the build context, defined as:
+</p>
+<pre>type Context struct {
+ GOARCH string // target architecture
+ GOOS string // target operating system
+ GOROOT string // Go root
+ GOPATH string // Go path
+ CgoEnabled bool // whether cgo can be used
+ UseAllFiles bool // use files regardless of +build lines, file names
+ Compiler string // compiler to assume when computing target paths
+ BuildTags []string // build constraints to match in +build lines
+ ReleaseTags []string // releases the current release is compatible with
+ InstallSuffix string // suffix to use in the name of the install dir
+}
+</pre>
+<p>
+For more information about the meaning of these fields see the documentation
+for the go/build package&#39;s Context type.
+</p>
+<p>
+The -json flag causes the package data to be printed in JSON format
+instead of using the template format.
+</p>
+<p>
+The -e flag changes the handling of erroneous packages, those that
+cannot be found or are malformed. By default, the list command
+prints an error to standard error for each erroneous package and
+omits the packages from consideration during the usual printing.
+With the -e flag, the list command never prints errors to standard
+error and instead processes the erroneous packages with the usual
+printing. Erroneous packages will have a non-empty ImportPath and
+a non-nil Error field; other information may or may not be missing
+(zeroed).
+</p>
+<p>
+For more about build flags, see &#39;go help build&#39;.
+</p>
+<p>
+For more about specifying packages, see &#39;go help packages&#39;.
+</p>
+<h3 id="hdr-Compile_and_run_Go_program">Compile and run Go program</h3>
+<p>
+Usage:
+</p>
+<pre>go run [build flags] [-exec xprog] gofiles... [arguments...]
+</pre>
+<p>
+Run compiles and runs the main package comprising the named Go source files.
+A Go source file is defined to be a file ending in a literal &#34;.go&#34; suffix.
+</p>
+<p>
+By default, &#39;go run&#39; runs the compiled binary directly: &#39;a.out arguments...&#39;.
+If the -exec flag is given, &#39;go run&#39; invokes the binary using xprog:
+</p>
+<pre>&#39;xprog a.out arguments...&#39;.
+</pre>
+<p>
+If the -exec flag is not given, GOOS or GOARCH is different from the system
+default, and a program named go_$GOOS_$GOARCH_exec can be found
+on the current search path, &#39;go run&#39; invokes the binary using that program,
+for example &#39;go_nacl_386_exec a.out arguments...&#39;. This allows execution of
+cross-compiled programs when a simulator or other execution method is
+available.
+</p>
+<p>
+For more about build flags, see &#39;go help build&#39;.
+</p>
+<p>
+See also: go build.
+</p>
+<h3 id="hdr-Test_packages">Test packages</h3>
+<p>
+Usage:
+</p>
+<pre>go test [build/test flags] [packages] [build/test flags &amp; test binary flags]
+</pre>
+<p>
+&#39;Go test&#39; automates testing the packages named by the import paths.
+It prints a summary of the test results in the format:
+</p>
+<pre>ok archive/tar 0.011s
+FAIL archive/zip 0.022s
+ok compress/gzip 0.033s
+...
+</pre>
+<p>
+followed by detailed output for each failed package.
+</p>
+<p>
+&#39;Go test&#39; recompiles each package along with any files with names matching
+the file pattern &#34;*_test.go&#34;.
+Files whose names begin with &#34;_&#34; (including &#34;_test.go&#34;) or &#34;.&#34; are ignored.
+These additional files can contain test functions, benchmark functions, and
+example functions. See &#39;go help testfunc&#39; for more.
+Each listed package causes the execution of a separate test binary.
+</p>
+<p>
+Test files that declare a package with the suffix &#34;_test&#34; will be compiled as a
+separate package, and then linked and run with the main test binary.
+</p>
+<p>
+The go tool will ignore a directory named &#34;testdata&#34;, making it available
+to hold ancillary data needed by the tests.
+</p>
+<p>
+By default, go test needs no arguments. It compiles and tests the package
+with source in the current directory, including tests, and runs the tests.
+</p>
+<p>
+The package is built in a temporary directory so it does not interfere with the
+non-test installation.
+</p>
+<p>
+In addition to the build flags, the flags handled by &#39;go test&#39; itself are:
+</p>
+<pre>-args
+ Pass the remainder of the command line (everything after -args)
+ to the test binary, uninterpreted and unchanged.
+ Because this flag consumes the remainder of the command line,
+ the package list (if present) must appear before this flag.
+
+-c
+ Compile the test binary to pkg.test but do not run it
+ (where pkg is the last element of the package&#39;s import path).
+ The file name can be changed with the -o flag.
+
+-exec xprog
+ Run the test binary using xprog. The behavior is the same as
+ in &#39;go run&#39;. See &#39;go help run&#39; for details.
+
+-i
+ Install packages that are dependencies of the test.
+ Do not run the test.
+
+-o file
+ Compile the test binary to the named file.
+ The test still runs (unless -c or -i is specified).
+</pre>
+<p>
+The test binary also accepts flags that control execution of the test; these
+flags are also accessible by &#39;go test&#39;. See &#39;go help testflag&#39; for details.
+</p>
+<p>
+For more about build flags, see &#39;go help build&#39;.
+For more about specifying packages, see &#39;go help packages&#39;.
+</p>
+<p>
+See also: go build, go vet.
+</p>
+<h3 id="hdr-Run_specified_go_tool">Run specified go tool</h3>
+<p>
+Usage:
+</p>
+<pre>go tool [-n] command [args...]
+</pre>
+<p>
+Tool runs the go tool command identified by the arguments.
+With no arguments it prints the list of known tools.
+</p>
+<p>
+The -n flag causes tool to print the command that would be
+executed but not execute it.
+</p>
+<p>
+For more about each tool command, see &#39;go tool command -h&#39;.
+</p>
+<h3 id="hdr-Print_Go_version">Print Go version</h3>
+<p>
+Usage:
+</p>
+<pre>go version
+</pre>
+<p>
+Version prints the Go version, as reported by runtime.Version.
+</p>
+<h3 id="hdr-Run_go_tool_vet_on_packages">Run go tool vet on packages</h3>
+<p>
+Usage:
+</p>
+<pre>go vet [-n] [-x] [build flags] [packages]
+</pre>
+<p>
+Vet runs the Go vet command on the packages named by the import paths.
+</p>
+<p>
+For more about vet, see &#39;go doc cmd/vet&#39;.
+For more about specifying packages, see &#39;go help packages&#39;.
+</p>
+<p>
+To run the vet tool with specific options, run &#39;go tool vet&#39;.
+</p>
+<p>
+The -n flag prints commands that would be executed.
+The -x flag prints commands as they are executed.
+</p>
+<p>
+For more about build flags, see &#39;go help build&#39;.
+</p>
+<p>
+See also: go fmt, go fix.
+</p>
+<h3 id="hdr-Calling_between_Go_and_C">Calling between Go and C</h3>
+<p>
+There are two different ways to call between Go and C/C++ code.
+</p>
+<p>
+The first is the cgo tool, which is part of the Go distribution. For
+information on how to use it see the cgo documentation (go doc cmd/cgo).
+</p>
+<p>
+The second is the SWIG program, which is a general tool for
+interfacing between languages. For information on SWIG see
+<a href="http://swig.org/">http://swig.org/</a>. When running go build, any file with a .swig
+extension will be passed to SWIG. Any file with a .swigcxx extension
+will be passed to SWIG with the -c++ option.
+</p>
+<p>
+When either cgo or SWIG is used, go build will pass any .c, .m, .s,
+or .S files to the C compiler, and any .cc, .cpp, .cxx files to the C++
+compiler. The CC or CXX environment variables may be set to determine
+the C or C++ compiler, respectively, to use.
+</p>
+<h3 id="hdr-Description_of_build_modes">Description of build modes</h3>
+<p>
+The &#39;go build&#39; and &#39;go install&#39; commands take a -buildmode argument which
+indicates which kind of object file is to be built. Currently supported values
+are:
+</p>
+<pre>-buildmode=archive
+ Build the listed non-main packages into .a files. Packages named
+ main are ignored.
+
+-buildmode=c-archive
+ Build the listed main package, plus all packages it imports,
+ into a C archive file. The only callable symbols will be those
+ functions exported using a cgo //export comment. Requires
+ exactly one main package to be listed.
+
+-buildmode=c-shared
+ Build the listed main packages, plus all packages that they
+ import, into C shared libraries. The only callable symbols will
+ be those functions exported using a cgo //export comment.
+ Non-main packages are ignored.
+
+-buildmode=default
+ Listed main packages are built into executables and listed
+ non-main packages are built into .a files (the default
+ behavior).
+
+-buildmode=shared
+ Combine all the listed non-main packages into a single shared
+ library that will be used when building with the -linkshared
+ option. Packages named main are ignored.
+
+-buildmode=exe
+ Build the listed main packages and everything they import into
+ executables. Packages not named main are ignored.
+
+-buildmode=pie
+ Build the listed main packages and everything they import into
+ position independent executables (PIE). Packages not named
+ main are ignored.
+
+-buildmode=plugin
+ Build the listed main packages, plus all packages that they
+ import, into a Go plugin. Packages not named main are ignored.
+</pre>
+<h3 id="hdr-File_types">File types</h3>
+<p>
+The go command examines the contents of a restricted set of files
+in each directory. It identifies which files to examine based on
+the extension of the file name. These extensions are:
+</p>
+<pre>.go
+ Go source files.
+.c, .h
+ C source files.
+ If the package uses cgo or SWIG, these will be compiled with the
+ OS-native compiler (typically gcc); otherwise they will
+ trigger an error.
+.cc, .cpp, .cxx, .hh, .hpp, .hxx
+ C++ source files. Only useful with cgo or SWIG, and always
+ compiled with the OS-native compiler.
+.m
+ Objective-C source files. Only useful with cgo, and always
+ compiled with the OS-native compiler.
+.s, .S
+ Assembler source files.
+ If the package uses cgo or SWIG, these will be assembled with the
+ OS-native assembler (typically gcc (sic)); otherwise they
+ will be assembled with the Go assembler.
+.swig, .swigcxx
+ SWIG definition files.
+.syso
+ System object files.
+</pre>
+<p>
+Files of each of these types except .syso may contain build
+constraints, but the go command stops scanning for build constraints
+at the first item in the file that is not a blank line or //-style
+line comment. See the go/build package documentation for
+more details.
+</p>
+<p>
+Non-test Go source files can also include a //go:binary-only-package
+comment, indicating that the package sources are included
+for documentation only and must not be used to build the
+package binary. This enables distribution of Go packages in
+their compiled form alone. See the go/build package documentation
+for more details.
+</p>
+<h3 id="hdr-GOPATH_environment_variable">GOPATH environment variable</h3>
+<p>
+The Go path is used to resolve import statements.
+It is implemented by and documented in the go/build package.
+</p>
+<p>
+The GOPATH environment variable lists places to look for Go code.
+On Unix, the value is a colon-separated string.
+On Windows, the value is a semicolon-separated string.
+On Plan 9, the value is a list.
+</p>
+<p>
+If the environment variable is unset, GOPATH defaults
+to a subdirectory named &#34;go&#34; in the user&#39;s home directory
+($HOME/go on Unix, %USERPROFILE%\go on Windows),
+unless that directory holds a Go distribution.
+Run &#34;go env GOPATH&#34; to see the current GOPATH.
+</p>
+<p>
+See <a href="https://golang.org/wiki/SettingGOPATH">https://golang.org/wiki/SettingGOPATH</a> to set a custom GOPATH.
+</p>
+<p>
+Each directory listed in GOPATH must have a prescribed structure:
+</p>
+<p>
+The src directory holds source code. The path below src
+determines the import path or executable name.
+</p>
+<p>
+The pkg directory holds installed package objects.
+As in the Go tree, each target operating system and
+architecture pair has its own subdirectory of pkg
+(pkg/GOOS_GOARCH).
+</p>
+<p>
+If DIR is a directory listed in the GOPATH, a package with
+source in DIR/src/foo/bar can be imported as &#34;foo/bar&#34; and
+has its compiled form installed to &#34;DIR/pkg/GOOS_GOARCH/foo/bar.a&#34;.
+</p>
+<p>
+The bin directory holds compiled commands.
+Each command is named for its source directory, but only
+the final element, not the entire path. That is, the
+command with source in DIR/src/foo/quux is installed into
+DIR/bin/quux, not DIR/bin/foo/quux. The &#34;foo/&#34; prefix is stripped
+so that you can add DIR/bin to your PATH to get at the
+installed commands. If the GOBIN environment variable is
+set, commands are installed to the directory it names instead
+of DIR/bin. GOBIN must be an absolute path.
+</p>
+<p>
+Here&#39;s an example directory layout:
+</p>
+<pre>GOPATH=/home/user/go
+
+/home/user/go/
+ src/
+ foo/
+ bar/ (go code in package bar)
+ x.go
+ quux/ (go code in package main)
+ y.go
+ bin/
+ quux (installed command)
+ pkg/
+ linux_amd64/
+ foo/
+ bar.a (installed package object)
+</pre>
+<p>
+Go searches each directory listed in GOPATH to find source code,
+but new packages are always downloaded into the first directory
+in the list.
+</p>
+<p>
+See <a href="https://golang.org/doc/code.html">https://golang.org/doc/code.html</a> for an example.
+</p>
+<h3 id="hdr-Internal_Directories">Internal Directories</h3>
+<p>
+Code in or below a directory named &#34;internal&#34; is importable only
+by code in the directory tree rooted at the parent of &#34;internal&#34;.
+Here&#39;s an extended version of the directory layout above:
+</p>
+<pre>/home/user/go/
+ src/
+ crash/
+ bang/ (go code in package bang)
+ b.go
+ foo/ (go code in package foo)
+ f.go
+ bar/ (go code in package bar)
+ x.go
+ internal/
+ baz/ (go code in package baz)
+ z.go
+ quux/ (go code in package main)
+ y.go
+</pre>
+<p>
+The code in z.go is imported as &#34;foo/internal/baz&#34;, but that
+import statement can only appear in source files in the subtree
+rooted at foo. The source files foo/f.go, foo/bar/x.go, and
+foo/quux/y.go can all import &#34;foo/internal/baz&#34;, but the source file
+crash/bang/b.go cannot.
+</p>
+<p>
+See <a href="https://golang.org/s/go14internal">https://golang.org/s/go14internal</a> for details.
+</p>
+<h3 id="hdr-Vendor_Directories">Vendor Directories</h3>
+<p>
+Go 1.6 includes support for using local copies of external dependencies
+to satisfy imports of those dependencies, often referred to as vendoring.
+</p>
+<p>
+Code below a directory named &#34;vendor&#34; is importable only
+by code in the directory tree rooted at the parent of &#34;vendor&#34;,
+and only using an import path that omits the prefix up to and
+including the vendor element.
+</p>
+<p>
+Here&#39;s the example from the previous section,
+but with the &#34;internal&#34; directory renamed to &#34;vendor&#34;
+and a new foo/vendor/crash/bang directory added:
+</p>
+<pre>/home/user/go/
+ src/
+ crash/
+ bang/ (go code in package bang)
+ b.go
+ foo/ (go code in package foo)
+ f.go
+ bar/ (go code in package bar)
+ x.go
+ vendor/
+ crash/
+ bang/ (go code in package bang)
+ b.go
+ baz/ (go code in package baz)
+ z.go
+ quux/ (go code in package main)
+ y.go
+</pre>
+<p>
+The same visibility rules apply as for internal, but the code
+in z.go is imported as &#34;baz&#34;, not as &#34;foo/vendor/baz&#34;.
+</p>
+<p>
+Code in vendor directories deeper in the source tree shadows
+code in higher directories. Within the subtree rooted at foo, an import
+of &#34;crash/bang&#34; resolves to &#34;foo/vendor/crash/bang&#34;, not the
+top-level &#34;crash/bang&#34;.
+</p>
+<p>
+Code in vendor directories is not subject to import path
+checking (see &#39;go help importpath&#39;).
+</p>
+<p>
+When &#39;go get&#39; checks out or updates a git repository, it now also
+updates submodules.
+</p>
+<p>
+Vendor directories do not affect the placement of new repositories
+being checked out for the first time by &#39;go get&#39;: those are always
+placed in the main GOPATH, never in a vendor subtree.
+</p>
+<p>
+See <a href="https://golang.org/s/go15vendor">https://golang.org/s/go15vendor</a> for details.
+</p>
+<h3 id="hdr-Environment_variables">Environment variables</h3>
+<p>
+The go command, and the tools it invokes, examine a few different
+environment variables. For many of these, you can see the default
+value on your system by running &#39;go env NAME&#39;, where NAME is the
+name of the variable.
+</p>
+<p>
+General-purpose environment variables:
+</p>
+<pre>GCCGO
+ The gccgo command to run for &#39;go build -compiler=gccgo&#39;.
+GOARCH
+ The architecture, or processor, for which to compile code.
+ Examples are amd64, 386, arm, ppc64.
+GOBIN
+ The directory where &#39;go install&#39; will install a command.
+GOOS
+ The operating system for which to compile code.
+ Examples are linux, darwin, windows, netbsd.
+GOPATH
+ For more details see: &#39;go help gopath&#39;.
+GORACE
+ Options for the race detector.
+ See <a href="https://golang.org/doc/articles/race_detector.html">https://golang.org/doc/articles/race_detector.html</a>.
+GOROOT
+ The root of the go tree.
+</pre>
+<p>
+Environment variables for use with cgo:
+</p>
+<pre>CC
+ The command to use to compile C code.
+CGO_ENABLED
+ Whether the cgo command is supported. Either 0 or 1.
+CGO_CFLAGS
+ Flags that cgo will pass to the compiler when compiling
+ C code.
+CGO_CPPFLAGS
+ Flags that cgo will pass to the compiler when compiling
+ C or C++ code.
+CGO_CXXFLAGS
+ Flags that cgo will pass to the compiler when compiling
+ C++ code.
+CGO_FFLAGS
+ Flags that cgo will pass to the compiler when compiling
+ Fortran code.
+CGO_LDFLAGS
+ Flags that cgo will pass to the compiler when linking.
+CXX
+ The command to use to compile C++ code.
+PKG_CONFIG
+ Path to pkg-config tool.
+</pre>
+<p>
+Architecture-specific environment variables:
+</p>
+<pre>GOARM
+ For GOARCH=arm, the ARM architecture for which to compile.
+ Valid values are 5, 6, 7.
+GO386
+ For GOARCH=386, the floating point instruction set.
+ Valid values are 387, sse2.
+</pre>
+<p>
+Special-purpose environment variables:
+</p>
+<pre>GOROOT_FINAL
+ The root of the installed Go tree, when it is
+ installed in a location other than where it is built.
+ File names in stack traces are rewritten from GOROOT to
+ GOROOT_FINAL.
+GO_EXTLINK_ENABLED
+ Whether the linker should use external linking mode
+ when using -linkmode=auto with code that uses cgo.
+ Set to 0 to disable external linking mode, 1 to enable it.
+GIT_ALLOW_PROTOCOL
+ Defined by Git. A colon-separated list of schemes that are allowed to be used
+ with git fetch/clone. If set, any scheme not explicitly mentioned will be
+ considered insecure by &#39;go get&#39;.
+</pre>
+<h3 id="hdr-Import_path_syntax">Import path syntax</h3>
+<p>
+An import path (see &#39;go help packages&#39;) denotes a package stored in the local
+file system. In general, an import path denotes either a standard package (such
+as &#34;unicode/utf8&#34;) or a package found in one of the work spaces (For more
+details see: &#39;go help gopath&#39;).
+</p>
+<h3 id="hdr-Relative_import_paths">Relative import paths</h3>
+<p>
+An import path beginning with ./ or ../ is called a relative path.
+The toolchain supports relative import paths as a shortcut in two ways.
+</p>
+<p>
+First, a relative path can be used as a shorthand on the command line.
+If you are working in the directory containing the code imported as
+&#34;unicode&#34; and want to run the tests for &#34;unicode/utf8&#34;, you can type
+&#34;go test ./utf8&#34; instead of needing to specify the full path.
+Similarly, in the reverse situation, &#34;go test ..&#34; will test &#34;unicode&#34; from
+the &#34;unicode/utf8&#34; directory. Relative patterns are also allowed, like
+&#34;go test ./...&#34; to test all subdirectories. See &#39;go help packages&#39; for details
+on the pattern syntax.
+</p>
+<p>
+Second, if you are compiling a Go program not in a work space,
+you can use a relative path in an import statement in that program
+to refer to nearby code also not in a work space.
+This makes it easy to experiment with small multipackage programs
+outside of the usual work spaces, but such programs cannot be
+installed with &#34;go install&#34; (there is no work space in which to install them),
+so they are rebuilt from scratch each time they are built.
+To avoid ambiguity, Go programs cannot use relative import paths
+within a work space.
+</p>
+<h3 id="hdr-Remote_import_paths">Remote import paths</h3>
+<p>
+Certain import paths also
+describe how to obtain the source code for the package using
+a revision control system.
+</p>
+<p>
+A few common code hosting sites have special syntax:
+</p>
+<pre>Bitbucket (Git, Mercurial)
+
+ import &#34;bitbucket.org/user/project&#34;
+ import &#34;bitbucket.org/user/project/sub/directory&#34;
+
+GitHub (Git)
+
+ import &#34;github.com/user/project&#34;
+ import &#34;github.com/user/project/sub/directory&#34;
+
+Launchpad (Bazaar)
+
+ import &#34;launchpad.net/project&#34;
+ import &#34;launchpad.net/project/series&#34;
+ import &#34;launchpad.net/project/series/sub/directory&#34;
+
+ import &#34;launchpad.net/~user/project/branch&#34;
+ import &#34;launchpad.net/~user/project/branch/sub/directory&#34;
+
+IBM DevOps Services (Git)
+
+ import &#34;hub.jazz.net/git/user/project&#34;
+ import &#34;hub.jazz.net/git/user/project/sub/directory&#34;
+</pre>
+<p>
+For code hosted on other servers, import paths may either be qualified
+with the version control type, or the go tool can dynamically fetch
+the import path over https/http and discover where the code resides
+from a &lt;meta&gt; tag in the HTML.
+</p>
+<p>
+To declare the code location, an import path of the form
+</p>
+<pre>repository.vcs/path
+</pre>
+<p>
+specifies the given repository, with or without the .vcs suffix,
+using the named version control system, and then the path inside
+that repository. The supported version control systems are:
+</p>
+<pre>Bazaar .bzr
+Git .git
+Mercurial .hg
+Subversion .svn
+</pre>
+<p>
+For example,
+</p>
+<pre>import &#34;example.org/user/foo.hg&#34;
+</pre>
+<p>
+denotes the root directory of the Mercurial repository at
+example.org/user/foo or foo.hg, and
+</p>
+<pre>import &#34;example.org/repo.git/foo/bar&#34;
+</pre>
+<p>
+denotes the foo/bar directory of the Git repository at
+example.org/repo or repo.git.
+</p>
+<p>
+When a version control system supports multiple protocols,
+each is tried in turn when downloading. For example, a Git
+download tries https://, then git+ssh://.
+</p>
+<p>
+By default, downloads are restricted to known secure protocols
+(e.g. https, ssh). To override this setting for Git downloads, the
+GIT_ALLOW_PROTOCOL environment variable can be set (For more details see:
+&#39;go help environment&#39;).
+</p>
+<p>
+If the import path is not a known code hosting site and also lacks a
+version control qualifier, the go tool attempts to fetch the import
+over https/http and looks for a &lt;meta&gt; tag in the document&#39;s HTML
+&lt;head&gt;.
+</p>
+<p>
+The meta tag has the form:
+</p>
+<pre>&lt;meta name=&#34;go-import&#34; content=&#34;import-prefix vcs repo-root&#34;&gt;
+</pre>
+<p>
+The import-prefix is the import path corresponding to the repository
+root. It must be a prefix or an exact match of the package being
+fetched with &#34;go get&#34;. If it&#39;s not an exact match, another http
+request is made at the prefix to verify the &lt;meta&gt; tags match.
+</p>
+<p>
+The meta tag should appear as early in the file as possible.
+In particular, it should appear before any raw JavaScript or CSS,
+to avoid confusing the go command&#39;s restricted parser.
+</p>
+<p>
+The vcs is one of &#34;git&#34;, &#34;hg&#34;, &#34;svn&#34;, etc.
+</p>
+<p>
+The repo-root is the root of the version control system
+containing a scheme and not containing a .vcs qualifier.
+</p>
+<p>
+For example,
+</p>
+<pre>import &#34;example.org/pkg/foo&#34;
+</pre>
+<p>
+will result in the following requests:
+</p>
+<pre><a href="https://example.org/pkg/foo?go-get=1">https://example.org/pkg/foo?go-get=1</a> (preferred)
+<a href="http://example.org/pkg/foo?go-get=1">http://example.org/pkg/foo?go-get=1</a> (fallback, only with -insecure)
+</pre>
+<p>
+If that page contains the meta tag
+</p>
+<pre>&lt;meta name=&#34;go-import&#34; content=&#34;example.org git <a href="https://code.org/r/p/exproj">https://code.org/r/p/exproj</a>&#34;&gt;
+</pre>
+<p>
+the go tool will verify that <a href="https://example.org/?go-get=1">https://example.org/?go-get=1</a> contains the
+same meta tag and then git clone <a href="https://code.org/r/p/exproj">https://code.org/r/p/exproj</a> into
+GOPATH/src/example.org.
+</p>
+<p>
+New downloaded packages are written to the first directory listed in the GOPATH
+environment variable (For more details see: &#39;go help gopath&#39;).
+</p>
+<p>
+The go command attempts to download the version of the
+package appropriate for the Go release being used.
+Run &#39;go help get&#39; for more.
+</p>
+<h3 id="hdr-Import_path_checking">Import path checking</h3>
+<p>
+When the custom import path feature described above redirects to a
+known code hosting site, each of the resulting packages has two possible
+import paths, using the custom domain or the known hosting site.
+</p>
+<p>
+A package statement is said to have an &#34;import comment&#34; if it is immediately
+followed (before the next newline) by a comment of one of these two forms:
+</p>
+<pre>package math // import &#34;path&#34;
+package math /* import &#34;path&#34; */
+</pre>
+<p>
+The go command will refuse to install a package with an import comment
+unless it is being referred to by that import path. In this way, import comments
+let package authors make sure the custom import path is used and not a
+direct path to the underlying code hosting site.
+</p>
+<p>
+Import path checking is disabled for code found within vendor trees.
+This makes it possible to copy code into alternate locations in vendor trees
+without needing to update import comments.
+</p>
+<p>
+See <a href="https://golang.org/s/go14customimport">https://golang.org/s/go14customimport</a> for details.
+</p>
+<h3 id="hdr-Description_of_package_lists">Description of package lists</h3>
+<p>
+Many commands apply to a set of packages:
+</p>
+<pre>go action [packages]
+</pre>
+<p>
+Usually, [packages] is a list of import paths.
+</p>
+<p>
+An import path that is a rooted path or that begins with
+a . or .. element is interpreted as a file system path and
+denotes the package in that directory.
+</p>
+<p>
+Otherwise, the import path P denotes the package found in
+the directory DIR/src/P for some DIR listed in the GOPATH
+environment variable (For more details see: &#39;go help gopath&#39;).
+</p>
+<p>
+If no import paths are given, the action applies to the
+package in the current directory.
+</p>
+<p>
+There are four reserved names for paths that should not be used
+for packages to be built with the go tool:
+</p>
+<p>
+- &#34;main&#34; denotes the top-level package in a stand-alone executable.
+</p>
+<p>
+- &#34;all&#34; expands to all package directories found in all the GOPATH
+trees. For example, &#39;go list all&#39; lists all the packages on the local
+system.
+</p>
+<p>
+- &#34;std&#34; is like all but expands to just the packages in the standard
+Go library.
+</p>
+<p>
+- &#34;cmd&#34; expands to the Go repository&#39;s commands and their
+internal libraries.
+</p>
+<p>
+Import paths beginning with &#34;cmd/&#34; only match source code in
+the Go repository.
+</p>
+<p>
+An import path is a pattern if it includes one or more &#34;...&#34; wildcards,
+each of which can match any string, including the empty string and
+strings containing slashes. Such a pattern expands to all package
+directories found in the GOPATH trees with names matching the
+patterns. As a special case, x/... matches x as well as x&#39;s subdirectories.
+For example, net/... expands to net and packages in its subdirectories.
+</p>
+<p>
+An import path can also name a package to be downloaded from
+a remote repository. Run &#39;go help importpath&#39; for details.
+</p>
+<p>
+Every package in a program must have a unique import path.
+By convention, this is arranged by starting each path with a
+unique prefix that belongs to you. For example, paths used
+internally at Google all begin with &#39;google&#39;, and paths
+denoting remote repositories begin with the path to the code,
+such as &#39;github.com/user/repo&#39;.
+</p>
+<p>
+Packages in a program need not have unique package names,
+but there are two reserved package names with special meaning.
+The name main indicates a command, not a library.
+Commands are built into binaries and cannot be imported.
+The name documentation indicates documentation for
+a non-Go program in the directory. Files in package documentation
+are ignored by the go command.
+</p>
+<p>
+As a special case, if the package list is a list of .go files from a
+single directory, the command is applied to a single synthesized
+package made up of exactly those files, ignoring any build constraints
+in those files and ignoring any other files in the directory.
+</p>
+<p>
+Directory and file names that begin with &#34;.&#34; or &#34;_&#34; are ignored
+by the go tool, as are directories named &#34;testdata&#34;.
+</p>
+<h3 id="hdr-Description_of_testing_flags">Description of testing flags</h3>
+<p>
+The &#39;go test&#39; command takes both flags that apply to &#39;go test&#39; itself
+and flags that apply to the resulting test binary.
+</p>
+<p>
+Several of the flags control profiling and write an execution profile
+suitable for &#34;go tool pprof&#34;; run &#34;go tool pprof -h&#34; for more
+information. The --alloc_space, --alloc_objects, and --show_bytes
+options of pprof control how the information is presented.
+</p>
+<p>
+The following flags are recognized by the &#39;go test&#39; command and
+control the execution of any test:
+</p>
+<pre>-bench regexp
+ Run (sub)benchmarks matching a regular expression.
+ The given regular expression is split into smaller ones by
+ top-level &#39;/&#39;, where each must match the corresponding part of a
+ benchmark&#39;s identifier.
+ By default, no benchmarks run. To run all benchmarks,
+ use &#39;-bench .&#39; or &#39;-bench=.&#39;.
+
+-benchtime t
+ Run enough iterations of each benchmark to take t, specified
+ as a time.Duration (for example, -benchtime 1h30s).
+ The default is 1 second (1s).
+
+-count n
+ Run each test and benchmark n times (default 1).
+ If -cpu is set, run n times for each GOMAXPROCS value.
+ Examples are always run once.
+
+-cover
+ Enable coverage analysis.
+
+-covermode set,count,atomic
+ Set the mode for coverage analysis for the package[s]
+ being tested. The default is &#34;set&#34; unless -race is enabled,
+ in which case it is &#34;atomic&#34;.
+ The values:
+ set: bool: does this statement run?
+ count: int: how many times does this statement run?
+ atomic: int: count, but correct in multithreaded tests;
+ significantly more expensive.
+ Sets -cover.
+
+-coverpkg pkg1,pkg2,pkg3
+ Apply coverage analysis in each test to the given list of packages.
+ The default is for each test to analyze only the package being tested.
+ Packages are specified as import paths.
+ Sets -cover.
+
+-cpu 1,2,4
+ Specify a list of GOMAXPROCS values for which the tests or
+ benchmarks should be executed. The default is the current value
+ of GOMAXPROCS.
+
+-parallel n
+ Allow parallel execution of test functions that call t.Parallel.
+ The value of this flag is the maximum number of tests to run
+ simultaneously; by default, it is set to the value of GOMAXPROCS.
+ Note that -parallel only applies within a single test binary.
+ The &#39;go test&#39; command may run tests for different packages
+ in parallel as well, according to the setting of the -p flag
+ (see &#39;go help build&#39;).
+
+-run regexp
+ Run only those tests and examples matching the regular expression.
+ For tests the regular expression is split into smaller ones by
+ top-level &#39;/&#39;, where each must match the corresponding part of a
+ test&#39;s identifier.
+
+-short
+ Tell long-running tests to shorten their run time.
+ It is off by default but set during all.bash so that installing
+ the Go tree can run a sanity check but not spend time running
+ exhaustive tests.
+
+-timeout t
+ If a test runs longer than t, panic.
+ The default is 10 minutes (10m).
+
+-v
+ Verbose output: log all tests as they are run. Also print all
+ text from Log and Logf calls even if the test succeeds.
+</pre>
+<p>
+The following flags are also recognized by &#39;go test&#39; and can be used to
+profile the tests during execution:
+</p>
+<pre>-benchmem
+ Print memory allocation statistics for benchmarks.
+
+-blockprofile block.out
+ Write a goroutine blocking profile to the specified file
+ when all tests are complete.
+ Writes test binary as -c would.
+
+-blockprofilerate n
+ Control the detail provided in goroutine blocking profiles by
+ calling runtime.SetBlockProfileRate with n.
+ See &#39;go doc runtime.SetBlockProfileRate&#39;.
+ The profiler aims to sample, on average, one blocking event every
+ n nanoseconds the program spends blocked. By default,
+ if -test.blockprofile is set without this flag, all blocking events
+ are recorded, equivalent to -test.blockprofilerate=1.
+
+-coverprofile cover.out
+ Write a coverage profile to the file after all tests have passed.
+ Sets -cover.
+
+-cpuprofile cpu.out
+ Write a CPU profile to the specified file before exiting.
+ Writes test binary as -c would.
+
+-memprofile mem.out
+ Write a memory profile to the file after all tests have passed.
+ Writes test binary as -c would.
+
+-memprofilerate n
+ Enable more precise (and expensive) memory profiles by setting
+ runtime.MemProfileRate. See &#39;go doc runtime.MemProfileRate&#39;.
+ To profile all memory allocations, use -test.memprofilerate=1
+ and pass --alloc_space flag to the pprof tool.
+
+-mutexprofile mutex.out
+ Write a mutex contention profile to the specified file
+ when all tests are complete.
+ Writes test binary as -c would.
+
+-mutexprofilefraction n
+ Sample 1 in n stack traces of goroutines holding a
+ contended mutex.
+
+-outputdir directory
+ Place output files from profiling in the specified directory,
+ by default the directory in which &#34;go test&#34; is running.
+
+-trace trace.out
+ Write an execution trace to the specified file before exiting.
+</pre>
+<p>
+Each of these flags is also recognized with an optional &#39;test.&#39; prefix,
+as in -test.v. When invoking the generated test binary (the result of
+&#39;go test -c&#39;) directly, however, the prefix is mandatory.
+</p>
+<p>
+The &#39;go test&#39; command rewrites or removes recognized flags,
+as appropriate, both before and after the optional package list,
+before invoking the test binary.
+</p>
+<p>
+For instance, the command
+</p>
+<pre>go test -v -myflag testdata -cpuprofile=prof.out -x
+</pre>
+<p>
+will compile the test binary and then run it as
+</p>
+<pre>pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out
+</pre>
+<p>
+(The -x flag is removed because it applies only to the go command&#39;s
+execution, not to the test itself.)
+</p>
+<p>
+The test flags that generate profiles (other than for coverage) also
+leave the test binary in pkg.test for use when analyzing the profiles.
+</p>
+<p>
+When &#39;go test&#39; runs a test binary, it does so from within the
+corresponding package&#39;s source code directory. Depending on the test,
+it may be necessary to do the same when invoking a generated test
+binary directly.
+</p>
+<p>
+The command-line package list, if present, must appear before any
+flag not known to the go test command. Continuing the example above,
+the package list would have to appear before -myflag, but could appear
+on either side of -v.
+</p>
+<p>
+To keep an argument for a test binary from being interpreted as a
+known flag or a package name, use -args (see &#39;go help test&#39;) which
+passes the remainder of the command line through to the test binary
+uninterpreted and unaltered.
+</p>
+<p>
+For instance, the command
+</p>
+<pre>go test -v -args -x -v
+</pre>
+<p>
+will compile the test binary and then run it as
+</p>
+<pre>pkg.test -test.v -x -v
+</pre>
+<p>
+Similarly,
+</p>
+<pre>go test -args math
+</pre>
+<p>
+will compile the test binary and then run it as
+</p>
+<pre>pkg.test math
+</pre>
+<p>
+In the first example, the -x and the second -v are passed through to the
+test binary unchanged and with no effect on the go command itself.
+In the second example, the argument math is passed through to the test
+binary, instead of being interpreted as the package list.
+</p>
+<h3 id="hdr-Description_of_testing_functions">Description of testing functions</h3>
+<p>
+The &#39;go test&#39; command expects to find test, benchmark, and example functions
+in the &#34;*_test.go&#34; files corresponding to the package under test.
+</p>
+<p>
+A test function is one named TestXXX (where XXX is any alphanumeric string
+not starting with a lower case letter) and should have the signature,
+</p>
+<pre>func TestXXX(t *testing.T) { ... }
+</pre>
+<p>
+A benchmark function is one named BenchmarkXXX and should have the signature,
+</p>
+<pre>func BenchmarkXXX(b *testing.B) { ... }
+</pre>
+<p>
+An example function is similar to a test function but, instead of using
+*testing.T to report success or failure, prints output to os.Stdout.
+If the last comment in the function starts with &#34;Output:&#34; then the output
+is compared exactly against the comment (see examples below). If the last
+comment begins with &#34;Unordered output:&#34; then the output is compared to the
+comment, however the order of the lines is ignored. An example with no such
+comment is compiled but not executed. An example with no text after
+&#34;Output:&#34; is compiled, executed, and expected to produce no output.
+</p>
+<p>
+Godoc displays the body of ExampleXXX to demonstrate the use
+of the function, constant, or variable XXX. An example of a method M with
+receiver type T or *T is named ExampleT_M. There may be multiple examples
+for a given function, constant, or variable, distinguished by a trailing _xxx,
+where xxx is a suffix not beginning with an upper case letter.
+</p>
+<p>
+Here is an example of an example:
+</p>
+<pre>func ExamplePrintln() {
+ Println(&#34;The output of\nthis example.&#34;)
+ // Output: The output of
+ // this example.
+}
+</pre>
+<p>
+Here is another example where the ordering of the output is ignored:
+</p>
+<pre>func ExamplePerm() {
+ for _, value := range Perm(4) {
+ fmt.Println(value)
+ }
+
+ // Unordered output: 4
+ // 2
+ // 1
+ // 3
+ // 0
+}
+</pre>
+<p>
+The entire test file is presented as the example when it contains a single
+example function, at least one other function, type, variable, or constant
+declaration, and no test or benchmark functions.
+</p>
+<p>
+See the documentation of the testing package for more information.
+</p>
+
+<div id="footer">
+Build version go1.8.<br>
+Except as <a href="https://developers.google.com/site-policies#restrictions">noted</a>,
+the content of this page is licensed under the
+Creative Commons Attribution 3.0 License,
+and code is licensed under a <a href="/LICENSE">BSD license</a>.<br>
+<a href="/doc/tos.html">Terms of Service</a> |
+<a href="http://www.google.com/intl/en/policies/privacy/">Privacy Policy</a>
+</div>
+
+</div><!-- .container -->
+</div><!-- #page -->
+
+<!-- TODO(adonovan): load these from <head> using "defer" attribute? -->
+<script type="text/javascript" src="/serverpush/static/jquery.min.js?{{.CacheBust}}"></script>
+<script type="text/javascript" src="/serverpush/static/playground.js?{{.CacheBust}}"></script>
+<script>var goVersion = "go1.8";</script>
+<script type="text/javascript" src="/serverpush/static/godocs.js?{{.CacheBust}}"></script>
+</body>
+</html>
+`))
diff --git a/vendor/golang.org/x/net/http2/h2i/README.md b/vendor/golang.org/x/net/http2/h2i/README.md
new file mode 100644
index 0000000..fb5c5ef
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2i/README.md
@@ -0,0 +1,97 @@
+# h2i
+
+**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol'
+days of telnetting to your HTTP/1.n servers? We're bringing you
+back.
+
+Features:
+- send raw HTTP/2 frames
+ - PING
+ - SETTINGS
+ - HEADERS
+ - etc
+- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2
+- pretty print all received HTTP/2 frames from the peer (including HPACK decoding)
+- tab completion of commands, options
+
+Not yet features, but soon:
+- unnecessary CONTINUATION frames on short boundaries, to test peer implementations
+- request bodies (DATA frames)
+- send invalid frames for testing server implementations (supported by underlying Framer)
+
+Later:
+- act like a server
+
+## Installation
+
+```
+$ go get golang.org/x/net/http2/h2i
+$ h2i <host>
+```
+
+## Demo
+
+```
+$ h2i
+Usage: h2i <hostname>
+
+ -insecure
+ Whether to skip TLS cert validation
+ -nextproto string
+ Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14")
+
+$ h2i google.com
+Connecting to google.com:443 ...
+Connected to 74.125.224.41:443
+Negotiated protocol "h2-14"
+[FrameHeader SETTINGS len=18]
+ [MAX_CONCURRENT_STREAMS = 100]
+ [INITIAL_WINDOW_SIZE = 1048576]
+ [MAX_FRAME_SIZE = 16384]
+[FrameHeader WINDOW_UPDATE len=4]
+ Window-Increment = 983041
+
+h2i> PING h2iSayHI
+[FrameHeader PING flags=ACK len=8]
+ Data = "h2iSayHI"
+h2i> headers
+(as HTTP/1.1)> GET / HTTP/1.1
+(as HTTP/1.1)> Host: ip.appspot.com
+(as HTTP/1.1)> User-Agent: h2i/brad-n-blake
+(as HTTP/1.1)>
+Opening Stream-ID 1:
+ :authority = ip.appspot.com
+ :method = GET
+ :path = /
+ :scheme = https
+ user-agent = h2i/brad-n-blake
+[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77]
+ :status = "200"
+ alternate-protocol = "443:quic,p=1"
+ content-length = "15"
+ content-type = "text/html"
+ date = "Fri, 01 May 2015 23:06:56 GMT"
+ server = "Google Frontend"
+[FrameHeader DATA flags=END_STREAM stream=1 len=15]
+ "173.164.155.78\n"
+[FrameHeader PING len=8]
+ Data = "\x00\x00\x00\x00\x00\x00\x00\x00"
+h2i> ping
+[FrameHeader PING flags=ACK len=8]
+ Data = "h2i_ping"
+h2i> ping
+[FrameHeader PING flags=ACK len=8]
+ Data = "h2i_ping"
+h2i> ping
+[FrameHeader GOAWAY len=22]
+ Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1)
+
+ReadFrame: EOF
+```
+
+## Status
+
+Quick few hour hack. So much yet to do. Feel free to file issues for
+bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/)
+and I aren't yet accepting pull requests until things settle down.
+
diff --git a/vendor/golang.org/x/net/http2/h2i/h2i.go b/vendor/golang.org/x/net/http2/h2i/h2i.go
new file mode 100644
index 0000000..62e5752
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2i/h2i.go
@@ -0,0 +1,522 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!solaris
+
+/*
+The h2i command is an interactive HTTP/2 console.
+
+Usage:
+ $ h2i [flags] <hostname>
+
+Interactive commands in the console: (all parts case-insensitive)
+
+ ping [data]
+ settings ack
+ settings FOO=n BAR=z
+ headers (open a new stream by typing HTTP/1.1)
+*/
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "golang.org/x/crypto/ssh/terminal"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+)
+
+// Command-line flags.
+var (
+	flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.")
+	flagInsecure  = flag.Bool("insecure", false, "Whether to skip TLS cert validation")
+	flagSettings  = flag.String("settings", "empty", "comma-separated list of KEY=value settings for the initial SETTINGS frame. The magic value 'empty' sends an empty initial settings frame, and the magic value 'omit' causes no initial settings frame to be sent.")
+	flagDial      = flag.String("dial", "", "optional ip:port to dial, to connect to a host:port but use a different SNI name (including a SNI name without DNS)")
+)
+
+// command describes one interactive console command: how to run it
+// and, optionally, how to tab-complete its argument tokens.
+type command struct {
+	run func(*h2i, []string) error // required
+
+	// complete optionally specifies tokens (case-insensitive) which are
+	// valid for this subcommand.
+	complete func() []string
+}
+
+// commands maps each (lowercase) console command name to its
+// implementation. lookupCommand resolves names and unambiguous
+// prefixes against this map. The values use method expressions
+// ((*h2i).cmdPing etc.), which have type func(*h2i, []string) error.
+var commands = map[string]command{
+	"ping": {run: (*h2i).cmdPing},
+	"settings": {
+		run: (*h2i).cmdSettings,
+		complete: func() []string {
+			// "ACK" plus the standard HTTP/2 setting names, used for
+			// tab completion of SETTINGS arguments.
+			return []string{
+				"ACK",
+				http2.SettingHeaderTableSize.String(),
+				http2.SettingEnablePush.String(),
+				http2.SettingMaxConcurrentStreams.String(),
+				http2.SettingInitialWindowSize.String(),
+				http2.SettingMaxFrameSize.String(),
+				http2.SettingMaxHeaderListSize.String(),
+			}
+		},
+	},
+	"quit":    {run: (*h2i).cmdQuit},
+	"headers": {run: (*h2i).cmdHeaders},
+}
+
+// usage prints command-line usage and the flag defaults to stderr.
+func usage() {
+	fmt.Fprintf(os.Stderr, "Usage: h2i <hostname>\n\n")
+	flag.PrintDefaults()
+}
+
// withPort returns host with an explicit port attached, defaulting to
// ":443" when the input does not already carry one.
func withPort(host string) string {
	_, _, err := net.SplitHostPort(host)
	if err == nil {
		// host already parses as "host:port"; leave it alone.
		return host
	}
	return net.JoinHostPort(host, "443")
}
+
// withoutPort strips a trailing ":port" from addr, returning addr
// unchanged when no port is present.
func withoutPort(addr string) string {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		// addr is not in "host:port" form; nothing to strip.
		return addr
	}
	return host
}
+
+// h2i is the app's state.
+type h2i struct {
+	host   string             // host (and optional port) given on the command line
+	tc     *tls.Conn          // TLS connection to the peer
+	framer *http2.Framer      // reads and writes HTTP/2 frames on tc
+	term   *terminal.Terminal // raw-mode interactive console
+
+	// owned by the command loop:
+	streamID uint32       // most recent client-opened stream ID (odd: 1, 3, 5, ...)
+	hbuf     bytes.Buffer // output buffer for henc
+	henc     *hpack.Encoder
+
+	// owned by the readFrames loop:
+	peerSetting map[http2.SettingID]uint32
+	hdec        *hpack.Decoder
+}
+
+// main parses flags, builds the app state, and runs the console,
+// exiting with status 2 on usage error and 1 on runtime error.
+func main() {
+	flag.Usage = usage
+	flag.Parse()
+	if flag.NArg() != 1 {
+		usage()
+		os.Exit(2)
+	}
+	log.SetFlags(0) // plain log lines, no timestamps
+
+	host := flag.Arg(0)
+	app := &h2i{
+		host:        host,
+		peerSetting: make(map[http2.SettingID]uint32),
+	}
+	app.henc = hpack.NewEncoder(&app.hbuf)
+
+	if err := app.Main(); err != nil {
+		// If the raw-mode terminal is up, report through it so the
+		// output isn't mangled; otherwise fall back to stderr.
+		if app.term != nil {
+			app.logf("%v\n", err)
+		} else {
+			fmt.Fprintf(os.Stderr, "%v\n", err)
+		}
+		os.Exit(1)
+	}
+	fmt.Fprintf(os.Stdout, "\n")
+}
+
+// Main dials the target over TLS, negotiates HTTP/2 via NPN/ALPN, sends
+// the client connection preface, switches the terminal to raw mode with
+// tab completion, and then runs the frame-reading and console-reading
+// loops concurrently until either one returns.
+func (app *h2i) Main() error {
+	cfg := &tls.Config{
+		ServerName:         withoutPort(app.host),
+		NextProtos:         strings.Split(*flagNextProto, ","),
+		InsecureSkipVerify: *flagInsecure,
+	}
+
+	// -dial overrides the dialed address, letting the SNI name
+	// (app.host) differ from the actual host:port connected to.
+	hostAndPort := *flagDial
+	if hostAndPort == "" {
+		hostAndPort = withPort(app.host)
+	}
+	log.Printf("Connecting to %s ...", hostAndPort)
+	tc, err := tls.Dial("tcp", hostAndPort, cfg)
+	if err != nil {
+		return fmt.Errorf("Error dialing %s: %v", hostAndPort, err)
+	}
+	log.Printf("Connected to %v", tc.RemoteAddr())
+	defer tc.Close()
+
+	if err := tc.Handshake(); err != nil {
+		return fmt.Errorf("TLS handshake: %v", err)
+	}
+	if !*flagInsecure {
+		// Verify against the user-supplied name, not the dialed address.
+		if err := tc.VerifyHostname(app.host); err != nil {
+			return fmt.Errorf("VerifyHostname: %v", err)
+		}
+	}
+	state := tc.ConnectionState()
+	log.Printf("Negotiated protocol %q", state.NegotiatedProtocol)
+	if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" {
+		return fmt.Errorf("Could not negotiate protocol mutually")
+	}
+
+	// Begin the HTTP/2 connection: client preface first, then frames.
+	if _, err := io.WriteString(tc, http2.ClientPreface); err != nil {
+		return err
+	}
+
+	app.framer = http2.NewFramer(tc, tc)
+
+	oldState, err := terminal.MakeRaw(int(os.Stdin.Fd()))
+	if err != nil {
+		return err
+	}
+	defer terminal.Restore(0, oldState)
+
+	// screen bundles stdin+stdout into one ReadWriter for the terminal.
+	var screen = struct {
+		io.Reader
+		io.Writer
+	}{os.Stdin, os.Stdout}
+
+	app.term = terminal.NewTerminal(screen, "h2i> ")
+	lastWord := regexp.MustCompile(`.+\W(\w+)$`)
+	// Tab completion: complete the command name at the start of the
+	// line, and per-command tokens (command.complete) after it.
+	app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) {
+		if key != '\t' {
+			return
+		}
+		if pos != len(line) {
+			// TODO: we're being lazy for now, only supporting tab completion at the end.
+			return
+		}
+		// Auto-complete for the command itself.
+		if !strings.Contains(line, " ") {
+			var name string
+			name, _, ok = lookupCommand(line)
+			if !ok {
+				return
+			}
+			return name, len(name), true
+		}
+		_, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')])
+		if !ok || c.complete == nil {
+			return
+		}
+		if strings.HasSuffix(line, " ") {
+			// Nothing typed for this argument yet: show all options.
+			app.logf("%s", strings.Join(c.complete(), " "))
+			return line, pos, true
+		}
+		m := lastWord.FindStringSubmatch(line)
+		if m == nil {
+			return line, len(line), true
+		}
+		soFar := m[1]
+		var match []string
+		for _, cand := range c.complete() {
+			if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) {
+				continue
+			}
+			match = append(match, cand)
+		}
+		if len(match) == 0 {
+			return
+		}
+		if len(match) > 1 {
+			// TODO: auto-complete any common prefix
+			app.logf("%s", strings.Join(match, " "))
+			return line, pos, true
+		}
+		// Exactly one candidate: replace the partial word with it.
+		newLine = line[:len(line)-len(soFar)] + match[0]
+		return newLine, len(newLine), true
+
+	}
+
+	// Run both loops; whichever finishes first decides the result.
+	errc := make(chan error, 2)
+	go func() { errc <- app.readFrames() }()
+	go func() { errc <- app.readConsole() }()
+	return <-errc
+}
+
+// logf prints a formatted line to the interactive terminal. It appends
+// "\r\n" rather than "\n" because the terminal is in raw mode.
+func (app *h2i) logf(format string, args ...interface{}) {
+	fmt.Fprintf(app.term, format+"\r\n", args...)
+}
+
+// readConsole runs the interactive command loop. It first sends the
+// initial SETTINGS frame as configured by -settings (unless "omit"),
+// then reads lines from the terminal and dispatches them through
+// lookupCommand until EOF, the quit command, or an error.
+func (app *h2i) readConsole() error {
+	if s := *flagSettings; s != "omit" {
+		var args []string
+		if s != "empty" {
+			args = strings.Split(s, ",")
+		}
+		_, c, ok := lookupCommand("settings")
+		if !ok {
+			panic("settings command not found")
+		}
+		c.run(app, args)
+	}
+
+	for {
+		line, err := app.term.ReadLine()
+		if err == io.EOF {
+			// End of input (e.g. Ctrl-D): clean exit.
+			return nil
+		}
+		if err != nil {
+			return fmt.Errorf("terminal.ReadLine: %v", err)
+		}
+		f := strings.Fields(line)
+		if len(f) == 0 {
+			continue
+		}
+		cmd, args := f[0], f[1:]
+		if _, c, ok := lookupCommand(cmd); ok {
+			err = c.run(app, args)
+		} else {
+			app.logf("Unknown command %q", line)
+		}
+		if err == errExitApp {
+			// Sentinel from the quit command: not a real error.
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+	}
+}
+
+// lookupCommand resolves prefix (case-insensitive) against the commands
+// map. An exact name matches immediately; otherwise a prefix match is
+// accepted only if it is unique. ok is false when nothing matches or
+// the prefix is ambiguous.
+func lookupCommand(prefix string) (name string, c command, ok bool) {
+	prefix = strings.ToLower(prefix)
+	if c, ok = commands[prefix]; ok {
+		return prefix, c, ok
+	}
+
+	for full, candidate := range commands {
+		if strings.HasPrefix(full, prefix) {
+			if c.run != nil {
+				// c was already set by an earlier match, so this
+				// prefix matches two commands: ambiguous.
+				return "", command{}, false
+			}
+			c = candidate
+			name = full
+		}
+	}
+	return name, c, c.run != nil
+}
+
+// errExitApp is a sentinel returned by commands to stop the console
+// loop cleanly; readConsole treats it as a normal exit, not an error.
+var errExitApp = errors.New("internal sentinel error value to quit the console reading loop")
+
+// cmdQuit implements the QUIT command. It takes no arguments and ends
+// the console loop by returning errExitApp.
+func (a *h2i) cmdQuit(args []string) error {
+	if len(args) > 0 {
+		a.logf("the QUIT command takes no argument")
+		return nil
+	}
+	return errExitApp
+}
+
+// cmdSettings sends a SETTINGS frame. "settings ACK" (alone) sends a
+// bare settings ACK; otherwise every argument must be NAME=nnnn for a
+// known setting name. Bad input is reported to the console and nil is
+// returned so the console keeps running.
+func (a *h2i) cmdSettings(args []string) error {
+	if len(args) == 1 && strings.EqualFold(args[0], "ACK") {
+		return a.framer.WriteSettingsAck()
+	}
+	var settings []http2.Setting
+	for _, arg := range args {
+		if strings.EqualFold(arg, "ACK") {
+			a.logf("Error: ACK must be only argument with the SETTINGS command")
+			return nil
+		}
+		eq := strings.Index(arg, "=")
+		if eq == -1 {
+			a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg)
+			return nil
+		}
+		sid, ok := settingByName(arg[:eq])
+		if !ok {
+			a.logf("Error: unknown setting name %q", arg[:eq])
+			return nil
+		}
+		// Setting values are 32-bit unsigned on the wire.
+		val, err := strconv.ParseUint(arg[eq+1:], 10, 32)
+		if err != nil {
+			a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg)
+			return nil
+		}
+		settings = append(settings, http2.Setting{
+			ID:  sid,
+			Val: uint32(val),
+		})
+	}
+	a.logf("Sending: %v", settings)
+	return a.framer.WriteSettings(settings...)
+}
+
+func settingByName(name string) (http2.SettingID, bool) {
+ for _, sid := range [...]http2.SettingID{
+ http2.SettingHeaderTableSize,
+ http2.SettingEnablePush,
+ http2.SettingMaxConcurrentStreams,
+ http2.SettingInitialWindowSize,
+ http2.SettingMaxFrameSize,
+ http2.SettingMaxHeaderListSize,
+ } {
+ if strings.EqualFold(sid.String(), name) {
+ return sid, true
+ }
+ }
+ return 0, false
+}
+
+func (app *h2i) cmdPing(args []string) error {
+ if len(args) > 1 {
+ app.logf("invalid PING usage: only accepts 0 or 1 args")
+ return nil // nil means don't end the program
+ }
+ var data [8]byte
+ if len(args) == 1 {
+ copy(data[:], args[0])
+ } else {
+ copy(data[:], "h2i_ping")
+ }
+ return app.framer.WritePing(false, data)
+}
+
// cmdHeaders implements the HEADERS command: it switches the prompt,
// reads a raw HTTP/1.1 request from the console until a blank line,
// converts it to an HPACK block, and sends it as a HEADERS frame on a
// new client-initiated (odd) stream ID.
func (app *h2i) cmdHeaders(args []string) error {
	if len(args) > 0 {
		app.logf("Error: HEADERS doesn't yet take arguments.")
		// TODO: flags for restricting window size, to force CONTINUATION
		// frames.
		return nil
	}
	var h1req bytes.Buffer
	app.term.SetPrompt("(as HTTP/1.1)> ")
	defer app.term.SetPrompt("h2i> ")
	// Accumulate CRLF-terminated lines until a blank line ends the
	// header block, matching HTTP/1.1 framing.
	for {
		line, err := app.term.ReadLine()
		if err != nil {
			return err
		}
		h1req.WriteString(line)
		h1req.WriteString("\r\n")
		if line == "" {
			break
		}
	}
	req, err := http.ReadRequest(bufio.NewReader(&h1req))
	if err != nil {
		app.logf("Invalid HTTP/1.1 request: %v", err)
		return nil
	}
	// Client-initiated streams use odd IDs, starting at 1.
	if app.streamID == 0 {
		app.streamID = 1
	} else {
		app.streamID += 2
	}
	app.logf("Opening Stream-ID %d:", app.streamID)
	hbf := app.encodeHeaders(req)
	// A single frame is limited to the 16KB default max frame size.
	if len(hbf) > 16<<10 {
		app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go")
		return nil
	}
	return app.framer.WriteHeaders(http2.HeadersFrameParam{
		StreamID: app.streamID,
		BlockFragment: hbf,
		EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now
		EndHeaders: true, // for now
	})
}
+
+func (app *h2i) readFrames() error {
+ for {
+ f, err := app.framer.ReadFrame()
+ if err != nil {
+ return fmt.Errorf("ReadFrame: %v", err)
+ }
+ app.logf("%v", f)
+ switch f := f.(type) {
+ case *http2.PingFrame:
+ app.logf(" Data = %q", f.Data)
+ case *http2.SettingsFrame:
+ f.ForeachSetting(func(s http2.Setting) error {
+ app.logf(" %v", s)
+ app.peerSetting[s.ID] = s.Val
+ return nil
+ })
+ case *http2.WindowUpdateFrame:
+ app.logf(" Window-Increment = %v", f.Increment)
+ case *http2.GoAwayFrame:
+ app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)", f.LastStreamID, f.ErrCode, f.ErrCode)
+ case *http2.DataFrame:
+ app.logf(" %q", f.Data())
+ case *http2.HeadersFrame:
+ if f.HasPriority() {
+ app.logf(" PRIORITY = %v", f.Priority)
+ }
+ if app.hdec == nil {
+ // TODO: if the user uses h2i to send a SETTINGS frame advertising
+ // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE
+ // and stuff here instead of using the 4k default. But for now:
+ tableSize := uint32(4 << 10)
+ app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField)
+ }
+ app.hdec.Write(f.HeaderBlockFragment())
+ case *http2.PushPromiseFrame:
+ if app.hdec == nil {
+ // TODO: if the user uses h2i to send a SETTINGS frame advertising
+ // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE
+ // and stuff here instead of using the 4k default. But for now:
+ tableSize := uint32(4 << 10)
+ app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField)
+ }
+ app.hdec.Write(f.HeaderBlockFragment())
+ }
+ }
+}
+
+// called from readLoop
+func (app *h2i) onNewHeaderField(f hpack.HeaderField) {
+ if f.Sensitive {
+ app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value)
+ }
+ app.logf(" %s = %q", f.Name, f.Value)
+}
+
// encodeHeaders converts req into an HPACK header block fragment,
// writing the HTTP/2 pseudo-headers first and then the remaining
// request headers lower-cased. The returned slice aliases app.hbuf
// and is only valid until the next call.
func (app *h2i) encodeHeaders(req *http.Request) []byte {
	app.hbuf.Reset()

	// TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go
	host := req.Host
	if host == "" {
		host = req.URL.Host
	}

	path := req.RequestURI
	if path == "" {
		path = "/"
	}

	app.writeHeader(":authority", host) // probably not right for all sites
	app.writeHeader(":method", req.Method)
	app.writeHeader(":path", path)
	app.writeHeader(":scheme", "https")

	for k, vv := range req.Header {
		lowKey := strings.ToLower(k)
		if lowKey == "host" {
			// Host is carried by :authority in HTTP/2; don't send both.
			continue
		}
		for _, v := range vv {
			app.writeHeader(lowKey, v)
		}
	}
	return app.hbuf.Bytes()
}
+
+func (app *h2i) writeHeader(name, value string) {
+ app.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+ app.logf(" %s = %s", name, value)
+}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
new file mode 100644
index 0000000..c2805f6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/headermap.go
@@ -0,0 +1,78 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "net/http"
+ "strings"
+)
+
var (
	commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
	commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
)

// init precomputes both directions of the common-header case mapping
// so hot-path conversions avoid calling ToLower/CanonicalHeaderKey.
func init() {
	for _, v := range []string{
		"accept",
		"accept-charset",
		"accept-encoding",
		"accept-language",
		"accept-ranges",
		"age",
		"access-control-allow-origin",
		"allow",
		"authorization",
		"cache-control",
		"content-disposition",
		"content-encoding",
		"content-language",
		"content-length",
		"content-location",
		"content-range",
		"content-type",
		"cookie",
		"date",
		"etag",
		"expect",
		"expires",
		"from",
		"host",
		"if-match",
		"if-modified-since",
		"if-none-match",
		"if-unmodified-since",
		"last-modified",
		"link",
		"location",
		"max-forwards",
		"proxy-authenticate",
		"proxy-authorization",
		"range",
		"referer",
		"refresh",
		"retry-after",
		"server",
		"set-cookie",
		"strict-transport-security",
		"trailer",
		"transfer-encoding",
		"user-agent",
		"vary",
		"via",
		"www-authenticate",
	} {
		chk := http.CanonicalHeaderKey(v)
		commonLowerHeader[chk] = v
		commonCanonHeader[v] = chk
	}
}
+
+func lowerHeader(v string) string {
+ if s, ok := commonLowerHeader[v]; ok {
+ return s
+ }
+ return strings.ToLower(v)
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
new file mode 100644
index 0000000..54726c2
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -0,0 +1,240 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "io"
+)
+
const (
	// uint32Max is the largest value representable by uint32.
	uint32Max = ^uint32(0)
	// initialHeaderTableSize is the default HPACK dynamic table size
	// (4096 octets) per RFC 7541.
	initialHeaderTableSize = 4096
)

// An Encoder performs HPACK encoding of header fields, writing the
// encoded form to an io.Writer while maintaining its dynamic table
// state.
type Encoder struct {
	dynTab dynamicTable
	// minSize is the minimum table size set by
	// SetMaxDynamicTableSize after the previous Header Table Size
	// Update.
	minSize uint32
	// maxSizeLimit is the maximum table size this encoder
	// supports. This will protect the encoder from too large
	// size.
	maxSizeLimit uint32
	// tableSizeUpdate indicates whether "Header Table Size
	// Update" is required.
	tableSizeUpdate bool
	// w receives each encoded field; buf is the per-WriteField
	// scratch buffer, reused across calls.
	w   io.Writer
	buf []byte
}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding. An
+// encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+ e := &Encoder{
+ minSize: uint32Max,
+ maxSizeLimit: initialHeaderTableSize,
+ tableSizeUpdate: false,
+ w: w,
+ }
+ e.dynTab.table.init()
+ e.dynTab.setMaxSize(initialHeaderTableSize)
+ return e
+}
+
// WriteField encodes f into a single Write to e's underlying Writer.
// This function may also produce bytes for "Header Table Size Update"
// if necessary. If produced, it is done before encoding f.
func (e *Encoder) WriteField(f HeaderField) error {
	e.buf = e.buf[:0] // reuse the scratch buffer's capacity

	if e.tableSizeUpdate {
		e.tableSizeUpdate = false
		// If the table size dipped below its final value since the
		// last update, that minimum must be signaled first, before
		// the current maximum.
		if e.minSize < e.dynTab.maxSize {
			e.buf = appendTableSize(e.buf, e.minSize)
		}
		e.minSize = uint32Max
		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
	}

	idx, nameValueMatch := e.searchTable(f)
	if nameValueMatch {
		// Full name+value match: compact "Indexed Header Field".
		e.buf = appendIndexed(e.buf, idx)
	} else {
		indexing := e.shouldIndex(f)
		if indexing {
			e.dynTab.add(f)
		}

		if idx == 0 {
			// No name match either: literal with a new name.
			e.buf = appendNewName(e.buf, f, indexing)
		} else {
			// Name-only match: literal referencing the indexed name.
			e.buf = appendIndexedName(e.buf, f, idx, indexing)
		}
	}
	n, err := e.w.Write(e.buf)
	if err == nil && n != len(e.buf) {
		err = io.ErrShortWrite
	}
	return err
}
+
// searchTable searches f in both stable and dynamic header tables.
// The static header table is searched first. Only when there is no
// exact match for both name and value, the dynamic header table is
// then searched. If there is no match, i is 0. If both name and value
// match, i is the matched index and nameValueMatch becomes true. If
// only name matches, i points to that index and nameValueMatch
// becomes false.
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
	i, nameValueMatch = staticTable.search(f)
	if nameValueMatch {
		return i, true
	}

	// Note: nameValueMatch below is the named result (declared in the
	// same block), so ":=" assigns to it rather than shadowing; only
	// j is newly declared.
	j, nameValueMatch := e.dynTab.table.search(f)
	if nameValueMatch || (i == 0 && j != 0) {
		// Dynamic-table indices follow the static table's range.
		return j + uint64(staticTable.len()), nameValueMatch
	}

	// At most a name-only static match (i may be nonzero), or nothing.
	return i, false
}
+
// SetMaxDynamicTableSize changes the dynamic header table size to v.
// The actual size is bounded by the value passed to
// SetMaxDynamicTableSizeLimit.
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
	if v > e.maxSizeLimit {
		v = e.maxSizeLimit // clamp to the configured limit
	}
	if v < e.minSize {
		// Track the smallest size requested since the last emitted
		// table size update; WriteField signals it first.
		e.minSize = v
	}
	e.tableSizeUpdate = true
	e.dynTab.setMaxSize(v)
}

// SetMaxDynamicTableSizeLimit changes the maximum value that can be
// specified in SetMaxDynamicTableSize to v. By default, it is set to
// 4096, which is the same size of the default dynamic header table
// size described in HPACK specification. If the current maximum
// dynamic header table size is strictly greater than v, "Header Table
// Size Update" will be done in the next WriteField call and the
// maximum dynamic header table size is truncated to v.
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
	e.maxSizeLimit = v
	if e.dynTab.maxSize > v {
		e.tableSizeUpdate = true
		e.dynTab.setMaxSize(v)
	}
}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+ return !f.Sensitive && f.Size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, i)
+ dst[first] |= 0x80
+ return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Inremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+ dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+ dst = appendHpackString(dst, f.Name)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring indexed name
+// entry, as encoded in one of "Literal Header field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+ first := len(dst)
+ var n byte
+ if indexing {
+ n = 6
+ } else {
+ n = 4
+ }
+ dst = appendVarInt(dst, n, i)
+ dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 5, uint64(v))
+ dst[first] |= 0x20
+ return dst
+}
+
// appendVarInt appends i, encoded as an HPACK variable-length integer
// with an n-bit prefix, to dst and returns the extended buffer.
//
// See
// http://http2.github.io/http2-spec/compression.html#integer.representation
func appendVarInt(dst []byte, n byte, i uint64) []byte {
	limit := uint64(1)<<n - 1
	if i < limit {
		// Fits entirely in the prefix bits.
		return append(dst, byte(i))
	}
	// Saturate the prefix, then emit the remainder in 7-bit groups,
	// least significant first, with the high bit as a continuation flag.
	dst = append(dst, byte(limit))
	for i -= limit; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f)))
	}
	return append(dst, byte(i))
}
+
// appendHpackString appends s, as encoded in "String Literal"
// representation, to dst and returns the extended buffer.
//
// s will be encoded in Huffman codes only when it produces strictly
// shorter byte string.
func appendHpackString(dst []byte, s string) []byte {
	huffmanLength := HuffmanEncodeLength(s)
	if huffmanLength < uint64(len(s)) {
		first := len(dst)
		dst = appendVarInt(dst, 7, huffmanLength)
		dst = AppendHuffmanString(dst, s)
		dst[first] |= 0x80 // H bit: string is Huffman encoded
	} else {
		// Raw octets; the H bit stays clear.
		dst = appendVarInt(dst, 7, uint64(len(s)))
		dst = append(dst, s...)
	}
	return dst
}
+
// encodeTypeByte returns the representation type byte: 0x10 for
// "Never Indexed" (sensitive takes precedence), 0x40 for "Incremental
// Indexing", and 0x00 for "Without Indexing".
func encodeTypeByte(indexing, sensitive bool) byte {
	switch {
	case sensitive:
		return 0x10
	case indexing:
		return 0x40
	default:
		return 0
	}
}
diff --git a/vendor/golang.org/x/net/http2/hpack/encode_test.go b/vendor/golang.org/x/net/http2/hpack/encode_test.go
new file mode 100644
index 0000000..05f12db
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/encode_test.go
@@ -0,0 +1,386 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "math/rand"
+ "reflect"
+ "strings"
+ "testing"
+)
+
// TestEncoderTableSizeUpdate verifies the "Header Table Size Update"
// bytes that WriteField emits after SetMaxDynamicTableSize calls.
func TestEncoderTableSizeUpdate(t *testing.T) {
	tests := []struct {
		size1, size2 uint32
		wantHex      string
	}{
		// Should emit 2 table size updates (2048 and 4096)
		{2048, 4096, "3fe10f 3fe11f 82"},

		// Should emit 1 table size update (2048)
		{16384, 2048, "3fe10f 82"},
	}
	for _, tt := range tests {
		var buf bytes.Buffer
		e := NewEncoder(&buf)
		e.SetMaxDynamicTableSize(tt.size1)
		e.SetMaxDynamicTableSize(tt.size2)
		if err := e.WriteField(pair(":method", "GET")); err != nil {
			t.Fatal(err)
		}
		want := removeSpace(tt.wantHex)
		if got := hex.EncodeToString(buf.Bytes()); got != want {
			t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want)
		}
	}
}

// TestEncoderWriteField round-trips header lists through an Encoder
// and a Decoder, checking the decoded fields equal the input.
func TestEncoderWriteField(t *testing.T) {
	var buf bytes.Buffer
	e := NewEncoder(&buf)
	var got []HeaderField
	d := NewDecoder(4<<10, func(f HeaderField) {
		got = append(got, f)
	})

	tests := []struct {
		hdrs []HeaderField
	}{
		{[]HeaderField{
			pair(":method", "GET"),
			pair(":scheme", "http"),
			pair(":path", "/"),
			pair(":authority", "www.example.com"),
		}},
		{[]HeaderField{
			pair(":method", "GET"),
			pair(":scheme", "http"),
			pair(":path", "/"),
			pair(":authority", "www.example.com"),
			pair("cache-control", "no-cache"),
		}},
		{[]HeaderField{
			pair(":method", "GET"),
			pair(":scheme", "https"),
			pair(":path", "/index.html"),
			pair(":authority", "www.example.com"),
			pair("custom-key", "custom-value"),
		}},
	}
	for i, tt := range tests {
		// Encoder/decoder dynamic-table state deliberately persists
		// across cases; only the output buffer and results reset.
		buf.Reset()
		got = got[:0]
		for _, hf := range tt.hdrs {
			if err := e.WriteField(hf); err != nil {
				t.Fatal(err)
			}
		}
		_, err := d.Write(buf.Bytes())
		if err != nil {
			t.Errorf("%d. Decoder Write = %v", i, err)
		}
		if !reflect.DeepEqual(got, tt.hdrs) {
			t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs)
		}
	}
}

// TestEncoderSearchTable checks index resolution against both the
// static table and a seeded dynamic table.
func TestEncoderSearchTable(t *testing.T) {
	e := NewEncoder(nil)

	e.dynTab.add(pair("foo", "bar"))
	e.dynTab.add(pair("blake", "miz"))
	e.dynTab.add(pair(":method", "GET"))

	tests := []struct {
		hf        HeaderField
		wantI     uint64
		wantMatch bool
	}{
		// Name and Value match
		{pair("foo", "bar"), uint64(staticTable.len()) + 3, true},
		{pair("blake", "miz"), uint64(staticTable.len()) + 2, true},
		{pair(":method", "GET"), 2, true},

		// Only name match because Sensitive == true. This is allowed to match
		// any ":method" entry. The current implementation uses the last entry
		// added in newStaticTable.
		{HeaderField{":method", "GET", true}, 3, false},

		// Only Name matches
		{pair("foo", "..."), uint64(staticTable.len()) + 3, false},
		{pair("blake", "..."), uint64(staticTable.len()) + 2, false},
		// As before, this is allowed to match any ":method" entry.
		{pair(":method", "..."), 3, false},

		// None match
		{pair("foo-", "bar"), 0, false},
	}
	for _, tt := range tests {
		if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
			t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
		}
	}
}
+
// TestAppendVarInt exercises the HPACK variable-length integer
// encoding across prefix widths and the multi-byte continuation case.
func TestAppendVarInt(t *testing.T) {
	tests := []struct {
		n    byte
		i    uint64
		want []byte
	}{
		// Fits in a byte:
		{1, 0, []byte{0}},
		{2, 2, []byte{2}},
		{3, 6, []byte{6}},
		{4, 14, []byte{14}},
		{5, 30, []byte{30}},
		{6, 62, []byte{62}},
		{7, 126, []byte{126}},
		{8, 254, []byte{254}},

		// Multiple bytes:
		{5, 1337, []byte{31, 154, 10}},
	}
	for _, tt := range tests {
		got := appendVarInt(nil, tt.n, tt.i)
		if !bytes.Equal(got, tt.want) {
			t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want)
		}
	}
}

// TestAppendHpackString checks that string literals are Huffman
// encoded only when strictly shorter than the raw form.
func TestAppendHpackString(t *testing.T) {
	tests := []struct {
		s, wantHex string
	}{
		// Huffman encoded
		{"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"},

		// Not Huffman encoded
		{"a", "01 61"},

		// zero length
		{"", "00"},
	}
	for _, tt := range tests {
		want := removeSpace(tt.wantHex)
		buf := appendHpackString(nil, tt.s)
		if got := hex.EncodeToString(buf); want != got {
			t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want)
		}
	}
}
+
+func TestAppendIndexed(t *testing.T) {
+ tests := []struct {
+ i uint64
+ wantHex string
+ }{
+ // 1 byte
+ {1, "81"},
+ {126, "fe"},
+
+ // 2 bytes
+ {127, "ff00"},
+ {128, "ff01"},
+ }
+ for _, tt := range tests {
+ want := removeSpace(tt.wantHex)
+ buf := appendIndexed(nil, tt.i)
+ if got := hex.EncodeToString(buf); want != got {
+ t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want)
+ }
+ }
+}
+
// TestAppendNewName checks all three "Literal Header Field - New
// Name" representation variants against known byte sequences.
func TestAppendNewName(t *testing.T) {
	tests := []struct {
		f        HeaderField
		indexing bool
		wantHex  string
	}{
		// Incremental indexing
		{HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},

		// Without indexing
		{HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},

		// Never indexed
		{HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
		{HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
	}
	for _, tt := range tests {
		want := removeSpace(tt.wantHex)
		buf := appendNewName(nil, tt.f, tt.indexing)
		if got := hex.EncodeToString(buf); want != got {
			t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
		}
	}
}
+
+func TestAppendIndexedName(t *testing.T) {
+ tests := []struct {
+ f HeaderField
+ i uint64
+ indexing bool
+ wantHex string
+ }{
+ // Incremental indexing
+ {HeaderField{":status", "302", false}, 8, true, "48 82 6402"},
+
+ // Without indexing
+ {HeaderField{":status", "302", false}, 8, false, "08 82 6402"},
+
+ // Never indexed
+ {HeaderField{":status", "302", true}, 8, true, "18 82 6402"},
+ {HeaderField{":status", "302", true}, 8, false, "18 82 6402"},
+ }
+ for _, tt := range tests {
+ want := removeSpace(tt.wantHex)
+ buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing)
+ if got := hex.EncodeToString(buf); want != got {
+ t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
+ }
+ }
+}
+
// TestAppendTableSize checks "Dynamic Table Size Update" encoding at
// the 5-bit prefix boundary.
func TestAppendTableSize(t *testing.T) {
	tests := []struct {
		i       uint32
		wantHex string
	}{
		// Fits into 1 byte
		{30, "3e"},

		// Extra byte
		{31, "3f00"},
		{32, "3f01"},
	}
	for _, tt := range tests {
		want := removeSpace(tt.wantHex)
		buf := appendTableSize(nil, tt.i)
		if got := hex.EncodeToString(buf); want != got {
			t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want)
		}
	}
}
+
+func TestEncoderSetMaxDynamicTableSize(t *testing.T) {
+ var buf bytes.Buffer
+ e := NewEncoder(&buf)
+ tests := []struct {
+ v uint32
+ wantUpdate bool
+ wantMinSize uint32
+ wantMaxSize uint32
+ }{
+ // Set new table size to 2048
+ {2048, true, 2048, 2048},
+
+ // Set new table size to 16384, but still limited to
+ // 4096
+ {16384, true, 2048, 4096},
+ }
+ for _, tt := range tests {
+ e.SetMaxDynamicTableSize(tt.v)
+ if got := e.tableSizeUpdate; tt.wantUpdate != got {
+ t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate)
+ }
+ if got := e.minSize; tt.wantMinSize != got {
+ t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize)
+ }
+ if got := e.dynTab.maxSize; tt.wantMaxSize != got {
+ t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize)
+ }
+ }
+}
+
// TestEncoderSetMaxDynamicTableSizeLimit checks that lowering the
// limit truncates the table size (scheduling an update), and that
// later SetMaxDynamicTableSize calls stay clamped to the limit.
func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) {
	e := NewEncoder(nil)
	// 4095 < initialHeaderTableSize means maxSize is truncated to
	// 4095.
	e.SetMaxDynamicTableSizeLimit(4095)
	if got, want := e.dynTab.maxSize, uint32(4095); got != want {
		t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
	}
	if got, want := e.maxSizeLimit, uint32(4095); got != want {
		t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
	}
	if got, want := e.tableSizeUpdate, true; got != want {
		t.Errorf("e.tableSizeUpdate = %v; want %v", got, want)
	}
	// maxSize will be truncated to maxSizeLimit
	e.SetMaxDynamicTableSize(16384)
	if got, want := e.dynTab.maxSize, uint32(4095); got != want {
		t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
	}
	// 8192 > current maxSizeLimit, so maxSize does not change.
	e.SetMaxDynamicTableSizeLimit(8192)
	if got, want := e.dynTab.maxSize, uint32(4095); got != want {
		t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
	}
	if got, want := e.maxSizeLimit, uint32(8192); got != want {
		t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
	}
}
+
// removeSpace strips all ASCII space characters from s, letting test
// tables format expected hex with readable grouping.
func removeSpace(s string) string {
	var b strings.Builder
	for _, r := range s {
		if r != ' ' {
			b.WriteRune(r)
		}
	}
	return b.String()
}
+
// BenchmarkEncoderSearchTable measures searchTable over a synthetic
// population of header fields and a pre-filled dynamic table.
func BenchmarkEncoderSearchTable(b *testing.B) {
	e := NewEncoder(nil)

	// A sample of possible header fields.
	// This is not based on any actual data from HTTP/2 traces.
	var possible []HeaderField
	for _, f := range staticTable.ents {
		if f.Value == "" {
			possible = append(possible, f)
			continue
		}
		// Generate 5 random values, except for cookie and set-cookie,
		// which we know can have many values in practice.
		num := 5
		if f.Name == "cookie" || f.Name == "set-cookie" {
			num = 25
		}
		for i := 0; i < num; i++ {
			f.Value = fmt.Sprintf("%s-%d", f.Name, i)
			possible = append(possible, f)
		}
	}
	// Add fields whose names are absent from the static table.
	for k := 0; k < 10; k++ {
		f := HeaderField{
			Name:      fmt.Sprintf("x-header-%d", k),
			Sensitive: rand.Int()%2 == 0,
		}
		for i := 0; i < 5; i++ {
			f.Value = fmt.Sprintf("%s-%d", f.Name, i)
			possible = append(possible, f)
		}
	}

	// Add a random sample to the dynamic table. This very loosely simulates
	// a history of 100 requests with 20 header fields per request.
	for r := 0; r < 100*20; r++ {
		f := possible[rand.Int31n(int32(len(possible)))]
		// Skip if this is in the staticTable verbatim.
		if _, has := staticTable.search(f); !has {
			e.dynTab.add(f)
		}
	}

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		for _, f := range possible {
			e.searchTable(f)
		}
	}
}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
new file mode 100644
index 0000000..176644a
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -0,0 +1,490 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
// A DecodingError is something the spec defines as a decoding error.
type DecodingError struct {
	Err error
}

// Error implements the error interface, wrapping the underlying cause.
func (de DecodingError) Error() string {
	return fmt.Sprintf("decoding error: %v", de.Err)
}

// An InvalidIndexError is returned when an encoder references a table
// entry before the static table or after the end of the dynamic table.
type InvalidIndexError int

// Error implements the error interface; the value is the bad index.
func (e InvalidIndexError) Error() string {
	return fmt.Sprintf("invalid indexed representation index %d", int(e))
}
+
// A HeaderField is a name-value pair. Both the name and value are
// treated as opaque sequences of octets.
type HeaderField struct {
	Name, Value string

	// Sensitive means that this header field should never be
	// indexed.
	Sensitive bool
}

// IsPseudo reports whether the header field is an http2 pseudo header.
// That is, it reports whether it starts with a colon.
// It is not otherwise guaranteed to be a valid pseudo header field,
// though.
func (hf HeaderField) IsPseudo() bool {
	return hf.Name != "" && hf.Name[0] == ':'
}

// String renders the field for debugging, marking sensitive fields.
func (hf HeaderField) String() string {
	if hf.Sensitive {
		return fmt.Sprintf("header field %q = %q (sensitive)", hf.Name, hf.Value)
	}
	return fmt.Sprintf("header field %q = %q", hf.Name, hf.Value)
}

// Size returns the size of an entry per RFC 7541 section 4.1.
func (hf HeaderField) Size() uint32 {
	// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
	// "The size of the dynamic table is the sum of the size of its
	// entries. The size of an entry is the sum of its name's length
	// in octets (as defined in Section 5.2), its value's length in
	// octets (see Section 5.2), plus 32. The size of an entry is
	// calculated using the length of the name and value without any
	// Huffman encoding applied."
	//
	// This can overflow if somebody makes a large HeaderField
	// Name and/or Value by hand, but we don't care, because that
	// won't happen on the wire because the encoding doesn't allow
	// it.
	return uint32(len(hf.Name) + len(hf.Value) + 32)
}
+
// A Decoder is the decoding context for incremental processing of
// header blocks. It is not safe for concurrent use; decoded fields
// are delivered via the emit callback during Write.
type Decoder struct {
	dynTab dynamicTable
	emit   func(f HeaderField)

	emitEnabled bool // whether calls to emit are enabled
	maxStrLen   int  // 0 means unlimited

	// buf is the unparsed buffer. It's only written to
	// saveBuf if it was truncated in the middle of a header
	// block. Because it's usually not owned, we can only
	// process it under Write.
	buf []byte // not owned; only valid during Write

	// saveBuf is previous data passed to Write which we weren't able
	// to fully parse before. Unlike buf, we own this data.
	saveBuf bytes.Buffer
}
+
+// NewDecoder returns a new decoder with the provided maximum dynamic
+// table size. The emitFunc will be called for each valid field
+// parsed, in the same goroutine as calls to Write, before Write returns.
+func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
+ d := &Decoder{
+ emit: emitFunc,
+ emitEnabled: true,
+ }
+ d.dynTab.table.init()
+ d.dynTab.allowedMaxSize = maxDynamicTableSize
+ d.dynTab.setMaxSize(maxDynamicTableSize)
+ return d
+}
+
// ErrStringLength is returned by Decoder.Write when the max string length
// (as configured by Decoder.SetMaxStringLength) would be violated.
var ErrStringLength = errors.New("hpack: string too long")

// SetMaxStringLength sets the maximum size of a HeaderField name or
// value string. If a string exceeds this length (even after any
// decompression), Write will return ErrStringLength.
// A value of 0 means unlimited and is the default from NewDecoder.
func (d *Decoder) SetMaxStringLength(n int) {
	d.maxStrLen = n
}

// SetEmitFunc changes the callback used when new header fields
// are decoded.
// It must be non-nil. It does not affect EmitEnabled.
func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
	d.emit = emitFunc
}

// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
// should be called. The default is true.
//
// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
// while still decoding and keeping in-sync with decoder state, but
// without doing unnecessary decompression or generating unnecessary
// garbage for header fields past the limit.
func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }

// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
// are currently enabled. The default is true.
func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }

// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
// underlying buffers for garbage reasons.

// SetMaxDynamicTableSize changes the decoder's maximum dynamic table
// size to v, evicting entries if the table currently exceeds it.
func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
	d.dynTab.setMaxSize(v)
}

// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
// stream (via dynamic table size updates) may set the maximum size
// to.
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
	d.dynTab.allowedMaxSize = v
}
+
// dynamicTable implements the HPACK dynamic table: a size-bounded
// FIFO of recently used header fields.
// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
type dynamicTable struct {
	table          headerFieldTable
	size           uint32 // in bytes
	maxSize        uint32 // current maxSize
	allowedMaxSize uint32 // maxSize may go up to this, inclusive
}

// setMaxSize updates the table's size bound and evicts entries as
// needed to honor it.
func (dt *dynamicTable) setMaxSize(v uint32) {
	dt.maxSize = v
	dt.evict()
}

// add appends f as the newest entry, then evicts old entries if the
// table now exceeds maxSize.
func (dt *dynamicTable) add(f HeaderField) {
	dt.table.addEntry(f)
	dt.size += f.Size()
	dt.evict()
}
+
+// If we're too big, evict old stuff.
+func (dt *dynamicTable) evict() {
+ var n int
+ for dt.size > dt.maxSize && n < dt.table.len() {
+ dt.size -= dt.table.ents[n].Size()
+ n++
+ }
+ dt.table.evictOldest(n)
+}
+
// maxTableIndex returns the highest valid 1-based HPACK index: the
// static table entries followed by the current dynamic table entries.
func (d *Decoder) maxTableIndex() int {
	// This should never overflow. RFC 7540 Section 6.5.2 limits the size of
	// the dynamic table to 2^32 bytes, where each entry will occupy more than
	// one byte. Further, the staticTable has a fixed, small length.
	return d.dynTab.table.len() + staticTable.len()
}

// at returns the header field at 1-based HPACK index i, with ok ==
// false when i is 0 or past the end of the dynamic table. Indices
// 1..len(staticTable) address the static table; higher indices
// address the dynamic table, newest entry first.
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
	// See Section 2.3.3.
	if i == 0 {
		return
	}
	if i <= uint64(staticTable.len()) {
		return staticTable.ents[i-1], true
	}
	if i > uint64(d.maxTableIndex()) {
		return
	}
	// In the dynamic table, newer entries have lower indices.
	// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
	// the reversed dynamic table.
	dt := d.dynTab.table
	return dt.ents[dt.len()-(int(i)-staticTable.len())], true
}
+
// DecodeFull decodes an entire header block in one call, returning
// the decoded fields. It temporarily replaces the emit callback.
//
// TODO: remove this method and make it incremental later? This is
// easier for debugging now.
func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
	var hf []HeaderField
	// Capture emitted fields into hf, restoring the caller's emit
	// function on exit.
	saveFunc := d.emit
	defer func() { d.emit = saveFunc }()
	d.emit = func(f HeaderField) { hf = append(hf, f) }
	if _, err := d.Write(p); err != nil {
		return nil, err
	}
	if err := d.Close(); err != nil {
		return nil, err
	}
	return hf, nil
}

// Close declares the header block complete. It returns a
// DecodingError if unparsed (truncated) input remains buffered, and
// resets that buffer in that case.
func (d *Decoder) Close() error {
	if d.saveBuf.Len() > 0 {
		d.saveBuf.Reset()
		return DecodingError{errors.New("truncated headers")}
	}
	return nil
}
+
+// Write decodes HPACK header field representations from p, calling the
+// emit function for each complete field. A trailing incomplete
+// representation is buffered in saveBuf and resumed on the next Write.
+// On success it reports len(p) bytes consumed.
+func (d *Decoder) Write(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ // Prevent state machine CPU attacks (making us redo
+ // work up to the point of finding out we don't have
+ // enough data)
+ return
+ }
+ // Only copy the data if we have to. Optimistically assume
+ // that p will contain a complete header block.
+ if d.saveBuf.Len() == 0 {
+ d.buf = p
+ } else {
+ d.saveBuf.Write(p)
+ d.buf = d.saveBuf.Bytes()
+ d.saveBuf.Reset()
+ }
+
+ for len(d.buf) > 0 {
+ err = d.parseHeaderFieldRepr()
+ if err == errNeedMore {
+ // Extra paranoia, making sure saveBuf won't
+ // get too large. All the varint and string
+ // reading code earlier should already catch
+ // overlong things and return ErrStringLength,
+ // but keep this as a last resort.
+ const varIntOverhead = 8 // conservative
+ if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
+ return 0, ErrStringLength
+ }
+ d.saveBuf.Write(d.buf)
+ return len(p), nil
+ }
+ if err != nil {
+ break
+ }
+ }
+ return len(p), err
+}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+// indexType classifies the three literal header field representations
+// of RFC 7541 Section 6.2 by how they interact with the dynamic table.
+type indexType int
+
+const (
+ indexedTrue indexType = iota
+ indexedFalse
+ indexedNever
+)
+
+// indexed reports whether the field is added to the dynamic table.
+func (v indexType) indexed() bool { return v == indexedTrue }
+
+// sensitive reports whether the field must never be indexed by
+// intermediaries (the "never indexed" representation).
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0
+func (d *Decoder) parseHeaderFieldRepr() error {
+ b := d.buf[0]
+ switch {
+ case b&128 != 0:
+ // Indexed representation.
+ // High bit set?
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+ return d.parseFieldIndexed()
+ case b&192 == 64:
+ // 6.2.1 Literal Header Field with Incremental Indexing
+ // 0b01xxxxxx: top two bits are 01
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+ return d.parseFieldLiteral(6, indexedTrue)
+ case b&240 == 0:
+ // 6.2.2 Literal Header Field without Indexing
+ // 0b0000xxxx: top four bits are 0000
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+ return d.parseFieldLiteral(4, indexedFalse)
+ case b&240 == 16:
+ // 6.2.3 Literal Header Field never Indexed
+ // 0b0001xxxx: top four bits are 0001
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+ return d.parseFieldLiteral(4, indexedNever)
+ case b&224 == 32:
+ // 6.3 Dynamic Table Size Update
+ // Top three bits are '001'.
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+ return d.parseDynamicTableSizeUpdate()
+ }
+
+ return DecodingError{errors.New("invalid encoding")}
+}
+
+// parseFieldIndexed handles an Indexed Header Field (Section 6.1),
+// where a single index names both the field name and value.
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+ buf := d.buf
+ idx, buf, err := readVarInt(7, buf)
+ if err != nil {
+ return err
+ }
+ hf, ok := d.at(idx)
+ if !ok {
+ return DecodingError{InvalidIndexError(idx)}
+ }
+ d.buf = buf
+ return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// parseFieldLiteral handles the three literal representations of
+// Section 6.2. n is the bit-prefix size of the name index (6 or 4),
+// and it says whether the decoded field is added to the dynamic table.
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+ buf := d.buf
+ nameIdx, buf, err := readVarInt(n, buf)
+ if err != nil {
+ return err
+ }
+
+ var hf HeaderField
+ // Skip materializing strings nobody will observe: they're needed
+ // only if we will emit the field or must store it in the table.
+ wantStr := d.emitEnabled || it.indexed()
+ if nameIdx > 0 {
+ ihf, ok := d.at(nameIdx)
+ if !ok {
+ return DecodingError{InvalidIndexError(nameIdx)}
+ }
+ hf.Name = ihf.Name
+ } else {
+ hf.Name, buf, err = d.readString(buf, wantStr)
+ if err != nil {
+ return err
+ }
+ }
+ hf.Value, buf, err = d.readString(buf, wantStr)
+ if err != nil {
+ return err
+ }
+ d.buf = buf
+ if it.indexed() {
+ d.dynTab.add(hf)
+ }
+ hf.Sensitive = it.sensitive()
+ return d.callEmit(hf)
+}
+
+// callEmit enforces the per-string length limit and then passes hf to
+// the emit callback, unless emitting has been disabled.
+func (d *Decoder) callEmit(hf HeaderField) error {
+ if d.maxStrLen != 0 {
+ if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+ return ErrStringLength
+ }
+ }
+ if d.emitEnabled {
+ d.emit(hf)
+ }
+ return nil
+}
+
+// parseDynamicTableSizeUpdate handles a Dynamic Table Size Update
+// (Section 6.3), rejecting sizes above the configured allowed maximum.
+// NOTE(review): RFC 7541 Section 4.2 requires size updates to appear
+// only at the start of a header block; this code does not appear to
+// enforce that — confirm against the rest of the decoder.
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+ buf := d.buf
+ size, buf, err := readVarInt(5, buf)
+ if err != nil {
+ return err
+ }
+ if size > uint64(d.dynTab.allowedMaxSize) {
+ return DecodingError{errors.New("dynamic table size update too large")}
+ }
+ d.dynTab.setMaxSize(uint32(size))
+ d.buf = buf
+ return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+ if n < 1 || n > 8 {
+ panic("bad n")
+ }
+ if len(p) == 0 {
+ return 0, p, errNeedMore
+ }
+ i = uint64(p[0])
+ if n < 8 {
+ // Mask off the bits above the n-bit prefix.
+ i &= (1 << uint64(n)) - 1
+ }
+ if i < (1<<uint64(n))-1 {
+ // Value fits entirely in the prefix byte.
+ return i, p[1:], nil
+ }
+
+ // Prefix is all ones: continuation bytes follow, 7 bits each,
+ // least significant group first (m is the current shift).
+ origP := p
+ p = p[1:]
+ var m uint64
+ for len(p) > 0 {
+ b := p[0]
+ p = p[1:]
+ i += uint64(b&127) << m
+ if b&128 == 0 {
+ return i, p, nil
+ }
+ m += 7
+ if m >= 63 { // TODO: proper overflow check. making this up.
+ return 0, origP, errVarintOverflow
+ }
+ }
+ return 0, origP, errNeedMore
+}
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway. This does mean that huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+ if len(p) == 0 {
+ return "", p, errNeedMore
+ }
+ // High bit of the length octet flags Huffman encoding (Section 5.2).
+ isHuff := p[0]&128 != 0
+ strLen, p, err := readVarInt(7, p)
+ if err != nil {
+ return "", p, err
+ }
+ if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+ return "", nil, ErrStringLength
+ }
+ if uint64(len(p)) < strLen {
+ return "", p, errNeedMore
+ }
+ if !isHuff {
+ if wantStr {
+ s = string(p[:strLen])
+ }
+ return s, p[strLen:], nil
+ }
+
+ if wantStr {
+ // Decode through a pooled scratch buffer to avoid a
+ // fresh allocation per string.
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset() // don't trust others
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+ buf.Reset()
+ return "", nil, err
+ }
+ s = buf.String()
+ buf.Reset() // be nice to GC
+ }
+ return s, p[strLen:], nil
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack_test.go b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
new file mode 100644
index 0000000..bc7f476
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
@@ -0,0 +1,722 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "math/rand"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+// mustAt is a test helper: like Decoder.at but panics on a bogus index.
+func (d *Decoder) mustAt(idx int) HeaderField {
+ if hf, ok := d.at(uint64(idx)); !ok {
+ panic(fmt.Sprintf("bogus index %d", idx))
+ } else {
+ return hf
+ }
+}
+
+// TestDynamicTableAt checks static-table lookups and that dynamic
+// entries are indexed newest-first after the static table.
+func TestDynamicTableAt(t *testing.T) {
+ d := NewDecoder(4096, nil)
+ at := d.mustAt
+ if got, want := at(2), (pair(":method", "GET")); got != want {
+ t.Errorf("at(2) = %v; want %v", got, want)
+ }
+ d.dynTab.add(pair("foo", "bar"))
+ d.dynTab.add(pair("blake", "miz"))
+ if got, want := at(staticTable.len()+1), (pair("blake", "miz")); got != want {
+ t.Errorf("at(dyn 1) = %v; want %v", got, want)
+ }
+ if got, want := at(staticTable.len()+2), (pair("foo", "bar")); got != want {
+ t.Errorf("at(dyn 2) = %v; want %v", got, want)
+ }
+ if got, want := at(3), (pair(":method", "POST")); got != want {
+ t.Errorf("at(3) = %v; want %v", got, want)
+ }
+}
+
+// TestDynamicTableSizeEvict checks size accounting (entry size + 32
+// bytes overhead per RFC 7541) and eviction on setMaxSize and add.
+func TestDynamicTableSizeEvict(t *testing.T) {
+ d := NewDecoder(4096, nil)
+ if want := uint32(0); d.dynTab.size != want {
+ t.Fatalf("size = %d; want %d", d.dynTab.size, want)
+ }
+ add := d.dynTab.add
+ add(pair("blake", "eats pizza"))
+ if want := uint32(15 + 32); d.dynTab.size != want {
+ t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want)
+ }
+ add(pair("foo", "bar"))
+ if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want {
+ t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want)
+ }
+ d.dynTab.setMaxSize(15 + 32 + 1 /* slop */)
+ if want := uint32(6 + 32); d.dynTab.size != want {
+ t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want)
+ }
+ if got, want := d.mustAt(staticTable.len()+1), (pair("foo", "bar")); got != want {
+ t.Errorf("at(dyn 1) = %v; want %v", got, want)
+ }
+ add(pair("long", strings.Repeat("x", 500)))
+ if want := uint32(0); d.dynTab.size != want {
+ t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want)
+ }
+}
+
+// TestDecoderDecode runs the single-block examples from RFC 7541
+// Appendix C.2, checking both emitted fields and dynamic table state.
+func TestDecoderDecode(t *testing.T) {
+ tests := []struct {
+ name string
+ in []byte
+ want []HeaderField
+ wantDynTab []HeaderField // newest entry first
+ }{
+ // C.2.1 Literal Header Field with Indexing
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1
+ {"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"),
+ []HeaderField{pair("custom-key", "custom-header")},
+ []HeaderField{pair("custom-key", "custom-header")},
+ },
+
+ // C.2.2 Literal Header Field without Indexing
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2
+ {"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"),
+ []HeaderField{pair(":path", "/sample/path")},
+ []HeaderField{}},
+
+ // C.2.3 Literal Header Field never Indexed
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3
+ {"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"),
+ []HeaderField{{"password", "secret", true}},
+ []HeaderField{}},
+
+ // C.2.4 Indexed Header Field
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4
+ {"C.2.4", []byte("\x82"),
+ []HeaderField{pair(":method", "GET")},
+ []HeaderField{}},
+ }
+ for _, tt := range tests {
+ d := NewDecoder(4096, nil)
+ hf, err := d.DecodeFull(tt.in)
+ if err != nil {
+ t.Errorf("%s: %v", tt.name, err)
+ continue
+ }
+ if !reflect.DeepEqual(hf, tt.want) {
+ t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want)
+ }
+ gotDynTab := d.dynTab.reverseCopy()
+ if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) {
+ t.Errorf("%s: dynamic table after = %v; want %v", tt.name, gotDynTab, tt.wantDynTab)
+ }
+ }
+}
+
+// reverseCopy returns the dynamic table entries newest-first
+// (ents is stored oldest-first).
+func (dt *dynamicTable) reverseCopy() (hf []HeaderField) {
+ hf = make([]HeaderField, len(dt.table.ents))
+ for i := range hf {
+ hf[i] = dt.table.ents[len(dt.table.ents)-1-i]
+ }
+ return
+}
+
+// encAndWant is one step of a multi-block decode test: the encoded
+// input and the expected fields, dynamic table contents, and size.
+type encAndWant struct {
+ enc []byte
+ want []HeaderField
+ wantDynTab []HeaderField
+ wantDynSize uint32
+}
+
+// C.3 Request Examples without Huffman Coding
+// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3
+func TestDecodeC3_NoHuffman(t *testing.T) {
+ testDecodeSeries(t, 4096, []encAndWant{
+ {dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"),
+ []HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "http"),
+ pair(":path", "/"),
+ pair(":authority", "www.example.com"),
+ },
+ []HeaderField{
+ pair(":authority", "www.example.com"),
+ },
+ 57,
+ },
+ {dehex("8286 84be 5808 6e6f 2d63 6163 6865"),
+ []HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "http"),
+ pair(":path", "/"),
+ pair(":authority", "www.example.com"),
+ pair("cache-control", "no-cache"),
+ },
+ []HeaderField{
+ pair("cache-control", "no-cache"),
+ pair(":authority", "www.example.com"),
+ },
+ 110,
+ },
+ {dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"),
+ []HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "https"),
+ pair(":path", "/index.html"),
+ pair(":authority", "www.example.com"),
+ pair("custom-key", "custom-value"),
+ },
+ []HeaderField{
+ pair("custom-key", "custom-value"),
+ pair("cache-control", "no-cache"),
+ pair(":authority", "www.example.com"),
+ },
+ 164,
+ },
+ })
+}
+
+// C.4 Request Examples with Huffman Coding
+// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4
+func TestDecodeC4_Huffman(t *testing.T) {
+ testDecodeSeries(t, 4096, []encAndWant{
+ {dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"),
+ []HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "http"),
+ pair(":path", "/"),
+ pair(":authority", "www.example.com"),
+ },
+ []HeaderField{
+ pair(":authority", "www.example.com"),
+ },
+ 57,
+ },
+ {dehex("8286 84be 5886 a8eb 1064 9cbf"),
+ []HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "http"),
+ pair(":path", "/"),
+ pair(":authority", "www.example.com"),
+ pair("cache-control", "no-cache"),
+ },
+ []HeaderField{
+ pair("cache-control", "no-cache"),
+ pair(":authority", "www.example.com"),
+ },
+ 110,
+ },
+ {dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"),
+ []HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "https"),
+ pair(":path", "/index.html"),
+ pair(":authority", "www.example.com"),
+ pair("custom-key", "custom-value"),
+ },
+ []HeaderField{
+ pair("custom-key", "custom-value"),
+ pair("cache-control", "no-cache"),
+ pair(":authority", "www.example.com"),
+ },
+ 164,
+ },
+ })
+}
+
+// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5
+// "This section shows several consecutive header lists, corresponding
+// to HTTP responses, on the same connection. The HTTP/2 setting
+// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
+// octets, causing some evictions to occur."
+func TestDecodeC5_ResponsesNoHuff(t *testing.T) {
+ testDecodeSeries(t, 256, []encAndWant{
+ {dehex(`
+4803 3330 3258 0770 7269 7661 7465 611d
+4d6f 6e2c 2032 3120 4f63 7420 3230 3133
+2032 303a 3133 3a32 3120 474d 546e 1768
+7474 7073 3a2f 2f77 7777 2e65 7861 6d70
+6c65 2e63 6f6d
+`),
+ []HeaderField{
+ pair(":status", "302"),
+ pair("cache-control", "private"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("location", "https://www.example.com"),
+ },
+ []HeaderField{
+ pair("location", "https://www.example.com"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("cache-control", "private"),
+ pair(":status", "302"),
+ },
+ 222,
+ },
+ {dehex("4803 3330 37c1 c0bf"),
+ []HeaderField{
+ pair(":status", "307"),
+ pair("cache-control", "private"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("location", "https://www.example.com"),
+ },
+ []HeaderField{
+ pair(":status", "307"),
+ pair("location", "https://www.example.com"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("cache-control", "private"),
+ },
+ 222,
+ },
+ {dehex(`
+88c1 611d 4d6f 6e2c 2032 3120 4f63 7420
+3230 3133 2032 303a 3133 3a32 3220 474d
+54c0 5a04 677a 6970 7738 666f 6f3d 4153
+444a 4b48 514b 425a 584f 5157 454f 5049
+5541 5851 5745 4f49 553b 206d 6178 2d61
+6765 3d33 3630 303b 2076 6572 7369 6f6e
+3d31
+`),
+ []HeaderField{
+ pair(":status", "200"),
+ pair("cache-control", "private"),
+ pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
+ pair("location", "https://www.example.com"),
+ pair("content-encoding", "gzip"),
+ pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
+ },
+ []HeaderField{
+ pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
+ pair("content-encoding", "gzip"),
+ pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
+ },
+ 215,
+ },
+ })
+}
+
+// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6
+// "This section shows the same examples as the previous section, but
+// using Huffman encoding for the literal values. The HTTP/2 setting
+// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
+// octets, causing some evictions to occur. The eviction mechanism
+// uses the length of the decoded literal values, so the same
+// evictions occurs as in the previous section."
+func TestDecodeC6_ResponsesHuffman(t *testing.T) {
+ testDecodeSeries(t, 256, []encAndWant{
+ {dehex(`
+4882 6402 5885 aec3 771a 4b61 96d0 7abe
+9410 54d4 44a8 2005 9504 0b81 66e0 82a6
+2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8
+e9ae 82ae 43d3
+`),
+ []HeaderField{
+ pair(":status", "302"),
+ pair("cache-control", "private"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("location", "https://www.example.com"),
+ },
+ []HeaderField{
+ pair("location", "https://www.example.com"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("cache-control", "private"),
+ pair(":status", "302"),
+ },
+ 222,
+ },
+ {dehex("4883 640e ffc1 c0bf"),
+ []HeaderField{
+ pair(":status", "307"),
+ pair("cache-control", "private"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("location", "https://www.example.com"),
+ },
+ []HeaderField{
+ pair(":status", "307"),
+ pair("location", "https://www.example.com"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("cache-control", "private"),
+ },
+ 222,
+ },
+ {dehex(`
+88c1 6196 d07a be94 1054 d444 a820 0595
+040b 8166 e084 a62d 1bff c05a 839b d9ab
+77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b
+3960 d5af 2708 7f36 72c1 ab27 0fb5 291f
+9587 3160 65c0 03ed 4ee5 b106 3d50 07
+`),
+ []HeaderField{
+ pair(":status", "200"),
+ pair("cache-control", "private"),
+ pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
+ pair("location", "https://www.example.com"),
+ pair("content-encoding", "gzip"),
+ pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
+ },
+ []HeaderField{
+ pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
+ pair("content-encoding", "gzip"),
+ pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
+ },
+ 215,
+ },
+ })
+}
+
+// testDecodeSeries decodes each step in order on a single Decoder and
+// checks the emitted fields, the resulting dynamic table contents
+// (newest-first), and the dynamic table size after every step.
+func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) {
+ d := NewDecoder(size, nil)
+ for i, step := range steps {
+ hf, err := d.DecodeFull(step.enc)
+ if err != nil {
+ t.Fatalf("Error at step index %d: %v", i, err)
+ }
+ if !reflect.DeepEqual(hf, step.want) {
+ t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want)
+ }
+ gotDynTab := d.dynTab.reverseCopy()
+ if !reflect.DeepEqual(gotDynTab, step.wantDynTab) {
+ t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab)
+ }
+ if d.dynTab.size != step.wantDynSize {
+ t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize)
+ }
+ }
+}
+
+// TestHuffmanDecodeExcessPadding verifies that more than 7 bits of
+// padding (or padding ending mid-symbol) is rejected per RFC 7541 §5.2.
+func TestHuffmanDecodeExcessPadding(t *testing.T) {
+ tests := [][]byte{
+ {0xff}, // Padding Exceeds 7 bits
+ {0x1f, 0xff}, // {"a", 1 byte excess padding}
+ {0x1f, 0xff, 0xff}, // {"a", 2 byte excess padding}
+ {0x1f, 0xff, 0xff, 0xff}, // {"a", 3 byte excess padding}
+ {0xff, 0x9f, 0xff, 0xff, 0xff}, // {"a", 29 bit excess padding}
+ {'R', 0xbc, '0', 0xff, 0xff, 0xff, 0xff}, // Padding ends on partial symbol.
+ }
+ for i, in := range tests {
+ var buf bytes.Buffer
+ if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
+ t.Errorf("test-%d: decode(%q) = %v; want ErrInvalidHuffman", i, in, err)
+ }
+ }
+}
+
+// TestHuffmanDecodeEOS verifies that an explicit EOS symbol in the
+// input is rejected.
+func TestHuffmanDecodeEOS(t *testing.T) {
+ in := []byte{0xff, 0xff, 0xff, 0xff, 0xfc} // {EOS, "?"}
+ var buf bytes.Buffer
+ if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
+ t.Errorf("error = %v; want ErrInvalidHuffman", err)
+ }
+}
+
+// TestHuffmanDecodeMaxLengthOnTrailingByte checks the maxLen limit is
+// enforced even for a symbol completed by the final byte.
+func TestHuffmanDecodeMaxLengthOnTrailingByte(t *testing.T) {
+ in := []byte{0x00, 0x01} // {"0", "0", "0"}
+ var buf bytes.Buffer
+ if err := huffmanDecode(&buf, 2, in); err != ErrStringLength {
+ t.Errorf("error = %v; want ErrStringLength", err)
+ }
+}
+
+// TestHuffmanDecodeCorruptPadding checks that padding bits which are
+// not all ones (not an EOS prefix) are rejected.
+func TestHuffmanDecodeCorruptPadding(t *testing.T) {
+ in := []byte{0x00}
+ var buf bytes.Buffer
+ if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
+ t.Errorf("error = %v; want ErrInvalidHuffman", err)
+ }
+}
+
+// TestHuffmanDecode decodes the Huffman-coded strings from the RFC
+// 7541 Appendix C examples and compares against the known plaintexts.
+func TestHuffmanDecode(t *testing.T) {
+ tests := []struct {
+ inHex, want string
+ }{
+ {"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"},
+ {"a8eb 1064 9cbf", "no-cache"},
+ {"25a8 49e9 5ba9 7d7f", "custom-key"},
+ {"25a8 49e9 5bb8 e8b4 bf", "custom-value"},
+ {"6402", "302"},
+ {"aec3 771a 4b", "private"},
+ {"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"},
+ {"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"},
+ {"9bd9 ab", "gzip"},
+ {"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07",
+ "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
+ }
+ for i, tt := range tests {
+ var buf bytes.Buffer
+ in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1))
+ if err != nil {
+ t.Errorf("%d. hex input error: %v", i, err)
+ continue
+ }
+ if _, err := HuffmanDecode(&buf, in); err != nil {
+ t.Errorf("%d. decode error: %v", i, err)
+ continue
+ }
+ if got := buf.String(); tt.want != got {
+ t.Errorf("%d. decode = %q; want %q", i, got, tt.want)
+ }
+ }
+}
+
+// TestAppendHuffmanString is the inverse of TestHuffmanDecode: the
+// same Appendix C pairs, checked in the encoding direction.
+func TestAppendHuffmanString(t *testing.T) {
+ tests := []struct {
+ in, want string
+ }{
+ {"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
+ {"no-cache", "a8eb 1064 9cbf"},
+ {"custom-key", "25a8 49e9 5ba9 7d7f"},
+ {"custom-value", "25a8 49e9 5bb8 e8b4 bf"},
+ {"302", "6402"},
+ {"private", "aec3 771a 4b"},
+ {"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"},
+ {"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"},
+ {"gzip", "9bd9 ab"},
+ {"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1",
+ "94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"},
+ }
+ for i, tt := range tests {
+ buf := []byte{}
+ want := strings.Replace(tt.want, " ", "", -1)
+ buf = AppendHuffmanString(buf, tt.in)
+ if got := hex.EncodeToString(buf); want != got {
+ t.Errorf("%d. encode = %q; want %q", i, got, want)
+ }
+ }
+}
+
+// TestHuffmanMaxStrLen checks huffmanDecode's maxLen parameter at and
+// around the boundary (0 means unlimited).
+func TestHuffmanMaxStrLen(t *testing.T) {
+ const msg = "Some string"
+ huff := AppendHuffmanString(nil, msg)
+
+ testGood := func(max int) {
+ var out bytes.Buffer
+ if err := huffmanDecode(&out, max, huff); err != nil {
+ t.Errorf("For maxLen=%d, unexpected error: %v", max, err)
+ }
+ if out.String() != msg {
+ t.Errorf("For maxLen=%d, out = %q; want %q", max, out.String(), msg)
+ }
+ }
+ testGood(0)
+ testGood(len(msg))
+ testGood(len(msg) + 1)
+
+ var out bytes.Buffer
+ if err := huffmanDecode(&out, len(msg)-1, huff); err != ErrStringLength {
+ t.Errorf("err = %v; want ErrStringLength", err)
+ }
+}
+
+// TestHuffmanRoundtripStress encodes/decodes random byte strings and
+// requires an exact roundtrip; the seed is logged for reproduction.
+func TestHuffmanRoundtripStress(t *testing.T) {
+ const Len = 50 // of uncompressed string
+ input := make([]byte, Len)
+ var output bytes.Buffer
+ var huff []byte
+
+ n := 5000
+ if testing.Short() {
+ n = 100
+ }
+ seed := time.Now().UnixNano()
+ t.Logf("Seed = %v", seed)
+ src := rand.New(rand.NewSource(seed))
+ var encSize int64
+ for i := 0; i < n; i++ {
+ for l := range input {
+ input[l] = byte(src.Intn(256))
+ }
+ huff = AppendHuffmanString(huff[:0], string(input))
+ encSize += int64(len(huff))
+ output.Reset()
+ if err := huffmanDecode(&output, 0, huff); err != nil {
+ t.Errorf("Failed to decode %q -> %q -> error %v", input, huff, err)
+ continue
+ }
+ if !bytes.Equal(output.Bytes(), input) {
+ t.Errorf("Roundtrip failure on %q -> %q -> %q", input, huff, output.Bytes())
+ }
+ }
+ t.Logf("Compressed size of original: %0.02f%% (%v -> %v)", 100*(float64(encSize)/(Len*float64(n))), Len*n, encSize)
+}
+
+// TestHuffmanDecodeFuzz feeds random (mostly invalid) byte strings to
+// the decoder; only ErrInvalidHuffman is an acceptable failure mode.
+func TestHuffmanDecodeFuzz(t *testing.T) {
+ const Len = 50 // of compressed
+ var buf, zbuf bytes.Buffer
+
+ n := 5000
+ if testing.Short() {
+ n = 100
+ }
+ seed := time.Now().UnixNano()
+ t.Logf("Seed = %v", seed)
+ src := rand.New(rand.NewSource(seed))
+ numFail := 0
+ for i := 0; i < n; i++ {
+ zbuf.Reset()
+ if i == 0 {
+ // Start with at least one invalid one.
+ zbuf.WriteString("00\x91\xff\xff\xff\xff\xc8")
+ } else {
+ for l := 0; l < Len; l++ {
+ zbuf.WriteByte(byte(src.Intn(256)))
+ }
+ }
+
+ buf.Reset()
+ if err := huffmanDecode(&buf, 0, zbuf.Bytes()); err != nil {
+ if err == ErrInvalidHuffman {
+ numFail++
+ continue
+ }
+ t.Errorf("Failed to decode %q: %v", zbuf.Bytes(), err)
+ continue
+ }
+ }
+ t.Logf("%0.02f%% are invalid (%d / %d)", 100*float64(numFail)/float64(n), numFail, n)
+ if numFail < 1 {
+ t.Error("expected at least one invalid huffman encoding (test starts with one)")
+ }
+}
+
+// TestReadVarInt covers every prefix width (1..8 bits), continuation
+// bytes, truncated input, trailing bytes, and overflow.
+func TestReadVarInt(t *testing.T) {
+ type res struct {
+ i uint64
+ consumed int
+ err error
+ }
+ tests := []struct {
+ n byte
+ p []byte
+ want res
+ }{
+ // Fits in a byte:
+ {1, []byte{0}, res{0, 1, nil}},
+ {2, []byte{2}, res{2, 1, nil}},
+ {3, []byte{6}, res{6, 1, nil}},
+ {4, []byte{14}, res{14, 1, nil}},
+ {5, []byte{30}, res{30, 1, nil}},
+ {6, []byte{62}, res{62, 1, nil}},
+ {7, []byte{126}, res{126, 1, nil}},
+ {8, []byte{254}, res{254, 1, nil}},
+
+ // Doesn't fit in a byte:
+ {1, []byte{1}, res{0, 0, errNeedMore}},
+ {2, []byte{3}, res{0, 0, errNeedMore}},
+ {3, []byte{7}, res{0, 0, errNeedMore}},
+ {4, []byte{15}, res{0, 0, errNeedMore}},
+ {5, []byte{31}, res{0, 0, errNeedMore}},
+ {6, []byte{63}, res{0, 0, errNeedMore}},
+ {7, []byte{127}, res{0, 0, errNeedMore}},
+ {8, []byte{255}, res{0, 0, errNeedMore}},
+
+ // Ignoring top bits:
+ {5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111
+ {5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100
+ {5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101
+
+ // Extra byte:
+ {5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte
+
+ // Short a byte:
+ {5, []byte{191, 154}, res{0, 0, errNeedMore}},
+
+ // integer overflow:
+ {1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}},
+ }
+ for _, tt := range tests {
+ i, remain, err := readVarInt(tt.n, tt.p)
+ consumed := len(tt.p) - len(remain)
+ got := res{i, consumed, err}
+ if got != tt.want {
+ t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want)
+ }
+ }
+}
+
+// Fuzz crash, originally reported at https://github.com/bradfitz/http2/issues/56
+func TestHuffmanFuzzCrash(t *testing.T) {
+ got, err := HuffmanDecodeToString([]byte("00\x91\xff\xff\xff\xff\xc8"))
+ if got != "" {
+ t.Errorf("Got %q; want empty string", got)
+ }
+ if err != ErrInvalidHuffman {
+ t.Errorf("Err = %v; want ErrInvalidHuffman", err)
+ }
+}
+
+// pair is a shorthand HeaderField constructor for tests.
+func pair(name, value string) HeaderField {
+ return HeaderField{Name: name, Value: value}
+}
+
+// dehex decodes a hex string, ignoring spaces and newlines; it panics
+// on malformed input (test fixtures only).
+func dehex(s string) []byte {
+ s = strings.Replace(s, " ", "", -1)
+ s = strings.Replace(s, "\n", "", -1)
+ b, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+// TestEmitEnabled verifies that SetEmitEnabled(false) from inside the
+// emit callback stops further callbacks for the rest of the block.
+func TestEmitEnabled(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ enc.WriteField(HeaderField{Name: "foo", Value: "bar"})
+ enc.WriteField(HeaderField{Name: "foo", Value: "bar"})
+
+ numCallback := 0
+ var dec *Decoder
+ dec = NewDecoder(8<<20, func(HeaderField) {
+ numCallback++
+ dec.SetEmitEnabled(false)
+ })
+ if !dec.EmitEnabled() {
+ t.Errorf("initial emit enabled = false; want true")
+ }
+ if _, err := dec.Write(buf.Bytes()); err != nil {
+ t.Error(err)
+ }
+ if numCallback != 1 {
+ t.Errorf("num callbacks = %d; want 1", numCallback)
+ }
+ if dec.EmitEnabled() {
+ t.Errorf("emit enabled = true; want false")
+ }
+}
+
+// TestSaveBufLimit checks that an overlong string in a fragmented
+// header block trips ErrStringLength instead of buffering unboundedly.
+func TestSaveBufLimit(t *testing.T) {
+ const maxStr = 1 << 10
+ var got []HeaderField
+ dec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) {
+ got = append(got, hf)
+ })
+ dec.SetMaxStringLength(maxStr)
+ var frag []byte
+ frag = append(frag[:0], encodeTypeByte(false, false))
+ frag = appendVarInt(frag, 7, 3)
+ frag = append(frag, "foo"...)
+ frag = appendVarInt(frag, 7, 3)
+ frag = append(frag, "bar"...)
+
+ if _, err := dec.Write(frag); err != nil {
+ t.Fatal(err)
+ }
+
+ want := []HeaderField{{Name: "foo", Value: "bar"}}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("After small writes, got %v; want %v", got, want)
+ }
+
+ frag = append(frag[:0], encodeTypeByte(false, false))
+ frag = appendVarInt(frag, 7, maxStr*3)
+ frag = append(frag, make([]byte, maxStr*3)...)
+
+ _, err := dec.Write(frag)
+ if err != ErrStringLength {
+ t.Fatalf("Write error = %v; want ErrStringLength", err)
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
new file mode 100644
index 0000000..8850e39
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -0,0 +1,212 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+)
+
+// bufPool recycles scratch buffers across Huffman decode calls.
+var bufPool = sync.Pool{
+ New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, 0, v); err != nil {
+ return 0, err
+ }
+ return w.Write(buf.Bytes())
+}
+
+// HuffmanDecodeToString decodes the string in v.
+func HuffmanDecodeToString(v []byte) (string, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, 0, v); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+// ErrInvalidHuffman is returned for errors found decoding
+// Huffman-encoded strings.
+var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
+
+// huffmanDecode decodes v to buf by walking the code tree one byte of
+// input bits at a time.
+// If maxLen is greater than 0, attempts to write more to buf than
+// maxLen bytes will return ErrStringLength.
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
+ n := rootHuffmanNode
+ // cur is the bit buffer that has not been fed into n.
+ // cbits is the number of low order bits in cur that are valid.
+ // sbits is the number of bits of the symbol prefix being decoded.
+ // (sbits resets after each emitted symbol, so at the end it is the
+ // number of trailing bits not part of any symbol — i.e. padding.)
+ cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+ for _, b := range v {
+ cur = cur<<8 | uint(b)
+ cbits += 8
+ sbits += 8
+ for cbits >= 8 {
+ idx := byte(cur >> (cbits - 8))
+ n = n.children[idx]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children == nil {
+ // Leaf: emit the symbol and restart at the root.
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ cbits -= n.codeLen
+ n = rootHuffmanNode
+ sbits = cbits
+ } else {
+ cbits -= 8
+ }
+ }
+ }
+ // Drain any symbols completable from the remaining <8 bits.
+ for cbits > 0 {
+ n = n.children[byte(cur<<(8-cbits))]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children != nil || n.codeLen > cbits {
+ break
+ }
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ cbits -= n.codeLen
+ n = rootHuffmanNode
+ sbits = cbits
+ }
+ if sbits > 7 {
+ // Either there was an incomplete symbol, or overlong padding.
+ // Both are decoding errors per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+ if mask := uint(1<<cbits - 1); cur&mask != mask {
+ // Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+
+ return nil
+}
+
+// node is one level of the Huffman decoding tree: an internal node
+// dispatches on 8 input bits at a time; a leaf names an output symbol.
+type node struct {
+ // children is non-nil for internal nodes
+ children []*node
+
+ // The following are only valid if children is nil:
+ codeLen uint8 // number of bits that led to the output of sym
+ sym byte // output symbol
+}
+
+func newInternalNode() *node {
+ return &node{children: make([]*node, 256)}
+}
+
+// rootHuffmanNode is the decode tree root, populated once in init.
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+ if len(huffmanCodes) != 256 {
+ panic("unexpected size")
+ }
+ for i, code := range huffmanCodes {
+ addDecoderNode(byte(i), code, huffmanCodeLen[i])
+ }
+}
+
+// addDecoderNode inserts sym into the decode tree. Codes longer than
+// 8 bits descend through internal nodes; the final partial byte fans
+// out to every child slot sharing that code prefix.
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+ cur := rootHuffmanNode
+ for codeLen > 8 {
+ codeLen -= 8
+ i := uint8(code >> codeLen)
+ if cur.children[i] == nil {
+ cur.children[i] = newInternalNode()
+ }
+ cur = cur.children[i]
+ }
+ shift := 8 - codeLen
+ start, end := int(uint8(code<<shift)), int(1<<shift)
+ for i := start; i < start+end; i++ {
+ cur.children[i] = &node{sym: sym, codeLen: codeLen}
+ }
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer. The final partial byte, if any, is
+// padded with the most significant bits of the EOS symbol (all ones),
+// as required by RFC 7541 Section 5.2.
+func AppendHuffmanString(dst []byte, s string) []byte {
+ rembits := uint8(8)
+
+ for i := 0; i < len(s); i++ {
+ if rembits == 8 {
+ dst = append(dst, 0)
+ }
+ dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+ }
+
+ if rembits < 8 {
+ // special EOS symbol
+ code := uint32(0x3fffffff)
+ nbits := uint8(30)
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+ }
+
+ return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is round up to byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+ n := uint64(0)
+ for i := 0; i < len(s); i++ {
+ n += uint64(huffmanCodeLen[s[i]])
+ }
+ return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst is given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+ code := huffmanCodes[c]
+ nbits := huffmanCodeLen[c]
+
+ for {
+ if rembits > nbits {
+ // The whole code fits in the current byte's free bits.
+ t := uint8(code << (rembits - nbits))
+ dst[len(dst)-1] |= t
+ rembits -= nbits
+ break
+ }
+
+ // Fill the current byte with the code's top rembits bits.
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+
+ nbits -= rembits
+ rembits = 8
+
+ if nbits == 0 {
+ break
+ }
+
+ dst = append(dst, 0)
+ }
+
+ return dst, rembits
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 0000000..a66cfbe
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,479 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "fmt"
+)
+
// headerFieldTable implements a list of HeaderFields.
// This is used to implement the static and dynamic tables.
type headerFieldTable struct {
	// For static tables, entries are never evicted.
	//
	// For dynamic tables, entries are evicted from ents[0] and added to the end.
	// Each entry has a unique id that starts at one and increments for each
	// entry that is added. This unique id is stable across evictions, meaning
	// it can be used as a pointer to a specific entry. As in hpack, unique ids
	// are 1-based. The unique id for ents[k] is k + evictCount + 1.
	//
	// Zero is not a valid unique id.
	//
	// evictCount should not overflow in any remotely practical situation. In
	// practice, we will have one dynamic table per HTTP/2 connection. If we
	// assume a very powerful server that handles 1M QPS per connection and each
	// request adds (then evicts) 100 entries from the table, it would still take
	// 2M years for evictCount to overflow.
	ents       []HeaderField
	evictCount uint64

	// byName maps a HeaderField name to the unique id of the newest entry with
	// the same name. See above for a definition of "unique id".
	byName map[string]uint64

	// byNameValue maps a HeaderField name/value pair to the unique id of the newest
	// entry with the same name and value. See above for a definition of "unique id".
	byNameValue map[pairNameValue]uint64
}

// pairNameValue is the key type for byNameValue lookups.
type pairNameValue struct {
	name, value string
}

// init allocates the lookup maps; it must be called before addEntry.
func (t *headerFieldTable) init() {
	t.byName = make(map[string]uint64)
	t.byNameValue = make(map[pairNameValue]uint64)
}

// len reports the number of entries in the table.
func (t *headerFieldTable) len() int {
	return len(t.ents)
}

// addEntry adds a new entry.
func (t *headerFieldTable) addEntry(f HeaderField) {
	id := uint64(t.len()) + t.evictCount + 1
	// Overwrite unconditionally: the newest entry always wins the lookup maps.
	t.byName[f.Name] = id
	t.byNameValue[pairNameValue{f.Name, f.Value}] = id
	t.ents = append(t.ents, f)
}

// evictOldest evicts the n oldest entries in the table.
func (t *headerFieldTable) evictOldest(n int) {
	if n > t.len() {
		panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
	}
	for k := 0; k < n; k++ {
		f := t.ents[k]
		id := t.evictCount + uint64(k) + 1
		// Only delete a map entry if it still points at the evicted entry;
		// a newer entry with the same key may have overwritten it.
		if t.byName[f.Name] == id {
			delete(t.byName, f.Name)
		}
		if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
			delete(t.byNameValue, p)
		}
	}
	copy(t.ents, t.ents[n:])
	for k := t.len() - n; k < t.len(); k++ {
		t.ents[k] = HeaderField{} // so strings can be garbage collected
	}
	t.ents = t.ents[:t.len()-n]
	if t.evictCount+uint64(n) < t.evictCount {
		panic("evictCount overflow")
	}
	t.evictCount += uint64(n)
}

// search finds f in the table. If there is no match, i is 0.
// If both name and value match, i is the matched index and nameValueMatch
// becomes true. If only name matches, i points to that index and
// nameValueMatch becomes false.
//
// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
// table, the return value i actually refers to the entry t.ents[t.len()-i].
//
// All tables are assumed to be a dynamic tables except for the global
// staticTable pointer.
//
// See Section 2.3.3.
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
	if !f.Sensitive {
		if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
			return t.idToIndex(id), true
		}
	}
	if id := t.byName[f.Name]; id != 0 {
		return t.idToIndex(id), false
	}
	return 0, false
}

// idToIndex converts a unique id to an HPACK index.
// See Section 2.3.3.
func (t *headerFieldTable) idToIndex(id uint64) uint64 {
	if id <= t.evictCount {
		panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
	}
	k := id - t.evictCount - 1 // convert id to an index t.ents[k]
	if t != staticTable {
		return uint64(t.len()) - k // dynamic table
	}
	return k + 1
}
+
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
var staticTable = newStaticTable()

// staticTableEntries lists the HPACK static table in spec order; the
// 1-based HPACK index of each entry is its position here plus one, so
// the order must not be changed.
var staticTableEntries = [...]HeaderField{
	{Name: ":authority"},
	{Name: ":method", Value: "GET"},
	{Name: ":method", Value: "POST"},
	{Name: ":path", Value: "/"},
	{Name: ":path", Value: "/index.html"},
	{Name: ":scheme", Value: "http"},
	{Name: ":scheme", Value: "https"},
	{Name: ":status", Value: "200"},
	{Name: ":status", Value: "204"},
	{Name: ":status", Value: "206"},
	{Name: ":status", Value: "304"},
	{Name: ":status", Value: "400"},
	{Name: ":status", Value: "404"},
	{Name: ":status", Value: "500"},
	{Name: "accept-charset"},
	{Name: "accept-encoding", Value: "gzip, deflate"},
	{Name: "accept-language"},
	{Name: "accept-ranges"},
	{Name: "accept"},
	{Name: "access-control-allow-origin"},
	{Name: "age"},
	{Name: "allow"},
	{Name: "authorization"},
	{Name: "cache-control"},
	{Name: "content-disposition"},
	{Name: "content-encoding"},
	{Name: "content-language"},
	{Name: "content-length"},
	{Name: "content-location"},
	{Name: "content-range"},
	{Name: "content-type"},
	{Name: "cookie"},
	{Name: "date"},
	{Name: "etag"},
	{Name: "expect"},
	{Name: "expires"},
	{Name: "from"},
	{Name: "host"},
	{Name: "if-match"},
	{Name: "if-modified-since"},
	{Name: "if-none-match"},
	{Name: "if-range"},
	{Name: "if-unmodified-since"},
	{Name: "last-modified"},
	{Name: "link"},
	{Name: "location"},
	{Name: "max-forwards"},
	{Name: "proxy-authenticate"},
	{Name: "proxy-authorization"},
	{Name: "range"},
	{Name: "referer"},
	{Name: "refresh"},
	{Name: "retry-after"},
	{Name: "server"},
	{Name: "set-cookie"},
	{Name: "strict-transport-security"},
	{Name: "transfer-encoding"},
	{Name: "user-agent"},
	{Name: "vary"},
	{Name: "via"},
	{Name: "www-authenticate"},
}
+
+func newStaticTable() *headerFieldTable {
+ t := &headerFieldTable{}
+ t.init()
+ for _, e := range staticTableEntries[:] {
+ t.addEntry(e)
+ }
+ return t
+}
+
// huffmanCodes maps each byte value to its Huffman code from the HPACK
// spec's Huffman code table; the code occupies the low huffmanCodeLen[i]
// bits of each entry.
var huffmanCodes = [256]uint32{
	0x1ff8,
	0x7fffd8,
	0xfffffe2,
	0xfffffe3,
	0xfffffe4,
	0xfffffe5,
	0xfffffe6,
	0xfffffe7,
	0xfffffe8,
	0xffffea,
	0x3ffffffc,
	0xfffffe9,
	0xfffffea,
	0x3ffffffd,
	0xfffffeb,
	0xfffffec,
	0xfffffed,
	0xfffffee,
	0xfffffef,
	0xffffff0,
	0xffffff1,
	0xffffff2,
	0x3ffffffe,
	0xffffff3,
	0xffffff4,
	0xffffff5,
	0xffffff6,
	0xffffff7,
	0xffffff8,
	0xffffff9,
	0xffffffa,
	0xffffffb,
	0x14,
	0x3f8,
	0x3f9,
	0xffa,
	0x1ff9,
	0x15,
	0xf8,
	0x7fa,
	0x3fa,
	0x3fb,
	0xf9,
	0x7fb,
	0xfa,
	0x16,
	0x17,
	0x18,
	0x0,
	0x1,
	0x2,
	0x19,
	0x1a,
	0x1b,
	0x1c,
	0x1d,
	0x1e,
	0x1f,
	0x5c,
	0xfb,
	0x7ffc,
	0x20,
	0xffb,
	0x3fc,
	0x1ffa,
	0x21,
	0x5d,
	0x5e,
	0x5f,
	0x60,
	0x61,
	0x62,
	0x63,
	0x64,
	0x65,
	0x66,
	0x67,
	0x68,
	0x69,
	0x6a,
	0x6b,
	0x6c,
	0x6d,
	0x6e,
	0x6f,
	0x70,
	0x71,
	0x72,
	0xfc,
	0x73,
	0xfd,
	0x1ffb,
	0x7fff0,
	0x1ffc,
	0x3ffc,
	0x22,
	0x7ffd,
	0x3,
	0x23,
	0x4,
	0x24,
	0x5,
	0x25,
	0x26,
	0x27,
	0x6,
	0x74,
	0x75,
	0x28,
	0x29,
	0x2a,
	0x7,
	0x2b,
	0x76,
	0x2c,
	0x8,
	0x9,
	0x2d,
	0x77,
	0x78,
	0x79,
	0x7a,
	0x7b,
	0x7ffe,
	0x7fc,
	0x3ffd,
	0x1ffd,
	0xffffffc,
	0xfffe6,
	0x3fffd2,
	0xfffe7,
	0xfffe8,
	0x3fffd3,
	0x3fffd4,
	0x3fffd5,
	0x7fffd9,
	0x3fffd6,
	0x7fffda,
	0x7fffdb,
	0x7fffdc,
	0x7fffdd,
	0x7fffde,
	0xffffeb,
	0x7fffdf,
	0xffffec,
	0xffffed,
	0x3fffd7,
	0x7fffe0,
	0xffffee,
	0x7fffe1,
	0x7fffe2,
	0x7fffe3,
	0x7fffe4,
	0x1fffdc,
	0x3fffd8,
	0x7fffe5,
	0x3fffd9,
	0x7fffe6,
	0x7fffe7,
	0xffffef,
	0x3fffda,
	0x1fffdd,
	0xfffe9,
	0x3fffdb,
	0x3fffdc,
	0x7fffe8,
	0x7fffe9,
	0x1fffde,
	0x7fffea,
	0x3fffdd,
	0x3fffde,
	0xfffff0,
	0x1fffdf,
	0x3fffdf,
	0x7fffeb,
	0x7fffec,
	0x1fffe0,
	0x1fffe1,
	0x3fffe0,
	0x1fffe2,
	0x7fffed,
	0x3fffe1,
	0x7fffee,
	0x7fffef,
	0xfffea,
	0x3fffe2,
	0x3fffe3,
	0x3fffe4,
	0x7ffff0,
	0x3fffe5,
	0x3fffe6,
	0x7ffff1,
	0x3ffffe0,
	0x3ffffe1,
	0xfffeb,
	0x7fff1,
	0x3fffe7,
	0x7ffff2,
	0x3fffe8,
	0x1ffffec,
	0x3ffffe2,
	0x3ffffe3,
	0x3ffffe4,
	0x7ffffde,
	0x7ffffdf,
	0x3ffffe5,
	0xfffff1,
	0x1ffffed,
	0x7fff2,
	0x1fffe3,
	0x3ffffe6,
	0x7ffffe0,
	0x7ffffe1,
	0x3ffffe7,
	0x7ffffe2,
	0xfffff2,
	0x1fffe4,
	0x1fffe5,
	0x3ffffe8,
	0x3ffffe9,
	0xffffffd,
	0x7ffffe3,
	0x7ffffe4,
	0x7ffffe5,
	0xfffec,
	0xfffff3,
	0xfffed,
	0x1fffe6,
	0x3fffe9,
	0x1fffe7,
	0x1fffe8,
	0x7ffff3,
	0x3fffea,
	0x3fffeb,
	0x1ffffee,
	0x1ffffef,
	0xfffff4,
	0xfffff5,
	0x3ffffea,
	0x7ffff4,
	0x3ffffeb,
	0x7ffffe6,
	0x3ffffec,
	0x3ffffed,
	0x7ffffe7,
	0x7ffffe8,
	0x7ffffe9,
	0x7ffffea,
	0x7ffffeb,
	0xffffffe,
	0x7ffffec,
	0x7ffffed,
	0x7ffffee,
	0x7ffffef,
	0x7fffff0,
	0x3ffffee,
}
+
// huffmanCodeLen gives, for each byte value, the bit length of the
// corresponding entry in huffmanCodes (16 values per row).
var huffmanCodeLen = [256]uint8{
	13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
	28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
	5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
	13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
	15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
	6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
	20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
	24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
	22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
	21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
	26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
	19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
	20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
	26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables_test.go b/vendor/golang.org/x/net/http2/hpack/tables_test.go
new file mode 100644
index 0000000..d963f36
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables_test.go
@@ -0,0 +1,214 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "bufio"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func TestHeaderFieldTable(t *testing.T) {
+ table := &headerFieldTable{}
+ table.init()
+ table.addEntry(pair("key1", "value1-1"))
+ table.addEntry(pair("key2", "value2-1"))
+ table.addEntry(pair("key1", "value1-2"))
+ table.addEntry(pair("key3", "value3-1"))
+ table.addEntry(pair("key4", "value4-1"))
+ table.addEntry(pair("key2", "value2-2"))
+
+ // Tests will be run twice: once before evicting anything, and
+ // again after evicting the three oldest entries.
+ tests := []struct {
+ f HeaderField
+ beforeWantStaticI uint64
+ beforeWantMatch bool
+ afterWantStaticI uint64
+ afterWantMatch bool
+ }{
+ {HeaderField{"key1", "value1-1", false}, 1, true, 0, false},
+ {HeaderField{"key1", "value1-2", false}, 3, true, 0, false},
+ {HeaderField{"key1", "value1-3", false}, 3, false, 0, false},
+ {HeaderField{"key2", "value2-1", false}, 2, true, 3, false},
+ {HeaderField{"key2", "value2-2", false}, 6, true, 3, true},
+ {HeaderField{"key2", "value2-3", false}, 6, false, 3, false},
+ {HeaderField{"key4", "value4-1", false}, 5, true, 2, true},
+ // Name match only, because sensitive.
+ {HeaderField{"key4", "value4-1", true}, 5, false, 2, false},
+ // Key not found.
+ {HeaderField{"key5", "value5-x", false}, 0, false, 0, false},
+ }
+
+ staticToDynamic := func(i uint64) uint64 {
+ if i == 0 {
+ return 0
+ }
+ return uint64(table.len()) - i + 1 // dynamic is the reversed table
+ }
+
+ searchStatic := func(f HeaderField) (uint64, bool) {
+ old := staticTable
+ staticTable = table
+ defer func() { staticTable = old }()
+ return staticTable.search(f)
+ }
+
+ searchDynamic := func(f HeaderField) (uint64, bool) {
+ return table.search(f)
+ }
+
+ for _, test := range tests {
+ gotI, gotMatch := searchStatic(test.f)
+ if wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch {
+ t.Errorf("before evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
+ }
+ gotI, gotMatch = searchDynamic(test.f)
+ wantDynamicI := staticToDynamic(test.beforeWantStaticI)
+ if wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch {
+ t.Errorf("before evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
+ }
+ }
+
+ table.evictOldest(3)
+
+ for _, test := range tests {
+ gotI, gotMatch := searchStatic(test.f)
+ if wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch {
+ t.Errorf("after evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
+ }
+ gotI, gotMatch = searchDynamic(test.f)
+ wantDynamicI := staticToDynamic(test.afterWantStaticI)
+ if wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch {
+ t.Errorf("after evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
+ }
+ }
+}
+
+func TestHeaderFieldTable_LookupMapEviction(t *testing.T) {
+ table := &headerFieldTable{}
+ table.init()
+ table.addEntry(pair("key1", "value1-1"))
+ table.addEntry(pair("key2", "value2-1"))
+ table.addEntry(pair("key1", "value1-2"))
+ table.addEntry(pair("key3", "value3-1"))
+ table.addEntry(pair("key4", "value4-1"))
+ table.addEntry(pair("key2", "value2-2"))
+
+ // evict all pairs
+ table.evictOldest(table.len())
+
+ if l := table.len(); l > 0 {
+ t.Errorf("table.len() = %d, want 0", l)
+ }
+
+ if l := len(table.byName); l > 0 {
+ t.Errorf("len(table.byName) = %d, want 0", l)
+ }
+
+ if l := len(table.byNameValue); l > 0 {
+ t.Errorf("len(table.byNameValue) = %d, want 0", l)
+ }
+}
+
+func TestStaticTable(t *testing.T) {
+ fromSpec := `
+ +-------+-----------------------------+---------------+
+ | 1 | :authority | |
+ | 2 | :method | GET |
+ | 3 | :method | POST |
+ | 4 | :path | / |
+ | 5 | :path | /index.html |
+ | 6 | :scheme | http |
+ | 7 | :scheme | https |
+ | 8 | :status | 200 |
+ | 9 | :status | 204 |
+ | 10 | :status | 206 |
+ | 11 | :status | 304 |
+ | 12 | :status | 400 |
+ | 13 | :status | 404 |
+ | 14 | :status | 500 |
+ | 15 | accept-charset | |
+ | 16 | accept-encoding | gzip, deflate |
+ | 17 | accept-language | |
+ | 18 | accept-ranges | |
+ | 19 | accept | |
+ | 20 | access-control-allow-origin | |
+ | 21 | age | |
+ | 22 | allow | |
+ | 23 | authorization | |
+ | 24 | cache-control | |
+ | 25 | content-disposition | |
+ | 26 | content-encoding | |
+ | 27 | content-language | |
+ | 28 | content-length | |
+ | 29 | content-location | |
+ | 30 | content-range | |
+ | 31 | content-type | |
+ | 32 | cookie | |
+ | 33 | date | |
+ | 34 | etag | |
+ | 35 | expect | |
+ | 36 | expires | |
+ | 37 | from | |
+ | 38 | host | |
+ | 39 | if-match | |
+ | 40 | if-modified-since | |
+ | 41 | if-none-match | |
+ | 42 | if-range | |
+ | 43 | if-unmodified-since | |
+ | 44 | last-modified | |
+ | 45 | link | |
+ | 46 | location | |
+ | 47 | max-forwards | |
+ | 48 | proxy-authenticate | |
+ | 49 | proxy-authorization | |
+ | 50 | range | |
+ | 51 | referer | |
+ | 52 | refresh | |
+ | 53 | retry-after | |
+ | 54 | server | |
+ | 55 | set-cookie | |
+ | 56 | strict-transport-security | |
+ | 57 | transfer-encoding | |
+ | 58 | user-agent | |
+ | 59 | vary | |
+ | 60 | via | |
+ | 61 | www-authenticate | |
+ +-------+-----------------------------+---------------+
+`
+ bs := bufio.NewScanner(strings.NewReader(fromSpec))
+ re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`)
+ for bs.Scan() {
+ l := bs.Text()
+ if !strings.Contains(l, "|") {
+ continue
+ }
+ m := re.FindStringSubmatch(l)
+ if m == nil {
+ continue
+ }
+ i, err := strconv.Atoi(m[1])
+ if err != nil {
+ t.Errorf("Bogus integer on line %q", l)
+ continue
+ }
+ if i < 1 || i > staticTable.len() {
+ t.Errorf("Bogus index %d on line %q", i, l)
+ continue
+ }
+ if got, want := staticTable.ents[i-1].Name, m[2]; got != want {
+ t.Errorf("header index %d name = %q; want %q", i, got, want)
+ }
+ if got, want := staticTable.ents[i-1].Value, m[3]; got != want {
+ t.Errorf("header index %d value = %q; want %q", i, got, want)
+ }
+ }
+ if err := bs.Err(); err != nil {
+ t.Error(err)
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
new file mode 100644
index 0000000..d565f40
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -0,0 +1,391 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package http2 implements the HTTP/2 protocol.
+//
+// This package is low-level and intended to be used directly by very
+// few people. Most users will use it indirectly through the automatic
+// use by the net/http package (from Go 1.6 and later).
+// For use in earlier Go versions see ConfigureServer. (Transport support
+// requires Go 1.6 or later)
+//
+// See https://http2.github.io/ for more information on HTTP/2.
+//
+// See https://http2.golang.org/ for a test server running this code.
+//
+package http2 // import "golang.org/x/net/http2"
+
+import (
+ "bufio"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/lex/httplex"
+)
+
var (
	VerboseLogs    bool // verbose debug logging; enabled by GODEBUG=http2debug=1 or =2
	logFrameWrites bool // log each written frame; enabled by GODEBUG=http2debug=2
	logFrameReads  bool // log each read frame; enabled by GODEBUG=http2debug=2
	inTests        bool // set by the test package's init
)
+
// init enables debug logging from the GODEBUG environment variable:
// http2debug=1 turns on verbose logs, http2debug=2 additionally logs
// every frame read and written.
func init() {
	e := os.Getenv("GODEBUG")
	if strings.Contains(e, "http2debug=1") {
		VerboseLogs = true
	}
	if strings.Contains(e, "http2debug=2") {
		VerboseLogs = true
		logFrameWrites = true
		logFrameReads = true
	}
}
+
const (
	// ClientPreface is the string that must be sent by new
	// connections from clients.
	ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

	// SETTINGS_MAX_FRAME_SIZE default
	// http://http2.github.io/http2-spec/#rfc.section.6.5.2
	initialMaxFrameSize = 16384

	// NextProtoTLS is the NPN/ALPN protocol negotiated during
	// HTTP/2's TLS setup.
	NextProtoTLS = "h2"

	// initialHeaderTableSize is the default HPACK dynamic table size.
	// http://http2.github.io/http2-spec/#SettingValues
	initialHeaderTableSize = 4096

	initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size

	// defaultMaxReadFrameSize is our default SETTINGS_MAX_FRAME_SIZE (1 MiB).
	defaultMaxReadFrameSize = 1 << 20
)
+
var (
	// clientPreface is ClientPreface as bytes, precomputed for comparisons.
	clientPreface = []byte(ClientPreface)
)
+
// streamState is the state of an HTTP/2 stream; see the const block below.
type streamState int

// HTTP/2 stream states.
//
// See http://tools.ietf.org/html/rfc7540#section-5.1.
//
// For simplicity, the server code merges "reserved (local)" into
// "half-closed (remote)". This is one less state transition to track.
// The only downside is that we send PUSH_PROMISEs slightly less
// liberally than allowable. More discussion here:
// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
//
// "reserved (remote)" is omitted since the client code does not
// support server push.
const (
	stateIdle streamState = iota
	stateOpen
	stateHalfClosedLocal
	stateHalfClosedRemote
	stateClosed
)

// stateName maps each streamState to a human-readable name for String.
var stateName = [...]string{
	stateIdle:             "Idle",
	stateOpen:             "Open",
	stateHalfClosedLocal:  "HalfClosedLocal",
	stateHalfClosedRemote: "HalfClosedRemote",
	stateClosed:           "Closed",
}

// String returns the human-readable name of the stream state.
func (st streamState) String() string {
	return stateName[st]
}
+
// Setting is a setting parameter: which setting it is, and its value.
type Setting struct {
	// ID is which setting is being set.
	// See http://http2.github.io/http2-spec/#SettingValues
	ID SettingID

	// Val is the value.
	Val uint32
}

// String formats the setting as "[NAME = value]" for debug output.
func (s Setting) String() string {
	return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
}
+
+// Valid reports whether the setting is valid.
+func (s Setting) Valid() error {
+ // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+ switch s.ID {
+ case SettingEnablePush:
+ if s.Val != 1 && s.Val != 0 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ case SettingInitialWindowSize:
+ if s.Val > 1<<31-1 {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ case SettingMaxFrameSize:
+ if s.Val < 16384 || s.Val > 1<<24-1 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ }
+ return nil
+}
+
// A SettingID is an HTTP/2 setting as defined in
// http://http2.github.io/http2-spec/#iana-settings
type SettingID uint16

const (
	SettingHeaderTableSize      SettingID = 0x1
	SettingEnablePush           SettingID = 0x2
	SettingMaxConcurrentStreams SettingID = 0x3
	SettingInitialWindowSize    SettingID = 0x4
	SettingMaxFrameSize         SettingID = 0x5
	SettingMaxHeaderListSize    SettingID = 0x6
)

// settingName maps each known SettingID to its IANA-registered name.
var settingName = map[SettingID]string{
	SettingHeaderTableSize:      "HEADER_TABLE_SIZE",
	SettingEnablePush:           "ENABLE_PUSH",
	SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
	SettingInitialWindowSize:    "INITIAL_WINDOW_SIZE",
	SettingMaxFrameSize:         "MAX_FRAME_SIZE",
	SettingMaxHeaderListSize:    "MAX_HEADER_LIST_SIZE",
}
+
+func (s SettingID) String() string {
+ if v, ok := settingName[s]; ok {
+ return v
+ }
+ return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
// Errors returned when validating header fields received on the wire.
var (
	errInvalidHeaderFieldName  = errors.New("http2: invalid header field name")
	errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
)
+
+// validWireHeaderFieldName reports whether v is a valid header field
+// name (key). See httplex.ValidHeaderName for the base rules.
+//
+// Further, http2 says:
+// "Just as in HTTP/1.x, header field names are strings of ASCII
+// characters that are compared in a case-insensitive
+// fashion. However, header field names MUST be converted to
+// lowercase prior to their encoding in HTTP/2. "
+func validWireHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !httplex.IsTokenRune(r) {
+ return false
+ }
+ if 'A' <= r && r <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
+
// httpCodeStringCommon caches the decimal string for every status code
// net/http knows about, so httpCodeString avoids strconv.Itoa in the
// common case. n -> strconv.Itoa(n).
var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)

func init() {
	for i := 100; i <= 999; i++ {
		if v := http.StatusText(i); v != "" {
			httpCodeStringCommon[i] = strconv.Itoa(i)
		}
	}
}
+
+func httpCodeString(code int) string {
+ if s, ok := httpCodeStringCommon[code]; ok {
+ return s
+ }
+ return strconv.Itoa(code)
+}
+
// stringWriter is the WriteString method set, as in pkg io,
// satisfied by types like *bufio.Writer.
type stringWriter interface {
	WriteString(s string) (n int, err error)
}

// A gate lets two goroutines coordinate their activities.
type gate chan struct{}

// Done signals the other goroutine; blocks until Wait is called.
func (g gate) Done() { g <- struct{}{} }

// Wait blocks until the other goroutine calls Done.
func (g gate) Wait() { <-g }
+
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
// It is implemented as a channel that is closed exactly once.
type closeWaiter chan struct{}

// Init makes a closeWaiter usable.
// It exists (rather than making the zero value usable) so that a
// closeWaiter can be embedded inside a larger struct and initialized
// in place.
func (cw *closeWaiter) Init() {
	*cw = make(chan struct{})
}

// Close marks the closeWaiter as closed and unblocks any waiters.
// It must be called at most once.
func (cw closeWaiter) Close() {
	close(cw)
}

// Wait waits for the closeWaiter to become closed.
func (cw closeWaiter) Wait() {
	<-cw
}
+
// bufferedWriter is a buffered writer that writes to w.
// Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections.
type bufferedWriter struct {
	w  io.Writer     // immutable
	bw *bufio.Writer // non-nil when data is buffered
}

// newBufferedWriter returns a bufferedWriter targeting w; no buffer is
// allocated until the first Write.
func newBufferedWriter(w io.Writer) *bufferedWriter {
	return &bufferedWriter{w: w}
}

// bufWriterPoolBufferSize is the size of bufio.Writer's
// buffers created using bufWriterPool.
//
// TODO: pick a less arbitrary value? this is a bit under
// (3 x typical 1500 byte MTU) at least. Other than that,
// not much thought went into it.
const bufWriterPoolBufferSize = 4 << 10

// bufWriterPool recycles bufio.Writers between Flush and the next Write.
var bufWriterPool = sync.Pool{
	New: func() interface{} {
		return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
	},
}

// Available reports how many bytes can be written without flushing.
// Before the first Write it reports the full pooled buffer size.
func (w *bufferedWriter) Available() int {
	if w.bw == nil {
		return bufWriterPoolBufferSize
	}
	return w.bw.Available()
}

// Write buffers p, lazily borrowing a bufio.Writer from the pool on
// first use.
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
	if w.bw == nil {
		bw := bufWriterPool.Get().(*bufio.Writer)
		bw.Reset(w.w)
		w.bw = bw
	}
	return w.bw.Write(p)
}

// Flush writes any buffered data to the underlying writer and returns
// the bufio.Writer to the pool. Calling Flush with nothing buffered is
// a no-op.
func (w *bufferedWriter) Flush() error {
	bw := w.bw
	if bw == nil {
		return nil
	}
	err := bw.Flush()
	bw.Reset(nil) // drop the reference to w.w before pooling
	bufWriterPool.Put(bw)
	w.bw = nil
	return err
}
+
// mustUint31 converts v to a uint32 known to fit in 31 bits, panicking
// if v is negative. Since v is an int32, it can never exceed 2^31-1,
// so only the sign needs checking. (The previous version also compared
// v > 2147483647, which is always false for an int32 and was dead code.)
func mustUint31(v int32) uint32 {
	if v < 0 {
		panic("out of range")
	}
	return uint32(v)
}
+
// bodyAllowedForStatus reports whether a given response status code
// permits a body (see RFC 7230, section 3.3): bodies are forbidden for
// 1xx informational responses, 204 No Content, and 304 Not Modified.
func bodyAllowedForStatus(status int) bool {
	if status >= 100 && status <= 199 {
		return false
	}
	return status != 204 && status != 304
}
+
// httpError is an error that additionally implements the net.Error-style
// Timeout and Temporary methods.
type httpError struct {
	msg     string
	timeout bool
}

func (e *httpError) Error() string   { return e.msg }
func (e *httpError) Timeout() bool   { return e.timeout }
func (e *httpError) Temporary() bool { return true }

// errTimeout is returned when response headers don't arrive in time.
var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
// connectionStater is implemented by connections (e.g. *tls.Conn) that
// can report their TLS state.
type connectionStater interface {
	ConnectionState() tls.ConnectionState
}
+
// sorterPool recycles sorter values (and their backing slices) so
// header-key sorting doesn't allocate per call.
var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}

// sorter sorts the strings in v; it implements sort.Interface.
type sorter struct {
	v []string // owned by sorter
}

func (s *sorter) Len() int           { return len(s.v) }
func (s *sorter) Swap(i, j int)      { s.v[i], s.v[j] = s.v[j], s.v[i] }
func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }

// Keys returns the sorted keys of h.
//
// The returned slice is only valid until s is used again or returned
// to its pool.
func (s *sorter) Keys(h http.Header) []string {
	ks := s.v[:0] // reuse the backing array from earlier calls
	for k := range h {
		ks = append(ks, k)
	}
	s.v = ks
	sort.Sort(s)
	return ks
}

// SortStrings sorts ss in place.
func (s *sorter) SortStrings(ss []string) {
	// The sorter operates on s.v, which it owns; temporarily point it
	// at the caller's slice, then restore the stashed buffer.
	saved := s.v
	s.v = ss
	sort.Sort(s)
	s.v = saved
}
+
// validPseudoPath reports whether v is a valid :path pseudo-header
// value: either a non-empty string starting with '/', or the literal
// "*" (for OPTIONS requests).
//
// For now this is only used as a quick check for deciding when to
// clean up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
	if v == "*" {
		return true
	}
	return len(v) > 0 && v[0] == '/'
}
diff --git a/vendor/golang.org/x/net/http2/http2_test.go b/vendor/golang.org/x/net/http2/http2_test.go
new file mode 100644
index 0000000..5248776
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/http2_test.go
@@ -0,0 +1,199 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "net/http"
+ "os/exec"
+ "strconv"
+ "strings"
+ "testing"
+
+ "golang.org/x/net/http2/hpack"
+)
+
// knownFailing gates tests that are expected to fail; they only run
// when --known_failing is passed.
var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.")

// condSkipFailingTest skips the calling test unless --known_failing was set.
func condSkipFailingTest(t *testing.T) {
	if !*knownFailing {
		t.Skip("Skipping known-failing test without --known_failing")
	}
}

// init wires test-only debug settings and the -verboseh2 flag.
func init() {
	inTests = true
	DebugGoroutines = true
	flag.BoolVar(&VerboseLogs, "verboseh2", VerboseLogs, "Verbose HTTP/2 debug logging")
}
+
// TestSettingString verifies Setting's String formatting for both known
// and unknown setting IDs.
func TestSettingString(t *testing.T) {
	tests := []struct {
		s    Setting
		want string
	}{
		{Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"},
		{Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"},
	}
	for i, tt := range tests {
		got := fmt.Sprint(tt.s)
		if got != tt.want {
			t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want)
		}
	}
}
+
// twriter is an io.Writer that forwards log output to a test's Logf,
// optionally suppressing lines matching the serverTester's logFilter.
type twriter struct {
	t  testing.TB
	st *serverTester // optional
}

func (w twriter) Write(p []byte) (n int, err error) {
	if w.st != nil {
		ps := string(p)
		for _, phrase := range w.st.logFilter {
			if strings.Contains(ps, phrase) {
				return len(p), nil // no logging
			}
		}
	}
	w.t.Logf("%s", p)
	return len(p), nil
}
+
// like encodeHeader, but don't add implicit pseudo headers.
// headers is a flat list of alternating name/value pairs.
func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	for len(headers) > 0 {
		k, v := headers[0], headers[1]
		headers = headers[2:]
		if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil {
			t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
		}
	}
	return buf.Bytes()
}
+
// Verify that curl has http2.
func requireCurl(t *testing.T) {
	out, err := dockerLogs(curl(t, "--version"))
	if err != nil {
		t.Skipf("failed to determine curl features; skipping test")
	}
	if !strings.Contains(string(out), "HTTP2") {
		t.Skip("curl doesn't support HTTP2; skipping test")
	}
}

// curl runs curl with args in a detached docker container on the host
// network and returns the container id; skips the test if docker fails.
func curl(t *testing.T, args ...string) (container string) {
	out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output()
	if err != nil {
		t.Skipf("Failed to run curl in docker: %v, %s", err, out)
	}
	return strings.TrimSpace(string(out))
}
+
// Verify that h2load exists.
func requireH2load(t *testing.T) {
	out, err := dockerLogs(h2load(t, "--version"))
	if err != nil {
		t.Skipf("failed to probe h2load; skipping test: %s", out)
	}
	if !strings.Contains(string(out), "h2load nghttp2/") {
		t.Skipf("h2load not present; skipping test. (Output=%q)", out)
	}
}

// h2load runs h2load with args in a detached docker container on the
// host network and returns the container id; skips the test on failure.
func h2load(t *testing.T, args ...string) (container string) {
	out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output()
	if err != nil {
		t.Skipf("Failed to run h2load in docker: %v, %s", err, out)
	}
	return strings.TrimSpace(string(out))
}
+
// puppetCommand is one handler invocation to run, plus a channel to
// signal completion.
type puppetCommand struct {
	fn   func(w http.ResponseWriter, r *http.Request)
	done chan<- bool
}

// handlerPuppet lets a test drive an http.Handler step by step: the
// handler goroutine blocks in act, executing commands the test sends
// via do.
type handlerPuppet struct {
	ch chan puppetCommand
}

func newHandlerPuppet() *handlerPuppet {
	return &handlerPuppet{
		ch: make(chan puppetCommand),
	}
}

// act runs inside the handler; it executes each command in order until
// done closes the channel.
func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) {
	for cmd := range p.ch {
		cmd.fn(w, r)
		cmd.done <- true
	}
}

// done ends the handler's command loop.
func (p *handlerPuppet) done() { close(p.ch) }

// do runs fn inside the handler and waits for it to finish.
func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) {
	done := make(chan bool)
	p.ch <- puppetCommand{fn, done}
	<-done
}
// dockerLogs waits for container to exit, collects its logs, removes
// the container, and returns an error if the container exited non-zero.
func dockerLogs(container string) ([]byte, error) {
	out, err := exec.Command("docker", "wait", container).CombinedOutput()
	if err != nil {
		return out, err
	}
	exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out)))
	if err != nil {
		return out, errors.New("unexpected exit status from docker wait")
	}
	out, err = exec.Command("docker", "logs", container).CombinedOutput()
	exec.Command("docker", "rm", container).Run()
	if err == nil && exitStatus != 0 {
		err = fmt.Errorf("exit status %d: %s", exitStatus, out)
	}
	return out, err
}

// kill force-stops and removes a docker container, best effort.
func kill(container string) {
	exec.Command("docker", "kill", container).Run()
	exec.Command("docker", "rm", container).Run()
}
+
// cleanDate replaces a response's single Date header value with a fixed
// placeholder so golden-output comparisons are deterministic.
func cleanDate(res *http.Response) {
	if d := res.Header["Date"]; len(d) == 1 {
		d[0] = "XXX"
	}
}
+
// TestSorterPoolAllocs checks that sorter reuses its buffer across
// calls. Note it only logs (t.Logf) rather than fails, presumably
// because alloc counts can vary across Go versions.
func TestSorterPoolAllocs(t *testing.T) {
	ss := []string{"a", "b", "c"}
	h := http.Header{
		"a": nil,
		"b": nil,
		"c": nil,
	}
	sorter := new(sorter)

	if allocs := testing.AllocsPerRun(100, func() {
		sorter.SortStrings(ss)
	}); allocs >= 1 {
		t.Logf("SortStrings allocs = %v; want <1", allocs)
	}

	if allocs := testing.AllocsPerRun(5, func() {
		if len(sorter.Keys(h)) != 3 {
			t.Fatal("wrong result")
		}
	}); allocs > 0 {
		t.Logf("Keys allocs = %v; want <1", allocs)
	}
}
diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go
new file mode 100644
index 0000000..508cebc
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go16.go
@@ -0,0 +1,21 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.6
+
+package http2
+
+import (
+ "net/http"
+ "time"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ return nil, errTransportVersion
+}
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+ return 0
+
+}
diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go
new file mode 100644
index 0000000..140434a
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go17.go
@@ -0,0 +1,87 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package http2
+
+import (
+ "crypto/tls"
+ "net"
+ "net/http"
+ "time"
+)
+
+type contextContext interface {
+ Done() <-chan struct{}
+ Err() error
+}
+
+type fakeContext struct{}
+
+func (fakeContext) Done() <-chan struct{} { return nil }
+func (fakeContext) Err() error { panic("should not be called") }
+
+func reqContext(r *http.Request) fakeContext {
+ return fakeContext{}
+}
+
+func setResponseUncompressed(res *http.Response) {
+ // Nothing.
+}
+
+type clientTrace struct{}
+
+func requestTrace(*http.Request) *clientTrace { return nil }
+func traceGotConn(*http.Request, *ClientConn) {}
+func traceFirstResponseByte(*clientTrace) {}
+func traceWroteHeaders(*clientTrace) {}
+func traceWroteRequest(*clientTrace, error) {}
+func traceGot100Continue(trace *clientTrace) {}
+func traceWait100Continue(trace *clientTrace) {}
+
+func nop() {}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
+ return nil, nop
+}
+
+func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
+ return ctx, nop
+}
+
+func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
+ return req
+}
+
+// temporary copy of Go 1.6's private tls.Config.clone:
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ }
+}
+
+func (cc *ClientConn) Ping(ctx contextContext) error {
+ return cc.ping(ctx)
+}
+
+func (t *Transport) idleConnTimeout() time.Duration { return 0 }
diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go
new file mode 100644
index 0000000..6f8d3f8
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go18.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package http2
+
+import (
+ "io"
+ "net/http"
+)
+
+func configureServer18(h1 *http.Server, h2 *Server) error {
+ // No IdleTimeout to sync prior to Go 1.8.
+ return nil
+}
+
+func shouldLogPanic(panicValue interface{}) bool {
+ return panicValue != nil
+}
+
+func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
+ return nil
+}
+
+func reqBodyIsNoBody(io.ReadCloser) bool { return false }
+
+func go18httpNoBody() io.ReadCloser { return nil } // for tests only
diff --git a/vendor/golang.org/x/net/http2/not_go19.go b/vendor/golang.org/x/net/http2/not_go19.go
new file mode 100644
index 0000000..5ae0772
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go19.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.9
+
+package http2
+
+import (
+ "net/http"
+)
+
+func configureServer19(s *http.Server, conf *Server) error {
+ // not supported prior to go1.9
+ return nil
+}
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
new file mode 100644
index 0000000..a614009
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -0,0 +1,163 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
+// io.Pipe except there are no PipeReader/PipeWriter halves, and the
+// underlying buffer is an interface. (io.Pipe is always unbuffered)
+type pipe struct {
+ mu sync.Mutex
+ c sync.Cond // c.L lazily initialized to &p.mu
+ b pipeBuffer // nil when done reading
+ err error // read error once empty. non-nil means closed.
+ breakErr error // immediate read error (caller doesn't see rest of b)
+ donec chan struct{} // closed on error
+ readFn func() // optional code to run in Read before error
+}
+
+type pipeBuffer interface {
+ Len() int
+ io.Writer
+ io.Reader
+}
+
+func (p *pipe) Len() int {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.b == nil {
+ return 0
+ }
+ return p.b.Len()
+}
+
+// Read waits until data is available and copies bytes
+// from the buffer into p.
+func (p *pipe) Read(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ for {
+ if p.breakErr != nil {
+ return 0, p.breakErr
+ }
+ if p.b != nil && p.b.Len() > 0 {
+ return p.b.Read(d)
+ }
+ if p.err != nil {
+ if p.readFn != nil {
+ p.readFn() // e.g. copy trailers
+ p.readFn = nil // not sticky like p.err
+ }
+ p.b = nil
+ return 0, p.err
+ }
+ p.c.Wait()
+ }
+}
+
+var errClosedPipeWrite = errors.New("write on closed buffer")
+
+// Write copies bytes from p into the buffer and wakes a reader.
+// It is an error to write more data than the buffer can hold.
+func (p *pipe) Write(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if p.err != nil {
+ return 0, errClosedPipeWrite
+ }
+ if p.breakErr != nil {
+ return len(d), nil // discard when there is no reader
+ }
+ return p.b.Write(d)
+}
+
+// CloseWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err after all data has been
+// read.
+//
+// The error must be non-nil.
+func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
+
+// BreakWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err immediately, without
+// waiting for unread data.
+func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
+
+// closeWithErrorAndCode is like CloseWithError but also sets some code to run
+// in the caller's goroutine before returning the error.
+func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
+
+func (p *pipe) closeWithError(dst *error, err error, fn func()) {
+ if err == nil {
+ panic("err must be non-nil")
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if *dst != nil {
+ // Already been done.
+ return
+ }
+ p.readFn = fn
+ if dst == &p.breakErr {
+ p.b = nil
+ }
+ *dst = err
+ p.closeDoneLocked()
+}
+
+// requires p.mu be held.
+func (p *pipe) closeDoneLocked() {
+ if p.donec == nil {
+ return
+ }
+ // Close if unclosed. This isn't racy since we always
+ // hold p.mu while closing.
+ select {
+ case <-p.donec:
+ default:
+ close(p.donec)
+ }
+}
+
+// Err returns the error (if any) first set by BreakWithError or CloseWithError.
+func (p *pipe) Err() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.breakErr != nil {
+ return p.breakErr
+ }
+ return p.err
+}
+
+// Done returns a channel which is closed if and when this pipe is closed
+// with CloseWithError.
+func (p *pipe) Done() <-chan struct{} {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.donec == nil {
+ p.donec = make(chan struct{})
+ if p.err != nil || p.breakErr != nil {
+ // Already hit an error.
+ p.closeDoneLocked()
+ }
+ }
+ return p.donec
+}
diff --git a/vendor/golang.org/x/net/http2/pipe_test.go b/vendor/golang.org/x/net/http2/pipe_test.go
new file mode 100644
index 0000000..1bf351f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/pipe_test.go
@@ -0,0 +1,130 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestPipeClose(t *testing.T) {
+ var p pipe
+ p.b = new(bytes.Buffer)
+ a := errors.New("a")
+ b := errors.New("b")
+ p.CloseWithError(a)
+ p.CloseWithError(b)
+ _, err := p.Read(make([]byte, 1))
+ if err != a {
+ t.Errorf("err = %v want %v", err, a)
+ }
+}
+
+func TestPipeDoneChan(t *testing.T) {
+ var p pipe
+ done := p.Done()
+ select {
+ case <-done:
+ t.Fatal("done too soon")
+ default:
+ }
+ p.CloseWithError(io.EOF)
+ select {
+ case <-done:
+ default:
+ t.Fatal("should be done")
+ }
+}
+
+func TestPipeDoneChan_ErrFirst(t *testing.T) {
+ var p pipe
+ p.CloseWithError(io.EOF)
+ done := p.Done()
+ select {
+ case <-done:
+ default:
+ t.Fatal("should be done")
+ }
+}
+
+func TestPipeDoneChan_Break(t *testing.T) {
+ var p pipe
+ done := p.Done()
+ select {
+ case <-done:
+ t.Fatal("done too soon")
+ default:
+ }
+ p.BreakWithError(io.EOF)
+ select {
+ case <-done:
+ default:
+ t.Fatal("should be done")
+ }
+}
+
+func TestPipeDoneChan_Break_ErrFirst(t *testing.T) {
+ var p pipe
+ p.BreakWithError(io.EOF)
+ done := p.Done()
+ select {
+ case <-done:
+ default:
+ t.Fatal("should be done")
+ }
+}
+
+func TestPipeCloseWithError(t *testing.T) {
+ p := &pipe{b: new(bytes.Buffer)}
+ const body = "foo"
+ io.WriteString(p, body)
+ a := errors.New("test error")
+ p.CloseWithError(a)
+ all, err := ioutil.ReadAll(p)
+ if string(all) != body {
+ t.Errorf("read bytes = %q; want %q", all, body)
+ }
+ if err != a {
+ t.Logf("read error = %v, %v", err, a)
+ }
+ // Read and Write should fail.
+ if n, err := p.Write([]byte("abc")); err != errClosedPipeWrite || n != 0 {
+ t.Errorf("Write(abc) after close\ngot %v, %v\nwant 0, %v", n, err, errClosedPipeWrite)
+ }
+ if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 {
+ t.Errorf("Read() after close\ngot %v, nil\nwant 0, %v", n, errClosedPipeWrite)
+ }
+}
+
+func TestPipeBreakWithError(t *testing.T) {
+ p := &pipe{b: new(bytes.Buffer)}
+ io.WriteString(p, "foo")
+ a := errors.New("test err")
+ p.BreakWithError(a)
+ all, err := ioutil.ReadAll(p)
+ if string(all) != "" {
+ t.Errorf("read bytes = %q; want empty string", all)
+ }
+ if err != a {
+ t.Logf("read error = %v, %v", err, a)
+ }
+ if p.b != nil {
+ t.Errorf("buffer should be nil after BreakWithError")
+ }
+ // Write should succeed silently.
+ if n, err := p.Write([]byte("abc")); err != nil || n != 3 {
+ t.Errorf("Write(abc) after break\ngot %v, %v\nwant 0, nil", n, err)
+ }
+ if p.b != nil {
+ t.Errorf("buffer should be nil after Write")
+ }
+ // Read should fail.
+ if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 {
+ t.Errorf("Read() after close\ngot %v, nil\nwant 0, not nil", n)
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
new file mode 100644
index 0000000..3e705a0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -0,0 +1,2866 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: turn off the serve goroutine when idle, so
+// an idle conn only has the readFrames goroutine active. (which could
+// also be optimized probably to pin less memory in crypto/tls). This
+// would involve tracking when the serve goroutine is active (atomic
+// int32 read/CAS probably?) and starting it up when frames arrive,
+// and shutting it down when all handlers exit. the occasional PING
+// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
+// (which is a no-op if already running) and then queue the PING write
+// as normal. The serve loop would then exit in most cases (if no
+// Handlers running) and not be woken up again until the PING packet
+// returns.
+
+// TODO (maybe): add a mechanism for Handlers to go into
+// half-closed-local mode (rw.(io.Closer) test?) but not exit their
+// handler, and continue to be able to read from the
+// Request.Body. This would be a somewhat semantic change from HTTP/1
+// (or at least what we expose in net/http), so I'd probably want to
+// add it there too. For now, this package says that returning from
+// the Handler ServeHTTP function means you're both done reading and
+// done writing, without a way to stop just one or the other.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "net"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+const (
+ prefaceTimeout = 10 * time.Second
+ firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ handlerChunkWriteSize = 4 << 10
+ defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+)
+
+var (
+ errClientDisconnected = errors.New("client disconnected")
+ errClosedBody = errors.New("body closed by handler")
+ errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
+ errStreamClosed = errors.New("http2: stream closed")
+)
+
+var responseWriterStatePool = sync.Pool{
+ New: func() interface{} {
+ rws := &responseWriterState{}
+ rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
+ return rws
+ },
+}
+
+// Test hooks.
+var (
+ testHookOnConn func()
+ testHookGetServerConn func(*serverConn)
+ testHookOnPanicMu *sync.Mutex // nil except in tests
+ testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type Server struct {
+ // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+ // which may run at a time over all connections.
+ // Negative or zero means no limit.
+ // TODO: implement
+ MaxHandlers int
+
+ // MaxConcurrentStreams optionally specifies the number of
+ // concurrent streams that each client may have open at a
+ // time. This is unrelated to the number of http.Handler goroutines
+ // which may be active globally, which is MaxHandlers.
+ // If zero, MaxConcurrentStreams defaults to at least 100, per
+ // the HTTP/2 spec's recommendations.
+ MaxConcurrentStreams uint32
+
+ // MaxReadFrameSize optionally specifies the largest frame
+ // this server is willing to read. A valid value is between
+ // 16k and 16M, inclusive. If zero or otherwise invalid, a
+ // default value is used.
+ MaxReadFrameSize uint32
+
+ // PermitProhibitedCipherSuites, if true, permits the use of
+ // cipher suites prohibited by the HTTP/2 spec.
+ PermitProhibitedCipherSuites bool
+
+ // IdleTimeout specifies how long until idle clients should be
+ // closed with a GOAWAY frame. PING frames are not considered
+ // activity for the purposes of IdleTimeout.
+ IdleTimeout time.Duration
+
+ // MaxUploadBufferPerConnection is the size of the initial flow
+ // control window for each connections. The HTTP/2 spec does not
+ // allow this to be smaller than 65535 or larger than 2^32-1.
+ // If the value is outside this range, a default value will be
+ // used instead.
+ MaxUploadBufferPerConnection int32
+
+ // MaxUploadBufferPerStream is the size of the initial flow control
+ // window for each stream. The HTTP/2 spec does not allow this to
+ // be larger than 2^32-1. If the value is zero or larger than the
+ // maximum, a default value will be used instead.
+ MaxUploadBufferPerStream int32
+
+ // NewWriteScheduler constructs a write scheduler for a connection.
+ // If nil, a default scheduler is chosen.
+ NewWriteScheduler func() WriteScheduler
+
+ // Internal state. This is a pointer (rather than embedded directly)
+ // so that we don't embed a Mutex in this struct, which will make the
+ // struct non-copyable, which might break some callers.
+ state *serverInternalState
+}
+
+func (s *Server) initialConnRecvWindowSize() int32 {
+ if s.MaxUploadBufferPerConnection > initialWindowSize {
+ return s.MaxUploadBufferPerConnection
+ }
+ return 1 << 20
+}
+
+func (s *Server) initialStreamRecvWindowSize() int32 {
+ if s.MaxUploadBufferPerStream > 0 {
+ return s.MaxUploadBufferPerStream
+ }
+ return 1 << 20
+}
+
+func (s *Server) maxReadFrameSize() uint32 {
+ if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
+ return v
+ }
+ return defaultMaxReadFrameSize
+}
+
+func (s *Server) maxConcurrentStreams() uint32 {
+ if v := s.MaxConcurrentStreams; v > 0 {
+ return v
+ }
+ return defaultMaxStreams
+}
+
+type serverInternalState struct {
+ mu sync.Mutex
+ activeConns map[*serverConn]struct{}
+}
+
+func (s *serverInternalState) registerConn(sc *serverConn) {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ s.activeConns[sc] = struct{}{}
+ s.mu.Unlock()
+}
+
+func (s *serverInternalState) unregisterConn(sc *serverConn) {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ delete(s.activeConns, sc)
+ s.mu.Unlock()
+}
+
+func (s *serverInternalState) startGracefulShutdown() {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ for sc := range s.activeConns {
+ sc.startGracefulShutdown()
+ }
+ s.mu.Unlock()
+}
+
+// ConfigureServer adds HTTP/2 support to a net/http Server.
+//
+// The configuration conf may be nil.
+//
+// ConfigureServer must be called before s begins serving.
+func ConfigureServer(s *http.Server, conf *Server) error {
+ if s == nil {
+ panic("nil *http.Server")
+ }
+ if conf == nil {
+ conf = new(Server)
+ }
+ conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+ if err := configureServer18(s, conf); err != nil {
+ return err
+ }
+ if err := configureServer19(s, conf); err != nil {
+ return err
+ }
+
+ if s.TLSConfig == nil {
+ s.TLSConfig = new(tls.Config)
+ } else if s.TLSConfig.CipherSuites != nil {
+ // If they already provided a CipherSuite list, return
+ // an error if it has a bad order or is missing
+ // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
+ haveRequired := false
+ sawBad := false
+ for i, cs := range s.TLSConfig.CipherSuites {
+ switch cs {
+ case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ // Alternative MTI cipher to not discourage ECDSA-only servers.
+ // See http://golang.org/cl/30721 for further information.
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+ haveRequired = true
+ }
+ if isBadCipher(cs) {
+ sawBad = true
+ } else if sawBad {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
+ }
+ }
+ if !haveRequired {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.")
+ }
+ }
+
+ // Note: not setting MinVersion to tls.VersionTLS12,
+ // as we don't want to interfere with HTTP/1.1 traffic
+ // on the user's server. We enforce TLS 1.2 later once
+ // we accept a connection. Ideally this should be done
+ // during next-proto selection, but using TLS <1.2 with
+ // HTTP/2 is still the client's bug.
+
+ s.TLSConfig.PreferServerCipherSuites = true
+
+ haveNPN := false
+ for _, p := range s.TLSConfig.NextProtos {
+ if p == NextProtoTLS {
+ haveNPN = true
+ break
+ }
+ }
+ if !haveNPN {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
+ }
+
+ if s.TLSNextProto == nil {
+ s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
+ }
+ protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ if testHookOnConn != nil {
+ testHookOnConn()
+ }
+ conf.ServeConn(c, &ServeConnOpts{
+ Handler: h,
+ BaseConfig: hs,
+ })
+ }
+ s.TLSNextProto[NextProtoTLS] = protoHandler
+ return nil
+}
+
+// ServeConnOpts are options for the Server.ServeConn method.
+type ServeConnOpts struct {
+ // BaseConfig optionally sets the base configuration
+ // for values. If nil, defaults are used.
+ BaseConfig *http.Server
+
+ // Handler specifies which handler to use for processing
+ // requests. If nil, BaseConfig.Handler is used. If BaseConfig
+ // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
+ Handler http.Handler
+}
+
+func (o *ServeConnOpts) baseConfig() *http.Server {
+ if o != nil && o.BaseConfig != nil {
+ return o.BaseConfig
+ }
+ return new(http.Server)
+}
+
+func (o *ServeConnOpts) handler() http.Handler {
+ if o != nil {
+ if o.Handler != nil {
+ return o.Handler
+ }
+ if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
+ return o.BaseConfig.Handler
+ }
+ }
+ return http.DefaultServeMux
+}
+
+// ServeConn serves HTTP/2 requests on the provided connection and
+// blocks until the connection is no longer readable.
+//
+// ServeConn starts speaking HTTP/2 assuming that c has not had any
+// reads or writes. It writes its initial settings frame and expects
+// to be able to read the preface and settings frame from the
+// client. If c has a ConnectionState method like a *tls.Conn, the
+// ConnectionState is used to verify the TLS ciphersuite and to set
+// the Request.TLS field in Handlers.
+//
+// ServeConn does not support h2c by itself. Any h2c support must be
+// implemented in terms of providing a suitably-behaving net.Conn.
+//
+// The opts parameter is optional. If nil, default values are used.
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ baseCtx, cancel := serverConnBaseContext(c, opts)
+ defer cancel()
+
+ sc := &serverConn{
+ srv: s,
+ hs: opts.baseConfig(),
+ conn: c,
+ baseCtx: baseCtx,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: newBufferedWriter(c),
+ handler: opts.handler(),
+ streams: make(map[uint32]*stream),
+ readFrameCh: make(chan readFrameResult),
+ wantWriteFrameCh: make(chan FrameWriteRequest, 8),
+ serveMsgCh: make(chan interface{}, 8),
+ wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
+ bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
+ advMaxStreams: s.maxConcurrentStreams(),
+ initialStreamSendWindowSize: initialWindowSize,
+ maxFrameSize: initialMaxFrameSize,
+ headerTableSize: initialHeaderTableSize,
+ serveG: newGoroutineLock(),
+ pushEnabled: true,
+ }
+
+ s.state.registerConn(sc)
+ defer s.state.unregisterConn(sc)
+
+ // The net/http package sets the write deadline from the
+ // http.Server.WriteTimeout during the TLS handshake, but then
+ // passes the connection off to us with the deadline already set.
+ // Write deadlines are set per stream in serverConn.newStream.
+ // Disarm the net.Conn write deadline here.
+ if sc.hs.WriteTimeout != 0 {
+ sc.conn.SetWriteDeadline(time.Time{})
+ }
+
+ if s.NewWriteScheduler != nil {
+ sc.writeSched = s.NewWriteScheduler()
+ } else {
+ sc.writeSched = NewRandomWriteScheduler()
+ }
+
+ // These start at the RFC-specified defaults. If there is a higher
+ // configured value for inflow, that will be updated when we send a
+ // WINDOW_UPDATE shortly after sending SETTINGS.
+ sc.flow.add(initialWindowSize)
+ sc.inflow.add(initialWindowSize)
+ sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+
+ fr := NewFramer(sc.bw, c)
+ fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ fr.MaxHeaderListSize = sc.maxHeaderListSize()
+ fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+ sc.framer = fr
+
+ if tc, ok := c.(connectionStater); ok {
+ sc.tlsState = new(tls.ConnectionState)
+ *sc.tlsState = tc.ConnectionState()
+ // 9.2 Use of TLS Features
+ // An implementation of HTTP/2 over TLS MUST use TLS
+ // 1.2 or higher with the restrictions on feature set
+ // and cipher suite described in this section. Due to
+ // implementation limitations, it might not be
+ // possible to fail TLS negotiation. An endpoint MUST
+ // immediately terminate an HTTP/2 connection that
+ // does not meet the TLS requirements described in
+ // this section with a connection error (Section
+ // 5.4.1) of type INADEQUATE_SECURITY.
+ if sc.tlsState.Version < tls.VersionTLS12 {
+ sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
+ return
+ }
+
+ if sc.tlsState.ServerName == "" {
+ // Client must use SNI, but we don't enforce that anymore,
+ // since it was causing problems when connecting to bare IP
+ // addresses during development.
+ //
+ // TODO: optionally enforce? Or enforce at the time we receive
+ // a new request, and verify that the ServerName matches the :authority?
+ // But that precludes proxy situations, perhaps.
+ //
+ // So for now, do nothing here again.
+ }
+
+ if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ // "Endpoints MAY choose to generate a connection error
+ // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
+ // the prohibited cipher suites are negotiated."
+ //
+ // We choose that. In my opinion, the spec is weak
+ // here. It also says both parties must support at least
+ // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
+ // excuses here. If we really must, we could allow an
+ // "AllowInsecureWeakCiphers" option on the server later.
+ // Let's see how it plays out first.
+ sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
+ return
+ }
+ }
+
+ if hook := testHookGetServerConn; hook != nil {
+ hook(sc)
+ }
+ sc.serve()
+}
+
+func (sc *serverConn) rejectConn(err ErrCode, debug string) {
+ sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
+ // ignoring errors. hanging up anyway.
+ sc.framer.WriteGoAway(0, err, []byte(debug))
+ sc.bw.Flush()
+ sc.conn.Close()
+}
+
+type serverConn struct {
+ // Immutable:
+ srv *Server
+ hs *http.Server
+ conn net.Conn
+ bw *bufferedWriter // writing to conn
+ handler http.Handler
+ baseCtx contextContext
+ framer *Framer
+ doneServing chan struct{} // closed when serverConn.serve ends
+ readFrameCh chan readFrameResult // written by serverConn.readFrames
+ wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
+ wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
+ bodyReadCh chan bodyReadMsg // from handlers -> serve
+ serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
+ flow flow // conn-wide (not stream-specific) outbound flow control
+ inflow flow // conn-wide inbound flow control
+ tlsState *tls.ConnectionState // shared by all handlers, like net/http
+ remoteAddrStr string
+ writeSched WriteScheduler
+
+ // Everything following is owned by the serve loop; use serveG.check():
+ serveG goroutineLock // used to verify funcs are on serve()
+ pushEnabled bool
+ sawFirstSettings bool // got the initial SETTINGS frame after the preface
+ needToSendSettingsAck bool
+ unackedSettings int // how many SETTINGS have we sent without ACKs?
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+ curClientStreams uint32 // number of open streams initiated by the client
+ curPushedStreams uint32 // number of open streams initiated by server push
+ maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
+ maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
+ streams map[uint32]*stream
+ initialStreamSendWindowSize int32
+ maxFrameSize int32
+ headerTableSize uint32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
+ writingFrame bool // started writing a frame (on serve goroutine or separate)
+ writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+ inGoAway bool // we've started to or sent GOAWAY
+ inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ goAwayCode ErrCode
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
+
+ // Owned by the writeFrameAsync goroutine:
+ headerWriteBuf bytes.Buffer
+ hpackEncoder *hpack.Encoder
+
+ // Used by startGracefulShutdown.
+ shutdownOnce sync.Once
+}
+
+func (sc *serverConn) maxHeaderListSize() uint32 {
+ n := sc.hs.MaxHeaderBytes
+ if n <= 0 {
+ n = http.DefaultMaxHeaderBytes
+ }
+ // http2's count is in a slightly different unit and includes 32 bytes per pair.
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+ const perFieldOverhead = 32 // per http2 spec
+ const typicalHeaders = 10 // conservative
+ return uint32(n + typicalHeaders*perFieldOverhead)
+}
+
+func (sc *serverConn) curOpenStreams() uint32 {
+ sc.serveG.check()
+ return sc.curClientStreams + sc.curPushedStreams
+}
+
+// stream represents a stream. This is the minimal metadata needed by
+// the serve goroutine. Most of the actual stream state is owned by
+// the http.Handler's goroutine in the responseWriter. Because the
+// responseWriter's responseWriterState is recycled at the end of a
+// handler, this struct intentionally has no pointer to the
+// *responseWriter{,State} itself, as the Handler ending nils out the
+// responseWriter's state field.
+type stream struct {
+ // immutable:
+ sc *serverConn
+ id uint32
+ body *pipe // non-nil if expecting DATA frames
+ cw closeWaiter // closed wait stream transitions to closed state
+ ctx contextContext
+ cancelCtx func()
+
+ // owned by serverConn's serve loop:
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow flow // limits writing from Handler to client
+ inflow flow // what the client is allowed to POST/etc to us
+ parent *stream // or nil
+ numTrailerValues int64
+ weight uint8
+ state streamState
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ writeDeadline *time.Timer // nil if unused
+
+ trailer http.Header // accumulated trailers
+ reqTrailer http.Header // handler's Request.Trailer
+}
+
+func (sc *serverConn) Framer() *Framer { return sc.framer }
+func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
+func (sc *serverConn) Flush() error { return sc.bw.Flush() }
+func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
+ return sc.hpackEncoder, &sc.headerWriteBuf
+}
+
+func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
+ sc.serveG.check()
+ // http://tools.ietf.org/html/rfc7540#section-5.1
+ if st, ok := sc.streams[streamID]; ok {
+ return st.state, st
+ }
+ // "The first use of a new stream identifier implicitly closes all
+ // streams in the "idle" state that might have been initiated by
+ // that peer with a lower-valued stream identifier. For example, if
+ // a client sends a HEADERS frame on stream 7 without ever sending a
+ // frame on stream 5, then stream 5 transitions to the "closed"
+ // state when the first frame for stream 7 is sent or received."
+ if streamID%2 == 1 {
+ if streamID <= sc.maxClientStreamID {
+ return stateClosed, nil
+ }
+ } else {
+ if streamID <= sc.maxPushPromiseID {
+ return stateClosed, nil
+ }
+ }
+ return stateIdle, nil
+}
+
+// setConnState calls the net/http ConnState hook for this connection, if configured.
+// Note that the net/http package does StateNew and StateClosed for us.
+// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
+func (sc *serverConn) setConnState(state http.ConnState) {
+ if sc.hs.ConnState != nil {
+ sc.hs.ConnState(sc.conn, state)
+ }
+}
+
+func (sc *serverConn) vlogf(format string, args ...interface{}) {
+ if VerboseLogs {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) logf(format string, args ...interface{}) {
+ if lg := sc.hs.ErrorLog; lg != nil {
+ lg.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
// errno returns v's underlying uintptr, else 0.
//
// TODO: remove this helper function once http2 can use build
// tags. See comment in isClosedConnError.
func errno(v error) uintptr {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Uintptr {
		return 0
	}
	return uintptr(rv.Uint())
}
+
+// isClosedConnError reports whether err is an error from use of a closed
+// network connection.
+func isClosedConnError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ // TODO: remove this string search and be more like the Windows
+ // case below. That might involve modifying the standard library
+ // to return better error types.
+ str := err.Error()
+ if strings.Contains(str, "use of closed network connection") {
+ return true
+ }
+
+ // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
+ // build tags, so I can't make an http2_windows.go file with
+ // Windows-specific stuff. Fix that and move this, once we
+ // have a way to bundle this into std's net/http somehow.
+ if runtime.GOOS == "windows" {
+ if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
+ if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
+ const WSAECONNABORTED = 10053
+ const WSAECONNRESET = 10054
+ if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
+ if err == nil {
+ return
+ }
+ if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
+ // Boring, expected errors.
+ sc.vlogf(format, args...)
+ } else {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) canonicalHeader(v string) string {
+ sc.serveG.check()
+ cv, ok := commonCanonHeader[v]
+ if ok {
+ return cv
+ }
+ cv, ok = sc.canonHeader[v]
+ if ok {
+ return cv
+ }
+ if sc.canonHeader == nil {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = http.CanonicalHeaderKey(v)
+ sc.canonHeader[v] = cv
+ return cv
+}
+
// readFrameResult is the message passed from the readFrames goroutine
// to the serve goroutine for each frame read (or read error).
type readFrameResult struct {
	f   Frame // valid until readMore is called
	err error

	// readMore should be called once the consumer no longer needs or
	// retains f. After readMore, f is invalid and more frames can be
	// read.
	readMore func()
}
+
// readFrames is the loop that reads incoming frames.
// It takes care to only read one frame at a time, blocking until the
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
	gate := make(gate)
	gateDone := gate.Done
	for {
		f, err := sc.framer.ReadFrame()
		// Hand the frame (or read error) to the serve loop, unless it
		// has already exited.
		select {
		case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
		case <-sc.doneServing:
			return
		}
		// Wait for the serve loop to call readMore (which signals the
		// gate) so at most one frame is outstanding at a time.
		select {
		case <-gate:
		case <-sc.doneServing:
			return
		}
		if terminalReadFrameError(err) {
			return
		}
	}
}
+
// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
type frameWriteResult struct {
	wr  FrameWriteRequest // what was written (or attempted)
	err error             // result of the writeFrame call
}
+
// writeFrameAsync runs in its own goroutine and writes a single frame
// and then reports when it's done.
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
	err := wr.write.writeFrame(sc)
	// Report the outcome back to the serve loop.
	sc.wroteFrameCh <- frameWriteResult{wr, err}
}
+
// closeAllStreamsOnConnClose closes every remaining stream when the
// whole connection is going away (the client disconnected).
func (sc *serverConn) closeAllStreamsOnConnClose() {
	sc.serveG.check()
	for _, st := range sc.streams {
		sc.closeStream(st, errClientDisconnected)
	}
}
+
+func (sc *serverConn) stopShutdownTimer() {
+ sc.serveG.check()
+ if t := sc.shutdownTimer; t != nil {
+ t.Stop()
+ }
+}
+
// notePanic gives test hooks a chance to observe (and optionally
// re-raise) a panic escaping serverConn.serve. It must be invoked via
// defer so that recover() sees the in-flight panic.
func (sc *serverConn) notePanic() {
	// Note: this is for serverConn.serve panicking, not http.Handler code.
	if testHookOnPanicMu != nil {
		testHookOnPanicMu.Lock()
		defer testHookOnPanicMu.Unlock()
	}
	if testHookOnPanic != nil {
		if e := recover(); e != nil {
			if testHookOnPanic(sc, e) {
				// The hook asked for the panic to propagate.
				panic(e)
			}
		}
	}
}
+
// serve is the main loop of the connection. It runs on its own
// goroutine, owns all per-connection mutable state, and multiplexes
// messages from the frame reader, the frame writer, timers, and
// handler goroutines until the connection is done.
func (sc *serverConn) serve() {
	sc.serveG.check()
	defer sc.notePanic()
	defer sc.conn.Close()
	defer sc.closeAllStreamsOnConnClose()
	defer sc.stopShutdownTimer()
	defer close(sc.doneServing) // unblocks handlers trying to send

	if VerboseLogs {
		sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
	}

	// Advertise our settings before doing anything else.
	sc.writeFrame(FrameWriteRequest{
		write: writeSettings{
			{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
			{SettingMaxConcurrentStreams, sc.advMaxStreams},
			{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
			{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
		},
	})
	sc.unackedSettings++

	// Each connection starts with initialWindowSize inflow tokens.
	// If a higher value is configured, we add more tokens.
	if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
		sc.sendWindowUpdate(nil, int(diff))
	}

	if err := sc.readPreface(); err != nil {
		sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
		return
	}
	// Now that we've got the preface, get us out of the
	// "StateNew" state. We can't go directly to idle, though.
	// Active means we read some data and anticipate a request. We'll
	// do another Active when we get a HEADERS frame.
	sc.setConnState(http.StateActive)
	sc.setConnState(http.StateIdle)

	if sc.srv.IdleTimeout != 0 {
		sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
		defer sc.idleTimer.Stop()
	}

	go sc.readFrames() // closed by defer sc.conn.Close above

	settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
	defer settingsTimer.Stop()

	loopNum := 0
	for {
		loopNum++
		select {
		case wr := <-sc.wantWriteFrameCh:
			// A handler goroutine wants a frame written.
			if se, ok := wr.write.(StreamError); ok {
				sc.resetStream(se)
				break
			}
			sc.writeFrame(wr)
		case res := <-sc.wroteFrameCh:
			sc.wroteFrame(res)
		case res := <-sc.readFrameCh:
			if !sc.processFrameFromReader(res) {
				return
			}
			res.readMore()
			// First frame arrived; the SETTINGS deadline no longer applies.
			if settingsTimer != nil {
				settingsTimer.Stop()
				settingsTimer = nil
			}
		case m := <-sc.bodyReadCh:
			sc.noteBodyRead(m.st, m.n)
		case msg := <-sc.serveMsgCh:
			switch v := msg.(type) {
			case func(int):
				v(loopNum) // for testing
			case *serverMessage:
				switch v {
				case settingsTimerMsg:
					sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
					return
				case idleTimerMsg:
					sc.vlogf("connection is idle")
					sc.goAway(ErrCodeNo)
				case shutdownTimerMsg:
					sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
					return
				case gracefulShutdownMsg:
					sc.startGracefulShutdownInternal()
				default:
					panic("unknown timer")
				}
			case *startPushRequest:
				sc.startPush(v)
			default:
				panic(fmt.Sprintf("unexpected type %T", v))
			}
		}

		// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
		// with no error code (graceful shutdown), don't start the timer until
		// all open streams have been completed.
		sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
		gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0
		if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) {
			sc.shutDownIn(goAwayTimeout)
		}
	}
}
+
// awaitGracefulShutdown blocks until either sharedCh is closed (a
// graceful shutdown was requested) or the connection stops serving.
// privateCh is closed only in the former case.
func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
	select {
	case <-sc.doneServing:
	case <-sharedCh:
		close(privateCh)
	}
}
+
// serverMessage is the type of the sentinel values sent on serveMsgCh;
// only the pointers' identities matter, not the int value.
type serverMessage int

// Message values sent to serveMsgCh.
var (
	settingsTimerMsg    = new(serverMessage)
	idleTimerMsg        = new(serverMessage)
	shutdownTimerMsg    = new(serverMessage)
	gracefulShutdownMsg = new(serverMessage)
)
+
// The timer callbacks below run on timer goroutines; they forward
// sentinel messages to the serve loop rather than touching state directly.
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer()     { sc.sendServeMsg(idleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
+
// sendServeMsg delivers msg to the serve loop, giving up if the
// connection has stopped serving. It must not be called from the serve
// goroutine itself (hence the checkNotOn assertion).
func (sc *serverConn) sendServeMsg(msg interface{}) {
	sc.serveG.checkNotOn() // NOT
	select {
	case sc.serveMsgCh <- msg:
	case <-sc.doneServing:
	}
}
+
+// readPreface reads the ClientPreface greeting from the peer
+// or returns an error on timeout or an invalid greeting.
+func (sc *serverConn) readPreface() error {
+ errc := make(chan error, 1)
+ go func() {
+ // Read the client preface
+ buf := make([]byte, len(ClientPreface))
+ if _, err := io.ReadFull(sc.conn, buf); err != nil {
+ errc <- err
+ } else if !bytes.Equal(buf, clientPreface) {
+ errc <- fmt.Errorf("bogus greeting %q", buf)
+ } else {
+ errc <- nil
+ }
+ }()
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ return errors.New("timeout waiting for client preface")
+ case err := <-errc:
+ if err == nil {
+ if VerboseLogs {
+ sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
+ }
+ }
+ return err
+ }
+}
+
// errChanPool recycles the 1-buffered error channels handlers use to
// wait for a frame write to complete.
var errChanPool = sync.Pool{
	New: func() interface{} { return make(chan error, 1) },
}

// writeDataPool recycles writeData frame-write arguments, avoiding an
// allocation per DATA frame written by a handler.
var writeDataPool = sync.Pool{
	New: func() interface{} { return new(writeData) },
}
+
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream. It blocks until the frame has been written (or the
// stream/connection has gone away) and returns the write's error, if any.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
	ch := errChanPool.Get().(chan error)
	writeArg := writeDataPool.Get().(*writeData)
	*writeArg = writeData{stream.id, data, endStream}
	err := sc.writeFrameFromHandler(FrameWriteRequest{
		write:  writeArg,
		stream: stream,
		done:   ch,
	})
	if err != nil {
		return err
	}
	var frameWriteDone bool // the frame write is done (successfully or not)
	select {
	case err = <-ch:
		frameWriteDone = true
	case <-sc.doneServing:
		return errClientDisconnected
	case <-stream.cw:
		// If both ch and stream.cw were ready (as might
		// happen on the final Write after an http.Handler
		// ends), prefer the write result. Otherwise this
		// might just be us successfully closing the stream.
		// The writeFrameAsync and serve goroutines guarantee
		// that the ch send will happen before the stream.cw
		// close.
		select {
		case err = <-ch:
			frameWriteDone = true
		default:
			return errStreamClosed
		}
	}
	errChanPool.Put(ch)
	// Only recycle writeArg once the write is known to have finished;
	// otherwise the frame writer may still reference it.
	if frameWriteDone {
		writeDataPool.Put(writeArg)
	}
	return err
}
+
// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
// if the connection has gone away. Returns errClientDisconnected in
// that case.
//
// This must not be run from the serve goroutine itself, else it might
// deadlock writing to sc.wantWriteFrameCh (which is only mildly
// buffered and is read by serve itself). If you're on the serve
// goroutine, call writeFrame instead.
func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
	sc.serveG.checkNotOn() // NOT
	select {
	case sc.wantWriteFrameCh <- wr:
		return nil
	case <-sc.doneServing:
		// Serve loop is gone.
		// Client has closed their connection to the server.
		return errClientDisconnected
	}
}
+
// writeFrame schedules a frame to write and sends it if there's nothing
// already being written.
//
// There is no pushback here (the serve goroutine never blocks). It's
// the http.Handlers that block, waiting for their previous frames to
// make it onto the wire.
//
// If you're not on the serve goroutine, use writeFrameFromHandler instead.
func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
	sc.serveG.check()

	// If true, wr will not be written and wr.done will not be signaled.
	var ignoreWrite bool

	// We are not allowed to write frames on closed streams. RFC 7540 Section
	// 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
	// a closed stream." Our server never sends PRIORITY, so that exception
	// does not apply.
	//
	// The serverConn might close an open stream while the stream's handler
	// is still running. For example, the server might close a stream when it
	// receives bad data from the client. If this happens, the handler might
	// attempt to write a frame after the stream has been closed (since the
	// handler hasn't yet been notified of the close). In this case, we simply
	// ignore the frame. The handler will notice that the stream is closed when
	// it waits for the frame to be written.
	//
	// As an exception to this rule, we allow sending RST_STREAM after close.
	// This allows us to immediately reject new streams without tracking any
	// state for those streams (except for the queued RST_STREAM frame). This
	// may result in duplicate RST_STREAMs in some cases, but the client should
	// ignore those.
	if wr.StreamID() != 0 {
		_, isReset := wr.write.(StreamError)
		if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {
			ignoreWrite = true
		}
	}

	// Don't send a 100-continue response if we've already sent headers.
	// See golang.org/issue/14030.
	switch wr.write.(type) {
	case *writeResHeaders:
		wr.stream.wroteHeaders = true
	case write100ContinueHeadersFrame:
		if wr.stream.wroteHeaders {
			// We do not need to notify wr.done because this frame is
			// never written with wr.done != nil.
			if wr.done != nil {
				panic("wr.done != nil for write100ContinueHeadersFrame")
			}
			ignoreWrite = true
		}
	}

	if !ignoreWrite {
		sc.writeSched.Push(wr)
	}
	sc.scheduleFrameWrite()
}
+
// startFrameWrite starts a goroutine to write wr (in a separate
// goroutine since that might block on the network), and updates the
// serve goroutine's state about the world, updated from info in wr.
func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
	sc.serveG.check()
	if sc.writingFrame {
		panic("internal error: can only be writing one frame at a time")
	}

	// Sanity-check the stream state against what we're about to send.
	st := wr.stream
	if st != nil {
		switch st.state {
		case stateHalfClosedLocal:
			switch wr.write.(type) {
			case StreamError, handlerPanicRST, writeWindowUpdate:
				// RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
				// in this state. (We never send PRIORITY from the server, so that is not checked.)
			default:
				panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
			}
		case stateClosed:
			panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
		}
	}
	if wpp, ok := wr.write.(*writePushPromise); ok {
		var err error
		wpp.promisedID, err = wpp.allocatePromisedID()
		if err != nil {
			// Couldn't allocate an ID for the promised stream; report
			// the failure to the waiting writer instead of writing.
			sc.writingFrameAsync = false
			wr.replyToWriter(err)
			return
		}
	}

	sc.writingFrame = true
	sc.needsFrameFlush = true
	if wr.write.staysWithinBuffer(sc.bw.Available()) {
		// Fits in the buffered writer: write synchronously, no goroutine needed.
		sc.writingFrameAsync = false
		err := wr.write.writeFrame(sc)
		sc.wroteFrame(frameWriteResult{wr, err})
	} else {
		sc.writingFrameAsync = true
		go sc.writeFrameAsync(wr)
	}
}
+
// errHandlerPanicked is the error given to any callers blocked in a read from
// Request.Body when the main goroutine panics. Since most handlers read in
// the main ServeHTTP goroutine, this will show up rarely.
var errHandlerPanicked = errors.New("http2: handler panicked")
+
// wroteFrame is called on the serve goroutine with the result of
// whatever happened on writeFrameAsync. It advances stream state for
// frames that end a stream and kicks the write scheduler again.
func (sc *serverConn) wroteFrame(res frameWriteResult) {
	sc.serveG.check()
	if !sc.writingFrame {
		panic("internal error: expected to be already writing a frame")
	}
	sc.writingFrame = false
	sc.writingFrameAsync = false

	wr := res.wr

	if writeEndsStream(wr.write) {
		st := wr.stream
		if st == nil {
			panic("internal error: expecting non-nil stream")
		}
		switch st.state {
		case stateOpen:
			// Here we would go to stateHalfClosedLocal in
			// theory, but since our handler is done and
			// the net/http package provides no mechanism
			// for closing a ResponseWriter while still
			// reading data (see possible TODO at top of
			// this file), we go into closed state here
			// anyway, after telling the peer we're
			// hanging up on them. We'll transition to
			// stateClosed after the RST_STREAM frame is
			// written.
			st.state = stateHalfClosedLocal
			// Section 8.1: a server MAY request that the client abort
			// transmission of a request without error by sending a
			// RST_STREAM with an error code of NO_ERROR after sending
			// a complete response.
			sc.resetStream(streamError(st.id, ErrCodeNo))
		case stateHalfClosedRemote:
			sc.closeStream(st, errHandlerComplete)
		}
	} else {
		switch v := wr.write.(type) {
		case StreamError:
			// st may be unknown if the RST_STREAM was generated to reject bad input.
			if st, ok := sc.streams[v.StreamID]; ok {
				sc.closeStream(st, v)
			}
		case handlerPanicRST:
			sc.closeStream(wr.stream, errHandlerPanicked)
		}
	}

	// Reply (if requested) to unblock the ServeHTTP goroutine.
	wr.replyToWriter(res.err)

	sc.scheduleFrameWrite()
}
+
// scheduleFrameWrite tickles the frame writing scheduler.
//
// If a frame is already being written, nothing happens. This will be called again
// when the frame is done being written.
//
// If a frame isn't being written and we need to send one, the best frame
// to send is selected, preferring first things that aren't
// stream-specific (e.g. ACKing settings), and then finding the
// highest priority stream.
//
// If a frame isn't being written and there's nothing else to send, we
// flush the write buffer.
func (sc *serverConn) scheduleFrameWrite() {
	sc.serveG.check()
	// inFrameScheduleLoop prevents re-entry: startFrameWrite below can
	// call back into scheduleFrameWrite via wroteFrame.
	if sc.writingFrame || sc.inFrameScheduleLoop {
		return
	}
	sc.inFrameScheduleLoop = true
	for !sc.writingFrameAsync {
		// Highest priority: a pending GOAWAY.
		if sc.needToSendGoAway {
			sc.needToSendGoAway = false
			sc.startFrameWrite(FrameWriteRequest{
				write: &writeGoAway{
					maxStreamID: sc.maxClientStreamID,
					code:        sc.goAwayCode,
				},
			})
			continue
		}
		// Next: a pending SETTINGS ACK.
		if sc.needToSendSettingsAck {
			sc.needToSendSettingsAck = false
			sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
			continue
		}
		// Then whatever the write scheduler picks, unless we're tearing
		// down with an error.
		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
			if wr, ok := sc.writeSched.Pop(); ok {
				sc.startFrameWrite(wr)
				continue
			}
		}
		// Nothing left to write: flush buffered output if needed.
		if sc.needsFrameFlush {
			sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
			sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
			continue
		}
		break
	}
	sc.inFrameScheduleLoop = false
}
+
// startGracefulShutdown gracefully shuts down a connection. This
// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
// shutting down. The connection isn't closed until all current
// streams are done.
//
// startGracefulShutdown returns immediately; it does not wait until
// the connection has shut down.
func (sc *serverConn) startGracefulShutdown() {
	sc.serveG.checkNotOn() // NOT
	// shutdownOnce makes this idempotent across multiple callers.
	sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
}
+
// After sending GOAWAY, the connection will close after goAwayTimeout.
// If we close the connection immediately after sending GOAWAY, there may
// be unsent data in our kernel receive buffer, which will cause the kernel
// to send a TCP RST on close() instead of a FIN. This RST will abort the
// connection immediately, whether or not the client had received the GOAWAY.
//
// Ideally we should delay for at least 1 RTT + epsilon so the client has
// a chance to read the GOAWAY and stop sending messages. Measuring RTT
// is hard, so we approximate with 1 second. See golang.org/issue/18701.
//
// This is a var so it can be shorter in tests, where all requests use the
// loopback interface making the expected RTT very small.
//
// TODO: configurable?
var goAwayTimeout = 1 * time.Second
+
// startGracefulShutdownInternal is the serve-goroutine half of
// startGracefulShutdown: it sends GOAWAY with ErrCodeNo.
func (sc *serverConn) startGracefulShutdownInternal() {
	sc.goAway(ErrCodeNo)
}
+
+func (sc *serverConn) goAway(code ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
+ return
+ }
+ sc.inGoAway = true
+ sc.needToSendGoAway = true
+ sc.goAwayCode = code
+ sc.scheduleFrameWrite()
+}
+
// shutDownIn arms the shutdown timer: d from now, onShutdownTimer will
// fire and tell the serve loop to close the connection.
func (sc *serverConn) shutDownIn(d time.Duration) {
	sc.serveG.check()
	sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
+
+func (sc *serverConn) resetStream(se StreamError) {
+ sc.serveG.check()
+ sc.writeFrame(FrameWriteRequest{write: se})
+ if st, ok := sc.streams[se.StreamID]; ok {
+ st.resetQueued = true
+ }
+}
+
// processFrameFromReader processes the serve loop's read from readFrameCh from the
// frame-reading goroutine.
// processFrameFromReader returns whether the connection should be kept open.
func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
	sc.serveG.check()
	err := res.err
	if err != nil {
		if err == ErrFrameTooLarge {
			sc.goAway(ErrCodeFrameSize)
			return true // goAway will close the loop
		}
		clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
		if clientGone {
			// TODO: could we also get into this state if
			// the peer does a half close
			// (e.g. CloseWrite) because they're done
			// sending frames but they're still wanting
			// our open replies?  Investigate.
			// TODO: add CloseWrite to crypto/tls.Conn first
			// so we have a way to test this? I suppose
			// just for testing we could have a non-TLS mode.
			return false
		}
	} else {
		f := res.f
		if VerboseLogs {
			sc.vlogf("http2: server read frame %v", summarizeFrame(f))
		}
		err = sc.processFrame(f)
		if err == nil {
			return true
		}
	}

	// Decide how severe the frame/processing error is.
	switch ev := err.(type) {
	case StreamError:
		// Stream-level: reset just that stream, keep the connection.
		sc.resetStream(ev)
		return true
	case goAwayFlowError:
		sc.goAway(ErrCodeFlowControl)
		return true
	case ConnectionError:
		sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
		sc.goAway(ErrCode(ev))
		return true // goAway will handle shutdown
	default:
		if res.err != nil {
			sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
		} else {
			sc.logf("http2: server closing client connection: %v", err)
		}
		return false
	}
}
+
// processFrame dispatches a single decoded frame to its type-specific
// handler. It returns a StreamError or ConnectionError to indicate how
// the caller should recover.
func (sc *serverConn) processFrame(f Frame) error {
	sc.serveG.check()

	// First frame received must be SETTINGS.
	if !sc.sawFirstSettings {
		if _, ok := f.(*SettingsFrame); !ok {
			return ConnectionError(ErrCodeProtocol)
		}
		sc.sawFirstSettings = true
	}

	switch f := f.(type) {
	case *SettingsFrame:
		return sc.processSettings(f)
	case *MetaHeadersFrame:
		return sc.processHeaders(f)
	case *WindowUpdateFrame:
		return sc.processWindowUpdate(f)
	case *PingFrame:
		return sc.processPing(f)
	case *DataFrame:
		return sc.processData(f)
	case *RSTStreamFrame:
		return sc.processResetStream(f)
	case *PriorityFrame:
		return sc.processPriority(f)
	case *GoAwayFrame:
		return sc.processGoAway(f)
	case *PushPromiseFrame:
		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
		return ConnectionError(ErrCodeProtocol)
	default:
		// Unknown frame types are ignored per the spec.
		sc.vlogf("http2: server ignoring frame: %v", f.Header())
		return nil
	}
}
+
+func (sc *serverConn) processPing(f *PingFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ if f.StreamID != 0 {
+ // "PING frames are not associated with any individual
+ // stream. If a PING frame is received with a stream
+ // identifier field value other than 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
+ return nil
+ }
+ sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
+ return nil
+}
+
// processWindowUpdate handles a WINDOW_UPDATE frame, crediting either
// a single stream's flow-control window or the connection-level window.
func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
	sc.serveG.check()
	switch {
	case f.StreamID != 0: // stream-level flow control
		state, st := sc.state(f.StreamID)
		if state == stateIdle {
			// Section 5.1: "Receiving any frame other than HEADERS
			// or PRIORITY on a stream in this state MUST be
			// treated as a connection error (Section 5.4.1) of
			// type PROTOCOL_ERROR."
			return ConnectionError(ErrCodeProtocol)
		}
		if st == nil {
			// "WINDOW_UPDATE can be sent by a peer that has sent a
			// frame bearing the END_STREAM flag. This means that a
			// receiver could receive a WINDOW_UPDATE frame on a "half
			// closed (remote)" or "closed" stream. A receiver MUST
			// NOT treat this as an error, see Section 5.1."
			return nil
		}
		if !st.flow.add(int32(f.Increment)) {
			// Window overflow on a single stream is a stream error.
			return streamError(f.StreamID, ErrCodeFlowControl)
		}
	default: // connection-level flow control
		if !sc.flow.add(int32(f.Increment)) {
			return goAwayFlowError{}
		}
	}
	// Newly available window may unblock queued writes.
	sc.scheduleFrameWrite()
	return nil
}
+
+func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
+ sc.serveG.check()
+
+ state, st := sc.state(f.StreamID)
+ if state == stateIdle {
+ // 6.4 "RST_STREAM frames MUST NOT be sent for a
+ // stream in the "idle" state. If a RST_STREAM frame
+ // identifying an idle stream is received, the
+ // recipient MUST treat this as a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if st != nil {
+ st.cancelCtx()
+ sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
+ }
+ return nil
+}
+
// closeStream transitions st to the closed state with the given error,
// removes it from the connection's tracking, returns its unread
// flow-control tokens, and unblocks anyone waiting on the stream.
func (sc *serverConn) closeStream(st *stream, err error) {
	sc.serveG.check()
	if st.state == stateIdle || st.state == stateClosed {
		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
	}
	st.state = stateClosed
	if st.writeDeadline != nil {
		st.writeDeadline.Stop()
	}
	if st.isPushed() {
		sc.curPushedStreams--
	} else {
		sc.curClientStreams--
	}
	delete(sc.streams, st.id)
	if len(sc.streams) == 0 {
		// Last stream is gone: the connection is idle again.
		sc.setConnState(http.StateIdle)
		if sc.srv.IdleTimeout != 0 {
			sc.idleTimer.Reset(sc.srv.IdleTimeout)
		}
		if h1ServerKeepAlivesDisabled(sc.hs) {
			sc.startGracefulShutdownInternal()
		}
	}
	if p := st.body; p != nil {
		// Return any buffered unread bytes worth of conn-level flow control.
		// See golang.org/issue/16481
		sc.sendWindowUpdate(nil, p.Len())

		p.CloseWithError(err)
	}
	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
	sc.writeSched.CloseStream(st.id)
}
+
+func (sc *serverConn) processSettings(f *SettingsFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ sc.unackedSettings--
+ if sc.unackedSettings < 0 {
+ // Why is the peer ACKing settings we never sent?
+ // The spec doesn't mention this case, but
+ // hang up on them anyway.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ return nil
+ }
+ if err := f.ForeachSetting(sc.processSetting); err != nil {
+ return err
+ }
+ sc.needToSendSettingsAck = true
+ sc.scheduleFrameWrite()
+ return nil
+}
+
// processSetting validates and applies a single setting from a
// SETTINGS frame to the connection's state.
func (sc *serverConn) processSetting(s Setting) error {
	sc.serveG.check()
	if err := s.Valid(); err != nil {
		return err
	}
	if VerboseLogs {
		sc.vlogf("http2: server processing setting %v", s)
	}
	switch s.ID {
	case SettingHeaderTableSize:
		sc.headerTableSize = s.Val
		sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
	case SettingEnablePush:
		sc.pushEnabled = s.Val != 0
	case SettingMaxConcurrentStreams:
		sc.clientMaxStreams = s.Val
	case SettingInitialWindowSize:
		// Affects all existing streams; handled separately.
		return sc.processSettingInitialWindowSize(s.Val)
	case SettingMaxFrameSize:
		sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
	case SettingMaxHeaderListSize:
		sc.peerMaxHeaderListSize = s.Val
	default:
		// Unknown setting: "An endpoint that receives a SETTINGS
		// frame with any unknown or unsupported identifier MUST
		// ignore that setting."
		if VerboseLogs {
			sc.vlogf("http2: server ignoring unknown setting %v", s)
		}
	}
	return nil
}
+
// processSettingInitialWindowSize applies a new
// SETTINGS_INITIAL_WINDOW_SIZE value, adjusting every existing
// stream's send window by the delta from the old value.
func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
	sc.serveG.check()
	// Note: val already validated to be within range by
	// processSetting's Valid call.

	// "A SETTINGS frame can alter the initial flow control window
	// size for all current streams. When the value of
	// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
	// adjust the size of all stream flow control windows that it
	// maintains by the difference between the new value and the
	// old value."
	old := sc.initialStreamSendWindowSize
	sc.initialStreamSendWindowSize = int32(val)
	growth := int32(val) - old // may be negative
	for _, st := range sc.streams {
		if !st.flow.add(growth) {
			// 6.9.2 Initial Flow Control Window Size
			// "An endpoint MUST treat a change to
			// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
			// control window to exceed the maximum size as a
			// connection error (Section 5.4.1) of type
			// FLOW_CONTROL_ERROR."
			return ConnectionError(ErrCodeFlowControl)
		}
	}
	return nil
}
+
// processData handles a DATA frame: it enforces stream state and
// flow-control rules, delivers the payload to the stream's request
// body pipe, and refunds flow-control tokens for padding and for data
// that will never be consumed.
func (sc *serverConn) processData(f *DataFrame) error {
	sc.serveG.check()
	if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
		// Tearing down with an error; ignore incoming data.
		return nil
	}
	data := f.Data()

	// "If a DATA frame is received whose stream is not in "open"
	// or "half closed (local)" state, the recipient MUST respond
	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
	id := f.Header().StreamID
	state, st := sc.state(id)
	if id == 0 || state == stateIdle {
		// Section 5.1: "Receiving any frame other than HEADERS
		// or PRIORITY on a stream in this state MUST be
		// treated as a connection error (Section 5.4.1) of
		// type PROTOCOL_ERROR."
		return ConnectionError(ErrCodeProtocol)
	}
	if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
		// This includes sending a RST_STREAM if the stream is
		// in stateHalfClosedLocal (which currently means that
		// the http.Handler returned, so it's done reading &
		// done writing). Try to stop the client from sending
		// more DATA.

		// But still enforce their connection-level flow control,
		// and return any flow control bytes since we're not going
		// to consume them.
		if sc.inflow.available() < int32(f.Length) {
			return streamError(id, ErrCodeFlowControl)
		}
		// Deduct the flow control from inflow, since we're
		// going to immediately add it back in
		// sendWindowUpdate, which also schedules sending the
		// frames.
		sc.inflow.take(int32(f.Length))
		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level

		if st != nil && st.resetQueued {
			// Already have a stream error in flight. Don't send another.
			return nil
		}
		return streamError(id, ErrCodeStreamClosed)
	}
	if st.body == nil {
		panic("internal error: should have a body in this state")
	}

	// Sender sending more than they'd declared?
	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
		return streamError(id, ErrCodeStreamClosed)
	}
	if f.Length > 0 {
		// Check whether the client has flow control quota.
		if st.inflow.available() < int32(f.Length) {
			return streamError(id, ErrCodeFlowControl)
		}
		st.inflow.take(int32(f.Length))

		if len(data) > 0 {
			wrote, err := st.body.Write(data)
			if err != nil {
				return streamError(id, ErrCodeStreamClosed)
			}
			if wrote != len(data) {
				panic("internal error: bad Writer")
			}
			st.bodyBytes += int64(len(data))
		}

		// Return any padded flow control now, since we won't
		// refund it later on body reads.
		if pad := int32(f.Length) - int32(len(data)); pad > 0 {
			sc.sendWindowUpdate32(nil, pad)
			sc.sendWindowUpdate32(st, pad)
		}
	}
	if f.StreamEnded() {
		st.endStream()
	}
	return nil
}
+
+// processGoAway handles a GOAWAY frame from the client: it begins a
+// graceful shutdown of the connection and disables server push, since
+// no new streams may be created afterwards.
+func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
+	sc.serveG.check()
+	if f.ErrCode != ErrCodeNo {
+		sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+	} else {
+		sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+	}
+	sc.startGracefulShutdownInternal()
+	// http://tools.ietf.org/html/rfc7540#section-6.8
+	// We should not create any new streams, which means we should disable push.
+	sc.pushEnabled = false
+	return nil
+}
+
+// isPushed reports whether the stream is server-initiated.
+// Server-initiated (pushed) streams use even-numbered identifiers
+// (RFC 7540, Section 5.1.1).
+func (st *stream) isPushed() bool {
+	return st.id%2 == 0
+}
+
+// endStream closes a Request.Body's pipe. It is called when a DATA
+// frame says a request body is over (or after trailers).
+func (st *stream) endStream() {
+	sc := st.sc
+	sc.serveG.check()
+
+	if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+		// The peer declared a Content-Length it didn't honor;
+		// surface that mismatch as the body's read error.
+		st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+			st.declBodyBytes, st.bodyBytes))
+	} else {
+		// Deliver io.EOF, copying any received trailers into the
+		// handler's Request just before the handler sees EOF.
+		st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
+		st.body.CloseWithError(io.EOF)
+	}
+	st.state = stateHalfClosedRemote
+}
+
+// copyTrailersToHandlerRequest is run in the Handler's goroutine in
+// its Request.Body.Read just before it gets io.EOF.
+func (st *stream) copyTrailersToHandlerRequest() {
+	for k, vv := range st.trailer {
+		if _, ok := st.reqTrailer[k]; ok {
+			// Only copy it over if it was pre-declared.
+			st.reqTrailer[k] = vv
+		}
+	}
+}
+
+// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
+// when the stream's WriteTimeout has fired. It resets the stream with
+// INTERNAL_ERROR; writeFrameFromHandler is safe to call off the serve
+// goroutine.
+func (st *stream) onWriteTimeout() {
+	st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})
+}
+
+// processHeaders handles a complete (decoded) HEADERS frame. It either
+// delivers trailers to an already-open stream, or validates and opens a
+// new client-initiated stream and dispatches the request to the
+// handler on its own goroutine.
+func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
+	sc.serveG.check()
+	id := f.StreamID
+	if sc.inGoAway {
+		// Ignore.
+		return nil
+	}
+	// http://tools.ietf.org/html/rfc7540#section-5.1.1
+	// Streams initiated by a client MUST use odd-numbered stream
+	// identifiers. [...] An endpoint that receives an unexpected
+	// stream identifier MUST respond with a connection error
+	// (Section 5.4.1) of type PROTOCOL_ERROR.
+	if id%2 != 1 {
+		return ConnectionError(ErrCodeProtocol)
+	}
+	// A HEADERS frame can be used to create a new stream or
+	// send a trailer for an open one. If we already have a stream
+	// open, let it process its own HEADERS frame (trailers at this
+	// point, if it's valid).
+	if st := sc.streams[f.StreamID]; st != nil {
+		if st.resetQueued {
+			// We're sending RST_STREAM to close the stream, so don't bother
+			// processing this frame.
+			return nil
+		}
+		return st.processTrailerHeaders(f)
+	}
+
+	// [...] The identifier of a newly established stream MUST be
+	// numerically greater than all streams that the initiating
+	// endpoint has opened or reserved. [...] An endpoint that
+	// receives an unexpected stream identifier MUST respond with
+	// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+	if id <= sc.maxClientStreamID {
+		return ConnectionError(ErrCodeProtocol)
+	}
+	sc.maxClientStreamID = id
+
+	// The connection is no longer idle; stop the idle timer, if any.
+	if sc.idleTimer != nil {
+		sc.idleTimer.Stop()
+	}
+
+	// http://tools.ietf.org/html/rfc7540#section-5.1.2
+	// [...] Endpoints MUST NOT exceed the limit set by their peer. An
+	// endpoint that receives a HEADERS frame that causes their
+	// advertised concurrent stream limit to be exceeded MUST treat
+	// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
+	// or REFUSED_STREAM.
+	if sc.curClientStreams+1 > sc.advMaxStreams {
+		if sc.unackedSettings == 0 {
+			// They should know better.
+			return streamError(id, ErrCodeProtocol)
+		}
+		// Assume it's a network race, where they just haven't
+		// received our last SETTINGS update. But actually
+		// this can't happen yet, because we don't yet provide
+		// a way for users to adjust server parameters at
+		// runtime.
+		return streamError(id, ErrCodeRefusedStream)
+	}
+
+	initialState := stateOpen
+	if f.StreamEnded() {
+		initialState = stateHalfClosedRemote
+	}
+	st := sc.newStream(id, 0, initialState)
+
+	if f.HasPriority() {
+		if err := checkPriority(f.StreamID, f.Priority); err != nil {
+			return err
+		}
+		sc.writeSched.AdjustStream(st.id, f.Priority)
+	}
+
+	rw, req, err := sc.newWriterAndRequest(st, f)
+	if err != nil {
+		return err
+	}
+	st.reqTrailer = req.Trailer
+	if st.reqTrailer != nil {
+		st.trailer = make(http.Header)
+	}
+	st.body = req.Body.(*requestBody).pipe // may be nil
+	st.declBodyBytes = req.ContentLength
+
+	handler := sc.handler.ServeHTTP
+	if f.Truncated {
+		// Their header list was too long. Send a 431 error.
+		handler = handleHeaderListTooLong
+	} else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
+		handler = new400Handler(err)
+	}
+
+	// The net/http package sets the read deadline from the
+	// http.Server.ReadTimeout during the TLS handshake, but then
+	// passes the connection off to us with the deadline already
+	// set. Disarm it here after the request headers are read,
+	// similar to how the http1 server works. Here it's
+	// technically more like the http1 Server's ReadHeaderTimeout
+	// (in Go 1.8), though. That's a more sane option anyway.
+	if sc.hs.ReadTimeout != 0 {
+		sc.conn.SetReadDeadline(time.Time{})
+	}
+
+	go sc.runHandler(rw, req, handler)
+	return nil
+}
+
+// processTrailerHeaders handles a HEADERS frame received on an
+// already-open stream, i.e. trailers. The frame must end the stream,
+// carry no pseudo-header fields, and contain only valid trailer names;
+// valid trailer values are recorded before the stream is ended.
+func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
+	sc := st.sc
+	sc.serveG.check()
+	if st.gotTrailerHeader {
+		// At most one trailers block per stream.
+		return ConnectionError(ErrCodeProtocol)
+	}
+	st.gotTrailerHeader = true
+	if !f.StreamEnded() {
+		return streamError(st.id, ErrCodeProtocol)
+	}
+
+	if len(f.PseudoFields()) > 0 {
+		return streamError(st.id, ErrCodeProtocol)
+	}
+	if st.trailer != nil {
+		for _, hf := range f.RegularFields() {
+			key := sc.canonicalHeader(hf.Name)
+			if !ValidTrailerHeader(key) {
+				// TODO: send more details to the peer somehow. But http2 has
+				// no way to send debug data at a stream level. Discuss with
+				// HTTP folk.
+				return streamError(st.id, ErrCodeProtocol)
+			}
+			st.trailer[key] = append(st.trailer[key], hf.Value)
+		}
+	}
+	st.endStream()
+	return nil
+}
+
+// checkPriority validates PRIORITY parameters received for streamID,
+// rejecting self-dependency as a stream error.
+func checkPriority(streamID uint32, p PriorityParam) error {
+	if streamID == p.StreamDep {
+		// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
+		// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
+		// Section 5.3.3 says that a stream can depend on one of its dependencies,
+		// so it's only self-dependencies that are forbidden.
+		return streamError(streamID, ErrCodeProtocol)
+	}
+	return nil
+}
+
+// processPriority handles a PRIORITY frame by validating it and passing
+// the parameters on to the write scheduler. Ignored during GOAWAY.
+func (sc *serverConn) processPriority(f *PriorityFrame) error {
+	if sc.inGoAway {
+		return nil
+	}
+	if err := checkPriority(f.StreamID, f.PriorityParam); err != nil {
+		return err
+	}
+	sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
+	return nil
+}
+
+// newStream creates and registers a stream with the given id in the
+// given initial state, wiring up its flow-control windows (linked to
+// the connection-level counters) and write scheduler entry.
+// pusherID is the parent stream's id for pushed streams, else 0.
+func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
+	sc.serveG.check()
+	if id == 0 {
+		panic("internal error: cannot create stream with id 0")
+	}
+
+	ctx, cancelCtx := contextWithCancel(sc.baseCtx)
+	st := &stream{
+		sc:        sc,
+		id:        id,
+		state:     state,
+		ctx:       ctx,
+		cancelCtx: cancelCtx,
+	}
+	st.cw.Init()
+	st.flow.conn = &sc.flow // link to conn-level counter
+	st.flow.add(sc.initialStreamSendWindowSize)
+	st.inflow.conn = &sc.inflow // link to conn-level counter
+	st.inflow.add(sc.srv.initialStreamRecvWindowSize())
+	if sc.hs.WriteTimeout != 0 {
+		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+	}
+
+	sc.streams[id] = st
+	sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
+	if st.isPushed() {
+		sc.curPushedStreams++
+	} else {
+		sc.curClientStreams++
+	}
+	// First open stream transitions the conn to the active state.
+	if sc.curOpenStreams() == 1 {
+		sc.setConnState(http.StateActive)
+	}
+
+	return st
+}
+
+// newWriterAndRequest validates the request pseudo-header fields from a
+// HEADERS frame and constructs the responseWriter and http.Request for
+// the new stream, including the body pipe when the stream isn't ended.
+// Malformed requests are rejected with a stream-level PROTOCOL_ERROR.
+func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
+	sc.serveG.check()
+
+	rp := requestParam{
+		method:    f.PseudoValue("method"),
+		scheme:    f.PseudoValue("scheme"),
+		authority: f.PseudoValue("authority"),
+		path:      f.PseudoValue("path"),
+	}
+
+	isConnect := rp.method == "CONNECT"
+	if isConnect {
+		// CONNECT requests carry only :authority (RFC 7540, 8.3).
+		if rp.path != "" || rp.scheme != "" || rp.authority == "" {
+			return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+		}
+	} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
+		// See 8.1.2.6 Malformed Requests and Responses:
+		//
+		// Malformed requests or responses that are detected
+		// MUST be treated as a stream error (Section 5.4.2)
+		// of type PROTOCOL_ERROR."
+		//
+		// 8.1.2.3 Request Pseudo-Header Fields
+		// "All HTTP/2 requests MUST include exactly one valid
+		// value for the :method, :scheme, and :path
+		// pseudo-header fields"
+		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+	}
+
+	bodyOpen := !f.StreamEnded()
+	if rp.method == "HEAD" && bodyOpen {
+		// HEAD requests can't have bodies
+		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+	}
+
+	rp.header = make(http.Header)
+	for _, hf := range f.RegularFields() {
+		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+	}
+	if rp.authority == "" {
+		rp.authority = rp.header.Get("Host")
+	}
+
+	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
+	if err != nil {
+		return nil, nil, err
+	}
+	if bodyOpen {
+		// Content-Length of -1 means "unknown length".
+		if vv, ok := rp.header["Content-Length"]; ok {
+			req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+		} else {
+			req.ContentLength = -1
+		}
+		req.Body.(*requestBody).pipe = &pipe{
+			b: &dataBuffer{expected: req.ContentLength},
+		}
+	}
+	return rw, req, nil
+}
+
+// requestParam bundles the decoded request pseudo-header values and
+// regular header fields used to build an http.Request.
+type requestParam struct {
+	method string
+	scheme, authority, path string
+	header http.Header
+}
+
+// newWriterAndRequestNoBody builds the responseWriter and http.Request
+// for a stream from already-validated request parameters, without
+// attaching a body pipe (the caller does that if the stream has a
+// body). It also handles Expect: 100-continue, Cookie merging, and
+// Trailer declaration, and wires a pooled responseWriterState.
+func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
+	sc.serveG.check()
+
+	var tlsState *tls.ConnectionState // nil if not scheme https
+	if rp.scheme == "https" {
+		tlsState = sc.tlsState
+	}
+
+	needsContinue := rp.header.Get("Expect") == "100-continue"
+	if needsContinue {
+		rp.header.Del("Expect")
+	}
+	// Merge Cookie headers into one "; "-delimited value.
+	if cookies := rp.header["Cookie"]; len(cookies) > 1 {
+		rp.header.Set("Cookie", strings.Join(cookies, "; "))
+	}
+
+	// Setup Trailers
+	var trailer http.Header
+	for _, v := range rp.header["Trailer"] {
+		for _, key := range strings.Split(v, ",") {
+			key = http.CanonicalHeaderKey(strings.TrimSpace(key))
+			switch key {
+			case "Transfer-Encoding", "Trailer", "Content-Length":
+				// Bogus. (copy of http1 rules)
+				// Ignore.
+			default:
+				if trailer == nil {
+					trailer = make(http.Header)
+				}
+				trailer[key] = nil
+			}
+		}
+	}
+	delete(rp.header, "Trailer")
+
+	var url_ *url.URL
+	var requestURI string
+	if rp.method == "CONNECT" {
+		url_ = &url.URL{Host: rp.authority}
+		requestURI = rp.authority // mimic HTTP/1 server behavior
+	} else {
+		var err error
+		url_, err = url.ParseRequestURI(rp.path)
+		if err != nil {
+			return nil, nil, streamError(st.id, ErrCodeProtocol)
+		}
+		requestURI = rp.path
+	}
+
+	body := &requestBody{
+		conn:          sc,
+		stream:        st,
+		needsContinue: needsContinue,
+	}
+	req := &http.Request{
+		Method:     rp.method,
+		URL:        url_,
+		RemoteAddr: sc.remoteAddrStr,
+		Header:     rp.header,
+		RequestURI: requestURI,
+		Proto:      "HTTP/2.0",
+		ProtoMajor: 2,
+		ProtoMinor: 0,
+		TLS:        tlsState,
+		Host:       rp.authority,
+		Body:       body,
+		Trailer:    trailer,
+	}
+	req = requestWithContext(req, st.ctx)
+
+	// Reuse a pooled responseWriterState, keeping its bufio.Writer
+	// (and the writer's buffer) across requests.
+	rws := responseWriterStatePool.Get().(*responseWriterState)
+	bwSave := rws.bw
+	*rws = responseWriterState{} // zero all the fields
+	rws.conn = sc
+	rws.bw = bwSave
+	rws.bw.Reset(chunkWriter{rws})
+	rws.stream = st
+	rws.req = req
+	rws.body = body
+
+	rw := &responseWriter{rws: rws}
+	return rw, req, nil
+}
+
+// runHandler runs the request handler. It is run on its own goroutine.
+// A panic in the handler resets the stream (RST_STREAM) and is logged
+// the same way net/http logs handler panics.
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+	didPanic := true
+	defer func() {
+		rw.rws.stream.cancelCtx()
+		if didPanic {
+			e := recover()
+			sc.writeFrameFromHandler(FrameWriteRequest{
+				write:  handlerPanicRST{rw.rws.stream.id},
+				stream: rw.rws.stream,
+			})
+			// Same as net/http:
+			if shouldLogPanic(e) {
+				const size = 64 << 10
+				buf := make([]byte, size)
+				buf = buf[:runtime.Stack(buf, false)]
+				sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+			}
+			return
+		}
+		rw.handlerDone()
+	}()
+	handler(rw, req)
+	didPanic = false
+}
+
+// handleHeaderListTooLong is the handler substituted in when the
+// client's header list was too large to decode fully; it responds
+// with HTTP 431.
+func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
+	// 10.5.1 Limits on Header Block Size:
+	// .. "A server that receives a larger header block than it is
+	// willing to handle can send an HTTP 431 (Request Header Fields Too
+	// Large) status code"
+	const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
+	w.WriteHeader(statusRequestHeaderFieldsTooLarge)
+	io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
+}
+
+// writeHeaders schedules a response HEADERS frame for st and, when a
+// header map is attached, blocks until the frame has been written (or
+// the stream/connection closes).
+// It is called from handler goroutines; h may be nil.
+func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
+	sc.serveG.checkNotOn() // NOT on
+	var errc chan error
+	if headerData.h != nil {
+		// If there's a header map (which we don't own), we have to block on
+		// waiting for this frame to be written, so an http.Flush mid-handler
+		// writes out the correct value of keys, before a handler later potentially
+		// mutates it.
+		errc = errChanPool.Get().(chan error)
+	}
+	if err := sc.writeFrameFromHandler(FrameWriteRequest{
+		write:  headerData,
+		stream: st,
+		done:   errc,
+	}); err != nil {
+		return err
+	}
+	if errc != nil {
+		select {
+		case err := <-errc:
+			errChanPool.Put(errc)
+			return err
+		case <-sc.doneServing:
+			return errClientDisconnected
+		case <-st.cw:
+			return errStreamClosed
+		}
+	}
+	return nil
+}
+
+// write100ContinueHeaders schedules an informational 100 Continue
+// response on st. It is called from handler goroutines.
+func (sc *serverConn) write100ContinueHeaders(st *stream) {
+	sc.writeFrameFromHandler(FrameWriteRequest{
+		write:  write100ContinueHeadersFrame{st.id},
+		stream: st,
+	})
+}
+
+// A bodyReadMsg tells the server loop that the http.Handler read n
+// bytes of the DATA from the client on the given stream.
+type bodyReadMsg struct {
+	st *stream // stream whose body was read
+	n  int     // number of body bytes consumed by the handler
+}
+
+// noteBodyReadFromHandler is called from handler goroutines.
+// It notes that the handler for the given stream ID read n bytes of
+// its body and schedules flow control tokens to be sent, handing the
+// accounting off to the serve goroutine via bodyReadCh.
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
+	sc.serveG.checkNotOn() // NOT on
+	if n > 0 {
+		select {
+		case sc.bodyReadCh <- bodyReadMsg{st, n}:
+		case <-sc.doneServing:
+			// Connection is shutting down; no window update needed.
+		}
+	}
+}
+
+// noteBodyRead, run on the serve goroutine, returns n bytes of flow
+// control to the peer: always at the connection level, and at the
+// stream level only while the peer may still send more DATA.
+func (sc *serverConn) noteBodyRead(st *stream, n int) {
+	sc.serveG.check()
+	sc.sendWindowUpdate(nil, n) // conn-level
+	if st.state != stateHalfClosedRemote && st.state != stateClosed {
+		// Don't send this WINDOW_UPDATE if the stream is closed
+		// remotely.
+		sc.sendWindowUpdate(st, n)
+	}
+}
+
+// sendWindowUpdate sends a WINDOW_UPDATE of n bytes, splitting it into
+// multiple frames if n exceeds the per-frame maximum increment.
+// st may be nil for conn-level.
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
+	sc.serveG.check()
+	// "The legal range for the increment to the flow control
+	// window is 1 to 2^31-1 (2,147,483,647) octets."
+	// A Go Read call on 64-bit machines could in theory read
+	// a larger Read than this. Very unlikely, but we handle it here
+	// rather than elsewhere for now.
+	const maxUint31 = 1<<31 - 1
+	for n >= maxUint31 {
+		sc.sendWindowUpdate32(st, maxUint31)
+		n -= maxUint31
+	}
+	sc.sendWindowUpdate32(st, int32(n))
+}
+
+// sendWindowUpdate32 schedules a single WINDOW_UPDATE frame of n bytes
+// and credits the corresponding inbound flow-control window.
+// st may be nil for conn-level.
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+	sc.serveG.check()
+	if n == 0 {
+		return
+	}
+	if n < 0 {
+		panic("negative update")
+	}
+	var streamID uint32
+	if st != nil {
+		streamID = st.id
+	}
+	sc.writeFrame(FrameWriteRequest{
+		write:  writeWindowUpdate{streamID: streamID, n: uint32(n)},
+		stream: st,
+	})
+	// Credit back the window we just granted to the peer; overflow
+	// here means our bookkeeping is broken.
+	var ok bool
+	if st == nil {
+		ok = sc.inflow.add(n)
+	} else {
+		ok = st.inflow.add(n)
+	}
+	if !ok {
+		panic("internal error; sent too many window updates without decrements?")
+	}
+}
+
+// requestBody is the Handler's Request.Body type.
+// Read and Close may be called concurrently.
+type requestBody struct {
+	stream        *stream
+	conn          *serverConn
+	closed        bool  // for use by Close only
+	sawEOF        bool  // for use by Read only
+	pipe          *pipe // non-nil if we have a HTTP entity message body
+	needsContinue bool  // need to send a 100-continue
+}
+
+// Close aborts any pending body reads with errClosedBody. It always
+// returns nil and is idempotent.
+func (b *requestBody) Close() error {
+	if b.pipe != nil && !b.closed {
+		b.pipe.BreakWithError(errClosedBody)
+	}
+	b.closed = true
+	return nil
+}
+
+// Read reads request body bytes from the stream's pipe, sending a
+// deferred 100-continue on the first call if one is owed, and notes
+// consumed bytes with the serve goroutine for flow control.
+func (b *requestBody) Read(p []byte) (n int, err error) {
+	if b.needsContinue {
+		b.needsContinue = false
+		b.conn.write100ContinueHeaders(b.stream)
+	}
+	if b.pipe == nil || b.sawEOF {
+		// No entity body, or it was fully consumed already.
+		return 0, io.EOF
+	}
+	n, err = b.pipe.Read(p)
+	if err == io.EOF {
+		b.sawEOF = true
+	}
+	if b.conn == nil && inTests {
+		return
+	}
+	b.conn.noteBodyReadFromHandler(b.stream, n, err)
+	return
+}
+
+// responseWriter is the http.ResponseWriter implementation. It's
+// intentionally small (1 pointer wide) to minimize garbage. The
+// responseWriterState pointer inside is zeroed at the end of a
+// request (in handlerDone) and calls on the responseWriter thereafter
+// simply crash (caller's mistake), but the much larger responseWriterState
+// and buffers are reused between multiple requests.
+type responseWriter struct {
+	rws *responseWriterState
+}
+
+// Optional http.ResponseWriter interfaces implemented.
+var (
+	_ http.CloseNotifier = (*responseWriter)(nil)
+	_ http.Flusher       = (*responseWriter)(nil)
+	_ stringWriter       = (*responseWriter)(nil)
+)
+
+// responseWriterState is the bulk of the per-request response state
+// behind a responseWriter. It is pooled (responseWriterStatePool) and
+// reused between requests; see handlerDone.
+type responseWriterState struct {
+	// immutable within a request:
+	stream *stream
+	req    *http.Request
+	body   *requestBody // to close at end of request, if DATA frames didn't
+	conn   *serverConn
+
+	// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
+	bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
+
+	// mutated by http.Handler goroutine:
+	handlerHeader http.Header // nil until called
+	snapHeader    http.Header // snapshot of handlerHeader at WriteHeader time
+	trailers      []string    // set in writeChunk
+	status        int         // status code passed to WriteHeader
+	wroteHeader   bool        // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
+	sentHeader    bool        // have we sent the header frame?
+	handlerDone   bool        // handler has finished
+	dirty         bool        // a Write failed; don't reuse this responseWriterState
+
+	sentContentLen int64 // non-zero if handler set a Content-Length header
+	wroteBytes     int64
+
+	closeNotifierMu sync.Mutex // guards closeNotifierCh
+	closeNotifierCh chan bool  // nil until first used
+}
+
+// chunkWriter is the io.Writer the responseWriterState's bufio.Writer
+// flushes into; it forwards everything to writeChunk.
+type chunkWriter struct{ rws *responseWriterState }
+
+func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+
+// hasTrailers reports whether any trailer fields have been declared.
+func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
+
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+// Invalid trailer names are logged and ignored; duplicates are dropped.
+func (rws *responseWriterState) declareTrailer(k string) {
+	k = http.CanonicalHeaderKey(k)
+	if !ValidTrailerHeader(k) {
+		// Forbidden by RFC 2616 14.40.
+		rws.conn.logf("ignoring invalid trailer %q", k)
+		return
+	}
+	if !strSliceContains(rws.trailers, k) {
+		rws.trailers = append(rws.trailers, k)
+	}
+}
+
+// writeChunk writes chunks from the bufio.Writer. But because
+// bufio.Writer may bypass its chunking, sometimes p may be
+// arbitrarily large.
+//
+// writeChunk is also responsible (on the first chunk) for sending the
+// HEADER response, and (on the last chunk, when the handler is done)
+// for promoting and sending trailers and setting END_STREAM.
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
+	if !rws.wroteHeader {
+		// Implicit 200, same as net/http.
+		rws.writeHeader(200)
+	}
+
+	isHeadResp := rws.req.Method == "HEAD"
+	if !rws.sentHeader {
+		rws.sentHeader = true
+		var ctype, clen string
+		if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
+			rws.snapHeader.Del("Content-Length")
+			clen64, err := strconv.ParseInt(clen, 10, 64)
+			if err == nil && clen64 >= 0 {
+				rws.sentContentLen = clen64
+			} else {
+				clen = ""
+			}
+		}
+		if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+			// Whole body is known now; synthesize Content-Length.
+			clen = strconv.Itoa(len(p))
+		}
+		_, hasContentType := rws.snapHeader["Content-Type"]
+		if !hasContentType && bodyAllowedForStatus(rws.status) {
+			ctype = http.DetectContentType(p)
+		}
+		var date string
+		if _, ok := rws.snapHeader["Date"]; !ok {
+			// TODO(bradfitz): be faster here, like net/http? measure.
+			date = time.Now().UTC().Format(http.TimeFormat)
+		}
+
+		for _, v := range rws.snapHeader["Trailer"] {
+			foreachHeaderElement(v, rws.declareTrailer)
+		}
+
+		endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
+		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+			streamID:      rws.stream.id,
+			httpResCode:   rws.status,
+			h:             rws.snapHeader,
+			endStream:     endStream,
+			contentType:   ctype,
+			contentLength: clen,
+			date:          date,
+		})
+		if err != nil {
+			rws.dirty = true
+			return 0, err
+		}
+		if endStream {
+			return 0, nil
+		}
+	}
+	if isHeadResp {
+		// HEAD responses carry no body; pretend we wrote it.
+		return len(p), nil
+	}
+	if len(p) == 0 && !rws.handlerDone {
+		return 0, nil
+	}
+
+	if rws.handlerDone {
+		rws.promoteUndeclaredTrailers()
+	}
+
+	endStream := rws.handlerDone && !rws.hasTrailers()
+	if len(p) > 0 || endStream {
+		// only send a 0 byte DATA frame if we're ending the stream.
+		if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+			rws.dirty = true
+			return 0, err
+		}
+	}
+
+	if rws.handlerDone && rws.hasTrailers() {
+		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+			streamID:  rws.stream.id,
+			h:         rws.handlerHeader,
+			trailers:  rws.trailers,
+			endStream: true,
+		})
+		if err != nil {
+			rws.dirty = true
+		}
+		return len(p), err
+	}
+	return len(p), nil
+}
+
+// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+// https://golang.org/pkg/net/http/#ResponseWriter
+// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+const TrailerPrefix = "Trailer:"
+
+// promoteUndeclaredTrailers permits http.Handlers to set trailers
+// after the header has already been flushed. Because the Go
+// ResponseWriter interface has no way to set Trailers (only the
+// Header), and because we didn't want to expand the ResponseWriter
+// interface, and because nobody used trailers, and because RFC 2616
+// says you SHOULD (but not must) predeclare any trailers in the
+// header, the official ResponseWriter rules said trailers in Go must
+// be predeclared, and then we reuse the same ResponseWriter.Header()
+// map to mean both Headers and Trailers. When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+	for k, vv := range rws.handlerHeader {
+		if !strings.HasPrefix(k, TrailerPrefix) {
+			continue
+		}
+		trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+		rws.declareTrailer(trailerKey)
+		rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+	}
+
+	// Sort for deterministic wire order when more than one trailer.
+	if len(rws.trailers) > 1 {
+		sorter := sorterPool.Get().(*sorter)
+		sorter.SortStrings(rws.trailers)
+		sorterPool.Put(sorter)
+	}
+}
+
+// Flush implements http.Flusher, forcing any buffered response data
+// (and the response header, if not yet sent) out to the client.
+func (w *responseWriter) Flush() {
+	rws := w.rws
+	if rws == nil {
+		// Was "Header called after Handler finished" — a copy-paste
+		// of the panic in Header; name the actual method instead.
+		panic("Flush called after Handler finished")
+	}
+	if rws.bw.Buffered() > 0 {
+		if err := rws.bw.Flush(); err != nil {
+			// Ignore the error. The frame writer already knows.
+			return
+		}
+	} else {
+		// The bufio.Writer won't call chunkWriter.Write
+		// (writeChunk) with zero bytes, so we have to do it
+		// ourselves to force the HTTP response header and/or
+		// final DATA frame (with END_STREAM) to be sent.
+		rws.writeChunk(nil)
+	}
+}
+
+// CloseNotify implements http.CloseNotifier: it returns a channel that
+// receives once when the stream closes. The channel and its watcher
+// goroutine are created lazily on first use and shared by later calls.
+func (w *responseWriter) CloseNotify() <-chan bool {
+	rws := w.rws
+	if rws == nil {
+		panic("CloseNotify called after Handler finished")
+	}
+	rws.closeNotifierMu.Lock()
+	ch := rws.closeNotifierCh
+	if ch == nil {
+		ch = make(chan bool, 1)
+		rws.closeNotifierCh = ch
+		cw := rws.stream.cw
+		go func() {
+			cw.Wait() // wait for close
+			ch <- true
+		}()
+	}
+	rws.closeNotifierMu.Unlock()
+	return ch
+}
+
+// Header implements http.ResponseWriter, returning the (lazily
+// allocated) header map the handler may mutate before WriteHeader.
+func (w *responseWriter) Header() http.Header {
+	rws := w.rws
+	if rws == nil {
+		panic("Header called after Handler finished")
+	}
+	if rws.handlerHeader == nil {
+		rws.handlerHeader = make(http.Header)
+	}
+	return rws.handlerHeader
+}
+
+// WriteHeader implements http.ResponseWriter, recording the response
+// status code. Only the first call has any effect.
+func (w *responseWriter) WriteHeader(code int) {
+	rws := w.rws
+	if rws == nil {
+		panic("WriteHeader called after Handler finished")
+	}
+	rws.writeHeader(code)
+}
+
+// writeHeader records the status code on the first call and snapshots
+// the handler's header map so later mutations by the handler don't
+// affect what gets sent. Subsequent calls are no-ops.
+func (rws *responseWriterState) writeHeader(code int) {
+	if !rws.wroteHeader {
+		rws.wroteHeader = true
+		rws.status = code
+		if len(rws.handlerHeader) > 0 {
+			rws.snapHeader = cloneHeader(rws.handlerHeader)
+		}
+	}
+}
+
+// cloneHeader returns a deep copy of h: both the map and each value
+// slice are freshly allocated.
+func cloneHeader(h http.Header) http.Header {
+	h2 := make(http.Header, len(h))
+	for k, vv := range h {
+		vv2 := make([]string, len(vv))
+		copy(vv2, vv)
+		h2[k] = vv2
+	}
+	return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+// * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte)
+// * -> responseWriterState.writeChunk (most of the magic; see comment there)
+func (w *responseWriter) Write(p []byte) (n int, err error) {
+	return w.write(len(p), p, "")
+}
+
+// WriteString implements stringWriter, avoiding a []byte conversion.
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+	return w.write(len(s), nil, s)
+}
+
+// write is the common implementation behind Write and WriteString.
+// At most one of dataB or dataS is non-empty; lenData is its length.
+// It enforces the declared Content-Length, implicitly sending a 200
+// header on the first write.
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+	rws := w.rws
+	if rws == nil {
+		panic("Write called after Handler finished")
+	}
+	if !rws.wroteHeader {
+		w.WriteHeader(200)
+	}
+	if !bodyAllowedForStatus(rws.status) {
+		return 0, http.ErrBodyNotAllowed
+	}
+	rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
+	if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
+		// TODO: send a RST_STREAM
+		return 0, errors.New("http2: handler wrote more than declared Content-Length")
+	}
+
+	if dataB != nil {
+		return rws.bw.Write(dataB)
+	} else {
+		return rws.bw.WriteString(dataS)
+	}
+}
+
+// handlerDone is called once the handler returns: it flushes any
+// remaining response data, detaches the responseWriterState from the
+// responseWriter (later calls panic), and recycles the state if safe.
+func (w *responseWriter) handlerDone() {
+	rws := w.rws
+	dirty := rws.dirty
+	rws.handlerDone = true
+	w.Flush()
+	w.rws = nil
+	if !dirty {
+		// Only recycle the pool if all prior Write calls to
+		// the serverConn goroutine completed successfully. If
+		// they returned earlier due to resets from the peer
+		// there might still be write goroutines outstanding
+		// from the serverConn referencing the rws memory. See
+		// issue 20704.
+		responseWriterStatePool.Put(rws)
+	}
+}
+
+// Push errors.
+var (
+	// ErrRecursivePush is returned when a pushed stream tries to push.
+	ErrRecursivePush = errors.New("http2: recursive push not allowed")
+	// ErrPushLimitReached is returned when the peer's concurrent-stream
+	// limit would be exceeded by another push.
+	ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
+)
+
+// pushOptions is the internal version of http.PushOptions, which we
+// cannot include here because it's only defined in Go 1.8 and later.
+type pushOptions struct {
+	Method string
+	Header http.Header
+}
+
+// push validates a server-push request and hands it to the serve
+// goroutine, blocking until the promised stream has been allocated or
+// the stream/connection closes. It runs on the handler goroutine.
+func (w *responseWriter) push(target string, opts pushOptions) error {
+	st := w.rws.stream
+	sc := st.sc
+	sc.serveG.checkNotOn()
+
+	// No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
+	// http://tools.ietf.org/html/rfc7540#section-6.6
+	if st.isPushed() {
+		return ErrRecursivePush
+	}
+
+	// Default options.
+	if opts.Method == "" {
+		opts.Method = "GET"
+	}
+	if opts.Header == nil {
+		opts.Header = http.Header{}
+	}
+	wantScheme := "http"
+	if w.rws.req.TLS != nil {
+		wantScheme = "https"
+	}
+
+	// Validate the request.
+	u, err := url.Parse(target)
+	if err != nil {
+		return err
+	}
+	if u.Scheme == "" {
+		// Relative target: inherit scheme and host from the request.
+		if !strings.HasPrefix(target, "/") {
+			return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
+		}
+		u.Scheme = wantScheme
+		u.Host = w.rws.req.Host
+	} else {
+		if u.Scheme != wantScheme {
+			return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
+		}
+		if u.Host == "" {
+			return errors.New("URL must have a host")
+		}
+	}
+	for k := range opts.Header {
+		if strings.HasPrefix(k, ":") {
+			return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
+		}
+		// These headers are meaningful only if the request has a body,
+		// but PUSH_PROMISE requests cannot have a body.
+		// http://tools.ietf.org/html/rfc7540#section-8.2
+		// Also disallow Host, since the promised URL must be absolute.
+		switch strings.ToLower(k) {
+		case "content-length", "content-encoding", "trailer", "te", "expect", "host":
+			return fmt.Errorf("promised request headers cannot include %q", k)
+		}
+	}
+	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+		return err
+	}
+
+	// The RFC effectively limits promised requests to GET and HEAD:
+	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+	// http://tools.ietf.org/html/rfc7540#section-8.2
+	if opts.Method != "GET" && opts.Method != "HEAD" {
+		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+	}
+
+	msg := &startPushRequest{
+		parent: st,
+		method: opts.Method,
+		url:    u,
+		header: cloneHeader(opts.Header),
+		done:   errChanPool.Get().(chan error),
+	}
+
+	// Hand the request to the serve goroutine...
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case sc.serveMsgCh <- msg:
+	}
+
+	// ...and wait for it to report the outcome.
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case err := <-msg.done:
+		errChanPool.Put(msg.done)
+		return err
+	}
+}
+
+// startPushRequest is the message a handler goroutine sends to the
+// serve goroutine to initiate a server push; the result (nil or an
+// error) is delivered on done.
+type startPushRequest struct {
+	parent *stream
+	method string
+	url    *url.URL
+	header http.Header
+	done   chan error
+}
+
+// startPush handles a startPushRequest on the serve goroutine. It validates
+// stream and connection state, then queues a PUSH_PROMISE frame. The promised
+// stream ID is allocated lazily, only when the frame is actually written, and
+// the pushed request's handler is started at that point. The final result is
+// delivered to the waiting Push caller via msg.done.
+func (sc *serverConn) startPush(msg *startPushRequest) {
+	sc.serveG.check()
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+	// is in either the "open" or "half-closed (remote)" state.
+	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
+		// responseWriter.Push checks that the stream is peer-initiated.
+		msg.done <- errStreamClosed
+		return
+	}
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	// The client can disable push entirely via SETTINGS_ENABLE_PUSH.
+	if !sc.pushEnabled {
+		msg.done <- http.ErrNotSupported
+		return
+	}
+
+	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+	// is written. Once the ID is allocated, we start the request handler.
+	allocatePromisedID := func() (uint32, error) {
+		sc.serveG.check()
+
+		// Check this again, just in case. Technically, we might have received
+		// an updated SETTINGS by the time we got around to writing this frame.
+		if !sc.pushEnabled {
+			return 0, http.ErrNotSupported
+		}
+		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
+		// Respect the client's SETTINGS_MAX_CONCURRENT_STREAMS limit.
+		if sc.curPushedStreams+1 > sc.clientMaxStreams {
+			return 0, ErrPushLimitReached
+		}
+
+		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
+		// Streams initiated by the server MUST use even-numbered identifiers.
+		// A server that is unable to establish a new stream identifier can send a GOAWAY
+		// frame so that the client is forced to open a new connection for new streams.
+		if sc.maxPushPromiseID+2 >= 1<<31 {
+			sc.startGracefulShutdownInternal()
+			return 0, ErrPushLimitReached
+		}
+		sc.maxPushPromiseID += 2
+		promisedID := sc.maxPushPromiseID
+
+		// http://tools.ietf.org/html/rfc7540#section-8.2.
+		// Strictly speaking, the new stream should start in "reserved (local)", then
+		// transition to "half closed (remote)" after sending the initial HEADERS, but
+		// we start in "half closed (remote)" for simplicity.
+		// See further comments at the definition of stateHalfClosedRemote.
+		promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
+		rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
+			method:    msg.method,
+			scheme:    msg.url.Scheme,
+			authority: msg.url.Host,
+			path:      msg.url.RequestURI(),
+			header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
+		})
+		if err != nil {
+			// Should not happen, since we've already validated msg.url.
+			panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
+		}
+
+		go sc.runHandler(rw, req, sc.handler.ServeHTTP)
+		return promisedID, nil
+	}
+
+	sc.writeFrame(FrameWriteRequest{
+		write: &writePushPromise{
+			streamID:           msg.parent.id,
+			method:             msg.method,
+			url:                msg.url,
+			h:                  msg.header,
+			allocatePromisedID: allocatePromisedID,
+		},
+		stream: msg.parent,
+		done:   msg.done,
+	})
+}
+
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 2616 section 2.1 and calls fn for each non-empty element.
+// Surrounding whitespace is trimmed from v and from each element; empty
+// elements (e.g. from "a,,b") are skipped.
+func foreachHeaderElement(v string, fn func(string)) {
+	v = textproto.TrimString(v)
+	if v == "" {
+		return
+	}
+	// Fast path: no comma means v is a single element; avoid Split's allocation.
+	if !strings.Contains(v, ",") {
+		fn(v)
+		return
+	}
+	for _, f := range strings.Split(v, ",") {
+		if f = textproto.TrimString(f); f != "" {
+			fn(f)
+		}
+	}
+}
+
+// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
+// connHeaders lists connection-specific header fields that are forbidden in
+// HTTP/2 messages; checkValidHTTP2RequestHeaders rejects any request that
+// contains one. Names are in canonical form for direct http.Header lookup.
+var connHeaders = []string{
+	"Connection",
+	"Keep-Alive",
+	"Proxy-Connection",
+	"Transfer-Encoding",
+	"Upgrade",
+}
+
+// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request,
+// per RFC 7540 Section 8.1.2.2.
+// The returned error is reported to users.
+func checkValidHTTP2RequestHeaders(h http.Header) error {
+	// Direct map lookups assume h uses canonical keys (e.g. "Connection"),
+	// which is how net/http and this package construct header maps.
+	for _, k := range connHeaders {
+		if _, ok := h[k]; ok {
+			return fmt.Errorf("request header %q is not valid in HTTP/2", k)
+		}
+	}
+	// The only TE value permitted in HTTP/2 is "trailers" (RFC 7540, 8.1.2.2);
+	// an empty value is tolerated, multiple values are not.
+	te := h["Te"]
+	if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
+		return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
+	}
+	return nil
+}
+
+// new400Handler returns a handler that unconditionally responds with
+// 400 Bad Request, using err's message as the response body.
+func new400Handler(err error) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+	}
+}
+
+// ValidTrailerHeader reports whether name is a valid header field name to appear
+// in trailers.
+// See: http://tools.ietf.org/html/rfc7230#section-4.1.2
+func ValidTrailerHeader(name string) bool {
+	// Canonicalize first so the "If-" prefix test and the badTrailer lookup
+	// are case-insensitive with respect to the caller's input.
+	name = http.CanonicalHeaderKey(name)
+	if strings.HasPrefix(name, "If-") || badTrailer[name] {
+		return false
+	}
+	return true
+}
+
+// badTrailer is the set of canonicalized header field names that must not
+// appear in trailers: message-framing, routing, authentication, caching and
+// content-metadata fields. See RFC 7230 section 4.1.2.
+var badTrailer = map[string]bool{
+	"Authorization":       true,
+	"Cache-Control":       true,
+	"Connection":          true,
+	"Content-Encoding":    true,
+	"Content-Length":      true,
+	"Content-Range":       true,
+	"Content-Type":        true,
+	"Expect":              true,
+	"Host":                true,
+	"Keep-Alive":          true,
+	"Max-Forwards":        true,
+	"Pragma":              true,
+	"Proxy-Authenticate":  true,
+	"Proxy-Authorization": true,
+	"Proxy-Connection":    true,
+	"Range":               true,
+	"Realm":               true,
+	"Te":                  true,
+	"Trailer":             true,
+	"Transfer-Encoding":   true,
+	"Www-Authenticate":    true,
+}
+
+// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
+// disabled. See comments on h1ServerShutdownChan above for why
+// the code is written this way.
+//
+// NOTE(review): this probes for the unexported doKeepAlives method via an
+// interface assertion. An interface with an unexported method can normally
+// only be satisfied by types in the package that declares the interface, so
+// outside the std-library bundle this likely always takes the false branch —
+// confirm against the h2_bundle.go packaging this file is copied into.
+func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
+	var x interface{} = hs
+	type I interface {
+		doKeepAlives() bool
+	}
+	if hs, ok := x.(I); ok {
+		return !hs.doKeepAlives()
+	}
+	// Method not visible: assume keep-alives are enabled.
+	return false
+}
diff --git a/vendor/golang.org/x/net/http2/server_push_test.go b/vendor/golang.org/x/net/http2/server_push_test.go
new file mode 100644
index 0000000..918fd30
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/server_push_test.go
@@ -0,0 +1,521 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package http2
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+)
+
+// TestServer_Push_Success exercises the server-push happy path: one client
+// request to "/" whose handler pushes two responses (a GET via absolute URL
+// and a HEAD via a path). It then verifies both the promised requests as seen
+// by the handler and the exact frame sequence on the wire: two PUSH_PROMISE
+// frames on stream 1, then HEADERS/DATA on streams 1, 2 and 4.
+func TestServer_Push_Success(t *testing.T) {
+	const (
+		mainBody   = "<html>index page</html>"
+		pushedBody = "<html>pushed page</html>"
+		userAgent  = "testagent"
+		cookie     = "testcookie"
+	)
+
+	var stURL string
+	// checkPromisedReq verifies that a pushed request delivered to the handler
+	// has the expected method, headers, host, and an empty (non-nil) body.
+	checkPromisedReq := func(r *http.Request, wantMethod string, wantH http.Header) error {
+		if got, want := r.Method, wantMethod; got != want {
+			return fmt.Errorf("promised Req.Method=%q, want %q", got, want)
+		}
+		if got, want := r.Header, wantH; !reflect.DeepEqual(got, want) {
+			return fmt.Errorf("promised Req.Header=%q, want %q", got, want)
+		}
+		if got, want := "https://"+r.Host, stURL; got != want {
+			return fmt.Errorf("promised Req.Host=%q, want %q", got, want)
+		}
+		if r.Body == nil {
+			return fmt.Errorf("nil Body")
+		}
+		if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 {
+			return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err)
+		}
+		return nil
+	}
+
+	// One result per handler invocation: "/", "/pushed?get", "/pushed?head".
+	errc := make(chan error, 3)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		switch r.URL.RequestURI() {
+		case "/":
+			// Push "/pushed?get" as a GET request, using an absolute URL.
+			opt := &http.PushOptions{
+				Header: http.Header{
+					"User-Agent": {userAgent},
+				},
+			}
+			if err := w.(http.Pusher).Push(stURL+"/pushed?get", opt); err != nil {
+				errc <- fmt.Errorf("error pushing /pushed?get: %v", err)
+				return
+			}
+			// Push "/pushed?head" as a HEAD request, using a path.
+			opt = &http.PushOptions{
+				Method: "HEAD",
+				Header: http.Header{
+					"User-Agent": {userAgent},
+					"Cookie":     {cookie},
+				},
+			}
+			if err := w.(http.Pusher).Push("/pushed?head", opt); err != nil {
+				errc <- fmt.Errorf("error pushing /pushed?head: %v", err)
+				return
+			}
+			w.Header().Set("Content-Type", "text/html")
+			w.Header().Set("Content-Length", strconv.Itoa(len(mainBody)))
+			w.WriteHeader(200)
+			io.WriteString(w, mainBody)
+			errc <- nil
+
+		case "/pushed?get":
+			wantH := http.Header{}
+			wantH.Set("User-Agent", userAgent)
+			if err := checkPromisedReq(r, "GET", wantH); err != nil {
+				errc <- fmt.Errorf("/pushed?get: %v", err)
+				return
+			}
+			w.Header().Set("Content-Type", "text/html")
+			w.Header().Set("Content-Length", strconv.Itoa(len(pushedBody)))
+			w.WriteHeader(200)
+			io.WriteString(w, pushedBody)
+			errc <- nil
+
+		case "/pushed?head":
+			wantH := http.Header{}
+			wantH.Set("User-Agent", userAgent)
+			wantH.Set("Cookie", cookie)
+			if err := checkPromisedReq(r, "HEAD", wantH); err != nil {
+				errc <- fmt.Errorf("/pushed?head: %v", err)
+				return
+			}
+			w.WriteHeader(204)
+			errc <- nil
+
+		default:
+			errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI())
+		}
+	})
+	stURL = st.ts.URL
+
+	// Send one request, which should push two responses.
+	st.greet()
+	getSlash(st)
+	// Wait for all three handlers ("/" plus the two pushed requests).
+	for k := 0; k < 3; k++ {
+		select {
+		case <-time.After(2 * time.Second):
+			t.Errorf("timeout waiting for handler %d to finish", k)
+		case err := <-errc:
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+
+	// Frame-level checkers for the wire-format verification below.
+	checkPushPromise := func(f Frame, promiseID uint32, wantH [][2]string) error {
+		pp, ok := f.(*PushPromiseFrame)
+		if !ok {
+			return fmt.Errorf("got a %T; want *PushPromiseFrame", f)
+		}
+		if !pp.HeadersEnded() {
+			return fmt.Errorf("want END_HEADERS flag in PushPromiseFrame")
+		}
+		if got, want := pp.PromiseID, promiseID; got != want {
+			return fmt.Errorf("got PromiseID %v; want %v", got, want)
+		}
+		gotH := st.decodeHeader(pp.HeaderBlockFragment())
+		if !reflect.DeepEqual(gotH, wantH) {
+			return fmt.Errorf("got promised headers %v; want %v", gotH, wantH)
+		}
+		return nil
+	}
+	checkHeaders := func(f Frame, wantH [][2]string) error {
+		hf, ok := f.(*HeadersFrame)
+		if !ok {
+			return fmt.Errorf("got a %T; want *HeadersFrame", f)
+		}
+		gotH := st.decodeHeader(hf.HeaderBlockFragment())
+		if !reflect.DeepEqual(gotH, wantH) {
+			return fmt.Errorf("got response headers %v; want %v", gotH, wantH)
+		}
+		return nil
+	}
+	checkData := func(f Frame, wantData string) error {
+		df, ok := f.(*DataFrame)
+		if !ok {
+			return fmt.Errorf("got a %T; want *DataFrame", f)
+		}
+		if gotData := string(df.Data()); gotData != wantData {
+			return fmt.Errorf("got response data %q; want %q", gotData, wantData)
+		}
+		return nil
+	}
+
+	// Stream 1 has 2 PUSH_PROMISE + HEADERS + DATA
+	// Stream 2 has HEADERS + DATA
+	// Stream 4 has HEADERS
+	expected := map[uint32][]func(Frame) error{
+		1: {
+			func(f Frame) error {
+				return checkPushPromise(f, 2, [][2]string{
+					{":method", "GET"},
+					{":scheme", "https"},
+					{":authority", st.ts.Listener.Addr().String()},
+					{":path", "/pushed?get"},
+					{"user-agent", userAgent},
+				})
+			},
+			func(f Frame) error {
+				return checkPushPromise(f, 4, [][2]string{
+					{":method", "HEAD"},
+					{":scheme", "https"},
+					{":authority", st.ts.Listener.Addr().String()},
+					{":path", "/pushed?head"},
+					{"cookie", cookie},
+					{"user-agent", userAgent},
+				})
+			},
+			func(f Frame) error {
+				return checkHeaders(f, [][2]string{
+					{":status", "200"},
+					{"content-type", "text/html"},
+					{"content-length", strconv.Itoa(len(mainBody))},
+				})
+			},
+			func(f Frame) error {
+				return checkData(f, mainBody)
+			},
+		},
+		2: {
+			func(f Frame) error {
+				return checkHeaders(f, [][2]string{
+					{":status", "200"},
+					{"content-type", "text/html"},
+					{"content-length", strconv.Itoa(len(pushedBody))},
+				})
+			},
+			func(f Frame) error {
+				return checkData(f, pushedBody)
+			},
+		},
+		4: {
+			func(f Frame) error {
+				return checkHeaders(f, [][2]string{
+					{":status", "204"},
+				})
+			},
+		},
+	}
+
+	// Frames within a stream must arrive in order, but streams may interleave;
+	// consume frames until every per-stream expectation list is drained.
+	consumed := map[uint32]int{}
+	for k := 0; len(expected) > 0; k++ {
+		f, err := st.readFrame()
+		if err != nil {
+			for id, left := range expected {
+				t.Errorf("stream %d: missing %d frames", id, len(left))
+			}
+			t.Fatalf("readFrame %d: %v", k, err)
+		}
+		id := f.Header().StreamID
+		label := fmt.Sprintf("stream %d, frame %d", id, consumed[id])
+		if len(expected[id]) == 0 {
+			t.Fatalf("%s: unexpected frame %#+v", label, f)
+		}
+		check := expected[id][0]
+		expected[id] = expected[id][1:]
+		if len(expected[id]) == 0 {
+			delete(expected, id)
+		}
+		if err := check(f); err != nil {
+			t.Fatalf("%s: %v", label, err)
+		}
+		consumed[id]++
+	}
+}
+
+func TestServer_Push_SuccessNoRace(t *testing.T) {
+	// Regression test for issue #18326. Ensure the request handler can mutate
+	// pushed request headers without racing with the PUSH_PROMISE write.
+	// (startPush clones the headers before the handler starts; this test runs
+	// under -race to catch any regression in that cloning.)
+	errc := make(chan error, 2)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		switch r.URL.RequestURI() {
+		case "/":
+			opt := &http.PushOptions{
+				Header: http.Header{"User-Agent": {"testagent"}},
+			}
+			if err := w.(http.Pusher).Push("/pushed", opt); err != nil {
+				errc <- fmt.Errorf("error pushing: %v", err)
+				return
+			}
+			w.WriteHeader(200)
+			errc <- nil
+
+		case "/pushed":
+			// Update request header, ensure there is no race.
+			r.Header.Set("User-Agent", "newagent")
+			r.Header.Set("Cookie", "cookie")
+			w.WriteHeader(200)
+			errc <- nil
+
+		default:
+			errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI())
+		}
+	})
+
+	// Send one request, which should push one response.
+	st.greet()
+	getSlash(st)
+	for k := 0; k < 2; k++ {
+		select {
+		case <-time.After(2 * time.Second):
+			t.Errorf("timeout waiting for handler %d to finish", k)
+		case err := <-errc:
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+}
+
+// TestServer_Push_RejectRecursivePush verifies that a handler serving a
+// pushed request cannot itself push: its Push call must fail with
+// ErrRecursivePush.
+func TestServer_Push_RejectRecursivePush(t *testing.T) {
+	// Expect two requests, but might get three if there's a bug and the second push succeeds.
+	errc := make(chan error, 3)
+	handler := func(w http.ResponseWriter, r *http.Request) error {
+		baseURL := "https://" + r.Host
+		switch r.URL.Path {
+		case "/":
+			if err := w.(http.Pusher).Push(baseURL+"/push1", nil); err != nil {
+				return fmt.Errorf("first Push()=%v, want nil", err)
+			}
+			return nil
+
+		case "/push1":
+			// This handler is serving a pushed request; a nested push must fail.
+			if got, want := w.(http.Pusher).Push(baseURL+"/push2", nil), ErrRecursivePush; got != want {
+				return fmt.Errorf("Push()=%v, want %v", got, want)
+			}
+			return nil
+
+		default:
+			return fmt.Errorf("unexpected path: %q", r.URL.Path)
+		}
+	}
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		errc <- handler(w, r)
+	})
+	defer st.Close()
+	st.greet()
+	getSlash(st)
+	if err := <-errc; err != nil {
+		t.Errorf("First request failed: %v", err)
+	}
+	if err := <-errc; err != nil {
+		t.Errorf("Second request failed: %v", err)
+	}
+}
+
+// testServer_Push_RejectSingleRequest is the shared driver for the Reject*
+// tests below: it starts a server whose handler calls doPush once, optionally
+// applies extra client SETTINGS first, and then verifies that no PUSH_PROMISE
+// frame ever reaches the wire (the response HEADERS must end the stream).
+func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, *http.Request) error, settings ...Setting) {
+	// Expect one request, but might get two if there's a bug and the push succeeds.
+	errc := make(chan error, 2)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		errc <- doPush(w.(http.Pusher), r)
+	})
+	defer st.Close()
+	st.greet()
+	if err := st.fr.WriteSettings(settings...); err != nil {
+		st.t.Fatalf("WriteSettings: %v", err)
+	}
+	st.wantSettingsAck()
+	getSlash(st)
+	if err := <-errc; err != nil {
+		t.Error(err)
+	}
+	// Should not get a PUSH_PROMISE frame.
+	hf := st.wantHeaders()
+	if !hf.StreamEnded() {
+		t.Error("stream should end after headers")
+	}
+}
+
+// The client disabled push via SETTINGS_ENABLE_PUSH=0, so Push must fail
+// with http.ErrNotSupported.
+func TestServer_Push_RejectIfDisabled(t *testing.T) {
+	testServer_Push_RejectSingleRequest(t,
+		func(p http.Pusher, r *http.Request) error {
+			if got, want := p.Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want {
+				return fmt.Errorf("Push()=%v, want %v", got, want)
+			}
+			return nil
+		},
+		Setting{SettingEnablePush, 0})
+}
+
+// SETTINGS_MAX_CONCURRENT_STREAMS=0 leaves no room for a pushed stream, so
+// Push must fail with ErrPushLimitReached.
+func TestServer_Push_RejectWhenNoConcurrentStreams(t *testing.T) {
+	testServer_Push_RejectSingleRequest(t,
+		func(p http.Pusher, r *http.Request) error {
+			if got, want := p.Push("https://"+r.Host+"/pushed", nil), ErrPushLimitReached; got != want {
+				return fmt.Errorf("Push()=%v, want %v", got, want)
+			}
+			return nil
+		},
+		Setting{SettingMaxConcurrentStreams, 0})
+}
+
+// The promised URL's scheme must match the original request (https here).
+func TestServer_Push_RejectWrongScheme(t *testing.T) {
+	testServer_Push_RejectSingleRequest(t,
+		func(p http.Pusher, r *http.Request) error {
+			if err := p.Push("http://"+r.Host+"/pushed", nil); err == nil {
+				return errors.New("Push() should have failed (push target URL is http)")
+			}
+			return nil
+		})
+}
+
+// "https:pushed" parses without a host; the promised URL must be absolute.
+func TestServer_Push_RejectMissingHost(t *testing.T) {
+	testServer_Push_RejectSingleRequest(t,
+		func(p http.Pusher, r *http.Request) error {
+			if err := p.Push("https:pushed", nil); err == nil {
+				return errors.New("Push() should have failed (push target URL missing host)")
+			}
+			return nil
+		})
+}
+
+// Relative (non-rooted) paths cannot be promised.
+func TestServer_Push_RejectRelativePath(t *testing.T) {
+	testServer_Push_RejectSingleRequest(t,
+		func(p http.Pusher, r *http.Request) error {
+			if err := p.Push("../test", nil); err == nil {
+				return errors.New("Push() should have failed (push target is a relative path)")
+			}
+			return nil
+		})
+}
+
+// Promised requests must be GET or HEAD (RFC 7540 section 8.2).
+func TestServer_Push_RejectForbiddenMethod(t *testing.T) {
+	testServer_Push_RejectSingleRequest(t,
+		func(p http.Pusher, r *http.Request) error {
+			if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Method: "POST"}); err == nil {
+				return errors.New("Push() should have failed (cannot promise a POST)")
+			}
+			return nil
+		})
+}
+
+// Body-describing, connection-specific, and pseudo-headers are all rejected
+// in promised request headers.
+func TestServer_Push_RejectForbiddenHeader(t *testing.T) {
+	testServer_Push_RejectSingleRequest(t,
+		func(p http.Pusher, r *http.Request) error {
+			header := http.Header{
+				"Content-Length":   {"10"},
+				"Content-Encoding": {"gzip"},
+				"Trailer":          {"Foo"},
+				"Te":               {"trailers"},
+				"Host":             {"test.com"},
+				":authority":       {"test.com"},
+			}
+			if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Header: header}); err == nil {
+				return errors.New("Push() should have failed (forbidden headers)")
+			}
+			return nil
+		})
+}
+
+// TestServer_Push_StateTransitions checks the promised stream's lifecycle as
+// observed from the serve loop: idle before the push, half-closed (remote)
+// after the PUSH_PROMISE is written, and closed once the pushed response ends.
+func TestServer_Push_StateTransitions(t *testing.T) {
+	const body = "foo"
+
+	gotPromise := make(chan bool)
+	finishedPush := make(chan bool)
+
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		switch r.URL.RequestURI() {
+		case "/":
+			if err := w.(http.Pusher).Push("/pushed", nil); err != nil {
+				t.Errorf("Push error: %v", err)
+			}
+			// Don't finish this request until the push finishes so we don't
+			// nondeterministically interleave output frames with the push.
+			<-finishedPush
+		case "/pushed":
+			<-gotPromise
+		}
+		w.Header().Set("Content-Type", "text/html")
+		w.Header().Set("Content-Length", strconv.Itoa(len(body)))
+		w.WriteHeader(200)
+		io.WriteString(w, body)
+	})
+	defer st.Close()
+
+	st.greet()
+	if st.stream(2) != nil {
+		t.Fatal("stream 2 should be empty")
+	}
+	if got, want := st.streamState(2), stateIdle; got != want {
+		t.Fatalf("streamState(2)=%v, want %v", got, want)
+	}
+	getSlash(st)
+	// After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote.
+	st.wantPushPromise()
+	if got, want := st.streamState(2), stateHalfClosedRemote; got != want {
+		t.Fatalf("streamState(2)=%v, want %v", got, want)
+	}
+	// We stall the HTTP handler for "/pushed" until the above check. If we don't
+	// stall the handler, then the handler might write HEADERS and DATA and finish
+	// the stream before we check st.streamState(2) -- should that happen, we'll
+	// see stateClosed and fail the above check.
+	close(gotPromise)
+	st.wantHeaders()
+	if df := st.wantData(); !df.StreamEnded() {
+		t.Fatal("expected END_STREAM flag on DATA")
+	}
+	if got, want := st.streamState(2), stateClosed; got != want {
+		t.Fatalf("streamState(2)=%v, want %v", got, want)
+	}
+	close(finishedPush)
+}
+
+// TestServer_Push_RejectAfterGoAway verifies that once the client's GOAWAY
+// has been processed (which clears sc.pushEnabled), Push fails with
+// http.ErrNotSupported. The handler is held back until a function injected
+// into the serve loop observes pushEnabled == false.
+func TestServer_Push_RejectAfterGoAway(t *testing.T) {
+	var readyOnce sync.Once
+	ready := make(chan struct{})
+	errc := make(chan error, 2)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		select {
+		case <-ready:
+		case <-time.After(5 * time.Second):
+			errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed")
+		}
+		if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want {
+			errc <- fmt.Errorf("Push()=%v, want %v", got, want)
+		}
+		errc <- nil
+	})
+	defer st.Close()
+	st.greet()
+	getSlash(st)
+
+	// Send GOAWAY and wait for it to be processed.
+	st.fr.WriteGoAway(1, ErrCodeNo, nil)
+	go func() {
+		// Poll the serve loop: each injected function runs on the serve
+		// goroutine, where reading sc.pushEnabled is race-free.
+		for {
+			select {
+			case <-ready:
+				return
+			default:
+			}
+			st.sc.serveMsgCh <- func(loopNum int) {
+				if !st.sc.pushEnabled {
+					readyOnce.Do(func() { close(ready) })
+				}
+			}
+		}
+	}()
+	if err := <-errc; err != nil {
+		t.Error(err)
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/server_test.go b/vendor/golang.org/x/net/http2/server_test.go
new file mode 100644
index 0000000..91db6a2
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/server_test.go
@@ -0,0 +1,3728 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "os/exec"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+// stderrVerbose mirrors test log output to stderr, unbuffered, when set.
+var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered")
+
+// stderrv returns the writer verbose output should be mirrored to:
+// os.Stderr when -stderr_verbose is set, otherwise a discard writer.
+func stderrv() io.Writer {
+	if *stderrVerbose {
+		return os.Stderr
+	}
+
+	return ioutil.Discard
+}
+
+// serverTester drives an HTTP/2 server under test. It owns the httptest
+// server, a raw TLS client conn with a Framer over it, and HPACK
+// encoder/decoder state for writing and reading header blocks by hand.
+type serverTester struct {
+	cc             net.Conn // client conn
+	t              testing.TB
+	ts             *httptest.Server
+	fr             *Framer
+	serverLogBuf   bytes.Buffer // logger for httptest.Server
+	logFilter      []string     // substrings to filter out
+	scMu           sync.Mutex   // guards sc
+	sc             *serverConn
+	hpackDec       *hpack.Decoder
+	decodedHeaders [][2]string
+
+	// If http2debug!=2, then we capture Frame debug logs that will be written
+	// to t.Log after a test fails. The read and write logs use separate locks
+	// and buffers so we don't accidentally introduce synchronization between
+	// the read and write goroutines, which may hide data races.
+	frameReadLogMu  sync.Mutex
+	frameReadLogBuf bytes.Buffer
+	frameWriteLogMu  sync.Mutex
+	frameWriteLogBuf bytes.Buffer
+
+	// writing headers:
+	headerBuf bytes.Buffer
+	hpackEnc  *hpack.Encoder
+}
+
+func init() {
+	// Test-only setup: create the mutex guarding the panic hook, and shrink
+	// the GOAWAY grace period so shutdown-related tests finish quickly.
+	testHookOnPanicMu = new(sync.Mutex)
+	goAwayTimeout = 25 * time.Millisecond
+}
+
+// resetHooks clears the per-test panic hook so it cannot leak between tests;
+// newServerTester calls it at the start of every test.
+func resetHooks() {
+	testHookOnPanicMu.Lock()
+	testHookOnPanic = nil
+	testHookOnPanicMu.Unlock()
+}
+
+// serverTesterOpt is a string-typed option flag for newServerTester.
+type serverTesterOpt string
+
+var optOnlyServer = serverTesterOpt("only_server")        // start the server but no client conn
+var optQuiet = serverTesterOpt("quiet_logging")           // discard server error logs
+var optFramerReuseFrames = serverTesterOpt("frame_reuse_frames") // enable Framer frame reuse
+
+// newServerTester starts an HTTPS test server running handler and (unless
+// optOnlyServer is given) dials it, returning a serverTester wired up with a
+// Framer and HPACK state. opts may be serverTesterOpt values or functions
+// mutating the tls.Config, httptest.Server, http2 Server, or ConnState hook.
+func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester {
+	resetHooks()
+
+	ts := httptest.NewUnstartedServer(handler)
+
+	tlsConfig := &tls.Config{
+		InsecureSkipVerify: true,
+		NextProtos:         []string{NextProtoTLS},
+	}
+
+	var onlyServer, quiet, framerReuseFrames bool
+	h2server := new(Server)
+	// Options dispatch on dynamic type; unknown types are a test bug.
+	for _, opt := range opts {
+		switch v := opt.(type) {
+		case func(*tls.Config):
+			v(tlsConfig)
+		case func(*httptest.Server):
+			v(ts)
+		case func(*Server):
+			v(h2server)
+		case serverTesterOpt:
+			switch v {
+			case optOnlyServer:
+				onlyServer = true
+			case optQuiet:
+				quiet = true
+			case optFramerReuseFrames:
+				framerReuseFrames = true
+			}
+		case func(net.Conn, http.ConnState):
+			ts.Config.ConnState = v
+		default:
+			t.Fatalf("unknown newServerTester option type %T", v)
+		}
+	}
+
+	ConfigureServer(ts.Config, h2server)
+
+	st := &serverTester{
+		t:  t,
+		ts: ts,
+	}
+	st.hpackEnc = hpack.NewEncoder(&st.headerBuf)
+	st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField)
+
+	ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
+	if quiet {
+		ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
+	} else {
+		ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), "", log.LstdFlags)
+	}
+	ts.StartTLS()
+
+	if VerboseLogs {
+		t.Logf("Running test server at: %s", ts.URL)
+	}
+	// Capture the server-side conn as soon as the server creates it.
+	testHookGetServerConn = func(v *serverConn) {
+		st.scMu.Lock()
+		defer st.scMu.Unlock()
+		st.sc = v
+	}
+	log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st}))
+	if !onlyServer {
+		cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig)
+		if err != nil {
+			t.Fatal(err)
+		}
+		st.cc = cc
+		st.fr = NewFramer(cc, cc)
+		if framerReuseFrames {
+			st.fr.SetReuseFrames()
+		}
+		// Unless frame logging is globally enabled, buffer per-direction frame
+		// logs so they can be dumped when (and only when) the test fails.
+		if !logFrameReads && !logFrameWrites {
+			st.fr.debugReadLoggerf = func(m string, v ...interface{}) {
+				m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n"
+				st.frameReadLogMu.Lock()
+				fmt.Fprintf(&st.frameReadLogBuf, m, v...)
+				st.frameReadLogMu.Unlock()
+			}
+			st.fr.debugWriteLoggerf = func(m string, v ...interface{}) {
+				m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n"
+				st.frameWriteLogMu.Lock()
+				fmt.Fprintf(&st.frameWriteLogBuf, m, v...)
+				st.frameWriteLogMu.Unlock()
+			}
+			st.fr.logReads = true
+			st.fr.logWrites = true
+		}
+	}
+	return st
+}
+
+// closeConn force-closes the server side of the connection under test.
+func (st *serverTester) closeConn() {
+	st.scMu.Lock()
+	defer st.scMu.Unlock()
+	st.sc.conn.Close()
+}
+
+// addLogFilter registers a substring; log lines containing it are filtered out.
+func (st *serverTester) addLogFilter(phrase string) {
+	st.logFilter = append(st.logFilter, phrase)
+}
+
+// stream fetches the server's stream with the given id, synchronizing with
+// the serve goroutine by running the map lookup on serveMsgCh.
+func (st *serverTester) stream(id uint32) *stream {
+	ch := make(chan *stream, 1)
+	st.sc.serveMsgCh <- func(int) {
+		ch <- st.sc.streams[id]
+	}
+	return <-ch
+}
+
+// streamState reports the server's view of stream id's state, again queried
+// on the serve goroutine for race-free access.
+func (st *serverTester) streamState(id uint32) streamState {
+	ch := make(chan streamState, 1)
+	st.sc.serveMsgCh <- func(int) {
+		state, _ := st.sc.state(id)
+		ch <- state
+	}
+	return <-ch
+}
+
+// loopNum reports how many times this conn's select loop has gone around.
+func (st *serverTester) loopNum() int {
+	lastc := make(chan int, 1)
+	st.sc.serveMsgCh <- func(loopNum int) {
+		lastc <- loopNum
+	}
+	return <-lastc
+}
+
+// awaitIdle heuristically awaits for the server conn's select loop to be idle.
+// The heuristic is that the server connection's serve loop must schedule
+// 50 times in a row without any channel sends or receives occurring.
+func (st *serverTester) awaitIdle() {
+	remain := 50
+	last := st.loopNum()
+	for remain > 0 {
+		n := st.loopNum()
+		// Exactly one iteration since the last probe means only our own
+		// message was handled; any other activity resets the countdown.
+		if n == last+1 {
+			remain--
+		} else {
+			remain = 50
+		}
+		last = n
+	}
+}
+
+// Close tears down the tester: on failure it first dumps the buffered frame
+// logs and force-closes the client conn, then shuts down the test server,
+// closes the client conn, and restores the default log output.
+func (st *serverTester) Close() {
+	if st.t.Failed() {
+		st.frameReadLogMu.Lock()
+		if st.frameReadLogBuf.Len() > 0 {
+			st.t.Logf("Framer read log:\n%s", st.frameReadLogBuf.String())
+		}
+		st.frameReadLogMu.Unlock()
+
+		st.frameWriteLogMu.Lock()
+		if st.frameWriteLogBuf.Len() > 0 {
+			st.t.Logf("Framer write log:\n%s", st.frameWriteLogBuf.String())
+		}
+		st.frameWriteLogMu.Unlock()
+
+		// If we failed already (and are likely in a Fatal,
+		// unwinding), force close the connection, so the
+		// httptest.Server doesn't wait forever for the conn
+		// to close.
+		if st.cc != nil {
+			st.cc.Close()
+		}
+	}
+	st.ts.Close()
+	if st.cc != nil {
+		st.cc.Close()
+	}
+	log.SetOutput(os.Stderr)
+}
+
+// greet initiates the client's HTTP/2 connection into a state where
+// frames may be sent.
+func (st *serverTester) greet() {
+	st.greetAndCheckSettings(func(Setting) error { return nil })
+}
+
+// greetAndCheckSettings performs the client-side connection preface, applies
+// checkSetting to each setting the server sends, ACKs them, and then consumes
+// the server's initial SETTINGS ACK and connection-level WINDOW_UPDATE.
+func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) {
+	st.writePreface()
+	st.writeInitialSettings()
+	st.wantSettings().ForeachSetting(checkSetting)
+	st.writeSettingsAck()
+
+	// The initial WINDOW_UPDATE and SETTINGS ACK can come in any order.
+	var gotSettingsAck bool
+	var gotWindowUpdate bool
+
+	for i := 0; i < 2; i++ {
+		f, err := st.readFrame()
+		if err != nil {
+			st.t.Fatal(err)
+		}
+		switch f := f.(type) {
+		case *SettingsFrame:
+			if !f.Header().Flags.Has(FlagSettingsAck) {
+				st.t.Fatal("Settings Frame didn't have ACK set")
+			}
+			gotSettingsAck = true
+
+		case *WindowUpdateFrame:
+			if f.FrameHeader.StreamID != 0 {
+				st.t.Fatalf("WindowUpdate StreamID = %d; want 0", f.FrameHeader.StreamID)
+			}
+			// The server grows the connection window from the protocol default
+			// to its configured initial size in one update.
+			incr := uint32((&Server{}).initialConnRecvWindowSize() - initialWindowSize)
+			if f.Increment != incr {
+				st.t.Fatalf("WindowUpdate increment = %d; want %d", f.Increment, incr)
+			}
+			gotWindowUpdate = true
+
+		default:
+			st.t.Fatalf("Wanting a settings ACK or window update, received a %T", f)
+		}
+	}
+
+	if !gotSettingsAck {
+		st.t.Fatalf("Didn't get a settings ACK")
+	}
+	if !gotWindowUpdate {
+		st.t.Fatalf("Didn't get a window update")
+	}
+}
+
+// writePreface sends the HTTP/2 client connection preface bytes, verifying
+// the write completes fully.
+func (st *serverTester) writePreface() {
+	n, err := st.cc.Write(clientPreface)
+	if err != nil {
+		st.t.Fatalf("Error writing client preface: %v", err)
+	}
+	if n != len(clientPreface) {
+		st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface))
+	}
+}
+
+// writeInitialSettings sends an empty client SETTINGS frame.
+func (st *serverTester) writeInitialSettings() {
+	if err := st.fr.WriteSettings(); err != nil {
+		st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err)
+	}
+}
+
+// writeSettingsAck acknowledges the server's SETTINGS frame.
+func (st *serverTester) writeSettingsAck() {
+	if err := st.fr.WriteSettingsAck(); err != nil {
+		st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err)
+	}
+}
+
+// writeHeaders sends a HEADERS frame, failing the test on error.
+func (st *serverTester) writeHeaders(p HeadersFrameParam) {
+	if err := st.fr.WriteHeaders(p); err != nil {
+		st.t.Fatalf("Error writing HEADERS: %v", err)
+	}
+}
+
+// writePriority sends a PRIORITY frame for stream id, failing the test on error.
+func (st *serverTester) writePriority(id uint32, p PriorityParam) {
+	if err := st.fr.WritePriority(id, p); err != nil {
+		st.t.Fatalf("Error writing PRIORITY: %v", err)
+	}
+}
+
+// encodeHeaderField HPACK-encodes one k/v field into st.headerBuf.
+func (st *serverTester) encodeHeaderField(k, v string) {
+	err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
+	if err != nil {
+		st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
+	}
+}
+
+// encodeHeaderRaw is the magic-free version of encodeHeader.
+// It takes 0 or more (k, v) pairs and encodes them.
+// Unlike encodeHeader, no default pseudo-headers are inserted.
+func (st *serverTester) encodeHeaderRaw(headers ...string) []byte {
+	if len(headers)%2 == 1 {
+		panic("odd number of kv args")
+	}
+	st.headerBuf.Reset()
+	for len(headers) > 0 {
+		k, v := headers[0], headers[1]
+		st.encodeHeaderField(k, v)
+		headers = headers[2:]
+	}
+	return st.headerBuf.Bytes()
+}
+
+// encodeHeader encodes headers and returns their HPACK bytes. headers
+// must contain an even number of key/value pairs. There may be
+// multiple pairs for keys (e.g. "cookie"). The :method, :path, and
+// :scheme headers default to GET, / and https. The :authority header
+// defaults to st.ts.Listener.Addr().
+func (st *serverTester) encodeHeader(headers ...string) []byte {
+	if len(headers)%2 == 1 {
+		panic("odd number of kv args")
+	}
+
+	st.headerBuf.Reset()
+	defaultAuthority := st.ts.Listener.Addr().String()
+
+	if len(headers) == 0 {
+		// Fast path, mostly for benchmarks, so test code doesn't pollute
+		// profiles when we're looking to improve server allocations.
+		st.encodeHeaderField(":method", "GET")
+		st.encodeHeaderField(":scheme", "https")
+		st.encodeHeaderField(":authority", defaultAuthority)
+		st.encodeHeaderField(":path", "/")
+		return st.headerBuf.Bytes()
+	}
+
+	if len(headers) == 2 && headers[0] == ":method" {
+		// Another fast path for benchmarks.
+		st.encodeHeaderField(":method", headers[1])
+		st.encodeHeaderField(":scheme", "https")
+		st.encodeHeaderField(":authority", defaultAuthority)
+		st.encodeHeaderField(":path", "/")
+		return st.headerBuf.Bytes()
+	}
+
+	// General case: seed the four pseudo-headers with defaults, then fold in
+	// the caller's pairs, preserving first-seen key order in keys.
+	pseudoCount := map[string]int{}
+	keys := []string{":method", ":scheme", ":authority", ":path"}
+	vals := map[string][]string{
+		":method":    {"GET"},
+		":scheme":    {"https"},
+		":authority": {defaultAuthority},
+		":path":      {"/"},
+	}
+	for len(headers) > 0 {
+		k, v := headers[0], headers[1]
+		headers = headers[2:]
+		if _, ok := vals[k]; !ok {
+			keys = append(keys, k)
+		}
+		if strings.HasPrefix(k, ":") {
+			// The first value for a pseudo-header replaces its default.
+			pseudoCount[k]++
+			if pseudoCount[k] == 1 {
+				vals[k] = []string{v}
+			} else {
+				// Allows testing of invalid headers w/ dup pseudo fields.
+				vals[k] = append(vals[k], v)
+			}
+		} else {
+			vals[k] = append(vals[k], v)
+		}
+	}
+	for _, k := range keys {
+		for _, v := range vals[k] {
+			st.encodeHeaderField(k, v)
+		}
+	}
+	return st.headerBuf.Bytes()
+}
+
+// bodylessReq1 writes a HEADERS frames with StreamID 1 and EndStream and EndHeaders set.
+func (st *serverTester) bodylessReq1(headers ...string) {
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1, // clients send odd numbers
+		BlockFragment: st.encodeHeader(headers...),
+		EndStream:     true,
+		EndHeaders:    true,
+	})
+}
+
+// writeData sends a DATA frame on streamID, failing the test on error.
+func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) {
+	if err := st.fr.WriteData(streamID, endStream, data); err != nil {
+		st.t.Fatalf("Error writing DATA: %v", err)
+	}
+}
+
+// writeDataPadded sends a padded DATA frame on streamID, failing the test on error.
+func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) {
+	if err := st.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil {
+		st.t.Fatalf("Error writing DATA: %v", err)
+	}
+}
+
+// readFrameTimeout reads one frame from fr in a background goroutine,
+// returning an error if neither a frame nor a read error arrives within wait.
+func readFrameTimeout(fr *Framer, wait time.Duration) (Frame, error) {
+	ch := make(chan interface{}, 1)
+	go func() {
+		// This fr (the Frame result) intentionally shadows the outer *Framer.
+		fr, err := fr.ReadFrame()
+		if err != nil {
+			ch <- err
+		} else {
+			ch <- fr
+		}
+	}()
+	t := time.NewTimer(wait)
+	select {
+	case v := <-ch:
+		t.Stop()
+		if fr, ok := v.(Frame); ok {
+			return fr, nil
+		}
+		return nil, v.(error)
+	case <-t.C:
+		// The reader goroutine may still be blocked in ReadFrame; the
+		// buffered channel lets it finish without leaking when it returns.
+		return nil, errors.New("timeout waiting for frame")
+	}
+}
+
+// readFrame reads the next frame from the server with a 2-second timeout.
+func (st *serverTester) readFrame() (Frame, error) {
+	return readFrameTimeout(st.fr, 2*time.Second)
+}
+
+// wantHeaders reads the next frame and fails the test unless it is a HEADERS frame.
+func (st *serverTester) wantHeaders() *HeadersFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a HEADERS frame: %v", err)
+	}
+	hf, ok := f.(*HeadersFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *HeadersFrame", f)
+	}
+	return hf
+}
+
+// wantContinuation reads the next frame and fails the test unless it is a CONTINUATION frame.
+func (st *serverTester) wantContinuation() *ContinuationFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err)
+	}
+	cf, ok := f.(*ContinuationFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *ContinuationFrame", f)
+	}
+	return cf
+}
+
+// wantData reads the next frame and fails the test unless it is a DATA frame.
+func (st *serverTester) wantData() *DataFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a DATA frame: %v", err)
+	}
+	df, ok := f.(*DataFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *DataFrame", f)
+	}
+	return df
+}
+
+// wantSettings reads the next frame and fails the test unless it is a SETTINGS frame.
+func (st *serverTester) wantSettings() *SettingsFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err)
+	}
+	sf, ok := f.(*SettingsFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *SettingsFrame", f)
+	}
+	return sf
+}
+
+// wantPing reads the next frame and fails the test unless it is a PING frame.
+func (st *serverTester) wantPing() *PingFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a PING frame: %v", err)
+	}
+	pf, ok := f.(*PingFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *PingFrame", f)
+	}
+	return pf
+}
+
+// wantGoAway reads the next frame and fails the test unless it is a GOAWAY frame.
+func (st *serverTester) wantGoAway() *GoAwayFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err)
+	}
+	gf, ok := f.(*GoAwayFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *GoAwayFrame", f)
+	}
+	return gf
+}
+
+// wantRSTStream reads the next frame and fails the test unless it is an
+// RST_STREAM frame carrying the given stream ID and error code.
+func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting an RSTStream frame: %v", err)
+	}
+	rs, ok := f.(*RSTStreamFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *RSTStreamFrame", f)
+	}
+	if rs.FrameHeader.StreamID != streamID {
+		st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID)
+	}
+	if rs.ErrCode != errCode {
+		st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode)
+	}
+}
+
+// wantWindowUpdate reads the next frame and fails the test unless it is a
+// WINDOW_UPDATE frame for the given stream ID with the given increment.
+func (st *serverTester) wantWindowUpdate(streamID, incr uint32) {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err)
+	}
+	wu, ok := f.(*WindowUpdateFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *WindowUpdateFrame", f)
+	}
+	if wu.FrameHeader.StreamID != streamID {
+		st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID)
+	}
+	if wu.Increment != incr {
+		st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr)
+	}
+}
+
+// wantSettingsAck reads the next frame and fails the test unless it is a
+// SETTINGS frame with the ACK flag set.
+func (st *serverTester) wantSettingsAck() {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatal(err)
+	}
+	sf, ok := f.(*SettingsFrame)
+	if !ok {
+		st.t.Fatalf("Wanting a settings ACK, received a %T", f)
+	}
+	if !sf.Header().Flags.Has(FlagSettingsAck) {
+		st.t.Fatal("Settings Frame didn't have ACK set")
+	}
+}
+
+// wantPushPromise reads the next frame and fails the test unless it is a
+// PUSH_PROMISE frame, which it returns.
+func (st *serverTester) wantPushPromise() *PushPromiseFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatal(err)
+	}
+	ppf, ok := f.(*PushPromiseFrame)
+	if !ok {
+		// Report the type of the frame actually received (f); the original
+		// printed %T of ppf, which is always the nil *PushPromiseFrame from
+		// the failed type assertion and hides what was really read.
+		st.t.Fatalf("Wanted PushPromise, received %T", f)
+	}
+	return ppf
+}
+
+// TestServer verifies the basic request path: after the connection greeting,
+// a bodyless GET on stream 1 reaches the handler.
+func TestServer(t *testing.T) {
+	gotReq := make(chan bool, 1)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Foo", "Bar")
+		gotReq <- true
+	})
+	defer st.Close()
+
+	covers("3.5", `
+	The server connection preface consists of a potentially empty
+	SETTINGS frame ([SETTINGS]) that MUST be the first frame the
+	server sends in the HTTP/2 connection.
+	`)
+
+	st.greet()
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1, // clients send odd numbers
+		BlockFragment: st.encodeHeader(),
+		EndStream:     true, // no DATA frames
+		EndHeaders:    true,
+	})
+
+	select {
+	case <-gotReq:
+	case <-time.After(2 * time.Second):
+		t.Error("timeout waiting for request")
+	}
+}
+
+// TestServer_Request_Get checks the fields of the http.Request the server
+// constructs for a simple bodyless GET with one custom header.
+func TestServer_Request_Get(t *testing.T) {
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader("foo-bar", "some-value"),
+			EndStream:     true, // no DATA frames
+			EndHeaders:    true,
+		})
+	}, func(r *http.Request) {
+		if r.Method != "GET" {
+			t.Errorf("Method = %q; want GET", r.Method)
+		}
+		if r.URL.Path != "/" {
+			t.Errorf("URL.Path = %q; want /", r.URL.Path)
+		}
+		if r.ContentLength != 0 {
+			t.Errorf("ContentLength = %v; want 0", r.ContentLength)
+		}
+		if r.Close {
+			t.Error("Close = true; want false")
+		}
+		if !strings.Contains(r.RemoteAddr, ":") {
+			t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr)
+		}
+		if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 {
+			t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor)
+		}
+		wantHeader := http.Header{
+			"Foo-Bar": []string{"some-value"},
+		}
+		if !reflect.DeepEqual(r.Header, wantHeader) {
+			t.Errorf("Header = %#v; want %#v", r.Header, wantHeader)
+		}
+		if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 {
+			t.Errorf("Read = %d, %v; want 0, EOF", n, err)
+		}
+	})
+}
+
+// TestServer_Request_Get_PathSlashes verifies that a %2f escape in :path is
+// preserved verbatim in RequestURI but decoded in URL.Path.
+func TestServer_Request_Get_PathSlashes(t *testing.T) {
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":path", "/%2f/"),
+			EndStream:     true, // no DATA frames
+			EndHeaders:    true,
+		})
+	}, func(r *http.Request) {
+		if r.RequestURI != "/%2f/" {
+			t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI)
+		}
+		if r.URL.Path != "///" {
+			t.Errorf("URL.Path = %q; want ///", r.URL.Path)
+		}
+	})
+}
+
+// TODO: add a test with EndStream=true on the HEADERS but setting a
+// Content-Length anyway. Should we just omit it and force it to
+// zero?
+
+// A POST whose HEADERS sets END_STREAM and omits Content-Length is seen by the
+// handler as having ContentLength 0 and an immediately-EOF body.
+func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) {
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":method", "POST"),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	}, func(r *http.Request) {
+		if r.Method != "POST" {
+			t.Errorf("Method = %q; want POST", r.Method)
+		}
+		if r.ContentLength != 0 {
+			t.Errorf("ContentLength = %v; want 0", r.ContentLength)
+		}
+		if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 {
+			t.Errorf("Read = %d, %v; want 0, EOF", n, err)
+		}
+	})
+}
+
+// A POST announcing DATA frames but then sending an empty END_STREAM DATA
+// frame yields an empty body with unknown (-1) ContentLength.
+func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) {
+	testBodyContents(t, -1, "", func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":method", "POST"),
+			EndStream:     false, // to say DATA frames are coming
+			EndHeaders:    true,
+		})
+		st.writeData(1, true, nil) // just kidding. empty body.
+	})
+}
+
+// A POST body delivered in a single DATA frame is read back intact.
+func TestServer_Request_Post_Body_OneData(t *testing.T) {
+	const content = "Some content"
+	testBodyContents(t, -1, content, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":method", "POST"),
+			EndStream:     false, // to say DATA frames are coming
+			EndHeaders:    true,
+		})
+		st.writeData(1, true, []byte(content))
+	})
+}
+
+// A POST body split across two DATA frames is reassembled in order.
+func TestServer_Request_Post_Body_TwoData(t *testing.T) {
+	const content = "Some content"
+	testBodyContents(t, -1, content, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":method", "POST"),
+			EndStream:     false, // to say DATA frames are coming
+			EndHeaders:    true,
+		})
+		st.writeData(1, false, []byte(content[:5]))
+		st.writeData(1, true, []byte(content[5:]))
+	})
+}
+
+// A POST with a content-length header matching the DATA payload is accepted
+// and the declared length is surfaced on the request.
+func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) {
+	const content = "Some content"
+	testBodyContents(t, int64(len(content)), content, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID: 1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(
+				":method", "POST",
+				"content-length", strconv.Itoa(len(content)),
+			),
+			EndStream:  false, // to say DATA frames are coming
+			EndHeaders: true,
+		})
+		st.writeData(1, true, []byte(content))
+	})
+}
+
+// Sending fewer body bytes than the declared content-length makes the
+// handler's Body.Read fail with a descriptive error.
+func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) {
+	testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes",
+		func(st *serverTester) {
+			st.writeHeaders(HeadersFrameParam{
+				StreamID: 1, // clients send odd numbers
+				BlockFragment: st.encodeHeader(
+					":method", "POST",
+					"content-length", "3",
+				),
+				EndStream:  false, // to say DATA frames are coming
+				EndHeaders: true,
+			})
+			st.writeData(1, true, []byte("12"))
+		})
+}
+
+// Sending more body bytes than the declared content-length makes the
+// handler's Body.Read fail with a descriptive error.
+func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) {
+	testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes",
+		func(st *serverTester) {
+			st.writeHeaders(HeadersFrameParam{
+				StreamID: 1, // clients send odd numbers
+				BlockFragment: st.encodeHeader(
+					":method", "POST",
+					"content-length", "4",
+				),
+				EndStream:  false, // to say DATA frames are coming
+				EndHeaders: true,
+			})
+			st.writeData(1, true, []byte("12345"))
+		})
+}
+
+// testBodyContents runs write against a test server and asserts that the
+// handler observes a POST with the given ContentLength and full body.
+func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) {
+	testServerRequest(t, write, func(r *http.Request) {
+		if r.Method != "POST" {
+			t.Errorf("Method = %q; want POST", r.Method)
+		}
+		if r.ContentLength != wantContentLength {
+			t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
+		}
+		all, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if string(all) != wantBody {
+			t.Errorf("Read = %q; want %q", all, wantBody)
+		}
+		if err := r.Body.Close(); err != nil {
+			t.Fatalf("Close: %v", err)
+		}
+	})
+}
+
+// testBodyContentsFail runs write against a test server and asserts that the
+// handler's body read fails with an error containing wantReadError.
+func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) {
+	testServerRequest(t, write, func(r *http.Request) {
+		if r.Method != "POST" {
+			t.Errorf("Method = %q; want POST", r.Method)
+		}
+		if r.ContentLength != wantContentLength {
+			t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
+		}
+		all, err := ioutil.ReadAll(r.Body)
+		if err == nil {
+			t.Fatalf("expected an error (%q) reading from the body. Successfully read %q instead.",
+				wantReadError, all)
+		}
+		if !strings.Contains(err.Error(), wantReadError) {
+			t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError)
+		}
+		if err := r.Body.Close(); err != nil {
+			t.Fatalf("Close: %v", err)
+		}
+	})
+}
+
+// Using a Host header, instead of :authority, still populates Request.Host.
+func TestServer_Request_Get_Host(t *testing.T) {
+	const host = "example.com"
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":authority", "", "host", host),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	}, func(r *http.Request) {
+		if r.Host != host {
+			t.Errorf("Host = %q; want %q", r.Host, host)
+		}
+	})
+}
+
+// Using an :authority pseudo-header, instead of Host, populates Request.Host.
+func TestServer_Request_Get_Authority(t *testing.T) {
+	const host = "example.com"
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":authority", host),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	}, func(r *http.Request) {
+		if r.Host != host {
+			t.Errorf("Host = %q; want %q", r.Host, host)
+		}
+	})
+}
+
+// TestServer_Request_WithContinuation splits a header block into 5-byte
+// chunks — one HEADERS frame followed by CONTINUATION frames — and verifies
+// the server reassembles the full header set.
+func TestServer_Request_WithContinuation(t *testing.T) {
+	wantHeader := http.Header{
+		"Foo-One":   []string{"value-one"},
+		"Foo-Two":   []string{"value-two"},
+		"Foo-Three": []string{"value-three"},
+	}
+	testServerRequest(t, func(st *serverTester) {
+		fullHeaders := st.encodeHeader(
+			"foo-one", "value-one",
+			"foo-two", "value-two",
+			"foo-three", "value-three",
+		)
+		remain := fullHeaders
+		chunks := 0
+		for len(remain) > 0 {
+			const maxChunkSize = 5
+			chunk := remain
+			if len(chunk) > maxChunkSize {
+				chunk = chunk[:maxChunkSize]
+			}
+			remain = remain[len(chunk):]
+
+			if chunks == 0 {
+				st.writeHeaders(HeadersFrameParam{
+					StreamID:      1,    // clients send odd numbers
+					BlockFragment: chunk,
+					EndStream:     true,  // no DATA frames
+					EndHeaders:    false, // we'll have continuation frames
+				})
+			} else {
+				// END_HEADERS only on the final CONTINUATION.
+				err := st.fr.WriteContinuation(1, len(remain) == 0, chunk)
+				if err != nil {
+					t.Fatal(err)
+				}
+			}
+			chunks++
+		}
+		if chunks < 2 {
+			t.Fatal("too few chunks")
+		}
+	}, func(r *http.Request) {
+		if !reflect.DeepEqual(r.Header, wantHeader) {
+			t.Errorf("Header = %#v; want %#v", r.Header, wantHeader)
+		}
+	})
+}
+
+// Concatenated cookie headers. ("8.1.2.5 Compressing the Cookie Header Field")
+// Multiple cookie fields must be joined with "; " into one Cookie header.
+func TestServer_Request_CookieConcat(t *testing.T) {
+	const host = "example.com"
+	testServerRequest(t, func(st *serverTester) {
+		st.bodylessReq1(
+			":authority", host,
+			"cookie", "a=b",
+			"cookie", "c=d",
+			"cookie", "e=f",
+		)
+	}, func(r *http.Request) {
+		const want = "a=b; c=d; e=f"
+		if got := r.Header.Get("Cookie"); got != want {
+			t.Errorf("Cookie = %q; want %q", got, want)
+		}
+	})
+}
+
+// The following tests verify that requests carrying malformed header field
+// names or values are rejected with a stream-level PROTOCOL_ERROR.
+
+func TestServer_Request_Reject_CapitalHeader(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") })
+}
+
+func TestServer_Request_Reject_HeaderFieldNameColon(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has:colon", "v") })
+}
+
+func TestServer_Request_Reject_HeaderFieldNameNULL(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has\x00null", "v") })
+}
+
+func TestServer_Request_Reject_HeaderFieldNameEmpty(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("", "v") })
+}
+
+func TestServer_Request_Reject_HeaderFieldValueNewline(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\nnewline") })
+}
+
+func TestServer_Request_Reject_HeaderFieldValueCR(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\rcarriage") })
+}
+
+func TestServer_Request_Reject_HeaderFieldValueDEL(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\x7fdel") })
+}
+
+// An empty :method (encodeHeader drops empty-valued pseudo-headers, so the
+// request effectively lacks one) must also be rejected.
+func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") })
+}
+
+// Duplicate pseudo-header fields must be rejected.
+func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) {
+	// 8.1.2.3 Request Pseudo-Header Fields
+	// "All HTTP/2 requests MUST include exactly one valid value" ...
+	testRejectRequest(t, func(st *serverTester) {
+		st.addLogFilter("duplicate pseudo-header")
+		st.bodylessReq1(":method", "GET", ":method", "POST")
+	})
+}
+
+// A pseudo-header appearing after a regular header must be rejected. The
+// header block is hand-encoded here because encodeHeader always emits the
+// pseudo-headers first.
+func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) {
+	// 8.1.2.3 Request Pseudo-Header Fields
+	// "All pseudo-header fields MUST appear in the header block
+	// before regular header fields. Any request or response that
+	// contains a pseudo-header field that appears in a header
+	// block after a regular header field MUST be treated as
+	// malformed (Section 8.1.2.6)."
+	testRejectRequest(t, func(st *serverTester) {
+		st.addLogFilter("pseudo-header after regular header")
+		var buf bytes.Buffer
+		enc := hpack.NewEncoder(&buf)
+		enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
+		enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"})
+		enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
+		enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: buf.Bytes(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	})
+}
+
+// Requests missing a :path or :scheme, carrying a bogus :scheme, or using an
+// unknown pseudo-header must all be rejected.
+
+func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") })
+}
+
+func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") })
+}
+
+func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") })
+}
+
+func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) {
+		st.addLogFilter(`invalid pseudo-header ":unknown_thing"`)
+		st.bodylessReq1(":unknown_thing", "")
+	})
+}
+
+// testRejectRequest runs send against a fresh server and expects stream 1 to
+// be reset with PROTOCOL_ERROR; the handler must never be invoked.
+func testRejectRequest(t *testing.T, send func(*serverTester)) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		t.Error("server request made it to handler; should've been rejected")
+	})
+	defer st.Close()
+
+	st.greet()
+	send(st)
+	st.wantRSTStream(1, ErrCodeProtocol)
+}
+
+// testRejectRequestWithProtocolError is like testRejectRequest, but for
+// connection-level errors: it expects a GOAWAY with PROTOCOL_ERROR instead
+// of a stream reset.
+func testRejectRequestWithProtocolError(t *testing.T, send func(*serverTester)) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		t.Error("server request made it to handler; should've been rejected")
+	}, optQuiet)
+	defer st.Close()
+
+	st.greet()
+	send(st)
+	gf := st.wantGoAway()
+	if gf.ErrCode != ErrCodeProtocol {
+		t.Errorf("err code = %v; want %v", gf.ErrCode, ErrCodeProtocol)
+	}
+}
+
+// Section 5.1, on idle connections: "Receiving any frame other than
+// HEADERS or PRIORITY on a stream in this state MUST be treated as a
+// connection error (Section 5.4.1) of type PROTOCOL_ERROR."
+//
+// NOTE(review): the Write* error returns are intentionally ignored below;
+// the server's GOAWAY response is the behavior under test.
+func TestRejectFrameOnIdle_WindowUpdate(t *testing.T) {
+	testRejectRequestWithProtocolError(t, func(st *serverTester) {
+		st.fr.WriteWindowUpdate(123, 456)
+	})
+}
+func TestRejectFrameOnIdle_Data(t *testing.T) {
+	testRejectRequestWithProtocolError(t, func(st *serverTester) {
+		st.fr.WriteData(123, true, nil)
+	})
+}
+func TestRejectFrameOnIdle_RSTStream(t *testing.T) {
+	testRejectRequestWithProtocolError(t, func(st *serverTester) {
+		st.fr.WriteRSTStream(123, ErrCodeCancel)
+	})
+}
+
+// A valid CONNECT request (authority only, no :path or :scheme; hence the
+// raw header encoding) maps the authority into Method/RequestURI/URL.Host.
+func TestServer_Request_Connect(t *testing.T) {
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID: 1,
+			BlockFragment: st.encodeHeaderRaw(
+				":method", "CONNECT",
+				":authority", "example.com:123",
+			),
+			EndStream:  true,
+			EndHeaders: true,
+		})
+	}, func(r *http.Request) {
+		if g, w := r.Method, "CONNECT"; g != w {
+			t.Errorf("Method = %q; want %q", g, w)
+		}
+		if g, w := r.RequestURI, "example.com:123"; g != w {
+			t.Errorf("RequestURI = %q; want %q", g, w)
+		}
+		if g, w := r.URL.Host, "example.com:123"; g != w {
+			t.Errorf("URL.Host = %q; want %q", g, w)
+		}
+	})
+}
+
+// A CONNECT request carrying a :path pseudo-header is malformed and must be
+// rejected with a stream PROTOCOL_ERROR.
+func TestServer_Request_Connect_InvalidPath(t *testing.T) {
+	testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID: 1,
+			BlockFragment: st.encodeHeaderRaw(
+				":method", "CONNECT",
+				":authority", "example.com:123",
+				":path", "/bogus",
+			),
+			EndStream:  true,
+			EndHeaders: true,
+		})
+	})
+}
+
+// A CONNECT request carrying a :scheme pseudo-header is malformed and must be
+// rejected with a stream PROTOCOL_ERROR.
+func TestServer_Request_Connect_InvalidScheme(t *testing.T) {
+	testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID: 1,
+			BlockFragment: st.encodeHeaderRaw(
+				":method", "CONNECT",
+				":authority", "example.com:123",
+				":scheme", "https",
+			),
+			EndStream:  true,
+			EndHeaders: true,
+		})
+	})
+}
+
+// TestServer_Ping verifies the server ignores PING frames that already have
+// ACK set and echoes non-ACK PINGs back with ACK and the same payload.
+func TestServer_Ping(t *testing.T) {
+	st := newServerTester(t, nil)
+	defer st.Close()
+	st.greet()
+
+	// Server should ignore this one, since it has ACK set.
+	ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
+	if err := st.fr.WritePing(true, ackPingData); err != nil {
+		t.Fatal(err)
+	}
+
+	// But the server should reply to this one, since ACK is false.
+	pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
+	if err := st.fr.WritePing(false, pingData); err != nil {
+		t.Fatal(err)
+	}
+
+	pf := st.wantPing()
+	if !pf.Flags.Has(FlagPingAck) {
+		t.Error("response ping doesn't have ACK set")
+	}
+	if pf.Data != pingData {
+		t.Errorf("response ping has data %q; want %q", pf.Data, pingData)
+	}
+}
+
+// TestServer_RejectsLargeFrames verifies the server tears down the
+// connection with FRAME_SIZE_ERROR when sent an over-sized frame.
+func TestServer_RejectsLargeFrames(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("see golang.org/issue/13434")
+	}
+
+	st := newServerTester(t, nil)
+	defer st.Close()
+	st.greet()
+
+	// Write too large of a frame (too large by one byte)
+	// We ignore the return value because it's expected that the server
+	// will only read the first 9 bytes (the header) and then disconnect.
+	st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1))
+
+	gf := st.wantGoAway()
+	if gf.ErrCode != ErrCodeFrameSize {
+		t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize)
+	}
+	if st.serverLogBuf.Len() != 0 {
+		// Previously we spun here for a bit until the GOAWAY disconnect
+		// timer fired, logging while we fired.
+		t.Errorf("unexpected server output: %.500s\n", st.serverLogBuf.Bytes())
+	}
+}
+
+// TestServer_Handler_Sends_WindowUpdate verifies that as the handler consumes
+// request-body bytes, the server returns matching connection-level (stream 0)
+// and stream-level WINDOW_UPDATE frames — except no stream-level update after
+// END_STREAM has been received.
+func TestServer_Handler_Sends_WindowUpdate(t *testing.T) {
+	puppet := newHandlerPuppet()
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		puppet.act(w, r)
+	})
+	defer st.Close()
+	defer puppet.done()
+
+	st.greet()
+
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1, // clients send odd numbers
+		BlockFragment: st.encodeHeader(":method", "POST"),
+		EndStream:     false, // data coming
+		EndHeaders:    true,
+	})
+	st.writeData(1, false, []byte("abcdef"))
+	puppet.do(readBodyHandler(t, "abc"))
+	st.wantWindowUpdate(0, 3)
+	st.wantWindowUpdate(1, 3)
+
+	puppet.do(readBodyHandler(t, "def"))
+	st.wantWindowUpdate(0, 3)
+	st.wantWindowUpdate(1, 3)
+
+	st.writeData(1, true, []byte("ghijkl")) // END_STREAM here
+	puppet.do(readBodyHandler(t, "ghi"))
+	puppet.do(readBodyHandler(t, "jkl"))
+	st.wantWindowUpdate(0, 3)
+	st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM
+}
+
+// the version of the TestServer_Handler_Sends_WindowUpdate with padding.
+// Padding bytes must be credited back to flow control immediately.
+// See golang.org/issue/16556
+func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) {
+	puppet := newHandlerPuppet()
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		puppet.act(w, r)
+	})
+	defer st.Close()
+	defer puppet.done()
+
+	st.greet()
+
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: st.encodeHeader(":method", "POST"),
+		EndStream:     false,
+		EndHeaders:    true,
+	})
+	st.writeDataPadded(1, false, []byte("abcdef"), []byte{0, 0, 0, 0})
+
+	// Expect to immediately get our 5 bytes of padding back for
+	// both the connection and stream (4 bytes of padding + 1 byte of length)
+	st.wantWindowUpdate(0, 5)
+	st.wantWindowUpdate(1, 5)
+
+	puppet.do(readBodyHandler(t, "abc"))
+	st.wantWindowUpdate(0, 3)
+	st.wantWindowUpdate(1, 3)
+
+	puppet.do(readBodyHandler(t, "def"))
+	st.wantWindowUpdate(0, 3)
+	st.wantWindowUpdate(1, 3)
+}
+
+// A WINDOW_UPDATE that overflows the connection-level flow-control window
+// must produce a GOAWAY with FLOW_CONTROL_ERROR and LastStreamID 0.
+func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) {
+	st := newServerTester(t, nil)
+	defer st.Close()
+	st.greet()
+	if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil {
+		t.Fatal(err)
+	}
+	gf := st.wantGoAway()
+	if gf.ErrCode != ErrCodeFlowControl {
+		t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl)
+	}
+	if gf.LastStreamID != 0 {
+		t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0)
+	}
+}
+
+// A WINDOW_UPDATE that overflows a single stream's flow-control window must
+// produce an RST_STREAM with FLOW_CONTROL_ERROR on that stream only.
+func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) {
+	inHandler := make(chan bool)
+	blockHandler := make(chan bool)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		inHandler <- true
+		<-blockHandler
+	})
+	defer st.Close()
+	defer close(blockHandler)
+	st.greet()
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: st.encodeHeader(":method", "POST"),
+		EndStream:     false, // keep it open
+		EndHeaders:    true,
+	})
+	<-inHandler
+	// Send a bogus window update:
+	if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil {
+		t.Fatal(err)
+	}
+	st.wantRSTStream(1, ErrCodeFlowControl)
+}
+
+// testServerPostUnblock sends a hanging POST with unsent data to handler,
+// then runs fn once in the handler, and verifies that the error returned from
+// handler is acceptable. It fails if takes over 5 seconds for handler to exit.
+// Extra request headers may be appended via otherHeaders (name/value pairs).
+func testServerPostUnblock(t *testing.T,
+	handler func(http.ResponseWriter, *http.Request) error,
+	fn func(*serverTester),
+	checkErr func(error),
+	otherHeaders ...string) {
+	inHandler := make(chan bool)
+	errc := make(chan error, 1)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		inHandler <- true
+		errc <- handler(w, r)
+	})
+	defer st.Close()
+	st.greet()
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...),
+		EndStream:     false, // keep it open
+		EndHeaders:    true,
+	})
+	<-inHandler
+	fn(st)
+	select {
+	case err := <-errc:
+		if checkErr != nil {
+			checkErr(err)
+		}
+	case <-time.After(5 * time.Second):
+		t.Fatal("timeout waiting for Handler to return")
+	}
+}
+
+// A client RST_STREAM must unblock a handler stuck in Body.Read, which then
+// sees a StreamError with the CANCEL code.
+func TestServer_RSTStream_Unblocks_Read(t *testing.T) {
+	testServerPostUnblock(t,
+		func(w http.ResponseWriter, r *http.Request) (err error) {
+			_, err = r.Body.Read(make([]byte, 1))
+			return
+		},
+		func(st *serverTester) {
+			if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
+				t.Fatal(err)
+			}
+		},
+		func(err error) {
+			// Code 0x8 is CANCEL.
+			want := StreamError{StreamID: 0x1, Code: 0x8}
+			if !reflect.DeepEqual(err, want) {
+				t.Errorf("Read error = %v; want %v", err, want)
+			}
+		},
+	)
+}
+
+// TestServer_RSTStream_Unblocks_Header_Write repeats its inner test to flush
+// out a race that only deadlocked intermittently.
+func TestServer_RSTStream_Unblocks_Header_Write(t *testing.T) {
+	// Run this test a bunch, because it doesn't always
+	// deadlock. But with a bunch, it did.
+	n := 50
+	if testing.Short() {
+		n = 5
+	}
+	for i := 0; i < n; i++ {
+		testServer_RSTStream_Unblocks_Header_Write(t)
+	}
+}
+
+// testServer_RSTStream_Unblocks_Header_Write checks that a handler can still
+// write and flush response headers after the client has reset the stream,
+// without deadlocking.
+func testServer_RSTStream_Unblocks_Header_Write(t *testing.T) {
+	inHandler := make(chan bool, 1)
+	unblockHandler := make(chan bool, 1)
+	headerWritten := make(chan bool, 1)
+	wroteRST := make(chan bool, 1)
+
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		inHandler <- true
+		<-wroteRST
+		w.Header().Set("foo", "bar")
+		w.WriteHeader(200)
+		w.(http.Flusher).Flush()
+		headerWritten <- true
+		<-unblockHandler
+	})
+	defer st.Close()
+
+	st.greet()
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: st.encodeHeader(":method", "POST"),
+		EndStream:     false, // keep it open
+		EndHeaders:    true,
+	})
+	<-inHandler
+	if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
+		t.Fatal(err)
+	}
+	wroteRST <- true
+	st.awaitIdle()
+	select {
+	case <-headerWritten:
+	case <-time.After(2 * time.Second):
+		t.Error("timeout waiting for header write")
+	}
+	unblockHandler <- true
+}
+
+// Closing the client connection must unblock a handler stuck in Body.Read
+// with some non-nil error.
+func TestServer_DeadConn_Unblocks_Read(t *testing.T) {
+	testServerPostUnblock(t,
+		func(w http.ResponseWriter, r *http.Request) (err error) {
+			_, err = r.Body.Read(make([]byte, 1))
+			return
+		},
+		func(st *serverTester) { st.cc.Close() },
+		func(err error) {
+			if err == nil {
+				t.Error("unexpected nil error from Request.Body.Read")
+			}
+		},
+	)
+}
+
+// blockUntilClosed is a handler that parks until CloseNotify fires, used by
+// the CloseNotify tests below.
+var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error {
+	<-w.(http.CloseNotifier).CloseNotify()
+	return nil
+}
+
+// CloseNotify must fire when the client resets the stream...
+func TestServer_CloseNotify_After_RSTStream(t *testing.T) {
+	testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {
+		if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
+			t.Fatal(err)
+		}
+	}, nil)
+}
+
+// ...and when the client closes the whole connection.
+func TestServer_CloseNotify_After_ConnClose(t *testing.T) {
+	testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil)
+}
+
+// that CloseNotify unblocks after a stream error due to the client's
+// problem that's unrelated to them explicitly canceling it (which is
+// TestServer_CloseNotify_After_RSTStream above)
+func TestServer_CloseNotify_After_StreamError(t *testing.T) {
+	testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {
+		// data longer than declared Content-Length => stream error
+		st.writeData(1, true, []byte("1234"))
+	}, nil, "content-length", "3")
+}
+
+// TestServer_StateTransitions walks stream 1 through its lifecycle and checks
+// the server-side state at each step: idle before HEADERS, open inside the
+// handler, half-closed (remote) after the END_STREAM DATA frame, and closed
+// (with the stream record gone) after the handler returns.
+func TestServer_StateTransitions(t *testing.T) {
+	var st *serverTester
+	inHandler := make(chan bool)
+	writeData := make(chan bool)
+	leaveHandler := make(chan bool)
+	st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		inHandler <- true
+		if st.stream(1) == nil {
+			t.Errorf("nil stream 1 in handler")
+		}
+		if got, want := st.streamState(1), stateOpen; got != want {
+			t.Errorf("in handler, state is %v; want %v", got, want)
+		}
+		writeData <- true
+		if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF {
+			t.Errorf("body read = %d, %v; want 0, EOF", n, err)
+		}
+		if got, want := st.streamState(1), stateHalfClosedRemote; got != want {
+			t.Errorf("in handler, state is %v; want %v", got, want)
+		}
+
+		<-leaveHandler
+	})
+	st.greet()
+	if st.stream(1) != nil {
+		t.Fatal("stream 1 should be empty")
+	}
+	if got := st.streamState(1); got != stateIdle {
+		t.Fatalf("stream 1 should be idle; got %v", got)
+	}
+
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: st.encodeHeader(":method", "POST"),
+		EndStream:     false, // keep it open
+		EndHeaders:    true,
+	})
+	<-inHandler
+	<-writeData
+	st.writeData(1, true, nil)
+
+	leaveHandler <- true
+	hf := st.wantHeaders()
+	if !hf.StreamEnded() {
+		t.Fatal("expected END_STREAM flag")
+	}
+
+	if got, want := st.streamState(1), stateClosed; got != want {
+		t.Errorf("at end, state is %v; want %v", got, want)
+	}
+	if st.stream(1) != nil {
+		t.Fatal("at end, stream 1 should be gone")
+	}
+}
+
+// test HEADERS w/o EndHeaders + another HEADERS (should get rejected)
+func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    false,
+		})
+		st.writeHeaders(HeadersFrameParam{ // Not a continuation.
+			StreamID:      3, // different stream.
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	})
+}
+
+// test HEADERS w/o EndHeaders + PING (should get rejected):
+// nothing but CONTINUATION may follow an open header block.
+func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    false,
+		})
+		if err := st.fr.WritePing(false, [8]byte{}); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected):
+// a CONTINUATION with no open header block is a connection error.
+func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+		st.wantHeaders()
+		if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID
+func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    false,
+		})
+		if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// No HEADERS on stream 0.
+func TestServer_Rejects_Headers0(t *testing.T) {
+ testServerRejectsConn(t, func(st *serverTester) {
+ st.fr.AllowIllegalWrites = true
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 0,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ })
+}
+
+// No CONTINUATION on stream 0.
+func TestServer_Rejects_Continuation0(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		// Bypass the framer's own legality checks.
+		st.fr.AllowIllegalWrites = true
+		if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// No PRIORITY on stream 0.
+func TestServer_Rejects_Priority0(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		// Bypass the framer's own legality checks.
+		st.fr.AllowIllegalWrites = true
+		st.writePriority(0, PriorityParam{StreamDep: 1})
+	})
+}
+
+// No HEADERS frame with a self-dependence.
+func TestServer_Rejects_HeadersSelfDependence(t *testing.T) {
+	// Unlike the connection-level tests above, a self-dependent priority
+	// only resets the stream (RST_STREAM PROTOCOL_ERROR), not the conn.
+	testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+		st.fr.AllowIllegalWrites = true
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+			Priority:      PriorityParam{StreamDep: 1},
+		})
+	})
+}
+
+// No PRIORITY frame with a self-dependence.
+func TestServer_Rejects_PrioritySelfDependence(t *testing.T) {
+	testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+		st.fr.AllowIllegalWrites = true
+		st.writePriority(1, PriorityParam{StreamDep: 1})
+	})
+}
+
+// Clients may not send PUSH_PROMISE; doing so is a connection error.
+func TestServer_Rejects_PushPromise(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		pp := PushPromiseParam{
+			StreamID:  1,
+			PromiseID: 3,
+		}
+		if err := st.fr.WritePushPromise(pp); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// testServerRejectsConn tests that the server hangs up with a GOAWAY
+// frame and a server close after the client does something
+// deserving a CONNECTION_ERROR.
+func testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
+	st.addLogFilter("connection error: PROTOCOL_ERROR")
+	defer st.Close()
+	st.greet()
+	writeReq(st)
+
+	st.wantGoAway()
+	// After GOAWAY the server should close the conn; read in a
+	// goroutine so a hung server can't block the test forever.
+	errc := make(chan error, 1)
+	go func() {
+		fr, err := st.fr.ReadFrame()
+		if err == nil {
+			err = fmt.Errorf("got frame of type %T", fr)
+		}
+		errc <- err
+	}()
+	select {
+	case err := <-errc:
+		// io.EOF means the server closed the connection, as required.
+		if err != io.EOF {
+			t.Errorf("ReadFrame = %v; want io.EOF", err)
+		}
+	case <-time.After(2 * time.Second):
+		t.Error("timeout waiting for disconnect")
+	}
+}
+
+// testServerRejectsStream tests that the server sends a RST_STREAM with the provided
+// error code after a client sends a bogus request.
+func testServerRejectsStream(t *testing.T, code ErrCode, writeReq func(*serverTester)) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
+	defer st.Close()
+	st.greet()
+	writeReq(st)
+	// Only stream 1 is used by these tests; the connection stays up.
+	st.wantRSTStream(1, code)
+}
+
+// testServerRequest sets up an idle HTTP/2 connection and lets you
+// write a single request with writeReq, and then verify that the
+// *http.Request is built correctly in checkReq.
+func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) {
+	// Buffered so the handler can signal even if the test already timed out.
+	gotReq := make(chan bool, 1)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		if r.Body == nil {
+			t.Fatal("nil Body")
+		}
+		checkReq(r)
+		gotReq <- true
+	})
+	defer st.Close()
+
+	st.greet()
+	writeReq(st)
+
+	select {
+	case <-gotReq:
+	case <-time.After(2 * time.Second):
+		t.Error("timeout waiting for request")
+	}
+}
+
+func getSlash(st *serverTester) { st.bodylessReq1() }
+
+// A handler that writes nothing should produce a single HEADERS frame
+// with both END_STREAM and END_HEADERS set.
+func TestServer_Response_NoData(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		// Nothing.
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if !hf.StreamEnded() {
+			t.Fatal("want END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+	})
+}
+
+// A custom response header set by the handler must be lowercased and
+// included in the HEADERS frame of a bodyless response.
+func TestServer_Response_NoData_Header_FooBar(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Foo-Bar", "some-value")
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if !hf.StreamEnded() {
+			t.Fatal("want END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"foo-bar", "some-value"},
+			{"content-type", "text/plain; charset=utf-8"},
+			{"content-length", "0"},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+// An explicit Content-Type set by the handler must not be overridden
+// by content sniffing.
+func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Content-Type", "foo/bar")
+		io.WriteString(w, msg)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("don't want END_STREAM, expecting data")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "foo/bar"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		df := st.wantData()
+		if !df.StreamEnded() {
+			t.Error("expected DATA to have END_STREAM flag")
+		}
+		if got := string(df.Data()); got != msg {
+			t.Errorf("got DATA %q; want %q", got, msg)
+		}
+	})
+}
+
+// Transfer-Encoding is an HTTP/1.x concept and must be stripped from
+// HTTP/2 responses.
+func TestServer_Response_TransferEncoding_chunked(t *testing.T) {
+	const msg = "hi"
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Transfer-Encoding", "chunked") // should be stripped
+		io.WriteString(w, msg)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/plain; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+// Header accessed only after the initial write.
+// Header mutations after the first Write must not appear in the response.
+func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		io.WriteString(w, msg)
+		w.Header().Set("foo", "should be ignored")
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/html; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+// Header accessed before the initial write and later mutated.
+// The pre-write value must win; the post-write overwrite is ignored.
+func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("foo", "proper value")
+		io.WriteString(w, msg)
+		w.Header().Set("foo", "should be ignored")
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"foo", "proper value"},
+			{"content-type", "text/html; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+// With no explicit Content-Type, the server must sniff the body
+// (HTML here) and compute Content-Length for a single buffered write.
+func TestServer_Response_Data_SniffLenType(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		io.WriteString(w, msg)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("don't want END_STREAM, expecting data")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/html; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		df := st.wantData()
+		if !df.StreamEnded() {
+			t.Error("expected DATA to have END_STREAM flag")
+		}
+		if got := string(df.Data()); got != msg {
+			t.Errorf("got DATA %q; want %q", got, msg)
+		}
+	})
+}
+
+// A Flush between writes forces two DATA frames and suppresses
+// Content-Length (the total size is unknown when headers are sent).
+func TestServer_Response_Header_Flush_MidWrite(t *testing.T) {
+	const msg = "<html>this is HTML"
+	const msg2 = ", and this is the next chunk"
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		io.WriteString(w, msg)
+		w.(http.Flusher).Flush()
+		io.WriteString(w, msg2)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/html; charset=utf-8"}, // sniffed
+			// and no content-length
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		// First DATA frame: the flushed chunk, stream still open.
+		{
+			df := st.wantData()
+			if df.StreamEnded() {
+				t.Error("unexpected END_STREAM flag")
+			}
+			if got := string(df.Data()); got != msg {
+				t.Errorf("got DATA %q; want %q", got, msg)
+			}
+		}
+		// Second DATA frame: the final chunk, carrying END_STREAM.
+		{
+			df := st.wantData()
+			if !df.StreamEnded() {
+				t.Error("wanted END_STREAM flag on last data chunk")
+			}
+			if got := string(df.Data()); got != msg2 {
+				t.Errorf("got DATA %q; want %q", got, msg2)
+			}
+		}
+	})
+}
+
+// A 1MB handler write must be split into DATA frames no larger than the
+// client's SETTINGS_MAX_FRAME_SIZE, and must respect the flow-control
+// window the client grants.
+func TestServer_Response_LargeWrite(t *testing.T) {
+	const size = 1 << 20
+	const maxFrameSize = 16 << 10
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		n, err := w.Write(bytes.Repeat([]byte("a"), size))
+		if err != nil {
+			return fmt.Errorf("Write error: %v", err)
+		}
+		if n != size {
+			return fmt.Errorf("wrong size %d from Write", n)
+		}
+		return nil
+	}, func(st *serverTester) {
+		// Start the handler with zero quota and a small max frame size.
+		if err := st.fr.WriteSettings(
+			Setting{SettingInitialWindowSize, 0},
+			Setting{SettingMaxFrameSize, maxFrameSize},
+		); err != nil {
+			t.Fatal(err)
+		}
+		st.wantSettingsAck()
+
+		getSlash(st) // make the single request
+
+		// Give the handler quota to write:
+		if err := st.fr.WriteWindowUpdate(1, size); err != nil {
+			t.Fatal(err)
+		}
+		// Give the handler quota to write to connection-level
+		// window as well
+		if err := st.fr.WriteWindowUpdate(0, size); err != nil {
+			t.Fatal(err)
+		}
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/plain; charset=utf-8"}, // sniffed
+			// and no content-length
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		// Drain all DATA frames, counting bytes and frames.
+		// (Named gotBytes to avoid shadowing the bytes package.)
+		var gotBytes, frames int
+		for {
+			df := st.wantData()
+			gotBytes += len(df.Data())
+			frames++
+			for _, b := range df.Data() {
+				if b != 'a' {
+					t.Fatal("non-'a' byte seen in DATA")
+				}
+			}
+			if df.StreamEnded() {
+				break
+			}
+		}
+		if gotBytes != size {
+			t.Errorf("Got %d bytes; want %d", gotBytes, size)
+		}
+		// Allow up to 2x the minimum frame count for scheduling slop.
+		if want := int(size / maxFrameSize); frames < want || frames > want*2 {
+			t.Errorf("Got %d frames; want %d", frames, want)
+		}
+	})
+}
+
+// Test that the handler can't write more than the client allows
+func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) {
+	// Make these reads. Before each read, the client adds exactly enough
+	// flow-control to satisfy the read. Numbers chosen arbitrarily.
+	reads := []int{123, 1, 13, 127}
+	size := 0
+	for _, n := range reads {
+		size += n
+	}
+
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		// Flush first so headers go out before the body write blocks
+		// on flow control.
+		w.(http.Flusher).Flush()
+		n, err := w.Write(bytes.Repeat([]byte("a"), size))
+		if err != nil {
+			return fmt.Errorf("Write error: %v", err)
+		}
+		if n != size {
+			return fmt.Errorf("wrong size %d from Write", n)
+		}
+		return nil
+	}, func(st *serverTester) {
+		// Set the window size to something explicit for this test.
+		// It's also how much initial data we expect.
+		if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, uint32(reads[0])}); err != nil {
+			t.Fatal(err)
+		}
+		st.wantSettingsAck()
+
+		getSlash(st) // make the single request
+
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+
+		df := st.wantData()
+		if got := len(df.Data()); got != reads[0] {
+			t.Fatalf("Initial window size = %d but got DATA with %d bytes", reads[0], got)
+		}
+
+		// Dole out quota one read at a time; each DATA frame must be
+		// exactly the quota just granted.
+		for _, quota := range reads[1:] {
+			if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil {
+				t.Fatal(err)
+			}
+			df := st.wantData()
+			if int(quota) != len(df.Data()) {
+				t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota)
+			}
+		}
+	})
+}
+
+// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM.
+func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) {
+	const size = 1 << 20
+	const maxFrameSize = 16 << 10
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.(http.Flusher).Flush()
+		// The Write is done in a goroutine because it should block on
+		// flow control (window is zero) until the RST arrives.
+		errc := make(chan error, 1)
+		go func() {
+			_, err := w.Write(bytes.Repeat([]byte("a"), size))
+			errc <- err
+		}()
+		select {
+		case err := <-errc:
+			if err == nil {
+				return errors.New("unexpected nil error from Write in handler")
+			}
+			return nil
+		case <-time.After(2 * time.Second):
+			return errors.New("timeout waiting for Write in handler")
+		}
+	}, func(st *serverTester) {
+		// Zero initial window so the handler's Write must block.
+		if err := st.fr.WriteSettings(
+			Setting{SettingInitialWindowSize, 0},
+			Setting{SettingMaxFrameSize, maxFrameSize},
+		); err != nil {
+			t.Fatal(err)
+		}
+		st.wantSettingsAck()
+
+		getSlash(st) // make the single request
+
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+
+		// Cancel the stream; the blocked Write should fail promptly.
+		if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// An empty DATA frame carrying END_STREAM consumes no flow-control
+// quota and must be sent even when the client's window is zero.
+func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.(http.Flusher).Flush()
+		// Nothing; send empty DATA
+		return nil
+	}, func(st *serverTester) {
+		// Handler gets no data quota:
+		if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil {
+			t.Fatal(err)
+		}
+		st.wantSettingsAck()
+
+		getSlash(st) // make the single request
+
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+
+		df := st.wantData()
+		if got := len(df.Data()); got != 0 {
+			t.Fatalf("unexpected %d DATA bytes; want 0", got)
+		}
+		if !df.StreamEnded() {
+			t.Fatal("DATA didn't have END_STREAM")
+		}
+	})
+}
+
+// When the client sends "Expect: 100-continue", the server must emit a
+// 100 interim response as soon as the handler reads the body, strip the
+// Expect header from the handler's view, then send the real response.
+func TestServer_Response_Automatic100Continue(t *testing.T) {
+	const msg = "foo"
+	const reply = "bar"
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		if v := r.Header.Get("Expect"); v != "" {
+			t.Errorf("Expect header = %q; want empty", v)
+		}
+		buf := make([]byte, len(msg))
+		// This read should trigger the 100-continue being sent.
+		if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg {
+			return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg)
+		}
+		_, err := io.WriteString(w, reply)
+		return err
+	}, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"),
+			EndStream:     false,
+			EndHeaders:    true,
+		})
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "100"},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Fatalf("Got headers %v; want %v", goth, wanth)
+		}
+
+		// Okay, they sent status 100, so we can send our
+		// gigantic and/or sensitive "foo" payload now.
+		st.writeData(1, true, []byte(msg))
+
+		st.wantWindowUpdate(0, uint32(len(msg)))
+
+		hf = st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("expected data to follow")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth = st.decodeHeader(hf.HeaderBlockFragment())
+		wanth = [][2]string{
+			{":status", "200"},
+			{"content-type", "text/plain; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(reply))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+
+		df := st.wantData()
+		if string(df.Data()) != reply {
+			t.Errorf("Client read %q; want %q", df.Data(), reply)
+		}
+		if !df.StreamEnded() {
+			t.Errorf("expect data stream end")
+		}
+	})
+}
+
+// A handler looping on Write must eventually get an error after the
+// client disconnects, rather than spinning forever.
+func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) {
+	errc := make(chan error, 1)
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		p := []byte("some data.\n")
+		// Write until the dead connection surfaces as an error.
+		for {
+			_, err := w.Write(p)
+			if err != nil {
+				errc <- err
+				return nil
+			}
+		}
+	}, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     false,
+			EndHeaders:    true,
+		})
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		// Close the connection and wait for the handler to (hopefully) notice.
+		st.cc.Close()
+		select {
+		case <-errc:
+		case <-time.After(5 * time.Second):
+			t.Error("timeout")
+		}
+	})
+}
+
+// A stream opened beyond SETTINGS_MAX_CONCURRENT_STREAMS must be reset,
+// but its header block must still be decoded (to keep the shared HPACK
+// context in sync), and later streams must still work.
+func TestServer_Rejects_Too_Many_Streams(t *testing.T) {
+	const testPath = "/some/path"
+
+	inHandler := make(chan uint32)
+	leaveHandler := make(chan bool)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		id := w.(*responseWriter).rws.stream.id
+		inHandler <- id
+		// The final (post-reject) stream's path proves the rejected
+		// stream's CONTINUATION was still fed to the HPACK decoder.
+		if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath {
+			t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath)
+		}
+		<-leaveHandler
+	})
+	defer st.Close()
+	st.greet()
+	// streamID hands out successive odd client stream IDs: 1, 3, 5, ...
+	nextStreamID := uint32(1)
+	streamID := func() uint32 {
+		defer func() { nextStreamID += 2 }()
+		return nextStreamID
+	}
+	sendReq := func(id uint32, headers ...string) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      id,
+			BlockFragment: st.encodeHeader(headers...),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	}
+	// Fill the server to its concurrent-stream limit; each handler
+	// parks on leaveHandler so its stream stays open.
+	for i := 0; i < defaultMaxStreams; i++ {
+		sendReq(streamID())
+		<-inHandler
+	}
+	defer func() {
+		for i := 0; i < defaultMaxStreams; i++ {
+			leaveHandler <- true
+		}
+	}()
+
+	// And this one should cross the limit:
+	// (It's also sent as a CONTINUATION, to verify we still track the decoder context,
+	// even if we're rejecting it)
+	rejectID := streamID()
+	headerBlock := st.encodeHeader(":path", testPath)
+	frag1, frag2 := headerBlock[:3], headerBlock[3:]
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      rejectID,
+		BlockFragment: frag1,
+		EndStream:     true,
+		EndHeaders:    false, // CONTINUATION coming
+	})
+	if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil {
+		t.Fatal(err)
+	}
+	st.wantRSTStream(rejectID, ErrCodeProtocol)
+
+	// But let a handler finish:
+	leaveHandler <- true
+	st.wantHeaders()
+
+	// And now another stream should be able to start:
+	goodID := streamID()
+	sendReq(goodID, ":path", testPath)
+	select {
+	case got := <-inHandler:
+		if got != goodID {
+			t.Errorf("Got stream %d; want %d", got, goodID)
+		}
+	case <-time.After(3 * time.Second):
+		t.Error("timeout waiting for handler")
+	}
+}
+
+// So many response headers that the server needs to use CONTINUATION frames:
+func TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		h := w.Header()
+		for i := 0; i < 5000; i++ {
+			h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i))
+		}
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		// The initial HEADERS cannot hold 5000 fields, so it must not
+		// carry END_HEADERS.
+		if hf.HeadersEnded() {
+			t.Fatal("got unwanted END_HEADERS flag")
+		}
+		n := 0
+		for {
+			n++
+			cf := st.wantContinuation()
+			if cf.HeadersEnded() {
+				break
+			}
+		}
+		if n < 5 {
+			t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n)
+		}
+	})
+}
+
+// This previously crashed (reported by Mathieu Lonjaret as observed
+// while using Camlistore) because we got a DATA frame from the client
+// after the handler exited and our logic at the time was wrong,
+// keeping a stream in the map in stateClosed, which tickled an
+// invariant check later when we tried to remove that stream (via
+// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop
+// ended.
+func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		// nothing
+		return nil
+	}, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     false, // DATA is coming
+			EndHeaders:    true,
+		})
+		hf := st.wantHeaders()
+		if !hf.HeadersEnded() || !hf.StreamEnded() {
+			t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf)
+		}
+
+		// Sent when a Handler closes while a client has
+		// indicated it's still sending DATA:
+		st.wantRSTStream(1, ErrCodeNo)
+
+		// Now the handler has ended, so it's ended its
+		// stream, but the client hasn't closed its side
+		// (stateClosedLocal). So send more data and verify
+		// it doesn't crash with an internal invariant panic, like
+		// it did before.
+		st.writeData(1, true, []byte("foo"))
+
+		// Get our flow control bytes back, since the handler didn't get them.
+		st.wantWindowUpdate(0, uint32(len("foo")))
+
+		// Sent after a peer sends data anyway (admittedly the
+		// previous RST_STREAM might've still been in-flight),
+		// but they'll get the more friendly 'cancel' code
+		// first.
+		st.wantRSTStream(1, ErrCodeStreamClosed)
+
+		// Set up a bunch of machinery to record the panic we saw
+		// previously.
+		var (
+			panMu    sync.Mutex
+			panicVal interface{}
+		)
+
+		testHookOnPanicMu.Lock()
+		testHookOnPanic = func(sc *serverConn, pv interface{}) bool {
+			panMu.Lock()
+			panicVal = pv
+			panMu.Unlock()
+			return true
+		}
+		testHookOnPanicMu.Unlock()
+
+		// Now force the serve loop to end, via closing the connection.
+		st.cc.Close()
+		select {
+		case <-st.sc.doneServing:
+			// Loop has exited.
+			panMu.Lock()
+			got := panicVal
+			panMu.Unlock()
+			if got != nil {
+				t.Errorf("Got panic: %v", got)
+			}
+		case <-time.After(5 * time.Second):
+			t.Error("timeout")
+		}
+	})
+}
+
+// HTTP/2 requires TLS 1.2+; 1.0 and 1.1 handshakes must be refused.
+func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) }
+func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) }
+
+// testRejectTLS connects with the client's TLS MaxVersion capped at max
+// and expects a GOAWAY with INADEQUATE_SECURITY.
+func testRejectTLS(t *testing.T, max uint16) {
+	st := newServerTester(t, nil, func(c *tls.Config) {
+		c.MaxVersion = max
+	})
+	defer st.Close()
+	gf := st.wantGoAway()
+	if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
+		t.Errorf("Got error code %v; want %v", got, want)
+	}
+}
+
+// A client offering only cipher suites prohibited by the HTTP/2 spec
+// must get GOAWAY INADEQUATE_SECURITY.
+func TestServer_Rejects_TLSBadCipher(t *testing.T) {
+	st := newServerTester(t, nil, func(c *tls.Config) {
+		// Only list bad ones:
+		c.CipherSuites = []uint16{
+			tls.TLS_RSA_WITH_RC4_128_SHA,
+			tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+			tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+			tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+			tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+			tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+			tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+			cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
+		}
+	})
+	defer st.Close()
+	gf := st.wantGoAway()
+	if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
+		t.Errorf("Got error code %v; want %v", got, want)
+	}
+}
+
+// The spec-mandated suite must be accepted with Go's default server
+// cipher configuration; a successful greet proves the handshake worked.
+func TestServer_Advertises_Common_Cipher(t *testing.T) {
+	const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+	st := newServerTester(t, nil, func(c *tls.Config) {
+		// Have the client only support the one required by the spec.
+		c.CipherSuites = []uint16{requiredSuite}
+	}, func(ts *httptest.Server) {
+		var srv *http.Server = ts.Config
+		// Have the server configured with no specific cipher suites.
+		// This tests that Go's defaults include the required one.
+		srv.TLSConfig = nil
+	})
+	defer st.Close()
+	st.greet()
+}
+
+// onHeaderField collects decoded HPACK fields into st.decodedHeaders,
+// skipping the nondeterministic "date" header.
+func (st *serverTester) onHeaderField(f hpack.HeaderField) {
+	if f.Name == "date" {
+		return
+	}
+	st.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value})
+}
+
+// decodeHeader runs headerBlock through the tester's HPACK decoder and
+// returns the (name, value) pairs gathered by onHeaderField.
+func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) {
+	st.decodedHeaders = nil
+	if _, err := st.hpackDec.Write(headerBlock); err != nil {
+		st.t.Fatalf("hpack decoding error: %v", err)
+	}
+	// Close validates that the block ended on a field boundary.
+	if err := st.hpackDec.Close(); err != nil {
+		st.t.Fatalf("hpack decoding error: %v", err)
+	}
+	return st.decodedHeaders
+}
+
+// testServerResponse sets up an idle HTTP/2 connection. The client function should
+// write a single request that must be handled by the handler. This waits up to 5s
+// for client to return, then up to an additional 2s for the handler to return.
+func testServerResponse(t testing.TB,
+	handler func(http.ResponseWriter, *http.Request) error,
+	client func(*serverTester),
+) {
+	// Buffered so the handler never blocks sending its result.
+	errc := make(chan error, 1)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		if r.Body == nil {
+			t.Fatal("nil Body")
+		}
+		errc <- handler(w, r)
+	})
+	defer st.Close()
+
+	donec := make(chan bool)
+	go func() {
+		defer close(donec)
+		st.greet()
+		client(st)
+	}()
+
+	select {
+	case <-donec:
+	case <-time.After(5 * time.Second):
+		t.Fatal("timeout in client")
+	}
+
+	select {
+	case err := <-errc:
+		if err != nil {
+			t.Fatalf("Error in handler: %v", err)
+		}
+	case <-time.After(2 * time.Second):
+		t.Fatal("timeout in handler")
+	}
+}
+
+// readBodyHandler returns an http Handler func that reads len(want)
+// bytes from r.Body and fails t if the contents read were not
+// the value of want.
+func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
+		buf := make([]byte, len(want))
+		_, err := io.ReadFull(r.Body, buf)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+		if string(buf) != want {
+			t.Errorf("read %q; want %q", buf, want)
+		}
+	}
+}
+
+// TestServerWithCurl currently fails, hence the LenientCipherSuites test. See:
+// https://github.com/tatsuhiro-t/nghttp2/issues/140 &
+// http://sourceforge.net/p/curl/bugs/1472/
+func TestServerWithCurl(t *testing.T)                     { testServerWithCurl(t, false) }
+func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) }
+
+// testServerWithCurl runs a real curl (in Docker) against this server
+// and checks the response headers/body curl reports. Linux-only and
+// skipped in -short mode.
+func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) {
+	if runtime.GOOS != "linux" {
+		t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway")
+	}
+	if testing.Short() {
+		t.Skip("skipping curl test in short mode")
+	}
+	requireCurl(t)
+	var gotConn int32
+	testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) }
+
+	const msg = "Hello from curl!\n"
+	ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Foo", "Bar")
+		w.Header().Set("Client-Proto", r.Proto)
+		io.WriteString(w, msg)
+	}))
+	ConfigureServer(ts.Config, &Server{
+		PermitProhibitedCipherSuites: permitProhibitedCipherSuites,
+	})
+	ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
+	ts.StartTLS()
+	defer ts.Close()
+
+	t.Logf("Running test server for curl to hit at: %s", ts.URL)
+	container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL)
+	defer kill(container)
+	// Fetch docker logs in a goroutine so a wedged container can't
+	// hang the test past the timeout below.
+	resc := make(chan interface{}, 1)
+	go func() {
+		res, err := dockerLogs(container)
+		if err != nil {
+			resc <- err
+		} else {
+			resc <- res
+		}
+	}()
+	select {
+	case res := <-resc:
+		if err, ok := res.(error); ok {
+			t.Fatal(err)
+		}
+		body := string(res.([]byte))
+		// Search for both "key: value" and "key:value", since curl changed their format
+		// Our Dockerfile contains the latest version (no space), but just in case people
+		// didn't rebuild, check both.
+		if !strings.Contains(body, "foo: Bar") && !strings.Contains(body, "foo:Bar") {
+			t.Errorf("didn't see foo: Bar header")
+			t.Logf("Got: %s", body)
+		}
+		if !strings.Contains(body, "client-proto: HTTP/2") && !strings.Contains(body, "client-proto:HTTP/2") {
+			t.Errorf("didn't see client-proto: HTTP/2 header")
+			t.Logf("Got: %s", res)
+		}
+		if !strings.Contains(string(res.([]byte)), msg) {
+			t.Errorf("didn't see %q content", msg)
+			t.Logf("Got: %s", res)
+		}
+	case <-time.After(3 * time.Second):
+		t.Errorf("timeout waiting for curl")
+	}
+
+	if atomic.LoadInt32(&gotConn) == 0 {
+		t.Error("never saw an http2 connection")
+	}
+}
+
+var doh2load = flag.Bool("h2load", false, "Run h2load test")
+
+// TestServerWithH2Load hammers the server with nghttp2's h2load tool
+// (in Docker). Opt-in via --h2load; Linux-only.
+func TestServerWithH2Load(t *testing.T) {
+	if !*doh2load {
+		t.Skip("Skipping without --h2load flag.")
+	}
+	if runtime.GOOS != "linux" {
+		t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway")
+	}
+	requireH2load(t)
+
+	msg := strings.Repeat("Hello, h2load!\n", 5000)
+	ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		io.WriteString(w, msg)
+		w.(http.Flusher).Flush()
+		io.WriteString(w, msg)
+	}))
+	ts.StartTLS()
+	defer ts.Close()
+
+	// 100k requests, 100 connections, 100 concurrent streams each.
+	cmd := exec.Command("docker", "run", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl",
+		"-n100000", "-c100", "-m100", ts.URL)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Issue 12843: a client sending an enormous (1MB) header block must get
+// a 431 response rather than ballooning server memory.
+func TestServerDoS_MaxHeaderListSize(t *testing.T) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
+	defer st.Close()
+
+	// shake hands
+	frameSize := defaultMaxReadFrameSize
+	var advHeaderListSize *uint32
+	st.greetAndCheckSettings(func(s Setting) error {
+		switch s.ID {
+		case SettingMaxFrameSize:
+			// Clamp to the legal range so our CONTINUATION chunks below
+			// never exceed what the server advertised.
+			if s.Val < minMaxFrameSize {
+				frameSize = minMaxFrameSize
+			} else if s.Val > maxFrameSize {
+				frameSize = maxFrameSize
+			} else {
+				frameSize = int(s.Val)
+			}
+		case SettingMaxHeaderListSize:
+			advHeaderListSize = &s.Val
+		}
+		return nil
+	})
+
+	if advHeaderListSize == nil {
+		t.Errorf("server didn't advertise a max header list size")
+	} else if *advHeaderListSize == 0 {
+		t.Errorf("server advertised a max header list size of 0")
+	}
+
+	st.encodeHeaderField(":method", "GET")
+	st.encodeHeaderField(":path", "/")
+	st.encodeHeaderField(":scheme", "https")
+	cookie := strings.Repeat("*", 4058)
+	st.encodeHeaderField("cookie", cookie)
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: st.headerBuf.Bytes(),
+		EndStream:     true,
+		EndHeaders:    false,
+	})
+
+	// Capture the short encoding of a duplicate ~4K cookie, now
+	// that we've already sent it once.
+	st.headerBuf.Reset()
+	st.encodeHeaderField("cookie", cookie)
+
+	// Now send 1MB of it.
+	const size = 1 << 20
+	b := bytes.Repeat(st.headerBuf.Bytes(), size/st.headerBuf.Len())
+	for len(b) > 0 {
+		chunk := b
+		if len(chunk) > frameSize {
+			chunk = chunk[:frameSize]
+		}
+		b = b[len(chunk):]
+		// Check the write error like every other frame write in this
+		// file, so a failure here doesn't show up as a confusing hang
+		// in wantHeaders below.
+		if err := st.fr.WriteContinuation(1, len(b) == 0, chunk); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	h := st.wantHeaders()
+	if !h.HeadersEnded() {
+		t.Fatalf("Got HEADERS without END_HEADERS set: %v", h)
+	}
+	headers := st.decodeHeader(h.HeaderBlockFragment())
+	want := [][2]string{
+		{":status", "431"},
+		{"content-type", "text/html; charset=utf-8"},
+		{"content-length", "63"},
+	}
+	if !reflect.DeepEqual(headers, want) {
+		t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
+	}
+}
+
+func TestCompressionErrorOnWrite(t *testing.T) {
+ const maxStrLen = 8 << 10
+ var serverConfig *http.Server
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ // No response body.
+ }, func(ts *httptest.Server) {
+ serverConfig = ts.Config
+ serverConfig.MaxHeaderBytes = maxStrLen
+ })
+ st.addLogFilter("connection error: COMPRESSION_ERROR")
+ defer st.Close()
+ st.greet()
+
+ maxAllowed := st.sc.framer.maxHeaderStringLen()
+
+	// Crank this up, now that we have a conn whose
+	// hpack.Decoder's max string length has been initialized
+	// from the earlier low ~8K value. We want this higher so we don't
+	// hit the max header list size. We only want to test hitting
+	// the max string size.
+ serverConfig.MaxHeaderBytes = 1 << 20
+
+ // First a request with a header that's exactly the max allowed size
+ // for the hpack compression. It's still too long for the header list
+ // size, so we'll get the 431 error, but that keeps the compression
+ // context still valid.
+ hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed))
+
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: hbf,
+ EndStream: true,
+ EndHeaders: true,
+ })
+ h := st.wantHeaders()
+ if !h.HeadersEnded() {
+ t.Fatalf("Got HEADERS without END_HEADERS set: %v", h)
+ }
+ headers := st.decodeHeader(h.HeaderBlockFragment())
+ want := [][2]string{
+ {":status", "431"},
+ {"content-type", "text/html; charset=utf-8"},
+ {"content-length", "63"},
+ }
+ if !reflect.DeepEqual(headers, want) {
+ t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
+ }
+ df := st.wantData()
+ if !strings.Contains(string(df.Data()), "HTTP Error 431") {
+ t.Errorf("Unexpected data body: %q", df.Data())
+ }
+ if !df.StreamEnded() {
+ t.Fatalf("expect data stream end")
+ }
+
+ // And now send one that's just one byte too big.
+ hbf = st.encodeHeader("bar", strings.Repeat("b", maxAllowed+1))
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 3,
+ BlockFragment: hbf,
+ EndStream: true,
+ EndHeaders: true,
+ })
+ ga := st.wantGoAway()
+ if ga.ErrCode != ErrCodeCompression {
+ t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
+ }
+}
+
+func TestCompressionErrorOnClose(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ // No response body.
+ })
+ st.addLogFilter("connection error: COMPRESSION_ERROR")
+ defer st.Close()
+ st.greet()
+
+ hbf := st.encodeHeader("foo", "bar")
+ hbf = hbf[:len(hbf)-1] // truncate one byte from the end, so hpack.Decoder.Close fails.
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: hbf,
+ EndStream: true,
+ EndHeaders: true,
+ })
+ ga := st.wantGoAway()
+ if ga.ErrCode != ErrCodeCompression {
+ t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
+ }
+}
+
+// test that a server handler can read trailers from a client
+func TestServerReadsTrailers(t *testing.T) {
+ const testBody = "some test body"
+ writeReq := func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader("trailer", "Foo, Bar", "trailer", "Baz"),
+ EndStream: false,
+ EndHeaders: true,
+ })
+ st.writeData(1, false, []byte(testBody))
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeaderRaw(
+ "foo", "foov",
+ "bar", "barv",
+ "baz", "bazv",
+ "surprise", "wasn't declared; shouldn't show up",
+ ),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ }
+ checkReq := func(r *http.Request) {
+ wantTrailer := http.Header{
+ "Foo": nil,
+ "Bar": nil,
+ "Baz": nil,
+ }
+ if !reflect.DeepEqual(r.Trailer, wantTrailer) {
+ t.Errorf("initial Trailer = %v; want %v", r.Trailer, wantTrailer)
+ }
+ slurp, err := ioutil.ReadAll(r.Body)
+ if string(slurp) != testBody {
+ t.Errorf("read body %q; want %q", slurp, testBody)
+ }
+ if err != nil {
+ t.Fatalf("Body slurp: %v", err)
+ }
+ wantTrailerAfter := http.Header{
+ "Foo": {"foov"},
+ "Bar": {"barv"},
+ "Baz": {"bazv"},
+ }
+ if !reflect.DeepEqual(r.Trailer, wantTrailerAfter) {
+ t.Errorf("final Trailer = %v; want %v", r.Trailer, wantTrailerAfter)
+ }
+ }
+ testServerRequest(t, writeReq, checkReq)
+}
+
+// test that a server handler can send trailers
+func TestServerWritesTrailers_WithFlush(t *testing.T) { testServerWritesTrailers(t, true) }
+func TestServerWritesTrailers_WithoutFlush(t *testing.T) { testServerWritesTrailers(t, false) }
+
+func testServerWritesTrailers(t *testing.T, withFlush bool) {
+ // See https://httpwg.github.io/specs/rfc7540.html#rfc.section.8.1.3
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B")
+ w.Header().Add("Trailer", "Server-Trailer-C")
+ w.Header().Add("Trailer", "Transfer-Encoding, Content-Length, Trailer") // filtered
+
+ // Regular headers:
+ w.Header().Set("Foo", "Bar")
+ w.Header().Set("Content-Length", "5") // len("Hello")
+
+ io.WriteString(w, "Hello")
+ if withFlush {
+ w.(http.Flusher).Flush()
+ }
+ w.Header().Set("Server-Trailer-A", "valuea")
+ w.Header().Set("Server-Trailer-C", "valuec") // skipping B
+ // After a flush, random keys like Server-Surprise shouldn't show up:
+ w.Header().Set("Server-Surpise", "surprise! this isn't predeclared!")
+ // But we do permit promoting keys to trailers after a
+ // flush if they start with the magic
+ // otherwise-invalid "Trailer:" prefix:
+ w.Header().Set("Trailer:Post-Header-Trailer", "hi1")
+ w.Header().Set("Trailer:post-header-trailer2", "hi2")
+ w.Header().Set("Trailer:Range", "invalid")
+ w.Header().Set("Trailer:Foo\x01Bogus", "invalid")
+ w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 2616 14.40")
+ w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 2616 14.40")
+ w.Header().Set("Trailer", "should not be included; Forbidden by RFC 2616 14.40")
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("response HEADERS had END_STREAM")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("response HEADERS didn't have END_HEADERS")
+ }
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "200"},
+ {"foo", "Bar"},
+ {"trailer", "Server-Trailer-A, Server-Trailer-B"},
+ {"trailer", "Server-Trailer-C"},
+ {"trailer", "Transfer-Encoding, Content-Length, Trailer"},
+ {"content-type", "text/plain; charset=utf-8"},
+ {"content-length", "5"},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
+ }
+ df := st.wantData()
+ if string(df.Data()) != "Hello" {
+ t.Fatalf("Client read %q; want Hello", df.Data())
+ }
+ if df.StreamEnded() {
+ t.Fatalf("data frame had STREAM_ENDED")
+ }
+ tf := st.wantHeaders() // for the trailers
+ if !tf.StreamEnded() {
+ t.Fatalf("trailers HEADERS lacked END_STREAM")
+ }
+ if !tf.HeadersEnded() {
+ t.Fatalf("trailers HEADERS lacked END_HEADERS")
+ }
+ wanth = [][2]string{
+ {"post-header-trailer", "hi1"},
+ {"post-header-trailer2", "hi2"},
+ {"server-trailer-a", "valuea"},
+ {"server-trailer-c", "valuec"},
+ }
+ goth = st.decodeHeader(tf.HeaderBlockFragment())
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
+ }
+ })
+}
+
+// validate transmitted header field names & values
+// golang.org/issue/14048
+func TestServerDoesntWriteInvalidHeaders(t *testing.T) {
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ w.Header().Add("OK1", "x")
+ w.Header().Add("Bad:Colon", "x") // colon (non-token byte) in key
+ w.Header().Add("Bad1\x00", "x") // null in key
+ w.Header().Add("Bad2", "x\x00y") // null in value
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ if !hf.StreamEnded() {
+ t.Error("response HEADERS lacked END_STREAM")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("response HEADERS didn't have END_HEADERS")
+ }
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "200"},
+ {"ok1", "x"},
+ {"content-type", "text/plain; charset=utf-8"},
+ {"content-length", "0"},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
+ }
+ })
+}
+
+func BenchmarkServerGets(b *testing.B) {
+ defer disableGoroutineTracking()()
+ b.ReportAllocs()
+
+ const msg = "Hello, world"
+ st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, msg)
+ })
+ defer st.Close()
+ st.greet()
+
+	// Give the server quota to reply. (plus it has the 64KB)
+ if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+ b.Fatal(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ id := 1 + uint32(i)*2
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: id,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ st.wantHeaders()
+ df := st.wantData()
+ if !df.StreamEnded() {
+ b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+ }
+ }
+}
+
+func BenchmarkServerPosts(b *testing.B) {
+ defer disableGoroutineTracking()()
+ b.ReportAllocs()
+
+ const msg = "Hello, world"
+ st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+		// Consume the (empty) body from the peer before replying, otherwise
+		// the server will sometimes (depending on scheduling) send the peer
+		// a RST_STREAM with the CANCEL error code.
+ if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {
+ b.Errorf("Copy error; got %v, %v; want 0, nil", n, err)
+ }
+ io.WriteString(w, msg)
+ })
+ defer st.Close()
+ st.greet()
+
+	// Give the server quota to reply. (plus it has the 64KB)
+ if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+ b.Fatal(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ id := 1 + uint32(i)*2
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: id,
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: false,
+ EndHeaders: true,
+ })
+ st.writeData(id, true, nil)
+ st.wantHeaders()
+ df := st.wantData()
+ if !df.StreamEnded() {
+ b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+ }
+ }
+}
+
+// Send a stream of messages from server to client in separate data frames.
+// Brings up performance issues seen in long streams.
+// Created to show problem in go issue #18502
+func BenchmarkServerToClientStreamDefaultOptions(b *testing.B) {
+ benchmarkServerToClientStream(b)
+}
+
+// Justification for Change-Id: Iad93420ef6c3918f54249d867098f1dadfa324d8
+// Expect to see memory/alloc reduction by opting in to Frame reuse with the Framer.
+func BenchmarkServerToClientStreamReuseFrames(b *testing.B) {
+ benchmarkServerToClientStream(b, optFramerReuseFrames)
+}
+
+func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) {
+ defer disableGoroutineTracking()()
+ b.ReportAllocs()
+ const msgLen = 1
+ // default window size
+ const windowSize = 1<<16 - 1
+
+ // next message to send from the server and for the client to expect
+ nextMsg := func(i int) []byte {
+ msg := make([]byte, msgLen)
+ msg[0] = byte(i)
+ if len(msg) != msgLen {
+ panic("invalid test setup msg length")
+ }
+ return msg
+ }
+
+ st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+		// Consume the (empty) body from the peer before replying, otherwise
+		// the server will sometimes (depending on scheduling) send the peer
+		// a RST_STREAM with the CANCEL error code.
+ if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {
+ b.Errorf("Copy error; got %v, %v; want 0, nil", n, err)
+ }
+ for i := 0; i < b.N; i += 1 {
+ w.Write(nextMsg(i))
+ w.(http.Flusher).Flush()
+ }
+ }, newServerOpts...)
+ defer st.Close()
+ st.greet()
+
+ const id = uint32(1)
+
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: id,
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: false,
+ EndHeaders: true,
+ })
+
+ st.writeData(id, true, nil)
+ st.wantHeaders()
+
+ var pendingWindowUpdate = uint32(0)
+
+ for i := 0; i < b.N; i += 1 {
+ expected := nextMsg(i)
+ df := st.wantData()
+ if bytes.Compare(expected, df.data) != 0 {
+ b.Fatalf("Bad message received; want %v; got %v", expected, df.data)
+ }
+ // try to send infrequent but large window updates so they don't overwhelm the test
+ pendingWindowUpdate += uint32(len(df.data))
+ if pendingWindowUpdate >= windowSize/2 {
+ if err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil {
+ b.Fatal(err)
+ }
+ if err := st.fr.WriteWindowUpdate(id, pendingWindowUpdate); err != nil {
+ b.Fatal(err)
+ }
+ pendingWindowUpdate = 0
+ }
+ }
+ df := st.wantData()
+ if !df.StreamEnded() {
+ b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+ }
+}
+
+// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53
+// Verify we don't hang.
+func TestIssue53(t *testing.T) {
+ const data = "PRI * HTTP/2.0\r\n\r\nSM" +
+ "\r\n\r\n\x00\x00\x00\x01\ainfinfin\ad"
+ s := &http.Server{
+ ErrorLog: log.New(io.MultiWriter(stderrv(), twriter{t: t}), "", log.LstdFlags),
+ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ w.Write([]byte("hello"))
+ }),
+ }
+ s2 := &Server{
+ MaxReadFrameSize: 1 << 16,
+ PermitProhibitedCipherSuites: true,
+ }
+ c := &issue53Conn{[]byte(data), false, false}
+ s2.ServeConn(c, &ServeConnOpts{BaseConfig: s})
+ if !c.closed {
+ t.Fatal("connection is not closed")
+ }
+}
+
+type issue53Conn struct {
+ data []byte
+ closed bool
+ written bool
+}
+
+func (c *issue53Conn) Read(b []byte) (n int, err error) {
+ if len(c.data) == 0 {
+ return 0, io.EOF
+ }
+ n = copy(b, c.data)
+ c.data = c.data[n:]
+ return
+}
+
+func (c *issue53Conn) Write(b []byte) (n int, err error) {
+ c.written = true
+ return len(b), nil
+}
+
+func (c *issue53Conn) Close() error {
+ c.closed = true
+ return nil
+}
+
+func (c *issue53Conn) LocalAddr() net.Addr {
+ return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706}
+}
+func (c *issue53Conn) RemoteAddr() net.Addr {
+ return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706}
+}
+func (c *issue53Conn) SetDeadline(t time.Time) error { return nil }
+func (c *issue53Conn) SetReadDeadline(t time.Time) error { return nil }
+func (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil }
+
+// golang.org/issue/12895
+func TestConfigureServer(t *testing.T) {
+ tests := []struct {
+ name string
+ tlsConfig *tls.Config
+ wantErr string
+ }{
+ {
+ name: "empty server",
+ },
+ {
+ name: "just the required cipher suite",
+ tlsConfig: &tls.Config{
+ CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
+ },
+ },
+ {
+ name: "just the alternative required cipher suite",
+ tlsConfig: &tls.Config{
+ CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256},
+ },
+ },
+ {
+ name: "missing required cipher suite",
+ tlsConfig: &tls.Config{
+ CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384},
+ },
+ wantErr: "is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.",
+ },
+ {
+ name: "required after bad",
+ tlsConfig: &tls.Config{
+ CipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
+ },
+ wantErr: "contains an HTTP/2-approved cipher suite (0xc02f), but it comes after",
+ },
+ {
+ name: "bad after required",
+ tlsConfig: &tls.Config{
+ CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA},
+ },
+ },
+ }
+ for _, tt := range tests {
+ srv := &http.Server{TLSConfig: tt.tlsConfig}
+ err := ConfigureServer(srv, nil)
+ if (err != nil) != (tt.wantErr != "") {
+ if tt.wantErr != "" {
+ t.Errorf("%s: success, but want error", tt.name)
+ } else {
+ t.Errorf("%s: unexpected error: %v", tt.name, err)
+ }
+ }
+ if err != nil && tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) {
+ t.Errorf("%s: err = %v; want substring %q", tt.name, err, tt.wantErr)
+ }
+ if err == nil && !srv.TLSConfig.PreferServerCipherSuites {
+ t.Errorf("%s: PreferServerCipherSuite is false; want true", tt.name)
+ }
+ }
+}
+
+func TestServerRejectHeadWithBody(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ // No response body.
+ })
+ defer st.Close()
+ st.greet()
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(":method", "HEAD"),
+ EndStream: false, // what we're testing, a bogus HEAD request with body
+ EndHeaders: true,
+ })
+ st.wantRSTStream(1, ErrCodeProtocol)
+}
+
+func TestServerNoAutoContentLengthOnHead(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ // No response body. (or smaller than one frame)
+ })
+ defer st.Close()
+ st.greet()
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(":method", "HEAD"),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ h := st.wantHeaders()
+ headers := st.decodeHeader(h.HeaderBlockFragment())
+ want := [][2]string{
+ {":status", "200"},
+ {"content-type", "text/plain; charset=utf-8"},
+ }
+ if !reflect.DeepEqual(headers, want) {
+ t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
+ }
+}
+
+// golang.org/issue/13495
+func TestServerNoDuplicateContentType(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ w.Header()["Content-Type"] = []string{""}
+ fmt.Fprintf(w, "<html><head></head><body>hi</body></html>")
+ })
+ defer st.Close()
+ st.greet()
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ h := st.wantHeaders()
+ headers := st.decodeHeader(h.HeaderBlockFragment())
+ want := [][2]string{
+ {":status", "200"},
+ {"content-type", ""},
+ {"content-length", "41"},
+ }
+ if !reflect.DeepEqual(headers, want) {
+ t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
+ }
+}
+
+func disableGoroutineTracking() (restore func()) {
+ old := DebugGoroutines
+ DebugGoroutines = false
+ return func() { DebugGoroutines = old }
+}
+
+func BenchmarkServer_GetRequest(b *testing.B) {
+ defer disableGoroutineTracking()()
+ b.ReportAllocs()
+ const msg = "Hello, world."
+ st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+ n, err := io.Copy(ioutil.Discard, r.Body)
+ if err != nil || n > 0 {
+ b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err)
+ }
+ io.WriteString(w, msg)
+ })
+ defer st.Close()
+
+ st.greet()
+	// Give the server quota to reply. (plus it has the 64KB)
+ if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+ b.Fatal(err)
+ }
+ hbf := st.encodeHeader(":method", "GET")
+ for i := 0; i < b.N; i++ {
+ streamID := uint32(1 + 2*i)
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: hbf,
+ EndStream: true,
+ EndHeaders: true,
+ })
+ st.wantHeaders()
+ st.wantData()
+ }
+}
+
+func BenchmarkServer_PostRequest(b *testing.B) {
+ defer disableGoroutineTracking()()
+ b.ReportAllocs()
+ const msg = "Hello, world."
+ st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+ n, err := io.Copy(ioutil.Discard, r.Body)
+ if err != nil || n > 0 {
+ b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err)
+ }
+ io.WriteString(w, msg)
+ })
+ defer st.Close()
+ st.greet()
+	// Give the server quota to reply. (plus it has the 64KB)
+ if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+ b.Fatal(err)
+ }
+ hbf := st.encodeHeader(":method", "POST")
+ for i := 0; i < b.N; i++ {
+ streamID := uint32(1 + 2*i)
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: hbf,
+ EndStream: false,
+ EndHeaders: true,
+ })
+ st.writeData(streamID, true, nil)
+ st.wantHeaders()
+ st.wantData()
+ }
+}
+
+type connStateConn struct {
+ net.Conn
+ cs tls.ConnectionState
+}
+
+func (c connStateConn) ConnectionState() tls.ConnectionState { return c.cs }
+
+// golang.org/issue/12737 -- handle any net.Conn, not just
+// *tls.Conn.
+func TestServerHandleCustomConn(t *testing.T) {
+ var s Server
+ c1, c2 := net.Pipe()
+ clientDone := make(chan struct{})
+ handlerDone := make(chan struct{})
+ var req *http.Request
+ go func() {
+ defer close(clientDone)
+ defer c2.Close()
+ fr := NewFramer(c2, c2)
+ io.WriteString(c2, ClientPreface)
+ fr.WriteSettings()
+ fr.WriteSettingsAck()
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if sf, ok := f.(*SettingsFrame); !ok || sf.IsAck() {
+ t.Errorf("Got %v; want non-ACK SettingsFrame", summarizeFrame(f))
+ return
+ }
+ f, err = fr.ReadFrame()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if sf, ok := f.(*SettingsFrame); !ok || !sf.IsAck() {
+ t.Errorf("Got %v; want ACK SettingsFrame", summarizeFrame(f))
+ return
+ }
+ var henc hpackEncoder
+ fr.WriteHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: henc.encodeHeaderRaw(t, ":method", "GET", ":path", "/", ":scheme", "https", ":authority", "foo.com"),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ go io.Copy(ioutil.Discard, c2)
+ <-handlerDone
+ }()
+ const testString = "my custom ConnectionState"
+ fakeConnState := tls.ConnectionState{
+ ServerName: testString,
+ Version: tls.VersionTLS12,
+ CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ }
+ go s.ServeConn(connStateConn{c1, fakeConnState}, &ServeConnOpts{
+ BaseConfig: &http.Server{
+ Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer close(handlerDone)
+ req = r
+ }),
+ }})
+ select {
+ case <-clientDone:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout waiting for handler")
+ }
+ if req.TLS == nil {
+ t.Fatalf("Request.TLS is nil. Got: %#v", req)
+ }
+ if req.TLS.ServerName != testString {
+ t.Fatalf("Request.TLS = %+v; want ServerName of %q", req.TLS, testString)
+ }
+}
+
+// golang.org/issue/14214
+func TestServer_Rejects_ConnHeaders(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ t.Error("should not get to Handler")
+ })
+ defer st.Close()
+ st.greet()
+ st.bodylessReq1("connection", "foo")
+ hf := st.wantHeaders()
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "400"},
+ {"content-type", "text/plain; charset=utf-8"},
+ {"x-content-type-options", "nosniff"},
+ {"content-length", "51"},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Got headers %v; want %v", goth, wanth)
+ }
+}
+
+type hpackEncoder struct {
+ enc *hpack.Encoder
+ buf bytes.Buffer
+}
+
+func (he *hpackEncoder) encodeHeaderRaw(t *testing.T, headers ...string) []byte {
+ if len(headers)%2 == 1 {
+ panic("odd number of kv args")
+ }
+ he.buf.Reset()
+ if he.enc == nil {
+ he.enc = hpack.NewEncoder(&he.buf)
+ }
+ for len(headers) > 0 {
+ k, v := headers[0], headers[1]
+ err := he.enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+ if err != nil {
+ t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
+ }
+ headers = headers[2:]
+ }
+ return he.buf.Bytes()
+}
+
+func TestCheckValidHTTP2Request(t *testing.T) {
+ tests := []struct {
+ h http.Header
+ want error
+ }{
+ {
+ h: http.Header{"Te": {"trailers"}},
+ want: nil,
+ },
+ {
+ h: http.Header{"Te": {"trailers", "bogus"}},
+ want: errors.New(`request header "TE" may only be "trailers" in HTTP/2`),
+ },
+ {
+ h: http.Header{"Foo": {""}},
+ want: nil,
+ },
+ {
+ h: http.Header{"Connection": {""}},
+ want: errors.New(`request header "Connection" is not valid in HTTP/2`),
+ },
+ {
+ h: http.Header{"Proxy-Connection": {""}},
+ want: errors.New(`request header "Proxy-Connection" is not valid in HTTP/2`),
+ },
+ {
+ h: http.Header{"Keep-Alive": {""}},
+ want: errors.New(`request header "Keep-Alive" is not valid in HTTP/2`),
+ },
+ {
+ h: http.Header{"Upgrade": {""}},
+ want: errors.New(`request header "Upgrade" is not valid in HTTP/2`),
+ },
+ }
+ for i, tt := range tests {
+ got := checkValidHTTP2RequestHeaders(tt.h)
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("%d. checkValidHTTP2Request = %v; want %v", i, got, tt.want)
+ }
+ }
+}
+
+// golang.org/issue/14030
+func TestExpect100ContinueAfterHandlerWrites(t *testing.T) {
+ const msg = "Hello"
+ const msg2 = "World"
+
+ doRead := make(chan bool, 1)
+ defer close(doRead) // fallback cleanup
+
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, msg)
+ w.(http.Flusher).Flush()
+
+ // Do a read, which might force a 100-continue status to be sent.
+ <-doRead
+ r.Body.Read(make([]byte, 10))
+
+ io.WriteString(w, msg2)
+
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20))
+ req.Header.Set("Expect", "100-continue")
+
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+
+ buf := make([]byte, len(msg))
+ if _, err := io.ReadFull(res.Body, buf); err != nil {
+ t.Fatal(err)
+ }
+ if string(buf) != msg {
+ t.Fatalf("msg = %q; want %q", buf, msg)
+ }
+
+ doRead <- true
+
+ if _, err := io.ReadFull(res.Body, buf); err != nil {
+ t.Fatal(err)
+ }
+ if string(buf) != msg2 {
+ t.Fatalf("second msg = %q; want %q", buf, msg2)
+ }
+}
+
+type funcReader func([]byte) (n int, err error)
+
+func (f funcReader) Read(p []byte) (n int, err error) { return f(p) }
+
+// golang.org/issue/16481 -- return flow control when streams close with unread data.
+// (The Server version of the bug. See also TestUnreadFlowControlReturned_Transport)
+func TestUnreadFlowControlReturned_Server(t *testing.T) {
+ unblock := make(chan bool, 1)
+ defer close(unblock)
+
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ // Don't read the 16KB request body. Wait until the client's
+ // done sending it and then return. This should cause the Server
+ // to then return those 16KB of flow control to the client.
+ <-unblock
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ // This previously hung on the 4th iteration.
+ for i := 0; i < 6; i++ {
+ body := io.MultiReader(
+ io.LimitReader(neverEnding('A'), 16<<10),
+ funcReader(func([]byte) (n int, err error) {
+ unblock <- true
+ return 0, io.EOF
+ }),
+ )
+ req, _ := http.NewRequest("POST", st.ts.URL, body)
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+ }
+
+}
+
+func TestServerIdleTimeout(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ }, func(h2s *Server) {
+ h2s.IdleTimeout = 500 * time.Millisecond
+ })
+ defer st.Close()
+
+ st.greet()
+ ga := st.wantGoAway()
+ if ga.ErrCode != ErrCodeNo {
+ t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode)
+ }
+}
+
+func TestServerIdleTimeout_AfterRequest(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ const timeout = 250 * time.Millisecond
+
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ time.Sleep(timeout * 2)
+ }, func(h2s *Server) {
+ h2s.IdleTimeout = timeout
+ })
+ defer st.Close()
+
+ st.greet()
+
+ // Send a request which takes twice the timeout. Verifies the
+ // idle timeout doesn't fire while we're in a request:
+ st.bodylessReq1()
+ st.wantHeaders()
+
+ // But the idle timeout should be rearmed after the request
+ // is done:
+ ga := st.wantGoAway()
+ if ga.ErrCode != ErrCodeNo {
+ t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode)
+ }
+}
+
+// grpc-go closes the Request.Body currently with a Read.
+// Verify that it doesn't race.
+// See https://github.com/grpc/grpc-go/pull/938
+func TestRequestBodyReadCloseRace(t *testing.T) {
+ for i := 0; i < 100; i++ {
+ body := &requestBody{
+ pipe: &pipe{
+ b: new(bytes.Buffer),
+ },
+ }
+ body.pipe.CloseWithError(io.EOF)
+
+ done := make(chan bool, 1)
+ buf := make([]byte, 10)
+ go func() {
+ time.Sleep(1 * time.Millisecond)
+ body.Close()
+ done <- true
+ }()
+ body.Read(buf)
+ <-done
+ }
+}
+
+func TestIssue20704Race(t *testing.T) {
+ if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
+ t.Skip("skipping in short mode")
+ }
+ const (
+ itemSize = 1 << 10
+ itemCount = 100
+ )
+
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ for i := 0; i < itemCount; i++ {
+ _, err := w.Write(make([]byte, itemSize))
+ if err != nil {
+ return
+ }
+ }
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+ cl := &http.Client{Transport: tr}
+
+ for i := 0; i < 1000; i++ {
+ resp, err := cl.Get(st.ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Force a RST stream to the server by closing without
+ // reading the body:
+ resp.Body.Close()
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml
new file mode 100644
index 0000000..31a84be
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml
@@ -0,0 +1,5021 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="lib/rfc2629.xslt"?>
+<?rfc toc="yes" ?>
+<?rfc symrefs="yes" ?>
+<?rfc sortrefs="yes" ?>
+<?rfc compact="yes"?>
+<?rfc subcompact="no" ?>
+<?rfc linkmailto="no" ?>
+<?rfc editing="no" ?>
+<?rfc comments="yes" ?>
+<?rfc inline="yes"?>
+<?rfc rfcedstyle="yes"?>
+<?rfc-ext allow-markup-in-artwork="yes" ?>
+<?rfc-ext include-index="no" ?>
+
+<rfc ipr="trust200902"
+ category="std"
+ docName="draft-ietf-httpbis-http2-latest"
+ x:maturity-level="proposed"
+ xmlns:x="http://purl.org/net/xml2rfc/ext">
+ <x:feedback template="mailto:ietf-http-wg@w3.org?subject={docname},%20%22{section}%22&amp;body=&lt;{ref}&gt;:"/>
+ <front>
+ <title abbrev="HTTP/2">Hypertext Transfer Protocol version 2</title>
+
+ <author initials="M." surname="Belshe" fullname="Mike Belshe">
+ <organization>Twist</organization>
+ <address>
+ <email>mbelshe@chromium.org</email>
+ </address>
+ </author>
+
+ <author initials="R." surname="Peon" fullname="Roberto Peon">
+ <organization>Google, Inc</organization>
+ <address>
+ <email>fenix@google.com</email>
+ </address>
+ </author>
+
+ <author initials="M." surname="Thomson" fullname="Martin Thomson" role="editor">
+ <organization>Mozilla</organization>
+ <address>
+ <postal>
+ <street>331 E Evelyn Street</street>
+ <city>Mountain View</city>
+ <region>CA</region>
+ <code>94041</code>
+ <country>US</country>
+ </postal>
+ <email>martin.thomson@gmail.com</email>
+ </address>
+ </author>
+
+ <date year="2014" />
+ <area>Applications</area>
+ <workgroup>HTTPbis</workgroup>
+ <keyword>HTTP</keyword>
+ <keyword>SPDY</keyword>
+ <keyword>Web</keyword>
+
+ <abstract>
+ <t>
+ This specification describes an optimized expression of the semantics of the Hypertext
+ Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a
+ reduced perception of latency by introducing header field compression and allowing multiple
+ concurrent messages on the same connection. It also introduces unsolicited push of
+ representations from servers to clients.
+ </t>
+ <t>
+ This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax.
+ HTTP's existing semantics remain unchanged.
+ </t>
+ </abstract>
+
+ <note title="Editorial Note (To be removed by RFC Editor)">
+ <t>
+ Discussion of this draft takes place on the HTTPBIS working group mailing list
+ (ietf-http-wg@w3.org), which is archived at <eref
+ target="https://lists.w3.org/Archives/Public/ietf-http-wg/"/>.
+ </t>
+ <t>
+ Working Group information can be found at <eref
+ target="https://tools.ietf.org/wg/httpbis/"/>; that specific to HTTP/2 are at <eref
+ target="https://http2.github.io/"/>.
+ </t>
+ <t>
+ The changes in this draft are summarized in <xref
+ target="change.log"/>.
+ </t>
+ </note>
+
+ </front>
+
+ <middle>
+ <section anchor="intro" title="Introduction">
+
+ <t>
+ The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the
+ HTTP/1.1 message format (<xref target="RFC7230" x:fmt="," x:rel="#http.message"/>) has
+ several characteristics that have a negative overall effect on application performance
+ today.
+ </t>
+ <t>
+ In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given
+ TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed
+ request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1
+ clients that need to make many requests typically use multiple connections to a server in
+ order to achieve concurrency and thereby reduce latency.
+ </t>
+ <t>
+ Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary
+ network traffic, as well as causing the initial <xref target="TCP">TCP</xref> congestion
+ window to quickly fill. This can result in excessive latency when multiple requests are
+ made on a new TCP connection.
+ </t>
+ <t>
+ HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an
+ underlying connection. Specifically, it allows interleaving of request and response
+ messages on the same connection and uses an efficient coding for HTTP header fields. It
+ also allows prioritization of requests, letting more important requests complete more
+ quickly, further improving performance.
+ </t>
+ <t>
+ The resulting protocol is more friendly to the network, because fewer TCP connections can
+ be used in comparison to HTTP/1.x. This means less competition with other flows, and
+ longer-lived connections, which in turn leads to better utilization of available network
+ capacity.
+ </t>
+ <t>
+ Finally, HTTP/2 also enables more efficient processing of messages through use of binary
+ message framing.
+ </t>
+ </section>
+
+ <section anchor="Overview" title="HTTP/2 Protocol Overview">
+ <t>
+ HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core
+ features of HTTP/1.1, but aims to be more efficient in several ways.
+ </t>
+ <t>
+ The basic protocol unit in HTTP/2 is a <xref target="FrameHeader">frame</xref>. Each frame
+ type serves a different purpose. For example, <x:ref>HEADERS</x:ref> and
+ <x:ref>DATA</x:ref> frames form the basis of <xref target="HttpSequence">HTTP requests and
+ responses</xref>; other frame types like <x:ref>SETTINGS</x:ref>,
+ <x:ref>WINDOW_UPDATE</x:ref>, and <x:ref>PUSH_PROMISE</x:ref> are used in support of other
+ HTTP/2 features.
+ </t>
+ <t>
+ Multiplexing of requests is achieved by having each HTTP request-response exchange
+ associated with its own <xref target="StreamsLayer">stream</xref>. Streams are largely
+ independent of each other, so a blocked or stalled request or response does not prevent
+ progress on other streams.
+ </t>
+ <t>
+ Flow control and prioritization ensure that it is possible to efficiently use multiplexed
+ streams. <xref target="FlowControl">Flow control</xref> helps to ensure that only data that
+ can be used by a receiver is transmitted. <xref
+ target="StreamPriority">Prioritization</xref> ensures that limited resources can be directed
+ to the most important streams first.
+ </t>
+ <t>
+ HTTP/2 adds a new interaction mode, whereby a server can <xref target="PushResources">push
+ responses to a client</xref>. Server push allows a server to speculatively send a client
+ data that the server anticipates the client will need, trading off some network usage
+ against a potential latency gain. The server does this by synthesizing a request, which it
+ sends as a <x:ref>PUSH_PROMISE</x:ref> frame. The server is then able to send a response to
+ the synthetic request on a separate stream.
+ </t>
+ <t>
+ Frames that contain HTTP header fields are <xref target="HeaderBlock">compressed</xref>.
+ HTTP requests can be highly redundant, so compression can reduce the size of requests and
+ responses significantly.
+ </t>
+
+ <section title="Document Organization">
+ <t>
+ The HTTP/2 specification is split into four parts:
+ <list style="symbols">
+ <t>
+ <xref target="starting">Starting HTTP/2</xref> covers how an HTTP/2 connection is
+ initiated.
+ </t>
+ <t>
+ The <xref target="FramingLayer">framing</xref> and <xref
+ target="StreamsLayer">streams</xref> layers describe the way HTTP/2 frames are
+ structured and formed into multiplexed streams.
+ </t>
+ <t>
+ <xref target="FrameTypes">Frame</xref> and <xref target="ErrorCodes">error</xref>
+ definitions include details of the frame and error types used in HTTP/2.
+ </t>
+ <t>
+ <xref target="HTTPLayer">HTTP mappings</xref> and <xref target="HttpExtra">additional
+ requirements</xref> describe how HTTP semantics are expressed using frames and
+ streams.
+ </t>
+ </list>
+ </t>
+ <t>
+ While some of the frame and stream layer concepts are isolated from HTTP, this
+ specification does not define a completely generic framing layer. The framing and streams
+ layers are tailored to the needs of the HTTP protocol and server push.
+ </t>
+ </section>
+
+ <section title="Conventions and Terminology">
+ <t>
+ The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD
+ NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as
+ described in <xref target="RFC2119">RFC 2119</xref>.
+ </t>
+ <t>
+ All numeric values are in network byte order. Values are unsigned unless otherwise
+ indicated. Literal values are provided in decimal or hexadecimal as appropriate.
+ Hexadecimal literals are prefixed with <spanx style="verb">0x</spanx> to distinguish them
+ from decimal literals.
+ </t>
+ <t>
+ The following terms are used:
+ <list style="hanging">
+ <t hangText="client:">
+ The endpoint initiating the HTTP/2 connection.
+ </t>
+ <t hangText="connection:">
+ A transport-layer connection between two endpoints.
+ </t>
+ <t hangText="connection error:">
+ An error that affects the entire HTTP/2 connection.
+ </t>
+ <t hangText="endpoint:">
+ Either the client or server of the connection.
+ </t>
+ <t hangText="frame:">
+ The smallest unit of communication within an HTTP/2 connection, consisting of a header
+ and a variable-length sequence of octets structured according to the frame type.
+ </t>
+ <t hangText="peer:">
+ An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint
+ that is remote to the primary subject of discussion.
+ </t>
+ <t hangText="receiver:">
+ An endpoint that is receiving frames.
+ </t>
+ <t hangText="sender:">
+ An endpoint that is transmitting frames.
+ </t>
+ <t hangText="server:">
+ The endpoint which did not initiate the HTTP/2 connection.
+ </t>
+ <t hangText="stream:">
+ A bi-directional flow of frames across a virtual channel within the HTTP/2 connection.
+ </t>
+ <t hangText="stream error:">
+ An error on the individual HTTP/2 stream.
+ </t>
+ </list>
+ </t>
+ <t>
+ Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined
+ in <xref target="RFC7230" x:fmt="of" x:rel="#intermediaries"/>.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="starting" title="Starting HTTP/2">
+ <t>
+ An HTTP/2 connection is an application layer protocol running on top of a TCP connection
+ (<xref target="TCP"/>). The client is the TCP connection initiator.
+ </t>
+ <t>
+ HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same
+ default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result,
+ implementations processing requests for target resource URIs like <spanx
+ style="verb">http://example.org/foo</spanx> or <spanx
+ style="verb">https://example.com/bar</spanx> are required to first discover whether the
+ upstream server (the immediate peer to which the client wishes to establish a connection)
+ supports HTTP/2.
+ </t>
+
+ <t>
+ The means by which support for HTTP/2 is determined is different for "http" and "https"
+ URIs. Discovery for "http" URIs is described in <xref target="discover-http"/>. Discovery
+ for "https" URIs is described in <xref target="discover-https"/>.
+ </t>
+
+ <section anchor="versioning" title="HTTP/2 Version Identification">
+ <t>
+ The protocol defined in this document has two identifiers.
+ <list style="symbols">
+ <x:lt>
+ <t>
+ The string "h2" identifies the protocol where HTTP/2 uses <xref
+ target="TLS12">TLS</xref>. This identifier is used in the <xref
+ target="TLS-ALPN">TLS application layer protocol negotiation extension (ALPN)</xref>
+ field and any place that HTTP/2 over TLS is identified.
+ </t>
+ <t>
+ The "h2" string is serialized into an ALPN protocol identifier as the two octet
+ sequence: 0x68, 0x32.
+ </t>
+ </x:lt>
+ <x:lt>
+ <t>
+ The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP.
+ This identifier is used in the HTTP/1.1 Upgrade header field and any place that
+ HTTP/2 over TCP is identified.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+ <t>
+ Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message
+ semantics described in this document.
+ </t>
+ <t>
+ <cref>RFC Editor's Note: please remove the remainder of this section prior to the
+ publication of a final version of this document.</cref>
+ </t>
+ <t>
+ Only implementations of the final, published RFC can identify themselves as "h2" or "h2c".
+ Until such an RFC exists, implementations MUST NOT identify themselves using these
+ strings.
+ </t>
+ <t>
+ Examples and text throughout the rest of this document use "h2" as a matter of
+ editorial convenience only. Implementations of draft versions MUST NOT identify using
+ this string.
+ </t>
+ <t>
+ Implementations of draft versions of the protocol MUST add the string "-" and the
+ corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11
+ over TLS is identified using the string "h2-11".
+ </t>
+ <t>
+ Non-compatible experiments that are based on these draft versions MUST append the string
+ "-" and an experiment name to the identifier. For example, an experimental implementation
+ of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself
+ as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in
+ <xref target="RFC7230" x:fmt="of" x:rel="#field.components"/>. Experimenters are
+ encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list.
+ </t>
+ </section>
+
+ <section anchor="discover-http" title="Starting HTTP/2 for &quot;http&quot; URIs">
+ <t>
+ A client that makes a request for an "http" URI without prior knowledge about support for
+ HTTP/2 uses the HTTP Upgrade mechanism (<xref target="RFC7230" x:fmt="of"
+ x:rel="#header.upgrade"/>). The client makes an HTTP/1.1 request that includes an Upgrade
+ header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include
+ exactly one <xref target="Http2SettingsHeader">HTTP2-Settings</xref> header field.
+ </t>
+ <figure>
+ <preamble>For example:</preamble>
+ <artwork type="message/http; msgtype=&#34;request&#34;" x:indent-with=" "><![CDATA[
+GET / HTTP/1.1
+Host: server.example.com
+Connection: Upgrade, HTTP2-Settings
+Upgrade: h2c
+HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload>
+
+]]></artwork>
+ </figure>
+ <t>
+ Requests that contain an entity body MUST be sent in their entirety before the client can
+ send HTTP/2 frames. This means that a large request entity can block the use of the
+ connection until it is completely sent.
+ </t>
+ <t>
+ If concurrency of an initial request with subsequent requests is important, an OPTIONS
+ request can be used to perform the upgrade to HTTP/2, at the cost of an additional
+ round-trip.
+ </t>
+ <t>
+ A server that does not support HTTP/2 can respond to the request as though the Upgrade
+ header field were absent:
+ </t>
+ <figure>
+ <artwork type="message/http; msgtype=&#34;response&#34;" x:indent-with=" ">
+HTTP/1.1 200 OK
+Content-Length: 243
+Content-Type: text/html
+
+...
+</artwork>
+ </figure>
+ <t>
+ A server MUST ignore a "h2" token in an Upgrade header field. Presence of a token with
+ "h2" implies HTTP/2 over TLS, which is instead negotiated as described in <xref
+ target="discover-https"/>.
+ </t>
+ <t>
+ A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols)
+ response. After the empty line that terminates the 101 response, the server can begin
+ sending HTTP/2 frames. These frames MUST include a response to the request that initiated
+ the Upgrade.
+ </t>
+
+ <figure>
+ <preamble>
+ For example:
+ </preamble>
+ <artwork type="message/http; msgtype=&#34;response&#34;" x:indent-with=" ">
+HTTP/1.1 101 Switching Protocols
+Connection: Upgrade
+Upgrade: h2c
+
+[ HTTP/2 connection ...
+</artwork>
+ </figure>
+ <t>
+ The first HTTP/2 frame sent by the server is a <x:ref>SETTINGS</x:ref> frame (<xref
+ target="SETTINGS"/>) as the server connection preface (<xref
+ target="ConnectionHeader"/>). Upon receiving the 101 response, the client sends a <xref
+ target="ConnectionHeader">connection preface</xref>, which includes a
+ <x:ref>SETTINGS</x:ref> frame.
+ </t>
+ <t>
+ The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is
+ assigned <xref target="pri-default">default priority values</xref>. Stream 1 is
+ implicitly half closed from the client toward the server, since the request is completed
+ as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the
+ response.
+ </t>
+
+ <section anchor="Http2SettingsHeader" title="HTTP2-Settings Header Field">
+ <t>
+ A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one <spanx
+ style="verb">HTTP2-Settings</spanx> header field. The <spanx
+ style="verb">HTTP2-Settings</spanx> header field is a connection-specific header field
+ that includes parameters that govern the HTTP/2 connection, provided in anticipation of
+ the server accepting the request to upgrade.
+ </t>
+ <figure>
+ <artwork type="abnf" x:indent-with=" "><![CDATA[
+HTTP2-Settings = token68
+]]></artwork>
+ </figure>
+ <t>
+ A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present,
+ or if more than one is present. A server MUST NOT send this header field.
+ </t>
+
+ <t>
+ The content of the <spanx style="verb">HTTP2-Settings</spanx> header field is the
+ payload of a <x:ref>SETTINGS</x:ref> frame (<xref target="SETTINGS"/>), encoded as a
+ base64url string (that is, the URL- and filename-safe Base64 encoding described in <xref
+ target="RFC4648" x:fmt="of" x:sec="5"/>, with any trailing '=' characters omitted). The
+ <xref target="RFC5234">ABNF</xref> production for <spanx style="verb">token68</spanx> is
+ defined in <xref target="RFC7235" x:fmt="of" x:rel="#challenge.and.response"/>.
+ </t>
+ <t>
+ Since the upgrade is only intended to apply to the immediate connection, a client
+ sending <spanx style="verb">HTTP2-Settings</spanx> MUST also send <spanx
+ style="verb">HTTP2-Settings</spanx> as a connection option in the <spanx
+ style="verb">Connection</spanx> header field to prevent it from being forwarded
+ downstream.
+ </t>
+ <t>
+ A server decodes and interprets these values as it would any other
+ <x:ref>SETTINGS</x:ref> frame. <xref target="SettingsSync">Acknowledgement of the
+ SETTINGS parameters</xref> is not necessary, since a 101 response serves as implicit
+ acknowledgment. Providing these values in the Upgrade request gives a client an
+ opportunity to provide parameters prior to receiving any frames from the server.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="discover-https" title="Starting HTTP/2 for &quot;https&quot; URIs">
+ <t>
+ A client that makes a request to an "https" URI uses <xref target="TLS12">TLS</xref>
+ with the <xref target="TLS-ALPN">application layer protocol negotiation extension</xref>.
+ </t>
+ <t>
+ HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a
+ client or selected by a server.
+ </t>
+ <t>
+ Once TLS negotiation is complete, both the client and the server send a <xref
+ target="ConnectionHeader">connection preface</xref>.
+ </t>
+ </section>
+
+ <section anchor="known-http" title="Starting HTTP/2 with Prior Knowledge">
+ <t>
+ A client can learn that a particular server supports HTTP/2 by other means. For example,
+ <xref target="ALT-SVC"/> describes a mechanism for advertising this capability.
+ </t>
+ <t>
+ A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2,
+ after the <xref target="ConnectionHeader">connection preface</xref>; a server can
+ identify such a connection by the presence of the connection preface. This only affects
+ the establishment of HTTP/2 connections over cleartext TCP; implementations that support
+ HTTP/2 over TLS MUST use <xref target="TLS-ALPN">protocol negotiation in TLS</xref>.
+ </t>
+ <t>
+ Without additional information, prior support for HTTP/2 is not a strong signal that a
+ given server will support HTTP/2 for future connections. For example, it is possible for
+ server configurations to change, for configurations to differ between instances in
+ clustered servers, or for network conditions to change.
+ </t>
+ </section>
+
+ <section anchor="ConnectionHeader" title="HTTP/2 Connection Preface">
+ <t>
+ Upon establishment of a TCP connection and determination that HTTP/2 will be used by both
+ peers, each endpoint MUST send a connection preface as a final confirmation and to
+ establish the initial SETTINGS parameters for the HTTP/2 connection. The client and
+ server each send a different connection preface.
+ </t>
+ <t>
+ The client connection preface starts with a sequence of 24 octets, which in hex notation
+ are:
+ </t>
+ <figure>
+ <artwork type="inline" x:indent-with=" "><![CDATA[
+0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
+]]></artwork>
+ </figure>
+ <t>
+ (the string <spanx style="verb">PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n</spanx>). This sequence
+ is followed by a <x:ref>SETTINGS</x:ref> frame (<xref target="SETTINGS"/>). The
+ <x:ref>SETTINGS</x:ref> frame MAY be empty. The client sends the client connection
+ preface immediately upon receipt of a 101 Switching Protocols response (indicating a
+ successful upgrade), or as the first application data octets of a TLS connection. If
+ starting an HTTP/2 connection with prior knowledge of server support for the protocol, the
+ client connection preface is sent upon connection establishment.
+ </t>
+ <t>
+ <list>
+ <t>
+ The client connection preface is selected so that a large proportion of HTTP/1.1 or
+ HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note
+ that this does not address the concerns raised in <xref target="TALKING"/>.
+ </t>
+ </list>
+ </t>
+ <t>
+ The server connection preface consists of a potentially empty <x:ref>SETTINGS</x:ref>
+ frame (<xref target="SETTINGS"/>) that MUST be the first frame the server sends in the
+ HTTP/2 connection.
+ </t>
+ <t>
+ The <x:ref>SETTINGS</x:ref> frames received from a peer as part of the connection preface
+ MUST be acknowledged (see <xref target="SettingsSync"/>) after sending the connection
+ preface.
+ </t>
+ <t>
+ To avoid unnecessary latency, clients are permitted to send additional frames to the
+ server immediately after sending the client connection preface, without waiting to receive
+ the server connection preface. It is important to note, however, that the server
+ connection preface <x:ref>SETTINGS</x:ref> frame might include parameters that necessarily
+ alter how a client is expected to communicate with the server. Upon receiving the
+ <x:ref>SETTINGS</x:ref> frame, the client is expected to honor any parameters established.
+ In some configurations, it is possible for the server to transmit <x:ref>SETTINGS</x:ref>
+ before the client sends additional frames, providing an opportunity to avoid this issue.
+ </t>
+ <t>
+ Clients and servers MUST treat an invalid connection preface as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>. A <x:ref>GOAWAY</x:ref> frame (<xref target="GOAWAY"/>)
+ MAY be omitted in this case, since an invalid preface indicates that the peer is not using
+ HTTP/2.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="FramingLayer" title="HTTP Frames">
+ <t>
+ Once the HTTP/2 connection is established, endpoints can begin exchanging frames.
+ </t>
+
+ <section anchor="FrameHeader" title="Frame Format">
+ <t>
+ All frames begin with a fixed 9-octet header followed by a variable-length payload.
+ </t>
+ <figure title="Frame Layout">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Length (24) |
+ +---------------+---------------+---------------+
+ | Type (8) | Flags (8) |
+ +-+-+-----------+---------------+-------------------------------+
+ |R| Stream Identifier (31) |
+ +=+=============================================================+
+ | Frame Payload (0...) ...
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The fields of the frame header are defined as:
+ <list style="hanging">
+ <x:lt hangText="Length:">
+ <t>
+ The length of the frame payload expressed as an unsigned 24-bit integer. Values
+ greater than 2<x:sup>14</x:sup> (16,384) MUST NOT be sent unless the receiver has
+ set a larger value for <x:ref>SETTINGS_MAX_FRAME_SIZE</x:ref>.
+ </t>
+ <t>
+ The 9 octets of the frame header are not included in this value.
+ </t>
+ </x:lt>
+ <x:lt hangText="Type:">
+ <t>
+ The 8-bit type of the frame. The frame type determines the format and semantics of
+ the frame. Implementations MUST ignore and discard any frame that has a type that
+ is unknown.
+ </t>
+ </x:lt>
+ <x:lt hangText="Flags:">
+ <t>
+ An 8-bit field reserved for frame-type specific boolean flags.
+ </t>
+ <t>
+ Flags are assigned semantics specific to the indicated frame type. Flags that have
+ no defined semantics for a particular frame type MUST be ignored, and MUST be left
+ unset (0) when sending.
+ </t>
+ </x:lt>
+ <x:lt hangText="R:">
+ <t>
+ A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST
+ remain unset (0) when sending and MUST be ignored when receiving.
+ </t>
+ </x:lt>
+ <x:lt hangText="Stream Identifier:">
+ <t>
+ A 31-bit stream identifier (see <xref target="StreamIdentifiers"/>). The value 0 is
+ reserved for frames that are associated with the connection as a whole as opposed to
+ an individual stream.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+ <t>
+ The structure and content of the frame payload is dependent entirely on the frame type.
+ </t>
+ </section>
+
+ <section anchor="FrameSize" title="Frame Size">
+ <t>
+ The size of a frame payload is limited by the maximum size that a receiver advertises in
+ the <x:ref>SETTINGS_MAX_FRAME_SIZE</x:ref> setting. This setting can have any value
+ between 2<x:sup>14</x:sup> (16,384) and 2<x:sup>24</x:sup>-1 (16,777,215) octets,
+ inclusive.
+ </t>
+ <t>
+ All implementations MUST be capable of receiving and minimally processing frames up to
+ 2<x:sup>14</x:sup> octets in length, plus the 9 octet <xref target="FrameHeader">frame
+ header</xref>. The size of the frame header is not included when describing frame sizes.
+ <list style="hanging">
+ <t hangText="Note:">
+ Certain frame types, such as <xref target="PING">PING</xref>, impose additional limits
+ on the amount of payload data allowed.
+ </t>
+ </list>
+ </t>
+ <t>
+ If a frame size exceeds any defined limit, or is too small to contain mandatory frame
+ data, the endpoint MUST send a <x:ref>FRAME_SIZE_ERROR</x:ref> error. A frame size error
+ in a frame that could alter the state of the entire connection MUST be treated as a <xref
+ target="ConnectionErrorHandler">connection error</xref>; this includes any frame carrying
+ a <xref target="HeaderBlock">header block</xref> (that is, <x:ref>HEADERS</x:ref>,
+ <x:ref>PUSH_PROMISE</x:ref>, and <x:ref>CONTINUATION</x:ref>), <x:ref>SETTINGS</x:ref>,
+ and any <x:ref>WINDOW_UPDATE</x:ref> frame with a stream identifier of 0.
+ </t>
+ <t>
+ Endpoints are not obligated to use all available space in a frame. Responsiveness can be
+ improved by using frames that are smaller than the permitted maximum size. Sending large
+ frames can result in delays in sending time-sensitive frames (such
+ <x:ref>RST_STREAM</x:ref>, <x:ref>WINDOW_UPDATE</x:ref>, or <x:ref>PRIORITY</x:ref>)
+ which if blocked by the transmission of a large frame, could affect performance.
+ </t>
+ </section>
+
+ <section anchor="HeaderBlock" title="Header Compression and Decompression">
+ <t>
+ Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values.
+ They are used within HTTP request and response messages as well as server push operations
+ (see <xref target="PushResources" />).
+ </t>
+ <t>
+ Header lists are collections of zero or more header fields. When transmitted over a
+ connection, a header list is serialized into a header block using <xref
+ target="COMPRESSION">HTTP Header Compression</xref>. The serialized header block is then
+ divided into one or more octet sequences, called header block fragments, and transmitted
+ within the payload of <xref target="HEADERS">HEADERS</xref>, <xref
+ target="PUSH_PROMISE">PUSH_PROMISE</xref> or <xref
+ target="CONTINUATION">CONTINUATION</xref> frames.
+ </t>
+ <t>
+ The <xref target="COOKIE">Cookie header field</xref> is treated specially by the HTTP
+ mapping (see <xref target="CompressCookie"/>).
+ </t>
+ <t>
+ A receiving endpoint reassembles the header block by concatenating its fragments, then
+ decompresses the block to reconstruct the header list.
+ </t>
+ <t>
+ A complete header block consists of either:
+ <list style="symbols">
+ <t>
+ a single <x:ref>HEADERS</x:ref> or <x:ref>PUSH_PROMISE</x:ref> frame,
+ with the END_HEADERS flag set, or
+ </t>
+ <t>
+ a <x:ref>HEADERS</x:ref> or <x:ref>PUSH_PROMISE</x:ref> frame with the END_HEADERS
+ flag cleared and one or more <x:ref>CONTINUATION</x:ref> frames,
+ where the last <x:ref>CONTINUATION</x:ref> frame has the END_HEADERS flag set.
+ </t>
+ </list>
+ </t>
+ <t>
+ Header compression is stateful. One compression context and one decompression context is
+ used for the entire connection. Each header block is processed as a discrete unit.
+ Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved
+ frames of any other type or from any other stream. The last frame in a sequence of
+ <x:ref>HEADERS</x:ref> or <x:ref>CONTINUATION</x:ref> frames MUST have the END_HEADERS
+ flag set. The last frame in a sequence of <x:ref>PUSH_PROMISE</x:ref> or
+ <x:ref>CONTINUATION</x:ref> frames MUST have the END_HEADERS flag set. This allows a
+ header block to be logically equivalent to a single frame.
+ </t>
+ <t>
+ Header block fragments can only be sent as the payload of <x:ref>HEADERS</x:ref>,
+ <x:ref>PUSH_PROMISE</x:ref> or <x:ref>CONTINUATION</x:ref> frames, because these frames
+ carry data that can modify the compression context maintained by a receiver. An endpoint
+ receiving <x:ref>HEADERS</x:ref>, <x:ref>PUSH_PROMISE</x:ref> or
+ <x:ref>CONTINUATION</x:ref> frames MUST reassemble header blocks and perform decompression
+ even if the frames are to be discarded. A receiver MUST terminate the connection with a
+ <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>COMPRESSION_ERROR</x:ref> if it does not decompress a header block.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="StreamsLayer" title="Streams and Multiplexing">
+ <t>
+ A "stream" is an independent, bi-directional sequence of frames exchanged between the client
+ and server within an HTTP/2 connection. Streams have several important characteristics:
+ <list style="symbols">
+ <t>
+ A single HTTP/2 connection can contain multiple concurrently open streams, with either
+ endpoint interleaving frames from multiple streams.
+ </t>
+ <t>
+ Streams can be established and used unilaterally or shared by either the client or
+ server.
+ </t>
+ <t>
+ Streams can be closed by either endpoint.
+ </t>
+ <t>
+ The order in which frames are sent on a stream is significant. Recipients process frames
+ in the order they are received. In particular, the order of <x:ref>HEADERS</x:ref>,
+ and <x:ref>DATA</x:ref> frames is semantically significant.
+ </t>
+ <t>
+ Streams are identified by an integer. Stream identifiers are assigned to streams by the
+ endpoint initiating the stream.
+ </t>
+ </list>
+ </t>
+
+ <section anchor="StreamStates" title="Stream States">
+ <t>
+ The lifecycle of a stream is shown in <xref target="StreamStatesFigure"/>.
+ </t>
+
+ <figure anchor="StreamStatesFigure" title="Stream States">
+ <artwork type="drawing">
+ <![CDATA[
+ +--------+
+ PP | | PP
+ ,--------| idle |--------.
+ / | | \
+ v +--------+ v
+ +----------+ | +----------+
+ | | | H | |
+ ,---| reserved | | | reserved |---.
+ | | (local) | v | (remote) | |
+ | +----------+ +--------+ +----------+ |
+ | | ES | | ES | |
+ | | H ,-------| open |-------. | H |
+ | | / | | \ | |
+ | v v +--------+ v v |
+ | +----------+ | +----------+ |
+ | | half | | | half | |
+ | | closed | | R | closed | |
+ | | (remote) | | | (local) | |
+ | +----------+ | +----------+ |
+ | | v | |
+ | | ES / R +--------+ ES / R | |
+ | `----------->| |<-----------' |
+ | R | closed | R |
+ `-------------------->| |<--------------------'
+ +--------+
+
+ H: HEADERS frame (with implied CONTINUATIONs)
+ PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
+ ES: END_STREAM flag
+ R: RST_STREAM frame
+]]>
+ </artwork>
+ </figure>
+
+ <t>
+ Note that this diagram shows stream state transitions and the frames and flags that affect
+ those transitions only. In this regard, <x:ref>CONTINUATION</x:ref> frames do not result
+ in state transitions; they are effectively part of the <x:ref>HEADERS</x:ref> or
+ <x:ref>PUSH_PROMISE</x:ref> that they follow. For this purpose, the END_STREAM flag is
+ processed as a separate event to the frame that bears it; a <x:ref>HEADERS</x:ref> frame
+ with the END_STREAM flag set can cause two state transitions.
+ </t>
+ <t>
+ Both endpoints have a subjective view of the state of a stream that could be different
+ when frames are in transit. Endpoints do not coordinate the creation of streams; they are
+ created unilaterally by either endpoint. The negative consequences of a mismatch in
+ states are limited to the "closed" state after sending <x:ref>RST_STREAM</x:ref>, where
+ frames might be received for some time after closing.
+ </t>
+ <t>
+ Streams have the following states:
+ <list style="hanging">
+
+ <x:lt hangText="idle:">
+ <t>
+ <vspace blankLines="0"/>
+ All streams start in the "idle" state. In this state, no frames have been
+ exchanged.
+ </t>
+ <t>
+ The following transitions are valid from this state:
+ <list style="symbols">
+ <t>
+ Sending or receiving a <x:ref>HEADERS</x:ref> frame causes the stream to become
+ "open". The stream identifier is selected as described in <xref
+ target="StreamIdentifiers"/>. The same <x:ref>HEADERS</x:ref> frame can also
+ cause a stream to immediately become "half closed".
+ </t>
+ <t>
+ Sending a <x:ref>PUSH_PROMISE</x:ref> frame marks the associated stream for
+ later use. The stream state for the reserved stream transitions to "reserved
+ (local)".
+ </t>
+ <t>
+ Receiving a <x:ref>PUSH_PROMISE</x:ref> frame marks the associated stream as
+ reserved by the remote peer. The state of the stream becomes "reserved
+ (remote)".
+ </t>
+ </list>
+ </t>
+ <t>
+ Receiving any frames other than <x:ref>HEADERS</x:ref> or
+ <x:ref>PUSH_PROMISE</x:ref> on a stream in this state MUST be treated as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+
+ <x:lt hangText="reserved (local):">
+ <t>
+ <vspace blankLines="0"/>
+ A stream in the "reserved (local)" state is one that has been promised by sending a
+ <x:ref>PUSH_PROMISE</x:ref> frame. A <x:ref>PUSH_PROMISE</x:ref> frame reserves an
+ idle stream by associating the stream with an open stream that was initiated by the
+ remote peer (see <xref target="PushResources"/>).
+ </t>
+ <t>
+ In this state, only the following transitions are possible:
+ <list style="symbols">
+ <t>
+ The endpoint can send a <x:ref>HEADERS</x:ref> frame. This causes the stream to
+ open in a "half closed (remote)" state.
+ </t>
+ <t>
+ Either endpoint can send a <x:ref>RST_STREAM</x:ref> frame to cause the stream
+ to become "closed". This releases the stream reservation.
+ </t>
+ </list>
+ </t>
+ <t>
+ An endpoint MUST NOT send any type of frame other than <x:ref>HEADERS</x:ref> or
+ <x:ref>RST_STREAM</x:ref> in this state.
+ </t>
+ <t>
+ A <x:ref>PRIORITY</x:ref> frame MAY be received in this state. Receiving any type
+ of frame other than <x:ref>RST_STREAM</x:ref> or <x:ref>PRIORITY</x:ref> on a stream
+ in this state MUST be treated as a <xref target="ConnectionErrorHandler">connection
+ error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+
+ <x:lt hangText="reserved (remote):">
+ <t>
+ <vspace blankLines="0"/>
+ A stream in the "reserved (remote)" state has been reserved by a remote peer.
+ </t>
+ <t>
+ In this state, only the following transitions are possible:
+ <list style="symbols">
+ <t>
+ Receiving a <x:ref>HEADERS</x:ref> frame causes the stream to transition to
+ "half closed (local)".
+ </t>
+ <t>
+ Either endpoint can send a <x:ref>RST_STREAM</x:ref> frame to cause the stream
+ to become "closed". This releases the stream reservation.
+ </t>
+ </list>
+ </t>
+ <t>
+ An endpoint MAY send a <x:ref>PRIORITY</x:ref> frame in this state to reprioritize
+ the reserved stream. An endpoint MUST NOT send any type of frame other than
+ <x:ref>RST_STREAM</x:ref>, <x:ref>WINDOW_UPDATE</x:ref>, or <x:ref>PRIORITY</x:ref>
+ in this state.
+ </t>
+ <t>
+ Receiving any type of frame other than <x:ref>HEADERS</x:ref> or
+ <x:ref>RST_STREAM</x:ref> on a stream in this state MUST be treated as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+
+ <x:lt hangText="open:">
+ <t>
+ <vspace blankLines="0"/>
+ A stream in the "open" state may be used by both peers to send frames of any type.
+ In this state, sending peers observe advertised <xref target="FlowControl">stream
+ level flow control limits</xref>.
+ </t>
+ <t>
+ From this state either endpoint can send a frame with an END_STREAM flag set, which
+ causes the stream to transition into one of the "half closed" states: an endpoint
+ sending an END_STREAM flag causes the stream state to become "half closed (local)";
+ an endpoint receiving an END_STREAM flag causes the stream state to become "half
+ closed (remote)".
+ </t>
+ <t>
+ Either endpoint can send a <x:ref>RST_STREAM</x:ref> frame from this state, causing
+ it to transition immediately to "closed".
+ </t>
+ </x:lt>
+
+ <x:lt hangText="half closed (local):">
+ <t>
+ <vspace blankLines="0"/>
+ A stream that is in the "half closed (local)" state cannot be used for sending
+ frames. Only <x:ref>WINDOW_UPDATE</x:ref>, <x:ref>PRIORITY</x:ref> and
+ <x:ref>RST_STREAM</x:ref> frames can be sent in this state.
+ </t>
+ <t>
+ A stream transitions from this state to "closed" when a frame that contains an
+ END_STREAM flag is received, or when either peer sends a <x:ref>RST_STREAM</x:ref>
+ frame.
+ </t>
+ <t>
+ A receiver can ignore <x:ref>WINDOW_UPDATE</x:ref> frames in this state, which might
+ arrive for a short period after a frame bearing the END_STREAM flag is sent.
+ </t>
+ <t>
+ <x:ref>PRIORITY</x:ref> frames received in this state are used to reprioritize
+ streams that depend on the current stream.
+ </t>
+ </x:lt>
+
+ <x:lt hangText="half closed (remote):">
+ <t>
+ <vspace blankLines="0"/>
+ A stream that is "half closed (remote)" is no longer being used by the peer to send
+ frames. In this state, an endpoint is no longer obligated to maintain a receiver
+ flow control window if it performs flow control.
+ </t>
+ <t>
+ If an endpoint receives additional frames for a stream that is in this state, other
+ than <x:ref>WINDOW_UPDATE</x:ref>, <x:ref>PRIORITY</x:ref> or
+ <x:ref>RST_STREAM</x:ref>, it MUST respond with a <xref
+ target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>STREAM_CLOSED</x:ref>.
+ </t>
+ <t>
+ A stream that is "half closed (remote)" can be used by the endpoint to send frames
+ of any type. In this state, the endpoint continues to observe advertised <xref
+ target="FlowControl">stream level flow control limits</xref>.
+ </t>
+ <t>
+ A stream can transition from this state to "closed" by sending a frame that contains
+ an END_STREAM flag, or when either peer sends a <x:ref>RST_STREAM</x:ref> frame.
+ </t>
+ </x:lt>
+
+ <x:lt hangText="closed:">
+ <t>
+ <vspace blankLines="0"/>
+ The "closed" state is the terminal state.
+ </t>
+ <t>
+ An endpoint MUST NOT send frames other than <x:ref>PRIORITY</x:ref> on a closed
+ stream. An endpoint that receives any frame other than <x:ref>PRIORITY</x:ref>
+ after receiving a <x:ref>RST_STREAM</x:ref> MUST treat that as a <xref
+ target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>STREAM_CLOSED</x:ref>. Similarly, an endpoint that receives any frames after
+ receiving a frame with the END_STREAM flag set MUST treat that as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>STREAM_CLOSED</x:ref>, unless the frame is permitted as described below.
+ </t>
+ <t>
+ <x:ref>WINDOW_UPDATE</x:ref> or <x:ref>RST_STREAM</x:ref> frames can be received in
+ this state for a short period after a <x:ref>DATA</x:ref> or <x:ref>HEADERS</x:ref>
+ frame containing an END_STREAM flag is sent. Until the remote peer receives and
+ processes <x:ref>RST_STREAM</x:ref> or the frame bearing the END_STREAM flag, it
+ might send frames of these types. Endpoints MUST ignore
+ <x:ref>WINDOW_UPDATE</x:ref> or <x:ref>RST_STREAM</x:ref> frames received in this
+ state, though endpoints MAY choose to treat frames that arrive a significant time
+ after sending END_STREAM as a <xref target="ConnectionErrorHandler">connection
+ error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ <x:ref>PRIORITY</x:ref> frames can be sent on closed streams to prioritize streams
+ that are dependent on the closed stream. Endpoints SHOULD process
+ <x:ref>PRIORITY</x:ref> frames, though they can be ignored if the stream has been
+ removed from the dependency tree (see <xref target="priority-gc"/>).
+ </t>
+ <t>
+ If this state is reached as a result of sending a <x:ref>RST_STREAM</x:ref> frame,
+ the peer that receives the <x:ref>RST_STREAM</x:ref> might have already sent - or
+ enqueued for sending - frames on the stream that cannot be withdrawn. An endpoint
+ MUST ignore frames that it receives on closed streams after it has sent a
+ <x:ref>RST_STREAM</x:ref> frame. An endpoint MAY choose to limit the period over
+ which it ignores frames and treat frames that arrive after this time as being in
+ error.
+ </t>
+ <t>
+ Flow controlled frames (i.e., <x:ref>DATA</x:ref>) received after sending
+ <x:ref>RST_STREAM</x:ref> are counted toward the connection flow control window.
+ Even though these frames might be ignored, because they are sent before the sender
+ receives the <x:ref>RST_STREAM</x:ref>, the sender will consider the frames to count
+ against the flow control window.
+ </t>
+ <t>
+ An endpoint might receive a <x:ref>PUSH_PROMISE</x:ref> frame after it sends
+ <x:ref>RST_STREAM</x:ref>. <x:ref>PUSH_PROMISE</x:ref> causes a stream to become
+ "reserved" even if the associated stream has been reset. Therefore, a
+ <x:ref>RST_STREAM</x:ref> is needed to close an unwanted promised stream.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+ <t>
+ In the absence of more specific guidance elsewhere in this document, implementations
+ SHOULD treat the receipt of a frame that is not expressly permitted in the description of
+ a state as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>. Frames of unknown types are ignored.
+ </t>
+ <t>
+ An example of the state transitions for an HTTP request/response exchange can be found in
+ <xref target="HttpSequence"/>. An example of the state transitions for server push can be
+ found in <xref target="PushRequests"/> and <xref target="PushResponses"/>.
+ </t>
+
+ <section anchor="StreamIdentifiers" title="Stream Identifiers">
+ <t>
+ Streams are identified with an unsigned 31-bit integer. Streams initiated by a client
+ MUST use odd-numbered stream identifiers; those initiated by the server MUST use
+ even-numbered stream identifiers. A stream identifier of zero (0x0) is used for
+ connection control messages; the stream identifier zero cannot be used to establish a
+ new stream.
+ </t>
+ <t>
+ HTTP/1.1 requests that are upgraded to HTTP/2 (see <xref target="discover-http"/>) are
+ responded to with a stream identifier of one (0x1). After the upgrade
+ completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1
+ cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1.
+ </t>
+ <t>
+ The identifier of a newly established stream MUST be numerically greater than all
+ streams that the initiating endpoint has opened or reserved. This governs streams that
+ are opened using a <x:ref>HEADERS</x:ref> frame and streams that are reserved using
+ <x:ref>PUSH_PROMISE</x:ref>. An endpoint that receives an unexpected stream identifier
+ MUST respond with a <xref target="ConnectionErrorHandler">connection error</xref> of
+ type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ The first use of a new stream identifier implicitly closes all streams in the "idle"
+ state that might have been initiated by that peer with a lower-valued stream identifier.
+ For example, if a client sends a <x:ref>HEADERS</x:ref> frame on stream 7 without ever
+ sending a frame on stream 5, then stream 5 transitions to the "closed" state when the
+ first frame for stream 7 is sent or received.
+ </t>
+ <t>
+ Stream identifiers cannot be reused. Long-lived connections can result in an endpoint
+ exhausting the available range of stream identifiers. A client that is unable to
+ establish a new stream identifier can establish a new connection for new streams. A
+ server that is unable to establish a new stream identifier can send a
+ <x:ref>GOAWAY</x:ref> frame so that the client is forced to open a new connection for
+ new streams.
+ </t>
+ </section>
+
+ <section title="Stream Concurrency">
+ <t>
+ A peer can limit the number of concurrently active streams using the
+ <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> parameter (see <xref
+ target="SettingValues"/>) within a <x:ref>SETTINGS</x:ref> frame. The maximum concurrent
+ streams setting is specific to each endpoint and applies only to the peer that receives
+ the setting. That is, clients specify the maximum number of concurrent streams the
+ server can initiate, and servers specify the maximum number of concurrent streams the
+ client can initiate.
+ </t>
+ <t>
+ Streams that are in the "open" state, or either of the "half closed" states count toward
+ the maximum number of streams that an endpoint is permitted to open. Streams in any of
+ these three states count toward the limit advertised in the
+ <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> setting. Streams in either of the
+ "reserved" states do not count toward the stream limit.
+ </t>
+ <t>
+ Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a
+ <x:ref>HEADERS</x:ref> frame that causes their advertised concurrent stream limit to be
+ exceeded MUST treat this as a <xref target="StreamErrorHandler">stream error</xref>. An
+ endpoint that wishes to reduce the value of
+ <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> to a value that is below the current
+ number of open streams can either close streams that exceed the new value or allow
+ streams to complete.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="FlowControl" title="Flow Control">
+ <t>
+ Using streams for multiplexing introduces contention over use of the TCP connection,
+ resulting in blocked streams. A flow control scheme ensures that streams on the same
+ connection do not destructively interfere with each other. Flow control is used for both
+ individual streams and for the connection as a whole.
+ </t>
+ <t>
+ HTTP/2 provides for flow control through use of the <xref
+ target="WINDOW_UPDATE">WINDOW_UPDATE frame</xref>.
+ </t>
+
+ <section anchor="fc-principles" title="Flow Control Principles">
+ <t>
+ HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be
+ used without requiring protocol changes. Flow control in HTTP/2 has the following
+ characteristics:
+ <list style="numbers">
+ <t>
+ Flow control is specific to a connection; i.e., it is "hop-by-hop", not
+ "end-to-end".
+ </t>
+ <t>
+ Flow control is based on window update frames. Receivers advertise how many octets
+ they are prepared to receive on a stream and for the entire connection. This is a
+ credit-based scheme.
+ </t>
+ <t>
+ Flow control is directional with overall control provided by the receiver. A
+ receiver MAY choose to set any window size that it desires for each stream and for
+ the entire connection. A sender MUST respect flow control limits imposed by a
+ receiver. Clients, servers and intermediaries all independently advertise their
+ flow control window as a receiver and abide by the flow control limits set by
+ their peer when sending.
+ </t>
+ <t>
+ The initial value for the flow control window is 65,535 octets for both new streams
+ and the overall connection.
+ </t>
+ <t>
+ The frame type determines whether flow control applies to a frame. Of the frames
+ specified in this document, only <x:ref>DATA</x:ref> frames are subject to flow
+ control; all other frame types do not consume space in the advertised flow control
+ window. This ensures that important control frames are not blocked by flow control.
+ </t>
+ <t>
+ Flow control cannot be disabled.
+ </t>
+ <t>
+ HTTP/2 defines only the format and semantics of the <x:ref>WINDOW_UPDATE</x:ref>
+ frame (<xref target="WINDOW_UPDATE"/>). This document does not stipulate how a
+ receiver decides when to send this frame or the value that it sends, nor does it
+ specify how a sender chooses to send packets. Implementations are able to select
+ any algorithm that suits their needs.
+ </t>
+ </list>
+ </t>
+ <t>
+ Implementations are also responsible for managing how requests and responses are sent
+ based on priority; choosing how to avoid head of line blocking for requests; and
+ managing the creation of new streams. Algorithm choices for these could interact with
+ any flow control algorithm.
+ </t>
+ </section>
+
+ <section anchor="DisableFlowControl" title="Appropriate Use of Flow Control">
+ <t>
+ Flow control is defined to protect endpoints that are operating under resource
+ constraints. For example, a proxy needs to share memory between many connections, and
+ also might have a slow upstream connection and a fast downstream one. Flow control
+ addresses cases where the receiver is unable to process data on one stream, yet wants to
+ continue to process other streams in the same connection.
+ </t>
+ <t>
+ Deployments that do not require this capability can advertise a flow control window of
+ the maximum size, incrementing the available space when new data is received. This
+ effectively disables flow control for that receiver. Conversely, a sender is always
+ subject to the flow control window advertised by the receiver.
+ </t>
+ <t>
+ Deployments with constrained resources (for example, memory) can employ flow control to
+ limit the amount of memory a peer can consume. Note, however, that this can lead to
+ suboptimal use of available network resources if flow control is enabled without
+ knowledge of the bandwidth-delay product (see <xref target="RFC1323"/>).
+ </t>
+ <t>
+ Even with full awareness of the current bandwidth-delay product, implementation of flow
+ control can be difficult. When using flow control, the receiver MUST read from the TCP
+ receive buffer in a timely fashion. Failure to do so could lead to a deadlock when
+ critical frames, such as <x:ref>WINDOW_UPDATE</x:ref>, are not read and acted upon.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="StreamPriority" title="Stream priority">
+ <t>
+ A client can assign a priority for a new stream by including prioritization information in
+ the <xref target="HEADERS">HEADERS frame</xref> that opens the stream. For an existing
+ stream, the <xref target="PRIORITY">PRIORITY frame</xref> can be used to change the
+ priority.
+ </t>
+ <t>
+ The purpose of prioritization is to allow an endpoint to express how it would prefer its
+ peer allocate resources when managing concurrent streams. Most importantly, priority can
+ be used to select streams for transmitting frames when there is limited capacity for
+ sending.
+ </t>
+ <t>
+ Streams can be prioritized by marking them as dependent on the completion of other streams
+ (<xref target="pri-depend"/>). Each dependency is assigned a relative weight, a number
+ that is used to determine the relative proportion of available resources that are assigned
+ to streams dependent on the same stream.
+ </t>
+ <!--
+ Note that stream dependencies have not yet been validated in practice. The theory
+ might be fairly sound, but there are no implementations currently sending these. If it
+ turns out that they are not useful, or actively harmful, implementations will be requested
+ to avoid creating stream dependencies.
+ -->
+ <t>
+ Explicitly setting the priority for a stream is input to a prioritization process. It
+ does not guarantee any particular processing or transmission order for the stream relative
+ to any other stream. An endpoint cannot force a peer to process concurrent streams in a
+ particular order using priority. Expressing priority is therefore only ever a suggestion.
+ </t>
+ <t>
+ Providing prioritization information is optional, so default values are used if no
+ explicit indicator is provided (<xref target="pri-default"/>).
+ </t>
+
+ <section title="Stream Dependencies" anchor="pri-depend">
+ <t>
+ Each stream can be given an explicit dependency on another stream. Including a
+ dependency expresses a preference to allocate resources to the identified stream rather
+ than to the dependent stream.
+ </t>
+ <t>
+ A stream that is not dependent on any other stream is given a stream dependency of 0x0.
+ In other words, the non-existent stream 0 forms the root of the tree.
+ </t>
+ <t>
+ A stream that depends on another stream is a dependent stream. The stream upon which a
+ stream is dependent is a parent stream. A dependency on a stream that is not currently
+ in the tree - such as a stream in the "idle" state - results in that stream being given
+ a <xref target="pri-default">default priority</xref>.
+ </t>
+ <t>
+ When assigning a dependency on another stream, the stream is added as a new dependency
+ of the parent stream. Dependent streams that share the same parent are not ordered with
+ respect to each other. For example, if streams B and C are dependent on stream A, and
+ if stream D is created with a dependency on stream A, this results in a dependency order
+ of A followed by B, C, and D in any order.
+ </t>
+ <figure title="Example of Default Dependency Creation">
+ <artwork type="inline"><![CDATA[
+ A A
+ / \ ==> /|\
+ B C B D C
+]]></artwork>
+ </figure>
+ <t>
+ An exclusive flag allows for the insertion of a new level of dependencies. The
+ exclusive flag causes the stream to become the sole dependency of its parent stream,
+ causing other dependencies to become dependent on the exclusive stream. In the
+ previous example, if stream D is created with an exclusive dependency on stream A, this
+ results in D becoming the dependency parent of B and C.
+ </t>
+ <figure title="Example of Exclusive Dependency Creation">
+ <artwork type="inline"><![CDATA[
+ A
+ A |
+ / \ ==> D
+ B C / \
+ B C
+]]></artwork>
+ </figure>
+ <t>
+ Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all
+ of the streams that it depends on (the chain of parent streams up to 0x0) are either
+ closed, or it is not possible to make progress on them.
+ </t>
+ <t>
+ A stream cannot depend on itself. An endpoint MUST treat this as a <xref
+ target="StreamErrorHandler">stream error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </section>
+
+ <section title="Dependency Weighting">
+ <t>
+ All dependent streams are allocated an integer weight between 1 and 256 (inclusive).
+ </t>
+ <t>
+ Streams with the same parent SHOULD be allocated resources proportionally based on their
+ weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A
+ with weight 12, and if no progress can be made on A, stream B ideally receives one third
+ of the resources allocated to stream C.
+ </t>
+ </section>
+
+ <section anchor="reprioritize" title="Reprioritization">
+ <t>
+ Stream priorities are changed using the <x:ref>PRIORITY</x:ref> frame. Setting a
+ dependency causes a stream to become dependent on the identified parent stream.
+ </t>
+ <t>
+ Dependent streams move with their parent stream if the parent is reprioritized. Setting
+ a dependency with the exclusive flag for a reprioritized stream moves all the
+ dependencies of the new parent stream to become dependent on the reprioritized stream.
+ </t>
+ <t>
+ If a stream is made dependent on one of its own dependencies, the formerly dependent
+ stream is first moved to be dependent on the reprioritized stream's previous parent.
+ The moved dependency retains its weight.
+ </t>
+ <figure title="Example of Dependency Reordering">
+ <preamble>
+ For example, consider an original dependency tree where B and C depend on A, D and E
+ depend on C, and F depends on D. If A is made dependent on D, then D takes the place
+ of A. All other dependency relationships stay the same, except for F, which becomes
+ dependent on A if the reprioritization is exclusive.
+ </preamble>
+ <artwork type="inline"><![CDATA[
+ ? ? ? ?
+ | / \ | |
+ A D A D D
+ / \ / / \ / \ |
+ B C ==> F B C ==> F A OR A
+ / \ | / \ /|\
+ D E E B C B C F
+ | | |
+ F E E
+ (intermediate) (non-exclusive) (exclusive)
+]]></artwork>
+ </figure>
+ </section>
+
+ <section anchor="priority-gc" title="Prioritization State Management">
+ <t>
+ When a stream is removed from the dependency tree, its dependencies can be moved to
+ become dependent on the parent of the closed stream. The weights of new dependencies
+ are recalculated by distributing the weight of the dependency of the closed stream
+ proportionally based on the weights of its dependencies.
+ </t>
+ <t>
+ Streams that are removed from the dependency tree cause some prioritization information
+ to be lost. Resources are shared between streams with the same parent stream, which
+ means that if a stream in that set closes or becomes blocked, any spare capacity
+ allocated to a stream is distributed to the immediate neighbors of the stream. However,
+ if the common dependency is removed from the tree, those streams share resources with
+ streams at the next highest level.
+ </t>
+ <t>
+ For example, assume streams A and B share a parent, and streams C and D both depend on
+ stream A. Prior to the removal of stream A, if streams A and D are unable to proceed,
+ then stream C receives all the resources dedicated to stream A. If stream A is removed
+ from the tree, the weight of stream A is divided between streams C and D. If stream D
+ is still unable to proceed, this results in stream C receiving a reduced proportion of
+ resources. For equal starting weights, C receives one third, rather than one half, of
+ available resources.
+ </t>
+ <t>
+ It is possible for a stream to become closed while prioritization information that
+ creates a dependency on that stream is in transit. If a stream identified in a
+ dependency has no associated priority information, then the dependent stream is instead
+ assigned a <xref target="pri-default">default priority</xref>. This potentially creates
+ suboptimal prioritization, since the stream could be given a priority that is different
+ to what is intended.
+ </t>
+ <t>
+ To avoid these problems, an endpoint SHOULD retain stream prioritization state for a
+ period after streams become closed. The longer state is retained, the lower the chance
+ that streams are assigned incorrect or default priority values.
+ </t>
+ <t>
+ This could create a large state burden for an endpoint, so this state MAY be limited.
+ An endpoint MAY apply a fixed upper limit on the number of closed streams for which
+ prioritization state is tracked to limit state exposure. The amount of additional state
+ an endpoint maintains could be dependent on load; under high load, prioritization state
+ can be discarded to limit resource commitments. In extreme cases, an endpoint could
+ even discard prioritization state for active or reserved streams. If a fixed limit is
+ applied, endpoints SHOULD maintain state for at least as many streams as allowed by
+ their setting for <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref>.
+ </t>
+ <t>
+ An endpoint receiving a <x:ref>PRIORITY</x:ref> frame that changes the priority of a
+ closed stream SHOULD alter the dependencies of the streams that depend on it, if it has
+ retained enough state to do so.
+ </t>
+ </section>
+
+ <section title="Default Priorities" anchor="pri-default">
+ <t>
+ Providing priority information is optional. Streams are assigned a non-exclusive
+ dependency on stream 0x0 by default. <xref target="PushResources">Pushed streams</xref>
+ initially depend on their associated stream. In both cases, streams are assigned a
+ default weight of 16.
+ </t>
+ </section>
+ </section>
+
+ <section title="Error Handling">
+ <t>
+ HTTP/2 framing permits two classes of error:
+ <list style="symbols">
+ <t>
+ An error condition that renders the entire connection unusable is a connection error.
+ </t>
+ <t>
+ An error in an individual stream is a stream error.
+ </t>
+ </list>
+ </t>
+ <t>
+ A list of error codes is included in <xref target="ErrorCodes"/>.
+ </t>
+
+ <section anchor="ConnectionErrorHandler" title="Connection Error Handling">
+ <t>
+ A connection error is any error which prevents further processing of the framing layer,
+ or which corrupts any connection state.
+ </t>
+ <t>
+ An endpoint that encounters a connection error SHOULD first send a <x:ref>GOAWAY</x:ref>
+ frame (<xref target="GOAWAY"/>) with the stream identifier of the last stream that it
+ successfully received from its peer. The <x:ref>GOAWAY</x:ref> frame includes an error
+ code that indicates why the connection is terminating. After sending the
+ <x:ref>GOAWAY</x:ref> frame, the endpoint MUST close the TCP connection.
+ </t>
+ <t>
+ It is possible that the <x:ref>GOAWAY</x:ref> will not be reliably received by the
+ receiving endpoint (see <xref target="RFC7230" x:fmt=","
+ x:rel="#persistent.tear-down"/>). In the event of a connection error,
+ <x:ref>GOAWAY</x:ref> only provides a best effort attempt to communicate with the peer
+ about why the connection is being terminated.
+ </t>
+ <t>
+ An endpoint can end a connection at any time. In particular, an endpoint MAY choose to
+ treat a stream error as a connection error. Endpoints SHOULD send a
+ <x:ref>GOAWAY</x:ref> frame when ending a connection, providing that circumstances
+ permit it.
+ </t>
+ </section>
+
+ <section anchor="StreamErrorHandler" title="Stream Error Handling">
+ <t>
+ A stream error is an error related to a specific stream that does not affect processing
+ of other streams.
+ </t>
+ <t>
+ An endpoint that detects a stream error sends a <x:ref>RST_STREAM</x:ref> frame (<xref
+ target="RST_STREAM"/>) that contains the stream identifier of the stream where the error
+ occurred. The <x:ref>RST_STREAM</x:ref> frame includes an error code that indicates the
+ type of error.
+ </t>
+ <t>
+ A <x:ref>RST_STREAM</x:ref> is the last frame that an endpoint can send on a stream.
+ The peer that sends the <x:ref>RST_STREAM</x:ref> frame MUST be prepared to receive any
+ frames that were sent or enqueued for sending by the remote peer. These frames can be
+ ignored, except where they modify connection state (such as the state maintained for
+ <xref target="HeaderBlock">header compression</xref>, or flow control).
+ </t>
+ <t>
+ Normally, an endpoint SHOULD NOT send more than one <x:ref>RST_STREAM</x:ref> frame for
+ any stream. However, an endpoint MAY send additional <x:ref>RST_STREAM</x:ref> frames if
+ it receives frames on a closed stream after more than a round-trip time. This behavior
+ is permitted to deal with misbehaving implementations.
+ </t>
+ <t>
+ An endpoint MUST NOT send a <x:ref>RST_STREAM</x:ref> in response to an
+ <x:ref>RST_STREAM</x:ref> frame, to avoid looping.
+ </t>
+ </section>
+
+ <section title="Connection Termination">
+ <t>
+ If the TCP connection is closed or reset while streams remain in open or half closed
+ states, then the endpoint MUST assume that those streams were abnormally interrupted and
+ could be incomplete.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="extensibility" title="Extending HTTP/2">
+ <t>
+ HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide
+ additional services or alter any aspect of the protocol, within the limitations described
+ in this section. Extensions are effective only within the scope of a single HTTP/2
+ connection.
+ </t>
+ <t>
+ Extensions are permitted to use new <xref target="FrameHeader">frame types</xref>, new
+ <xref target="SettingValues">settings</xref>, or new <xref target="ErrorCodes">error
+ codes</xref>. Registries are established for managing these extension points: <xref
+ target="iana-frames">frame types</xref>, <xref target="iana-settings">settings</xref> and
+ <xref target="iana-errors">error codes</xref>.
+ </t>
+ <t>
+ Implementations MUST ignore unknown or unsupported values in all extensible protocol
+ elements. Implementations MUST discard frames that have unknown or unsupported types.
+ This means that any of these extension points can be safely used by extensions without
+ prior arrangement or negotiation. However, extension frames that appear in the middle of
+ a <xref target="HeaderBlock">header block</xref> are not permitted; these MUST be treated
+ as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ However, extensions that could change the semantics of existing protocol components MUST
+ be negotiated before being used. For example, an extension that changes the layout of the
+ <x:ref>HEADERS</x:ref> frame cannot be used until the peer has given a positive signal
+ that this is acceptable. In this case, it could also be necessary to coordinate when the
+ revised layout comes into effect. Note that treating any frame other than
+ <x:ref>DATA</x:ref> frames as flow controlled is such a change in semantics, and can only
+ be done through negotiation.
+ </t>
+ <t>
+ This document doesn't mandate a specific method for negotiating the use of an extension,
+ but notes that a <xref target="SettingValues">setting</xref> could be used for that
+ purpose. If both peers set a value that indicates willingness to use the extension, then
+ the extension can be used. If a setting is used for extension negotiation, the initial
+ value MUST be defined so that the extension is initially disabled.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="FrameTypes" title="Frame Definitions">
+ <t>
+ This specification defines a number of frame types, each identified by a unique 8-bit type
+ code. Each frame type serves a distinct purpose either in the establishment and management
+ of the connection as a whole, or of individual streams.
+ </t>
+ <t>
+ The transmission of specific frame types can alter the state of a connection. If endpoints
+ fail to maintain a synchronized view of the connection state, successful communication
+ within the connection will no longer be possible. Therefore, it is important that endpoints
+ have a shared comprehension of how the state is affected by the use of any given frame.
+ </t>
+
+ <section anchor="DATA" title="DATA">
+ <t>
+ DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated
+ with a stream. One or more DATA frames are used, for instance, to carry HTTP request or
+ response payloads.
+ </t>
+ <t>
+ DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to
+ obscure the size of messages.
+ </t>
+ <figure title="DATA Frame Payload">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Pad Length? (8)|
+ +---------------+-----------------------------------------------+
+ | Data (*) ...
+ +---------------------------------------------------------------+
+ | Padding (*) ...
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The DATA frame contains the following fields:
+ <list style="hanging">
+ <t hangText="Pad Length:">
+ An 8-bit field containing the length of the frame padding in units of octets. This
+ field is optional and is only present if the PADDED flag is set.
+ </t>
+ <t hangText="Data:">
+ Application data. The amount of data is the remainder of the frame payload after
+ subtracting the length of the other fields that are present.
+ </t>
+ <t hangText="Padding:">
+ Padding octets that contain no application semantic value. Padding octets MUST be set
+ to zero when sending and ignored when receiving.
+ </t>
+ </list>
+ </t>
+
+ <t>
+ The DATA frame defines the following flags:
+ <list style="hanging">
+ <t hangText="END_STREAM (0x1):">
+ Bit 1 being set indicates that this frame is the last that the endpoint will send for
+ the identified stream. Setting this flag causes the stream to enter one of <xref
+ target="StreamStates">the "half closed" states or the "closed" state</xref>.
+ </t>
+ <t hangText="PADDED (0x8):">
+ Bit 4 being set indicates that the Pad Length field and any padding that it describes
+ is present.
+ </t>
+ </list>
+ </t>
+ <t>
+ DATA frames MUST be associated with a stream. If a DATA frame is received whose stream
+ identifier field is 0x0, the recipient MUST respond with a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ DATA frames are subject to flow control and can only be sent when a stream is in the
+ "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow
+ control, including Pad Length and Padding fields if present. If a DATA frame is received
+ whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond
+ with a <xref target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>STREAM_CLOSED</x:ref>.
+ </t>
+ <t>
+ The total number of padding octets is determined by the value of the Pad Length field. If
+ the length of the padding is greater than the length of the frame payload, the recipient
+ MUST treat this as a <xref target="ConnectionErrorHandler">connection error</xref> of
+ type <x:ref>PROTOCOL_ERROR</x:ref>.
+ <list style="hanging">
+ <t hangText="Note:">
+ A frame can be increased in size by one octet by including a Pad Length field with a
+ value of zero.
+ </t>
+ </list>
+ </t>
+ <t>
+ Padding is a security feature; see <xref target="padding"/>.
+ </t>
+ </section>
+
+ <section anchor="HEADERS" title="HEADERS">
+ <t>
+ The HEADERS frame (type=0x1) is used to <xref target="StreamStates">open a stream</xref>,
+ and additionally carries a header block fragment. HEADERS frames can be sent on a stream
+ in the "open" or "half closed (remote)" states.
+ </t>
+ <figure title="HEADERS Frame Payload">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Pad Length? (8)|
+ +-+-------------+-----------------------------------------------+
+ |E| Stream Dependency? (31) |
+ +-+-------------+-----------------------------------------------+
+ | Weight? (8) |
+ +-+-------------+-----------------------------------------------+
+ | Header Block Fragment (*) ...
+ +---------------------------------------------------------------+
+ | Padding (*) ...
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The HEADERS frame payload has the following fields:
+ <list style="hanging">
+ <t hangText="Pad Length:">
+ An 8-bit field containing the length of the frame padding in units of octets. This
+ field is only present if the PADDED flag is set.
+ </t>
+ <t hangText="E:">
+ A single bit flag indicates that the stream dependency is exclusive, see <xref
+ target="StreamPriority"/>. This field is only present if the PRIORITY flag is set.
+ </t>
+ <t hangText="Stream Dependency:">
+ A 31-bit stream identifier for the stream that this stream depends on, see <xref
+ target="StreamPriority"/>. This field is only present if the PRIORITY flag is set.
+ </t>
+ <t hangText="Weight:">
+ An 8-bit weight for the stream, see <xref target="StreamPriority"/>. Add one to the
+ value to obtain a weight between 1 and 256. This field is only present if the
+ PRIORITY flag is set.
+ </t>
+ <t hangText="Header Block Fragment:">
+ A <xref target="HeaderBlock">header block fragment</xref>.
+ </t>
+ <t hangText="Padding:">
+ Padding octets that contain no application semantic value. Padding octets MUST be set
+ to zero when sending and ignored when receiving.
+ </t>
+ </list>
+ </t>
+
+ <t>
+ The HEADERS frame defines the following flags:
+ <list style="hanging">
+ <x:lt hangText="END_STREAM (0x1):">
+ <t>
+ Bit 1 being set indicates that the <xref target="HeaderBlock">header block</xref> is
+ the last that the endpoint will send for the identified stream. Setting this flag
+ causes the stream to enter one of <xref target="StreamStates">"half closed"
+ states</xref>.
+ </t>
+ <t>
+ A HEADERS frame carries the END_STREAM flag that signals the end of a stream.
+ However, a HEADERS frame with the END_STREAM flag set can be followed by
+ <x:ref>CONTINUATION</x:ref> frames on the same stream. Logically, the
+ <x:ref>CONTINUATION</x:ref> frames are part of the HEADERS frame.
+ </t>
+ </x:lt>
+ <x:lt hangText="END_HEADERS (0x4):">
+ <t>
+ Bit 3 being set indicates that this frame contains an entire <xref
+ target="HeaderBlock">header block</xref> and is not followed by any
+ <x:ref>CONTINUATION</x:ref> frames.
+ </t>
+ <t>
+ A HEADERS frame without the END_HEADERS flag set MUST be followed by a
+ <x:ref>CONTINUATION</x:ref> frame for the same stream. A receiver MUST treat the
+ receipt of any other type of frame or a frame on a different stream as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+ <x:lt hangText="PADDED (0x8):">
+ <t>
+ Bit 4 being set indicates that the Pad Length field and any padding that it
+ describes is present.
+ </t>
+ </x:lt>
+ <x:lt hangText="PRIORITY (0x20):">
+ <t>
+ Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight
+ fields are present; see <xref target="StreamPriority"/>.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+
+ <t>
+ The payload of a HEADERS frame contains a <xref target="HeaderBlock">header block
+ fragment</xref>. A header block that does not fit within a HEADERS frame is continued in
+ a <xref target="CONTINUATION">CONTINUATION frame</xref>.
+ </t>
+
+ <t>
+ HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose
+ stream identifier field is 0x0, the recipient MUST respond with a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <t>
+ The HEADERS frame changes the connection state as described in <xref
+ target="HeaderBlock"/>.
+ </t>
+
+ <t>
+ The HEADERS frame includes optional padding. Padding fields and flags are identical to
+ those defined for <xref target="DATA">DATA frames</xref>.
+ </t>
+ <t>
+ Prioritization information in a HEADERS frame is logically equivalent to a separate
+ <x:ref>PRIORITY</x:ref> frame, but inclusion in HEADERS avoids the potential for churn in
+ stream prioritization when new streams are created. Prioritization fields in HEADERS frames
+ subsequent to the first on a stream <xref target="reprioritize">reprioritize the
+ stream</xref>.
+ </t>
+ </section>
+
+ <section anchor="PRIORITY" title="PRIORITY">
+ <t>
+ The PRIORITY frame (type=0x2) specifies the <xref target="StreamPriority">sender-advised
+ priority of a stream</xref>. It can be sent at any time for an existing stream, including
+ closed streams. This enables reprioritization of existing streams.
+ </t>
+ <figure title="PRIORITY Frame Payload">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |E| Stream Dependency (31) |
+ +-+-------------+-----------------------------------------------+
+ | Weight (8) |
+ +-+-------------+
+]]></artwork>
+ </figure>
+ <t>
+ The payload of a PRIORITY frame contains the following fields:
+ <list style="hanging">
+ <t hangText="E:">
+ A single bit flag indicates that the stream dependency is exclusive, see <xref
+ target="StreamPriority"/>.
+ </t>
+ <t hangText="Stream Dependency:">
+ A 31-bit stream identifier for the stream that this stream depends on, see <xref
+ target="StreamPriority"/>.
+ </t>
+ <t hangText="Weight:">
+ An 8-bit weight for the identified stream dependency, see <xref
+ target="StreamPriority"/>. Add one to the value to obtain a weight between 1 and 256.
+ </t>
+ </list>
+ </t>
+
+ <t>
+ The PRIORITY frame does not define any flags.
+ </t>
+
+ <t>
+ The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received
+ with a stream identifier of 0x0, the recipient MUST respond with a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open",
+ "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be
+ sent between consecutive frames that comprise a single <xref target="HeaderBlock">header
+ block</xref>. Note that this frame could arrive after processing or frame sending has
+ completed, which would cause it to have no effect on the current stream. For a stream
+ that is in the "half closed (remote)" or "closed" state, this frame can only affect
+ processing of the current stream and not frame transmission.
+ </t>
+ <t>
+ The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state.
+ This allows for the reprioritization of a group of dependent streams by altering the
+ priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a
+ closed stream risks being ignored due to the peer having discarded priority state
+ information for that stream.
+ </t>
+ </section>
+
+ <section anchor="RST_STREAM" title="RST_STREAM">
+ <t>
+ The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by
+ the initiator of a stream, it indicates that they wish to cancel the stream or that an
+ error condition has occurred. When sent by the receiver of a stream, it indicates that
+ either the receiver is rejecting the stream, requesting that the stream be cancelled, or
+ that an error condition has occurred.
+ </t>
+ <figure title="RST_STREAM Frame Payload">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Error Code (32) |
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+
+ <t>
+ The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the <xref
+ target="ErrorCodes">error code</xref>. The error code indicates why the stream is being
+ terminated.
+ </t>
+
+ <t>
+ The RST_STREAM frame does not define any flags.
+ </t>
+
+ <t>
+ The RST_STREAM frame fully terminates the referenced stream and causes it to enter the
+ closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send
+ additional frames for that stream, with the exception of <x:ref>PRIORITY</x:ref>. However,
+ after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process
+ additional frames sent on the stream that might have been sent by the peer prior to the
+ arrival of the RST_STREAM.
+ </t>
+
+ <t>
+ RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received
+ with a stream identifier of 0x0, the recipient MUST treat this as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <t>
+ RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM
+ frame identifying an idle stream is received, the recipient MUST treat this as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ </section>
+
+ <section anchor="SETTINGS" title="SETTINGS">
+ <t>
+ The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints
+ communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is
+ also used to acknowledge the receipt of those parameters. Individually, a SETTINGS
+ parameter can also be referred to as a "setting".
+ </t>
+ <t>
+ SETTINGS parameters are not negotiated; they describe characteristics of the sending peer,
+ which are used by the receiving peer. Different values for the same parameter can be
+ advertised by each peer. For example, a client might set a high initial flow control
+ window, whereas a server might set a lower value to conserve resources.
+ </t>
+
+ <t>
+ A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be
+ sent at any other time by either endpoint over the lifetime of the connection.
+ Implementations MUST support all of the parameters defined by this specification.
+ </t>
+
+ <t>
+ Each parameter in a SETTINGS frame replaces any existing value for that parameter.
+ Parameters are processed in the order in which they appear, and a receiver of a SETTINGS
+ frame does not need to maintain any state other than the current value of its
+ parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by
+ a receiver.
+ </t>
+ <t>
+ SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS
+ frame defines the following flag:
+ <list style="hanging">
+ <t hangText="ACK (0x1):">
+ Bit 1 being set indicates that this frame acknowledges receipt and application of the
+ peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST
+ be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value
+ other than 0 MUST be treated as a <xref target="ConnectionErrorHandler">connection
+ error</xref> of type <x:ref>FRAME_SIZE_ERROR</x:ref>. For more information, see <xref
+ target="SettingsSync">Settings Synchronization</xref>.
+ </t>
+ </list>
+ </t>
+ <t>
+ SETTINGS frames always apply to a connection, never a single stream. The stream
+ identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS
+ frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond
+ with a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame
+ MUST be treated as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <section title="SETTINGS Format" anchor="SettingFormat">
+ <t>
+ The payload of a SETTINGS frame consists of zero or more parameters, each consisting of
+ an unsigned 16-bit setting identifier and an unsigned 32-bit value.
+ </t>
+
+ <figure title="Setting Format">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Identifier (16) |
+ +-------------------------------+-------------------------------+
+ | Value (32) |
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ </section>
+
+ <section anchor="SettingValues" title="Defined SETTINGS Parameters">
+ <t>
+ The following parameters are defined:
+ <list style="hanging">
+ <x:lt hangText="SETTINGS_HEADER_TABLE_SIZE (0x1):"
+ anchor="SETTINGS_HEADER_TABLE_SIZE">
+ <t>
+ Allows the sender to inform the remote endpoint of the maximum size of the header
+ compression table used to decode header blocks, in octets. The encoder can select
+ any size equal to or less than this value by using signaling specific to the
+ header compression format inside a header block. The initial value is 4,096
+ octets.
+ </t>
+ </x:lt>
+ <x:lt hangText="SETTINGS_ENABLE_PUSH (0x2):"
+ anchor="SETTINGS_ENABLE_PUSH">
+ <t>
+ This setting can be used to disable <xref target="PushResources">server
+ push</xref>. An endpoint MUST NOT send a <x:ref>PUSH_PROMISE</x:ref> frame if it
+ receives this parameter set to a value of 0. An endpoint that has both set this
+ parameter to 0 and had it acknowledged MUST treat the receipt of a
+ <x:ref>PUSH_PROMISE</x:ref> frame as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ The initial value is 1, which indicates that server push is permitted. Any value
+ other than 0 or 1 MUST be treated as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+ <x:lt hangText="SETTINGS_MAX_CONCURRENT_STREAMS (0x3):"
+ anchor="SETTINGS_MAX_CONCURRENT_STREAMS">
+ <t>
+ Indicates the maximum number of concurrent streams that the sender will allow.
+ This limit is directional: it applies to the number of streams that the sender
+ permits the receiver to create. Initially there is no limit to this value. It is
+ recommended that this value be no smaller than 100, so as to not unnecessarily
+ limit parallelism.
+ </t>
+ <t>
+ A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special
+ by endpoints. A zero value does prevent the creation of new streams; however, this
+ can also happen for any limit that is exhausted with active streams. Servers
+ SHOULD only set a zero value for short durations; if a server does not wish to
+ accept requests, closing the connection could be preferable.
+ </t>
+ </x:lt>
+ <x:lt hangText="SETTINGS_INITIAL_WINDOW_SIZE (0x4):"
+ anchor="SETTINGS_INITIAL_WINDOW_SIZE">
+ <t>
+ Indicates the sender's initial window size (in octets) for stream level flow
+ control. The initial value is 2<x:sup>16</x:sup>-1 (65,535) octets.
+ </t>
+ <t>
+ This setting affects the window size of all streams, including existing streams,
+ see <xref target="InitialWindowSize"/>.
+ </t>
+ <t>
+ Values above the maximum flow control window size of 2<x:sup>31</x:sup>-1 MUST
+ be treated as a <xref target="ConnectionErrorHandler">connection error</xref> of
+ type <x:ref>FLOW_CONTROL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+ <x:lt hangText="SETTINGS_MAX_FRAME_SIZE (0x5):"
+ anchor="SETTINGS_MAX_FRAME_SIZE">
+ <t>
+ Indicates the size of the largest frame payload that the sender is willing to
+ receive, in octets.
+ </t>
+ <t>
+ The initial value is 2<x:sup>14</x:sup> (16,384) octets. The value advertised by
+ an endpoint MUST be between this initial value and the maximum allowed frame size
+ (2<x:sup>24</x:sup>-1 or 16,777,215 octets), inclusive. Values outside this range
+ MUST be treated as a <xref target="ConnectionErrorHandler">connection error</xref>
+ of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+ <x:lt hangText="SETTINGS_MAX_HEADER_LIST_SIZE (0x6):"
+ anchor="SETTINGS_MAX_HEADER_LIST_SIZE">
+ <t>
+ This advisory setting informs a peer of the maximum size of header list that the
+ sender is prepared to accept, in octets. The value is based on the uncompressed
+ size of header fields, including the length of the name and value in octets plus
+ an overhead of 32 octets for each header field.
+ </t>
+ <t>
+ For any given request, a lower limit than what is advertised MAY be enforced. The
+ initial value of this setting is unlimited.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+ <t>
+ An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier
+ MUST ignore that setting.
+ </t>
+ </section>
+
+ <section anchor="SettingsSync" title="Settings Synchronization">
+ <t>
+ Most values in SETTINGS benefit from or require an understanding of when the peer has
+ received and applied the changed parameter values. In order to provide
+ such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag
+ is not set MUST apply the updated parameters as soon as possible upon receipt.
+ </t>
+ <t>
+ The values in the SETTINGS frame MUST be processed in the order they appear, with no
+ other frame processing between values. Unsupported parameters MUST be ignored. Once
+ all values have been processed, the recipient MUST immediately emit a SETTINGS frame
+ with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender
+ of the altered parameters can rely on the setting having been applied.
+ </t>
+ <t>
+ If the sender of a SETTINGS frame does not receive an acknowledgement within a
+ reasonable amount of time, it MAY issue a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>SETTINGS_TIMEOUT</x:ref>.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="PUSH_PROMISE" title="PUSH_PROMISE">
+ <t>
+ The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of
+ streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned
+ 31-bit identifier of the stream the endpoint plans to create along with a set of headers
+ that provide additional context for the stream. <xref target="PushResources"/> contains a
+ thorough description of the use of PUSH_PROMISE frames.
+ </t>
+
+ <figure title="PUSH_PROMISE Payload Format">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Pad Length? (8)|
+ +-+-------------+-----------------------------------------------+
+ |R| Promised Stream ID (31) |
+ +-+-----------------------------+-------------------------------+
+ | Header Block Fragment (*) ...
+ +---------------------------------------------------------------+
+ | Padding (*) ...
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The PUSH_PROMISE frame payload has the following fields:
+ <list style="hanging">
+ <t hangText="Pad Length:">
+ An 8-bit field containing the length of the frame padding in units of octets. This
+ field is only present if the PADDED flag is set.
+ </t>
+ <t hangText="R:">
+ A single reserved bit.
+ </t>
+ <t hangText="Promised Stream ID:">
+ An unsigned 31-bit integer that identifies the stream that is reserved by the
+ PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next
+ stream sent by the sender (see <xref target="StreamIdentifiers">new stream
+ identifier</xref>).
+ </t>
+ <t hangText="Header Block Fragment:">
+ A <xref target="HeaderBlock">header block fragment</xref> containing request header
+ fields.
+ </t>
+ <t hangText="Padding:">
+ Padding octets.
+ </t>
+ </list>
+ </t>
+
+ <t>
+ The PUSH_PROMISE frame defines the following flags:
+ <list style="hanging">
+ <x:lt hangText="END_HEADERS (0x4):">
+ <t>
+ Bit 3 being set indicates that this frame contains an entire <xref
+ target="HeaderBlock">header block</xref> and is not followed by any
+ <x:ref>CONTINUATION</x:ref> frames.
+ </t>
+ <t>
+ A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a
+ CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any
+ other type of frame or a frame on a different stream as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+ <x:lt hangText="PADDED (0x8):">
+ <t>
+ Bit 4 being set indicates that the Pad Length field and any padding that it
+ describes is present.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+
+ <t>
+ PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream
+ identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the
+ stream identifier field specifies the value 0x0, a recipient MUST respond with a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <t>
+ Promised streams are not required to be used in the order they are promised. The
+ PUSH_PROMISE only reserves stream identifiers for later use.
+ </t>
+
+ <t>
+ PUSH_PROMISE MUST NOT be sent if the <x:ref>SETTINGS_ENABLE_PUSH</x:ref> setting of the
+ peer endpoint is set to 0. An endpoint that has set this setting and has received
+ acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a
+ <x:ref>RST_STREAM</x:ref> referencing the promised stream identifier back to the sender of
+ the PUSH_PROMISE.
+ </t>
+
+ <t>
+ A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a <xref
+ target="HeaderBlock">header block</xref> potentially modifies the state maintained for
+ header compression. PUSH_PROMISE also reserves a stream for later use, causing the
+ promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a
+ stream unless that stream is either "open" or "half closed (remote)"; the sender MUST
+ ensure that the promised stream is a valid choice for a <xref
+ target="StreamIdentifiers">new stream identifier</xref> (that is, the promised stream MUST
+ be in the "idle" state).
+ </t>
+ <t>
+ Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream
+ state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a
+ stream that is neither "open" nor "half closed (local)" as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>. However, an endpoint that has sent
+ <x:ref>RST_STREAM</x:ref> on the associated stream MUST handle PUSH_PROMISE frames that
+ might have been created before the <x:ref>RST_STREAM</x:ref> frame is received and
+ processed.
+ </t>
+ <t>
+ A receiver MUST treat the receipt of a PUSH_PROMISE that promises an <xref
+ target="StreamIdentifiers">illegal stream identifier</xref> (that is, an identifier for a
+ stream that is not currently in the "idle" state) as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <t>
+ The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical
+ to those defined for <xref target="DATA">DATA frames</xref>.
+ </t>
+ </section>
+
+ <section anchor="PING" title="PING">
+ <t>
+ The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the
+ sender, as well as determining whether an idle connection is still functional. PING
+ frames can be sent from any endpoint.
+ </t>
+ <figure title="PING Payload Format">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ | Opaque Data (64) |
+ | |
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+
+ <t>
+ In addition to the frame header, PING frames MUST contain 8 octets of data in the payload.
+ A sender can include any value it chooses and use those bytes in any fashion.
+ </t>
+ <t>
+ Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with
+ the ACK flag set in response, with an identical payload. PING responses SHOULD be given
+ higher priority than any other frame.
+ </t>
+
+ <t>
+ The PING frame defines the following flags:
+ <list style="hanging">
+ <t hangText="ACK (0x1):">
+ Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST
+ set this flag in PING responses. An endpoint MUST NOT respond to PING frames
+ containing this flag.
+ </t>
+ </list>
+ </t>
+ <t>
+ PING frames are not associated with any individual stream. If a PING frame is received
+ with a stream identifier field value other than 0x0, the recipient MUST respond with a
+ <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ Receipt of a PING frame with a length field value other than 8 MUST be treated as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>FRAME_SIZE_ERROR</x:ref>.
+ </t>
+
+ </section>
+
+ <section anchor="GOAWAY" title="GOAWAY">
+ <t>
+ The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this
+ connection. GOAWAY can be sent by either the client or the server. Once sent, the sender
+ will ignore frames sent on any new streams with identifiers higher than the included last
+ stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the
+ connection, although a new connection can be established for new streams.
+ </t>
+ <t>
+ The purpose of this frame is to allow an endpoint to gracefully stop accepting new
+ streams, while still finishing processing of previously established streams. This enables
+          administrative actions, like server maintenance.
+ </t>
+ <t>
+ There is an inherent race condition between an endpoint starting new streams and the
+ remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream
+ identifier of the last peer-initiated stream which was or might be processed on the
+ sending endpoint in this connection. For instance, if the server sends a GOAWAY frame,
+ the identified stream is the highest numbered stream initiated by the client.
+ </t>
+ <t>
+ If the receiver of the GOAWAY has sent data on streams with a higher stream identifier
+ than what is indicated in the GOAWAY frame, those streams are not or will not be
+ processed. The receiver of the GOAWAY frame can treat the streams as though they had
+ never been created at all, thereby allowing those streams to be retried later on a new
+ connection.
+ </t>
+ <t>
+ Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote
+ can know whether a stream has been partially processed or not. For example, if an HTTP
+ client sends a POST at the same time that a server closes a connection, the client cannot
+ know if the server started to process that POST request if the server does not send a
+ GOAWAY frame to indicate what streams it might have acted on.
+ </t>
+ <t>
+ An endpoint might choose to close a connection without sending GOAWAY for misbehaving
+ peers.
+ </t>
+
+ <figure title="GOAWAY Payload Format">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R| Last-Stream-ID (31) |
+ +-+-------------------------------------------------------------+
+ | Error Code (32) |
+ +---------------------------------------------------------------+
+ | Additional Debug Data (*) |
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The GOAWAY frame does not define any flags.
+ </t>
+ <t>
+ The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat
+ a <x:ref>GOAWAY</x:ref> frame with a stream identifier other than 0x0 as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ The last stream identifier in the GOAWAY frame contains the highest numbered stream
+ identifier for which the sender of the GOAWAY frame might have taken some action on, or
+ might yet take action on. All streams up to and including the identified stream might
+ have been processed in some way. The last stream identifier can be set to 0 if no streams
+ were processed.
+ <list style="hanging">
+ <t hangText="Note:">
+ In this context, "processed" means that some data from the stream was passed to some
+ higher layer of software that might have taken some action as a result.
+ </t>
+ </list>
+ If a connection terminates without a GOAWAY frame, the last stream identifier is
+ effectively the highest possible stream identifier.
+ </t>
+ <t>
+ On streams with lower or equal numbered identifiers that were not closed completely prior
+ to the connection being closed, re-attempting requests, transactions, or any protocol
+ activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or
+ DELETE. Any protocol activity that uses higher numbered streams can be safely retried
+ using a new connection.
+ </t>
+ <t>
+ Activity on streams numbered lower or equal to the last stream identifier might still
+ complete successfully. The sender of a GOAWAY frame might gracefully shut down a
+ connection by sending a GOAWAY frame, maintaining the connection in an open state until
+ all in-progress streams complete.
+ </t>
+ <t>
+ An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an
+ endpoint that sends GOAWAY with <x:ref>NO_ERROR</x:ref> during graceful shutdown could
+          subsequently encounter a condition that requires immediate termination of the connection.
+ The last stream identifier from the last GOAWAY frame received indicates which streams
+ could have been acted upon. Endpoints MUST NOT increase the value they send in the last
+ stream identifier, since the peers might already have retried unprocessed requests on
+ another connection.
+ </t>
+ <t>
+ A client that is unable to retry requests loses all requests that are in flight when the
+ server closes the connection. This is especially true for intermediaries that might
+ not be serving clients using HTTP/2. A server that is attempting to gracefully shut down
+ a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to
+ 2<x:sup>31</x:sup>-1 and a <x:ref>NO_ERROR</x:ref> code. This signals to the client that
+ a shutdown is imminent and that no further requests can be initiated. After waiting at
+ least one round trip time, the server can send another GOAWAY frame with an updated last
+ stream identifier. This ensures that a connection can be cleanly shut down without losing
+ requests.
+ </t>
+
+ <t>
+ After sending a GOAWAY frame, the sender can discard frames for streams with identifiers
+ higher than the identified last stream. However, any frames that alter connection state
+ cannot be completely ignored. For instance, <x:ref>HEADERS</x:ref>,
+ <x:ref>PUSH_PROMISE</x:ref> and <x:ref>CONTINUATION</x:ref> frames MUST be minimally
+ processed to ensure the state maintained for header compression is consistent (see <xref
+ target="HeaderBlock"/>); similarly DATA frames MUST be counted toward the connection flow
+ control window. Failure to process these frames can cause flow control or header
+ compression state to become unsynchronized.
+ </t>
+
+ <t>
+ The GOAWAY frame also contains a 32-bit <xref target="ErrorCodes">error code</xref> that
+ contains the reason for closing the connection.
+ </t>
+ <t>
+ Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug
+ data is intended for diagnostic purposes only and carries no semantic value. Debug
+ information could contain security- or privacy-sensitive data. Logged or otherwise
+ persistently stored debug data MUST have adequate safeguards to prevent unauthorized
+ access.
+ </t>
+ </section>
+
+ <section anchor="WINDOW_UPDATE" title="WINDOW_UPDATE">
+ <t>
+ The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see <xref
+ target="FlowControl"/> for an overview.
+ </t>
+ <t>
+ Flow control operates at two levels: on each individual stream and on the entire
+ connection.
+ </t>
+ <t>
+ Both types of flow control are hop-by-hop; that is, only between the two endpoints.
+ Intermediaries do not forward WINDOW_UPDATE frames between dependent connections.
+ However, throttling of data transfer by any receiver can indirectly cause the propagation
+ of flow control information toward the original sender.
+ </t>
+ <t>
+ Flow control only applies to frames that are identified as being subject to flow control.
+ Of the frame types defined in this document, this includes only <x:ref>DATA</x:ref> frames.
+ Frames that are exempt from flow control MUST be accepted and processed, unless the
+ receiver is unable to assign resources to handling the frame. A receiver MAY respond with
+ a <xref target="StreamErrorHandler">stream error</xref> or <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>FLOW_CONTROL_ERROR</x:ref> if it is unable to accept a frame.
+ </t>
+ <figure title="WINDOW_UPDATE Payload Format">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R| Window Size Increment (31) |
+ +-+-------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer
+ indicating the number of octets that the sender can transmit in addition to the existing
+ flow control window. The legal range for the increment to the flow control window is 1 to
+ 2<x:sup>31</x:sup>-1 (0x7fffffff) octets.
+ </t>
+ <t>
+ The WINDOW_UPDATE frame does not define any flags.
+ </t>
+ <t>
+ The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the
+ former case, the frame's stream identifier indicates the affected stream; in the latter,
+ the value "0" indicates that the entire connection is the subject of the frame.
+ </t>
+ <t>
+        A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window
+ increment of 0 as a <xref target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>; errors on the connection flow control window MUST be
+ treated as a <xref target="ConnectionErrorHandler">connection error</xref>.
+ </t>
+ <t>
+ WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
+ This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)"
+ or "closed" stream. A receiver MUST NOT treat this as an error, see <xref
+ target="StreamStates"/>.
+ </t>
+ <t>
+ A receiver that receives a flow controlled frame MUST always account for its contribution
+ against the connection flow control window, unless the receiver treats this as a <xref
+ target="ConnectionErrorHandler">connection error</xref>. This is necessary even if the
+ frame is in error. Since the sender counts the frame toward the flow control window, if
+ the receiver does not, the flow control window at sender and receiver can become
+ different.
+ </t>
+
+ <section title="The Flow Control Window">
+ <t>
+ Flow control in HTTP/2 is implemented using a window kept by each sender on every
+ stream. The flow control window is a simple integer value that indicates how many octets
+ of data the sender is permitted to transmit; as such, its size is a measure of the
+ buffering capacity of the receiver.
+ </t>
+ <t>
+ Two flow control windows are applicable: the stream flow control window and the
+ connection flow control window. The sender MUST NOT send a flow controlled frame with a
+ length that exceeds the space available in either of the flow control windows advertised
+ by the receiver. Frames with zero length with the END_STREAM flag set (that is, an
+ empty <x:ref>DATA</x:ref> frame) MAY be sent if there is no available space in either
+ flow control window.
+ </t>
+ <t>
+ For flow control calculations, the 9 octet frame header is not counted.
+ </t>
+ <t>
+ After sending a flow controlled frame, the sender reduces the space available in both
+ windows by the length of the transmitted frame.
+ </t>
+ <t>
+ The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up
+ space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream
+ and connection level flow control windows.
+ </t>
+ <t>
+ A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the
+ amount specified in the frame.
+ </t>
+ <t>
+ A sender MUST NOT allow a flow control window to exceed 2<x:sup>31</x:sup>-1 octets.
+ If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this
+ maximum it MUST terminate either the stream or the connection, as appropriate. For
+ streams, the sender sends a <x:ref>RST_STREAM</x:ref> with the error code of
+ <x:ref>FLOW_CONTROL_ERROR</x:ref> code; for the connection, a <x:ref>GOAWAY</x:ref>
+ frame with a <x:ref>FLOW_CONTROL_ERROR</x:ref> code.
+ </t>
+ <t>
+ Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are
+ completely asynchronous with respect to each other. This property allows a receiver to
+ aggressively update the window size kept by the sender to prevent streams from stalling.
+ </t>
+ </section>
+
+ <section anchor="InitialWindowSize" title="Initial Flow Control Window Size">
+ <t>
+ When an HTTP/2 connection is first established, new streams are created with an initial
+ flow control window size of 65,535 octets. The connection flow control window is 65,535
+ octets. Both endpoints can adjust the initial window size for new streams by including
+ a value for <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> in the <x:ref>SETTINGS</x:ref>
+ frame that forms part of the connection preface. The connection flow control window can
+ only be changed using WINDOW_UPDATE frames.
+ </t>
+ <t>
+ Prior to receiving a <x:ref>SETTINGS</x:ref> frame that sets a value for
+ <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref>, an endpoint can only use the default
+ initial window size when sending flow controlled frames. Similarly, the connection flow
+ control window is set to the default initial window size until a WINDOW_UPDATE frame is
+ received.
+ </t>
+ <t>
+ A <x:ref>SETTINGS</x:ref> frame can alter the initial flow control window size for all
+ current streams. When the value of <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> changes,
+ a receiver MUST adjust the size of all stream flow control windows that it maintains by
+ the difference between the new value and the old value.
+ </t>
+ <t>
+ A change to <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> can cause the available space in
+ a flow control window to become negative. A sender MUST track the negative flow control
+ window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE
+ frames that cause the flow control window to become positive.
+ </t>
+ <t>
+ For example, if the client sends 60KB immediately on connection establishment, and the
+ server sets the initial window size to be 16KB, the client will recalculate the
+ available flow control window to be -44KB on receipt of the <x:ref>SETTINGS</x:ref>
+ frame. The client retains a negative flow control window until WINDOW_UPDATE frames
+ restore the window to being positive, after which the client can resume sending.
+ </t>
+ <t>
+ A <x:ref>SETTINGS</x:ref> frame cannot alter the connection flow control window.
+ </t>
+ <t>
+ An endpoint MUST treat a change to <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> that
+ causes any flow control window to exceed the maximum size as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>FLOW_CONTROL_ERROR</x:ref>.
+ </t>
+ </section>
+
+ <section title="Reducing the Stream Window Size">
+ <t>
+ A receiver that wishes to use a smaller flow control window than the current size can
+ send a new <x:ref>SETTINGS</x:ref> frame. However, the receiver MUST be prepared to
+ receive data that exceeds this window size, since the sender might send data that
+ exceeds the lower limit prior to processing the <x:ref>SETTINGS</x:ref> frame.
+ </t>
+ <t>
+ After sending a SETTINGS frame that reduces the initial flow control window size, a
+ receiver has two options for handling streams that exceed flow control limits:
+ <list style="numbers">
+ <t>
+ The receiver can immediately send <x:ref>RST_STREAM</x:ref> with
+ <x:ref>FLOW_CONTROL_ERROR</x:ref> error code for the affected streams.
+ </t>
+ <t>
+ The receiver can accept the streams and tolerate the resulting head of line
+ blocking, sending WINDOW_UPDATE frames as it consumes data.
+ </t>
+ </list>
+ </t>
+ </section>
+ </section>
+
+ <section anchor="CONTINUATION" title="CONTINUATION">
+ <t>
+ The CONTINUATION frame (type=0x9) is used to continue a sequence of <xref
+ target="HeaderBlock">header block fragments</xref>. Any number of CONTINUATION frames can
+ be sent on an existing stream, as long as the preceding frame is on the same stream and is
+ a <x:ref>HEADERS</x:ref>, <x:ref>PUSH_PROMISE</x:ref> or CONTINUATION frame without the
+ END_HEADERS flag set.
+ </t>
+
+ <figure title="CONTINUATION Frame Payload">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Header Block Fragment (*) ...
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The CONTINUATION frame payload contains a <xref target="HeaderBlock">header block
+ fragment</xref>.
+ </t>
+
+ <t>
+ The CONTINUATION frame defines the following flag:
+ <list style="hanging">
+ <x:lt hangText="END_HEADERS (0x4):">
+ <t>
+ Bit 3 being set indicates that this frame ends a <xref target="HeaderBlock">header
+ block</xref>.
+ </t>
+ <t>
+ If the END_HEADERS bit is not set, this frame MUST be followed by another
+ CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or
+ a frame on a different stream as a <xref target="ConnectionErrorHandler">connection
+ error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+
+ <t>
+ The CONTINUATION frame changes the connection state as defined in <xref
+ target="HeaderBlock" />.
+ </t>
+
+ <t>
+ CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received
+ whose stream identifier field is 0x0, the recipient MUST respond with a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type PROTOCOL_ERROR.
+ </t>
+
+ <t>
+ A CONTINUATION frame MUST be preceded by a <x:ref>HEADERS</x:ref>,
+ <x:ref>PUSH_PROMISE</x:ref> or CONTINUATION frame without the END_HEADERS flag set. A
+ recipient that observes violation of this rule MUST respond with a <xref
+ target="ConnectionErrorHandler"> connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="ErrorCodes" title="Error Codes">
+ <t>
+ Error codes are 32-bit fields that are used in <x:ref>RST_STREAM</x:ref> and
+ <x:ref>GOAWAY</x:ref> frames to convey the reasons for the stream or connection error.
+ </t>
+
+ <t>
+ Error codes share a common code space. Some error codes apply only to either streams or the
+ entire connection and have no defined semantics in the other context.
+ </t>
+
+ <t>
+ The following error codes are defined:
+ <list style="hanging">
+ <t hangText="NO_ERROR (0x0):" anchor="NO_ERROR">
+ The associated condition is not as a result of an error. For example, a
+ <x:ref>GOAWAY</x:ref> might include this code to indicate graceful shutdown of a
+ connection.
+ </t>
+ <t hangText="PROTOCOL_ERROR (0x1):" anchor="PROTOCOL_ERROR">
+ The endpoint detected an unspecific protocol error. This error is for use when a more
+ specific error code is not available.
+ </t>
+ <t hangText="INTERNAL_ERROR (0x2):" anchor="INTERNAL_ERROR">
+ The endpoint encountered an unexpected internal error.
+ </t>
+ <t hangText="FLOW_CONTROL_ERROR (0x3):" anchor="FLOW_CONTROL_ERROR">
+ The endpoint detected that its peer violated the flow control protocol.
+ </t>
+ <t hangText="SETTINGS_TIMEOUT (0x4):" anchor="SETTINGS_TIMEOUT">
+ The endpoint sent a <x:ref>SETTINGS</x:ref> frame, but did not receive a response in a
+ timely manner. See <xref target="SettingsSync">Settings Synchronization</xref>.
+ </t>
+ <t hangText="STREAM_CLOSED (0x5):" anchor="STREAM_CLOSED">
+ The endpoint received a frame after a stream was half closed.
+ </t>
+ <t hangText="FRAME_SIZE_ERROR (0x6):" anchor="FRAME_SIZE_ERROR">
+ The endpoint received a frame with an invalid size.
+ </t>
+ <t hangText="REFUSED_STREAM (0x7):" anchor="REFUSED_STREAM">
+ The endpoint refuses the stream prior to performing any application processing, see
+ <xref target="Reliability"/> for details.
+ </t>
+ <t hangText="CANCEL (0x8):" anchor="CANCEL">
+ Used by the endpoint to indicate that the stream is no longer needed.
+ </t>
+ <t hangText="COMPRESSION_ERROR (0x9):" anchor="COMPRESSION_ERROR">
+ The endpoint is unable to maintain the header compression context for the connection.
+ </t>
+ <t hangText="CONNECT_ERROR (0xa):" anchor="CONNECT_ERROR">
+ The connection established in response to a <xref target="CONNECT">CONNECT
+ request</xref> was reset or abnormally closed.
+ </t>
+ <t hangText="ENHANCE_YOUR_CALM (0xb):" anchor="ENHANCE_YOUR_CALM">
+ The endpoint detected that its peer is exhibiting a behavior that might be generating
+ excessive load.
+ </t>
+ <t hangText="INADEQUATE_SECURITY (0xc):" anchor="INADEQUATE_SECURITY">
+ The underlying transport has properties that do not meet minimum security
+ requirements (see <xref target="TLSUsage"/>).
+ </t>
+ </list>
+ </t>
+ <t>
+ Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be
+ treated by an implementation as being equivalent to <x:ref>INTERNAL_ERROR</x:ref>.
+ </t>
+ </section>
+
+ <section anchor="HTTPLayer" title="HTTP Message Exchanges">
+ <t>
+ HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means
+ that, from the application perspective, the features of the protocol are largely
+ unchanged. To achieve this, all request and response semantics are preserved, although the
+ syntax of conveying those semantics has changed.
+ </t>
+ <t>
+ Thus, the specification and requirements of HTTP/1.1 Semantics and Content <xref
+ target="RFC7231"/>, Conditional Requests <xref target="RFC7232"/>, Range Requests <xref
+ target="RFC7233"/>, Caching <xref target="RFC7234"/> and Authentication <xref
+ target="RFC7235"/> are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax
+        and Routing <xref target="RFC7230"/>, such as the HTTP and HTTPS URI schemes, are also
+        applicable in HTTP/2, but the expression of those semantics for this protocol is defined
+        in the sections below.
+ </t>
+
+ <section anchor="HttpSequence" title="HTTP Request/Response Exchange">
+ <t>
+ A client sends an HTTP request on a new stream, using a previously unused <xref
+ target="StreamIdentifiers">stream identifier</xref>. A server sends an HTTP response on
+ the same stream as the request.
+ </t>
+ <t>
+ An HTTP message (request or response) consists of:
+ <list style="numbers">
+ <t>
+ for a response only, zero or more <x:ref>HEADERS</x:ref> frames (each followed by zero
+ or more <x:ref>CONTINUATION</x:ref> frames) containing the message headers of
+ informational (1xx) HTTP responses (see <xref target="RFC7230" x:fmt=","
+ x:rel="#header.fields"/> and <xref target="RFC7231" x:fmt="," x:rel="#status.1xx"/>),
+ and
+ </t>
+ <t>
+ one <x:ref>HEADERS</x:ref> frame (followed by zero or more <x:ref>CONTINUATION</x:ref>
+ frames) containing the message headers (see <xref target="RFC7230" x:fmt=","
+ x:rel="#header.fields"/>), and
+ </t>
+ <t>
+ zero or more <x:ref>DATA</x:ref> frames containing the message payload (see <xref
+ target="RFC7230" x:fmt="," x:rel="#message.body"/>), and
+ </t>
+ <t>
+ optionally, one <x:ref>HEADERS</x:ref> frame, followed by zero or more
+ <x:ref>CONTINUATION</x:ref> frames containing the trailer-part, if present (see <xref
+ target="RFC7230" x:fmt="," x:rel="#chunked.trailer.part"/>).
+ </t>
+ </list>
+ The last frame in the sequence bears an END_STREAM flag, noting that a
+ <x:ref>HEADERS</x:ref> frame bearing the END_STREAM flag can be followed by
+ <x:ref>CONTINUATION</x:ref> frames that carry any remaining portions of the header block.
+ </t>
+ <t>
+ Other frames (from any stream) MUST NOT occur between either <x:ref>HEADERS</x:ref> frame
+ and any <x:ref>CONTINUATION</x:ref> frames that might follow.
+ </t>
+
+ <t>
+ Trailing header fields are carried in a header block that also terminates the stream.
+ That is, a sequence starting with a <x:ref>HEADERS</x:ref> frame, followed by zero or more
+ <x:ref>CONTINUATION</x:ref> frames, where the <x:ref>HEADERS</x:ref> frame bears an
+ END_STREAM flag. Header blocks after the first that do not terminate the stream are not
+ part of an HTTP request or response.
+ </t>
+ <t>
+ A <x:ref>HEADERS</x:ref> frame (and associated <x:ref>CONTINUATION</x:ref> frames) can
+ only appear at the start or end of a stream. An endpoint that receives a
+ <x:ref>HEADERS</x:ref> frame without the END_STREAM flag set after receiving a final
+ (non-informational) status code MUST treat the corresponding request or response as <xref
+ target="malformed">malformed</xref>.
+ </t>
+
+ <t>
+ An HTTP request/response exchange fully consumes a single stream. A request starts with
+ the <x:ref>HEADERS</x:ref> frame that puts the stream into an "open" state. The request
+ ends with a frame bearing END_STREAM, which causes the stream to become "half closed
+ (local)" for the client and "half closed (remote)" for the server. A response starts with
+ a <x:ref>HEADERS</x:ref> frame and ends with a frame bearing END_STREAM, which places the
+ stream in the "closed" state.
+        <!-- Yes, the response might complete before the request does, but that's not a detail
+             we need to expand upon. It's complicated enough explaining this as it is. -->
+ </t>
+
+ <section anchor="informational-responses" title="Upgrading From HTTP/2">
+ <t>
+ HTTP/2 removes support for the 101 (Switching Protocols) informational status code
+ (<xref target="RFC7231" x:fmt="," x:rel="#status.101"/>).
+ </t>
+ <t>
+ The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol.
+ Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate
+ their use (see <xref target="starting"/>).
+ </t>
+ </section>
+
+ <section anchor="HttpHeaders" title="HTTP Header Fields">
+ <t>
+ HTTP header fields carry information as a series of key-value pairs. For a listing of
+ registered HTTP headers, see the Message Header Field Registry maintained at <eref
+ target="https://www.iana.org/assignments/message-headers"/>.
+ </t>
+
+ <section anchor="PseudoHeaderFields" title="Pseudo-Header Fields">
+ <t>
+ While HTTP/1.x used the message start-line (see <xref target="RFC7230" x:fmt=","
+ x:rel="#start.line"/>) to convey the target URI and method of the request, and the
+ status code for the response, HTTP/2 uses special pseudo-header fields beginning with
+ ':' character (ASCII 0x3a) for this purpose.
+ </t>
+ <t>
+ Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate
+ pseudo-header fields other than those defined in this document.
+ </t>
+ <t>
+ Pseudo-header fields are only valid in the context in which they are defined.
+ Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header
+ fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST
+ NOT appear in trailers. Endpoints MUST treat a request or response that contains
+ undefined or invalid pseudo-header fields as <xref
+ target="malformed">malformed</xref>.
+ </t>
+ <t>
+ Just as in HTTP/1.x, header field names are strings of ASCII characters that are
+ compared in a case-insensitive fashion. However, header field names MUST be converted
+ to lowercase prior to their encoding in HTTP/2. A request or response containing
+ uppercase header field names MUST be treated as <xref
+ target="malformed">malformed</xref>.
+ </t>
+ <t>
+ All pseudo-header fields MUST appear in the header block before regular header fields.
+ Any request or response that contains a pseudo-header field that appears in a header
+ block after a regular header field MUST be treated as <xref
+ target="malformed">malformed</xref>.
+ </t>
+ </section>
+
+ <section title="Connection-Specific Header Fields">
+ <t>
+ HTTP/2 does not use the <spanx style="verb">Connection</spanx> header field to
+ indicate connection-specific header fields; in this protocol, connection-specific
+ metadata is conveyed by other means. An endpoint MUST NOT generate a HTTP/2 message
+ containing connection-specific header fields; any message containing
+ connection-specific header fields MUST be treated as <xref
+ target="malformed">malformed</xref>.
+ </t>
+ <t>
+ This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need
+ to remove any header fields nominated by the Connection header field, along with the
+ Connection header field itself. Such intermediaries SHOULD also remove other
+ connection-specific header fields, such as Keep-Alive, Proxy-Connection,
+ Transfer-Encoding and Upgrade, even if they are not nominated by Connection.
+ </t>
+ <t>
+ One exception to this is the TE header field, which MAY be present in an HTTP/2
+ request, but when it is MUST NOT contain any value other than "trailers".
+ </t>
+ <t>
+ <list style="hanging">
+ <t hangText="Note:">
+ HTTP/2 purposefully does not support upgrade to another protocol. The handshake
+ methods described in <xref target="starting"/> are believed sufficient to
+ negotiate the use of alternative protocols.
+ </t>
+ </list>
+ </t>
+ </section>
+
+ <section anchor="HttpRequest" title="Request Pseudo-Header Fields">
+ <t>
+ The following pseudo-header fields are defined for HTTP/2 requests:
+ <list style="symbols">
+ <x:lt>
+ <t>
+ The <spanx style="verb">:method</spanx> pseudo-header field includes the HTTP
+ method (<xref target="RFC7231" x:fmt="," x:rel="#methods"/>).
+ </t>
+ </x:lt>
+ <x:lt>
+ <t>
+ The <spanx style="verb">:scheme</spanx> pseudo-header field includes the scheme
+ portion of the target URI (<xref target="RFC3986" x:fmt="," x:sec="3.1"/>).
+ </t>
+ <t>
+ <spanx style="verb">:scheme</spanx> is not restricted to <spanx
+ style="verb">http</spanx> and <spanx style="verb">https</spanx> schemed URIs. A
+ proxy or gateway can translate requests for non-HTTP schemes, enabling the use
+ of HTTP to interact with non-HTTP services.
+ </t>
+ </x:lt>
+ <x:lt>
+ <t>
+ The <spanx style="verb">:authority</spanx> pseudo-header field includes the
+ authority portion of the target URI (<xref target="RFC3986" x:fmt=","
+ x:sec="3.2"/>). The authority MUST NOT include the deprecated <spanx
+ style="verb">userinfo</spanx> subcomponent for <spanx style="verb">http</spanx>
+ or <spanx style="verb">https</spanx> schemed URIs.
+ </t>
+ <t>
+ To ensure that the HTTP/1.1 request line can be reproduced accurately, this
+ pseudo-header field MUST be omitted when translating from an HTTP/1.1 request
+ that has a request target in origin or asterisk form (see <xref
+ target="RFC7230" x:fmt="," x:rel="#request-target"/>). Clients that generate
+ HTTP/2 requests directly SHOULD use the <spanx>:authority</spanx> pseudo-header
+ field instead of the <spanx style="verb">Host</spanx> header field. An
+ intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a <spanx
+ style="verb">Host</spanx> header field if one is not present in a request by
+ copying the value of the <spanx style="verb">:authority</spanx> pseudo-header
+ field.
+ </t>
+ </x:lt>
+ <x:lt>
+ <t>
+ The <spanx style="verb">:path</spanx> pseudo-header field includes the path and
+ query parts of the target URI (the <spanx style="verb">path-absolute</spanx>
+ production from <xref target="RFC3986"/> and optionally a '?' character
+ followed by the <spanx style="verb">query</spanx> production, see <xref
+ target="RFC3986" x:fmt="," x:sec="3.3"/> and <xref target="RFC3986" x:fmt=","
+ x:sec="3.4"/>). A request in asterisk form includes the value '*' for the
+ <spanx style="verb">:path</spanx> pseudo-header field.
+ </t>
+ <t>
+ This pseudo-header field MUST NOT be empty for <spanx style="verb">http</spanx>
+ or <spanx style="verb">https</spanx> URIs; <spanx style="verb">http</spanx> or
+ <spanx style="verb">https</spanx> URIs that do not contain a path component
+ MUST include a value of '/'. The exception to this rule is an OPTIONS request
+ for an <spanx style="verb">http</spanx> or <spanx style="verb">https</spanx>
+ URI that does not include a path component; these MUST include a <spanx
+ style="verb">:path</spanx> pseudo-header field with a value of '*' (see <xref
+ target="RFC7230" x:fmt="," x:rel="#asterisk-form"/>).
+ </t>
+ </x:lt>
+ </list>
+ </t>
+ <t>
+ All HTTP/2 requests MUST include exactly one valid value for the <spanx
+ style="verb">:method</spanx>, <spanx style="verb">:scheme</spanx>, and <spanx
+ style="verb">:path</spanx> pseudo-header fields, unless it is a <xref
+ target="CONNECT">CONNECT request</xref>. An HTTP request that omits mandatory
+ pseudo-header fields is <xref target="malformed">malformed</xref>.
+ </t>
+ <t>
+ HTTP/2 does not define a way to carry the version identifier that is included in the
+ HTTP/1.1 request line.
+ </t>
+ </section>
+
+ <section anchor="HttpResponse" title="Response Pseudo-Header Fields">
+ <t>
+ For HTTP/2 responses, a single <spanx style="verb">:status</spanx> pseudo-header
+ field is defined that carries the HTTP status code field (see <xref target="RFC7231"
+ x:fmt="," x:rel="#status.codes"/>). This pseudo-header field MUST be included in all
+ responses, otherwise the response is <xref target="malformed">malformed</xref>.
+ </t>
+ <t>
+ HTTP/2 does not define a way to carry the version or reason phrase that is included in
+ an HTTP/1.1 status line.
+ </t>
+ </section>
+
+ <section anchor="CompressCookie" title="Compressing the Cookie Header Field">
+ <t>
+ The <xref target="COOKIE">Cookie header field</xref> can carry a significant amount of
+ redundant data.
+ </t>
+ <t>
+ The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs").
+ This header field doesn't follow the list construction rules in HTTP (see <xref
+ target="RFC7230" x:fmt="," x:rel="#field.order"/>), which prevents cookie-pairs from
+ being separated into different name-value pairs. This can significantly reduce
+ compression efficiency as individual cookie-pairs are updated.
+ </t>
+ <t>
+ To allow for better compression efficiency, the Cookie header field MAY be split into
+ separate header fields, each with one or more cookie-pairs. If there are multiple
+ Cookie header fields after decompression, these MUST be concatenated into a single
+ octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ")
+ before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a
+ generic HTTP server application.
+ </t>
+ <figure>
+ <preamble>
+ Therefore, the following two lists of Cookie header fields are semantically
+ equivalent.
+ </preamble>
+ <artwork type="inline"><![CDATA[
+ cookie: a=b; c=d; e=f
+
+ cookie: a=b
+ cookie: c=d
+ cookie: e=f
+]]></artwork>
+ </figure>
+ </section>
+
+ <section anchor="malformed" title="Malformed Requests and Responses">
+ <t>
+ A malformed request or response is one that is an otherwise valid sequence of HTTP/2
+ frames, but is otherwise invalid due to the presence of extraneous frames, prohibited
+ header fields, the absence of mandatory header fields, or the inclusion of uppercase
+ header field names.
+ </t>
+ <t>
+ A request or response that includes an entity body can include a <spanx
+ style="verb">content-length</spanx> header field. A request or response is also
+ malformed if the value of a <spanx style="verb">content-length</spanx> header field
+ does not equal the sum of the <x:ref>DATA</x:ref> frame payload lengths that form the
+ body. A response that is defined to have no payload, as described in <xref
+ target="RFC7230" x:fmt="," x:rel="#header.content-length"/>, can have a non-zero
+ <spanx style="verb">content-length</spanx> header field, even though no content is
+ included in <x:ref>DATA</x:ref> frames.
+ </t>
+ <t>
+ Intermediaries that process HTTP requests or responses (i.e., any intermediary not
+ acting as a tunnel) MUST NOT forward a malformed request or response. Malformed
+ requests or responses that are detected MUST be treated as a <xref
+ target="StreamErrorHandler">stream error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ For malformed requests, a server MAY send an HTTP response prior to closing or
+ resetting the stream. Clients MUST NOT accept a malformed response. Note that these
+ requirements are intended to protect against several types of common attacks against
+ HTTP; they are deliberately strict, because being permissive can expose
+ implementations to these vulnerabilities.
+ </t>
+ </section>
+ </section>
+
+ <section title="Examples">
+ <t>
+ This section shows HTTP/1.1 requests and responses, with illustrations of equivalent
+ HTTP/2 requests and responses.
+ </t>
+ <t>
+ An HTTP GET request includes request header fields and no body and is therefore
+ transmitted as a single <x:ref>HEADERS</x:ref> frame, followed by zero or more
+ <x:ref>CONTINUATION</x:ref> frames containing the serialized block of request header
+ fields. The <x:ref>HEADERS</x:ref> frame in the following has both the END_HEADERS and
+ END_STREAM flags set; no <x:ref>CONTINUATION</x:ref> frames are sent:
+ </t>
+
+ <figure>
+ <artwork type="inline"><![CDATA[
+ GET /resource HTTP/1.1 HEADERS
+ Host: example.org ==> + END_STREAM
+ Accept: image/jpeg + END_HEADERS
+ :method = GET
+ :scheme = https
+ :path = /resource
+ host = example.org
+ accept = image/jpeg
+]]></artwork>
+ </figure>
+
+ <t>
+ Similarly, a response that includes only response header fields is transmitted as a
+ <x:ref>HEADERS</x:ref> frame (again, followed by zero or more
+ <x:ref>CONTINUATION</x:ref> frames) containing the serialized block of response header
+ fields.
+ </t>
+
+ <figure>
+ <artwork type="inline"><![CDATA[
+ HTTP/1.1 304 Not Modified HEADERS
+ ETag: "xyzzy" ==> + END_STREAM
+ Expires: Thu, 23 Jan ... + END_HEADERS
+ :status = 304
+ etag = "xyzzy"
+ expires = Thu, 23 Jan ...
+]]></artwork>
+ </figure>
+
+ <t>
+ An HTTP POST request that includes request header fields and payload data is transmitted
+ as one <x:ref>HEADERS</x:ref> frame, followed by zero or more
+ <x:ref>CONTINUATION</x:ref> frames containing the request header fields, followed by one
+ or more <x:ref>DATA</x:ref> frames, with the last <x:ref>CONTINUATION</x:ref> (or
+ <x:ref>HEADERS</x:ref>) frame having the END_HEADERS flag set and the final
+ <x:ref>DATA</x:ref> frame having the END_STREAM flag set:
+ </t>
+
+ <figure>
+ <artwork type="inline"><![CDATA[
+ POST /resource HTTP/1.1 HEADERS
+ Host: example.org ==> - END_STREAM
+ Content-Type: image/jpeg - END_HEADERS
+ Content-Length: 123 :method = POST
+ :path = /resource
+ {binary data} :scheme = https
+
+ CONTINUATION
+ + END_HEADERS
+ content-type = image/jpeg
+ host = example.org
+ content-length = 123
+
+ DATA
+ + END_STREAM
+ {binary data}
+]]></artwork>
+ <postamble>
+ Note that data contributing to any given header field could be spread between header
+ block fragments. The allocation of header fields to frames in this example is
+ illustrative only.
+ </postamble>
+ </figure>
+
+ <t>
+ A response that includes header fields and payload data is transmitted as a
+ <x:ref>HEADERS</x:ref> frame, followed by zero or more <x:ref>CONTINUATION</x:ref>
+ frames, followed by one or more <x:ref>DATA</x:ref> frames, with the last
+ <x:ref>DATA</x:ref> frame in the sequence having the END_STREAM flag set:
+ </t>
+
+ <figure>
+ <artwork type="inline"><![CDATA[
+ HTTP/1.1 200 OK HEADERS
+ Content-Type: image/jpeg ==> - END_STREAM
+ Content-Length: 123 + END_HEADERS
+ :status = 200
+ {binary data} content-type = image/jpeg
+ content-length = 123
+
+ DATA
+ + END_STREAM
+ {binary data}
+]]></artwork>
+ </figure>
+
+ <t>
+ Trailing header fields are sent as a header block after both the request or response
+ header block and all the <x:ref>DATA</x:ref> frames have been sent. The
+ <x:ref>HEADERS</x:ref> frame starting the trailers header block has the END_STREAM flag
+ set.
+ </t>
+
+ <figure>
+ <artwork type="inline"><![CDATA[
+ HTTP/1.1 200 OK HEADERS
+ Content-Type: image/jpeg ==> - END_STREAM
+ Transfer-Encoding: chunked + END_HEADERS
+ Trailer: Foo :status = 200
+ content-length = 123
+ 123 content-type = image/jpeg
+ {binary data} trailer = Foo
+ 0
+ Foo: bar DATA
+ - END_STREAM
+ {binary data}
+
+ HEADERS
+ + END_STREAM
+ + END_HEADERS
+ foo = bar
+]]></artwork>
+ </figure>
+
+
+ <figure>
+ <preamble>
+ An informational response using a 1xx status code other than 101 is transmitted as a
+ <x:ref>HEADERS</x:ref> frame, followed by zero or more <x:ref>CONTINUATION</x:ref>
+ frames:
+ </preamble>
+ <artwork type="inline"><![CDATA[
+ HTTP/1.1 103 BAR HEADERS
+ Extension-Field: bar ==> - END_STREAM
+ + END_HEADERS
+ :status = 103
+ extension-field = bar
+]]></artwork>
+ </figure>
+ </section>
+
+ <section anchor="Reliability" title="Request Reliability Mechanisms in HTTP/2">
+ <t>
+ In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error
+ occurs, because there is no means to determine the nature of the error. It is possible
+ that some server processing occurred prior to the error, which could result in
+ undesirable effects if the request were reattempted.
+ </t>
+ <t>
+ HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has
+ not been processed:
+ <list style="symbols">
+ <t>
+ The <x:ref>GOAWAY</x:ref> frame indicates the highest stream number that might have
+ been processed. Requests on streams with higher numbers are therefore guaranteed to
+ be safe to retry.
+ </t>
+ <t>
+ The <x:ref>REFUSED_STREAM</x:ref> error code can be included in a
+ <x:ref>RST_STREAM</x:ref> frame to indicate that the stream is being closed prior to
+ any processing having occurred. Any request that was sent on the reset stream can
+ be safely retried.
+ </t>
+ </list>
+ </t>
+ <t>
+ Requests that have not been processed have not failed; clients MAY automatically retry
+ them, even those with non-idempotent methods.
+ </t>
+ <t>
+ A server MUST NOT indicate that a stream has not been processed unless it can guarantee
+ that fact. If frames that are on a stream are passed to the application layer for any
+ stream, then <x:ref>REFUSED_STREAM</x:ref> MUST NOT be used for that stream, and a
+ <x:ref>GOAWAY</x:ref> frame MUST include a stream identifier that is greater than or
+ equal to the given stream identifier.
+ </t>
+ <t>
+ In addition to these mechanisms, the <x:ref>PING</x:ref> frame provides a way for a
+ client to easily test a connection. Connections that remain idle can become broken as
+ some middleboxes (for instance, network address translators, or load balancers) silently
+ discard connection bindings. The <x:ref>PING</x:ref> frame allows a client to safely
+ test whether a connection is still active without sending a request.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="PushResources" title="Server Push">
+ <t>
+ HTTP/2 allows a server to pre-emptively send (or "push") responses (along with
+ corresponding "promised" requests) to a client in association with a previous
+ client-initiated request. This can be useful when the server knows the client will need
+ to have those responses available in order to fully process the response to the original
+ request.
+ </t>
+
+ <t>
+ Pushing additional message exchanges in this fashion is optional, and is negotiated
+ between individual endpoints. The <x:ref>SETTINGS_ENABLE_PUSH</x:ref> setting can be set
+ to 0 to indicate that server push is disabled.
+ </t>
+ <t>
+ Promised requests MUST be cacheable (see <xref target="RFC7231" x:fmt=","
+ x:rel="#cacheable.methods"/>), MUST be safe (see <xref target="RFC7231" x:fmt=","
+ x:rel="#safe.methods"/>) and MUST NOT include a request body. Clients that receive a
+ promised request that is not cacheable, unsafe or that includes a request body MUST
+ reset the stream with a <xref target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ Pushed responses that are cacheable (see <xref target="RFC7234" x:fmt=","
+        x:rel="#response.cacheability"/>) can be stored by the client, if it implements an HTTP
+ cache. Pushed responses are considered successfully validated on the origin server (e.g.,
+ if the "no-cache" cache response directive <xref target="RFC7234" x:fmt=","
+ x:rel="#cache-response-directive"/> is present) while the stream identified by the
+ promised stream ID is still open.
+ </t>
+ <t>
+ Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY
+ be made available to the application separately.
+ </t>
+ <t>
+ An intermediary can receive pushes from the server and choose not to forward them on to
+ the client. In other words, how to make use of the pushed information is up to that
+ intermediary. Equally, the intermediary might choose to make additional pushes to the
+ client, without any action taken by the server.
+ </t>
+ <t>
+ A client cannot push. Thus, servers MUST treat the receipt of a
+ <x:ref>PUSH_PROMISE</x:ref> frame as a <xref target="ConnectionErrorHandler">connection
+ error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>. Clients MUST reject any attempt to
+ change the <x:ref>SETTINGS_ENABLE_PUSH</x:ref> setting to a value other than 0 by treating
+ the message as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <section anchor="PushRequests" title="Push Requests">
+ <t>
+ Server push is semantically equivalent to a server responding to a request; however, in
+ this case that request is also sent by the server, as a <x:ref>PUSH_PROMISE</x:ref>
+ frame.
+ </t>
+ <t>
+ The <x:ref>PUSH_PROMISE</x:ref> frame includes a header block that contains a complete
+ set of request header fields that the server attributes to the request. It is not
+ possible to push a response to a request that includes a request body.
+ </t>
+
+ <t>
+ Pushed responses are always associated with an explicit request from the client. The
+ <x:ref>PUSH_PROMISE</x:ref> frames sent by the server are sent on that explicit
+ request's stream. The <x:ref>PUSH_PROMISE</x:ref> frame also includes a promised stream
+ identifier, chosen from the stream identifiers available to the server (see <xref
+ target="StreamIdentifiers"/>).
+ </t>
+
+ <t>
+ The header fields in <x:ref>PUSH_PROMISE</x:ref> and any subsequent
+ <x:ref>CONTINUATION</x:ref> frames MUST be a valid and complete set of <xref
+ target="HttpRequest">request header fields</xref>. The server MUST include a method in
+ the <spanx style="verb">:method</spanx> header field that is safe and cacheable. If a
+ client receives a <x:ref>PUSH_PROMISE</x:ref> that does not include a complete and valid
+ set of header fields, or the <spanx style="verb">:method</spanx> header field identifies
+ a method that is not safe, it MUST respond with a <xref
+ target="StreamErrorHandler">stream error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <t>
+ The server SHOULD send <x:ref>PUSH_PROMISE</x:ref> (<xref target="PUSH_PROMISE"/>)
+ frames prior to sending any frames that reference the promised responses. This avoids a
+ race where clients issue requests prior to receiving any <x:ref>PUSH_PROMISE</x:ref>
+ frames.
+ </t>
+ <t>
+ For example, if the server receives a request for a document containing embedded links
+ to multiple image files, and the server chooses to push those additional images to the
+ client, sending push promises before the <x:ref>DATA</x:ref> frames that contain the
+ image links ensures that the client is able to see the promises before discovering
+ embedded links. Similarly, if the server pushes responses referenced by the header block
+ (for instance, in Link header fields), sending the push promises before sending the
+ header block ensures that clients do not request them.
+ </t>
+
+ <t>
+ <x:ref>PUSH_PROMISE</x:ref> frames MUST NOT be sent by the client.
+ </t>
+ <t>
+ <x:ref>PUSH_PROMISE</x:ref> frames can be sent by the server in response to any
+ client-initiated stream, but the stream MUST be in either the "open" or "half closed
+ (remote)" state with respect to the server. <x:ref>PUSH_PROMISE</x:ref> frames are
+ interspersed with the frames that comprise a response, though they cannot be
+ interspersed with <x:ref>HEADERS</x:ref> and <x:ref>CONTINUATION</x:ref> frames that
+ comprise a single header block.
+ </t>
+ <t>
+ Sending a <x:ref>PUSH_PROMISE</x:ref> frame creates a new stream and puts the stream
+ into the “reserved (local)” state for the server and the “reserved (remote)” state for
+ the client.
+ </t>
+ </section>
+
+ <section anchor="PushResponses" title="Push Responses">
+ <t>
+ After sending the <x:ref>PUSH_PROMISE</x:ref> frame, the server can begin delivering the
+ pushed response as a <xref target="HttpResponse">response</xref> on a server-initiated
+ stream that uses the promised stream identifier. The server uses this stream to
+ transmit an HTTP response, using the same sequence of frames as defined in <xref
+ target="HttpSequence"/>. This stream becomes <xref target="StreamStates">"half closed"
+ to the client</xref> after the initial <x:ref>HEADERS</x:ref> frame is sent.
+ </t>
+
+ <t>
+ Once a client receives a <x:ref>PUSH_PROMISE</x:ref> frame and chooses to accept the
+ pushed response, the client SHOULD NOT issue any requests for the promised response
+ until after the promised stream has closed.
+ </t>
+
+ <t>
+ If the client determines, for any reason, that it does not wish to receive the pushed
+ response from the server, or if the server takes too long to begin sending the promised
+ response, the client can send an <x:ref>RST_STREAM</x:ref> frame, using either the
+ <x:ref>CANCEL</x:ref> or <x:ref>REFUSED_STREAM</x:ref> codes, and referencing the pushed
+ stream's identifier.
+ </t>
+ <t>
+ A client can use the <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> setting to limit the
+ number of responses that can be concurrently pushed by a server. Advertising a
+ <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> value of zero disables server push by
+ preventing the server from creating the necessary streams. This does not prohibit a
+ server from sending <x:ref>PUSH_PROMISE</x:ref> frames; clients need to reset any
+ promised streams that are not wanted.
+ </t>
+
+ <t>
+ Clients receiving a pushed response MUST validate that either the server is
+ authoritative (see <xref target="authority"/>), or the proxy that provided the pushed
+ response is configured for the corresponding request. For example, a server that offers
+ a certificate for only the <spanx style="verb">example.com</spanx> DNS-ID or Common Name
+ is not permitted to push a response for <spanx
+ style="verb">https://www.example.org/doc</spanx>.
+ </t>
+ <t>
+ The response for a <x:ref>PUSH_PROMISE</x:ref> stream begins with a
+ <x:ref>HEADERS</x:ref> frame, which immediately puts the stream into the “half closed
+ (remote)” state for the server and “half closed (local)” state for the client, and ends
+ with a frame bearing END_STREAM, which places the stream in the "closed" state.
+ <list style="hanging">
+ <t hangText="Note:">
+ The client never sends a frame with the END_STREAM flag for a server push.
+ </t>
+ </list>
+ </t>
+ </section>
+
+ </section>
+
+ <section anchor="CONNECT" title="The CONNECT Method">
+ <t>
+ In HTTP/1.x, the pseudo-method CONNECT (<xref target="RFC7231" x:fmt=","
+ x:rel="#CONNECT"/>) is used to convert an HTTP connection into a tunnel to a remote host.
+ CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin
+ server for the purposes of interacting with <spanx style="verb">https</spanx> resources.
+ </t>
+ <t>
+ In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to
+ a remote host, for similar purposes. The HTTP header field mapping works as defined in
+ <xref target="HttpRequest">Request Header Fields</xref>, with a few
+ differences. Specifically:
+ <list style="symbols">
+ <t>
+ The <spanx style="verb">:method</spanx> header field is set to <spanx
+ style="verb">CONNECT</spanx>.
+ </t>
+ <t>
+ The <spanx style="verb">:scheme</spanx> and <spanx style="verb">:path</spanx> header
+ fields MUST be omitted.
+ </t>
+ <t>
+ The <spanx style="verb">:authority</spanx> header field contains the host and port to
+ connect to (equivalent to the authority-form of the request-target of CONNECT
+ requests, see <xref target="RFC7230" x:fmt="," x:rel="#request-target"/>).
+ </t>
+ </list>
+ </t>
+ <t>
+ A proxy that supports CONNECT establishes a <xref target="TCP">TCP connection</xref> to
+ the server identified in the <spanx style="verb">:authority</spanx> header field. Once
+ this connection is successfully established, the proxy sends a <x:ref>HEADERS</x:ref>
+ frame containing a 2xx series status code to the client, as defined in <xref
+ target="RFC7231" x:fmt="," x:rel="#CONNECT"/>.
+ </t>
+ <t>
+ After the initial <x:ref>HEADERS</x:ref> frame sent by each peer, all subsequent
+ <x:ref>DATA</x:ref> frames correspond to data sent on the TCP connection. The payload of
+ any <x:ref>DATA</x:ref> frames sent by the client is transmitted by the proxy to the TCP
+ server; data received from the TCP server is assembled into <x:ref>DATA</x:ref> frames by
+ the proxy. Frame types other than <x:ref>DATA</x:ref> or stream management frames
+ (<x:ref>RST_STREAM</x:ref>, <x:ref>WINDOW_UPDATE</x:ref>, and <x:ref>PRIORITY</x:ref>)
+ MUST NOT be sent on a connected stream, and MUST be treated as a <xref
+ target="StreamErrorHandler">stream error</xref> if received.
+ </t>
+ <t>
+ The TCP connection can be closed by either peer. The END_STREAM flag on a
+ <x:ref>DATA</x:ref> frame is treated as being equivalent to the TCP FIN bit. A client is
+ expected to send a <x:ref>DATA</x:ref> frame with the END_STREAM flag set after receiving
+ a frame bearing the END_STREAM flag. A proxy that receives a <x:ref>DATA</x:ref> frame
+ with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP
+ segment. A proxy that receives a TCP segment with the FIN bit set sends a
+ <x:ref>DATA</x:ref> frame with the END_STREAM flag set. Note that the final TCP segment
+ or <x:ref>DATA</x:ref> frame could be empty.
+ </t>
+ <t>
+ A TCP connection error is signaled with <x:ref>RST_STREAM</x:ref>. A proxy treats any
+ error in the TCP connection, which includes receiving a TCP segment with the RST bit set,
+ as a <xref target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>CONNECT_ERROR</x:ref>. Correspondingly, a proxy MUST send a TCP segment with the
+ RST bit set if it detects an error with the stream or the HTTP/2 connection.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="HttpExtra" title="Additional HTTP Requirements/Considerations">
+ <t>
+ This section outlines attributes of the HTTP protocol that improve interoperability, reduce
+ exposure to known security vulnerabilities, or reduce the potential for implementation
+ variation.
+ </t>
+
+ <section title="Connection Management">
+ <t>
+ HTTP/2 connections are persistent. For best performance, it is expected clients will not
+ close connections until it is determined that no further communication with a server is
+ necessary (for example, when a user navigates away from a particular web page), or until
+ the server closes the connection.
+ </t>
+ <t>
+ Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair,
+ where host is derived from a URI, a selected <xref target="ALT-SVC">alternative
+ service</xref>, or a configured proxy.
+ </t>
+ <t>
+ A client can create additional connections as replacements, either to replace connections
+ that are near to exhausting the available <xref target="StreamIdentifiers">stream
+ identifier space</xref>, to refresh the keying material for a TLS connection, or to
+ replace connections that have encountered <xref
+ target="ConnectionErrorHandler">errors</xref>.
+ </t>
+ <t>
+ A client MAY open multiple connections to the same IP address and TCP port using different
+ <xref target="TLS-EXT">Server Name Indication</xref> values or to provide different TLS
+ client certificates, but SHOULD avoid creating multiple connections with the same
+ configuration.
+ </t>
+ <t>
+ Servers are encouraged to maintain open connections for as long as possible, but are
+ permitted to terminate idle connections if necessary. When either endpoint chooses to
+ close the transport-layer TCP connection, the terminating endpoint SHOULD first send a
+ <x:ref>GOAWAY</x:ref> (<xref target="GOAWAY"/>) frame so that both endpoints can reliably
+ determine whether previously sent frames have been processed and gracefully complete or
+ terminate any necessary remaining tasks.
+ </t>
+
+ <section anchor="reuse" title="Connection Reuse">
+ <t>
+          Connections that are made to an origin server, either directly or through a tunnel
+ created using the <xref target="CONNECT">CONNECT method</xref> MAY be reused for
+ requests with multiple different URI authority components. A connection can be reused
+ as long as the origin server is <xref target="authority">authoritative</xref>. For
+ <spanx style="verb">http</spanx> resources, this depends on the host having resolved to
+ the same IP address.
+ </t>
+ <t>
+ For <spanx style="verb">https</spanx> resources, connection reuse additionally depends
+ on having a certificate that is valid for the host in the URI. An origin server might
+ offer a certificate with multiple <spanx style="verb">subjectAltName</spanx> attributes,
+ or names with wildcards, one of which is valid for the authority in the URI. For
+ example, a certificate with a <spanx style="verb">subjectAltName</spanx> of <spanx
+ style="verb">*.example.com</spanx> might permit the use of the same connection for
+ requests to URIs starting with <spanx style="verb">https://a.example.com/</spanx> and
+ <spanx style="verb">https://b.example.com/</spanx>.
+ </t>
+ <t>
+ In some deployments, reusing a connection for multiple origins can result in requests
+ being directed to the wrong origin server. For example, TLS termination might be
+ performed by a middlebox that uses the TLS <xref target="TLS-EXT">Server Name Indication
+ (SNI)</xref> extension to select an origin server. This means that it is possible
+ for clients to send confidential information to servers that might not be the intended
+ target for the request, even though the server is otherwise authoritative.
+ </t>
+ <t>
+ A server that does not wish clients to reuse connections can indicate that it is not
+ authoritative for a request by sending a 421 (Misdirected Request) status code in response
+ to the request (see <xref target="MisdirectedRequest"/>).
+ </t>
+ <t>
+ A client that is configured to use a proxy over HTTP/2 directs requests to that proxy
+ through a single connection. That is, all requests sent via a proxy reuse the
+ connection to the proxy.
+ </t>
+ </section>
+
+ <section anchor="MisdirectedRequest" title="The 421 (Misdirected Request) Status Code">
+ <t>
+ The 421 (Misdirected Request) status code indicates that the request was directed at a
+ server that is not able to produce a response. This can be sent by a server that is not
+ configured to produce responses for the combination of scheme and authority that are
+ included in the request URI.
+ </t>
+ <t>
+ Clients receiving a 421 (Misdirected Request) response from a server MAY retry the
+ request - whether the request method is idempotent or not - over a different connection.
+ This is possible if a connection is reused (<xref target="reuse"/>) or if an alternative
+ service is selected (<xref target="ALT-SVC"/>).
+ </t>
+ <t>
+ This status code MUST NOT be generated by proxies.
+ </t>
+ <t>
+ A 421 response is cacheable by default; i.e., unless otherwise indicated by the method
+ definition or explicit cache controls (see <xref target="RFC7234"
+ x:rel="#heuristic.freshness" x:fmt="of"/>).
+ </t>
+ </section>
+ </section>
+
+ <section title="Use of TLS Features" anchor="TLSUsage">
+ <t>
+ Implementations of HTTP/2 MUST support <xref target="TLS12">TLS 1.2</xref> for HTTP/2 over
+ TLS. The general TLS usage guidance in <xref target="TLSBCP"/> SHOULD be followed, with
+ some additional restrictions that are specific to HTTP/2.
+ </t>
+
+ <t>
+ An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on
+ feature set and cipher suite described in this section. Due to implementation
+ limitations, it might not be possible to fail TLS negotiation. An endpoint MUST
+ immediately terminate an HTTP/2 connection that does not meet these minimum requirements
+ with a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>INADEQUATE_SECURITY</x:ref>.
+ </t>
+
+ <section anchor="TLSFeatures" title="TLS Features">
+ <t>
+ The TLS implementation MUST support the <xref target="TLS-EXT">Server Name Indication
+ (SNI)</xref> extension to TLS. HTTP/2 clients MUST indicate the target domain name when
+ negotiating TLS.
+ </t>
+ <t>
+ The TLS implementation MUST disable compression. TLS compression can lead to the
+ exposure of information that would not otherwise be revealed <xref target="RFC3749"/>.
+ Generic compression is unnecessary since HTTP/2 provides compression features that are
+ more aware of context and therefore likely to be more appropriate for use for
+ performance, security or other reasons.
+ </t>
+ <t>
+ The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS
+ renegotiation as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>. Note that disabling renegotiation can result in
+ long-lived connections becoming unusable due to limits on the number of messages the
+ underlying cipher suite can encipher.
+ </t>
+ <t>
+ A client MAY use renegotiation to provide confidentiality protection for client
+ credentials offered in the handshake, but any renegotiation MUST occur prior to sending
+ the connection preface. A server SHOULD request a client certificate if it sees a
+ renegotiation request immediately after establishing a connection.
+ </t>
+ <t>
+ This effectively prevents the use of renegotiation in response to a request for a
+ specific protected resource. A future specification might provide a way to support this
+ use case. <!-- <cref> We are tracking this in a non-blocking fashion in issue #496 and
+ with a new draft. -->
+ </t>
+ </section>
+
+ <section title="TLS Cipher Suites">
+ <t>
+ The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST
+ only be used with cipher suites that have ephemeral key exchange, such as the <xref
+ target="TLS12">ephemeral Diffie-Hellman (DHE)</xref> or the <xref
+ target="RFC4492">elliptic curve variant (ECDHE)</xref>. Ephemeral key exchange MUST
+ have a minimum size of 2048 bits for DHE or security level of 128 bits for ECDHE.
+ Clients MUST accept DHE sizes of up to 4096 bits. HTTP MUST NOT be used with cipher
+ suites that use stream or block ciphers. Authenticated Encryption with Additional Data
+          (AEAD) modes, such as the <xref target="RFC5288">Galois Counter Mode (GCM) mode for
+ AES</xref> are acceptable.
+ </t>
+ <t>
+ The effect of these restrictions is that TLS 1.2 implementations could have
+ non-intersecting sets of available cipher suites, since these prevent the use of the
+ cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of
+ HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 <xref
+ target="TLS-ECDHE"/> with P256 <xref target="FIPS186"/>.
+ </t>
+ <t>
+ Clients MAY advertise support of cipher suites that are prohibited by the above
+ restrictions in order to allow for connection to servers that do not support HTTP/2.
+ This enables a fallback to protocols without these constraints without the additional
+ latency imposed by using a separate connection for fallback.
+ </t>
+ </section>
+ </section>
+ </section>
+
+ <section anchor="security" title="Security Considerations">
+ <section title="Server Authority" anchor="authority">
+ <t>
+ HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is
+ authoritative in providing a given response, see <xref target="RFC7230" x:fmt=","
+ x:rel="#establishing.authority"/>. This relies on local name resolution for the "http"
+ URI scheme, and the authenticated server identity for the "https" scheme (see <xref
+ target="RFC2818" x:fmt="," x:sec="3"/>).
+ </t>
+ </section>
+
+ <section title="Cross-Protocol Attacks">
+ <t>
+ In a cross-protocol attack, an attacker causes a client to initiate a transaction in one
+ protocol toward a server that understands a different protocol. An attacker might be able
+        to cause the transaction to appear as a valid transaction in the second protocol. In
+ combination with the capabilities of the web context, this can be used to interact with
+ poorly protected servers in private networks.
+ </t>
+ <t>
+ Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient
+ protection against cross protocol attacks. ALPN provides a positive indication that a
+ server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based
+ protocols.
+ </t>
+ <t>
+ The encryption in TLS makes it difficult for attackers to control the data which could be
+ used in a cross-protocol attack on a cleartext protocol.
+ </t>
+ <t>
+ The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks.
+ The <xref target="ConnectionHeader">connection preface</xref> contains a string that is
+ designed to confuse HTTP/1.1 servers, but no special protection is offered for other
+ protocols. A server that is willing to ignore parts of an HTTP/1.1 request containing an
+ Upgrade header field in addition to the client connection preface could be exposed to a
+ cross-protocol attack.
+ </t>
+ </section>
+
+ <section title="Intermediary Encapsulation Attacks">
+ <t>
+ HTTP/2 header field names and values are encoded as sequences of octets with a length
+ prefix. This enables HTTP/2 to carry any string of octets as the name or value of a
+ header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1
+ directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might
+ exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal
+ header fields, extra header fields, or even new messages that are entirely falsified.
+ </t>
+ <t>
+ Header field names or values that contain characters not permitted by HTTP/1.1, including
+ carriage return (ASCII 0xd) or line feed (ASCII 0xa) MUST NOT be translated verbatim by an
+ intermediary, as stipulated in <xref target="RFC7230" x:rel="#field.parsing" x:fmt=","/>.
+ </t>
+ <t>
+ Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker.
+ Intermediaries that perform translation to HTTP/2 MUST remove any instances of the <spanx
+ style="verb">obs-fold</spanx> production from header field values.
+ </t>
+ </section>
+
+ <section title="Cacheability of Pushed Responses">
+ <t>
+ Pushed responses do not have an explicit request from the client; the request
+ is provided by the server in the <x:ref>PUSH_PROMISE</x:ref> frame.
+ </t>
+ <t>
+ Caching responses that are pushed is possible based on the guidance provided by the origin
+ server in the Cache-Control header field. However, this can cause issues if a single
+ server hosts more than one tenant. For example, a server might offer multiple users each
+ a small portion of its URI space.
+ </t>
+ <t>
+ Where multiple tenants share space on the same server, that server MUST ensure that
+ tenants are not able to push representations of resources that they do not have authority
+ over. Failure to enforce this would allow a tenant to provide a representation that would
+ be served out of cache, overriding the actual representation that the authoritative tenant
+ provides.
+ </t>
+ <t>
+ Pushed responses for which an origin server is not authoritative (see
+ <xref target="authority"/>) are never cached or used.
+ </t>
+ </section>
+
+ <section anchor="dos" title="Denial of Service Considerations">
+ <t>
+        An HTTP/2 connection can demand a greater commitment of resources to operate than an
+ HTTP/1.1 connection. The use of header compression and flow control depend on a
+ commitment of resources for storing a greater amount of state. Settings for these
+ features ensure that memory commitments for these features are strictly bounded.
+ </t>
+ <t>
+ The number of <x:ref>PUSH_PROMISE</x:ref> frames is not constrained in the same fashion.
+ A client that accepts server push SHOULD limit the number of streams it allows to be in
+        the "reserved (remote)" state. An excessive number of server push streams can be treated as
+ a <xref target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>ENHANCE_YOUR_CALM</x:ref>.
+ </t>
+ <t>
+ Processing capacity cannot be guarded as effectively as state capacity.
+ </t>
+ <t>
+ The <x:ref>SETTINGS</x:ref> frame can be abused to cause a peer to expend additional
+ processing time. This might be done by pointlessly changing SETTINGS parameters, setting
+ multiple undefined parameters, or changing the same setting multiple times in the same
+ frame. <x:ref>WINDOW_UPDATE</x:ref> or <x:ref>PRIORITY</x:ref> frames can be abused to
+ cause an unnecessary waste of resources.
+ </t>
+ <t>
+ Large numbers of small or empty frames can be abused to cause a peer to expend time
+ processing frame headers. Note however that some uses are entirely legitimate, such as
+ the sending of an empty <x:ref>DATA</x:ref> frame to end a stream.
+ </t>
+ <t>
+ Header compression also offers some opportunities to waste processing resources; see <xref
+ target="COMPRESSION" x:fmt="of" x:rel="#Security"/> for more details on potential abuses.
+ </t>
+ <t>
+ Limits in <x:ref>SETTINGS</x:ref> parameters cannot be reduced instantaneously, which
+ leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In
+ particular, immediately after establishing a connection, limits set by a server are not
+ known to clients and could be exceeded without being an obvious protocol violation.
+ </t>
+ <t>
+ All these features - i.e., <x:ref>SETTINGS</x:ref> changes, small frames, header
+ compression - have legitimate uses. These features become a burden only when they are
+ used unnecessarily or to excess.
+ </t>
+ <t>
+ An endpoint that doesn't monitor this behavior exposes itself to a risk of denial of
+ service attack. Implementations SHOULD track the use of these features and set limits on
+ their use. An endpoint MAY treat activity that is suspicious as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>ENHANCE_YOUR_CALM</x:ref>.
+ </t>
+
+ <section anchor="MaxHeaderBlock" title="Limits on Header Block Size">
+ <t>
+ A large <xref target="HeaderBlock">header block</xref> can cause an implementation to
+ commit a large amount of state. Header fields that are critical for routing can appear
+ toward the end of a header block, which prevents streaming of header fields to their
+          ultimate destination. For this and other reasons, such as ensuring cache correctness,
+          an endpoint might need to buffer the entire header block. Since there is no
+          hard limit to the size of a header block, some endpoints could be forced to commit a
+          large amount of available memory for header fields.
+ </t>
+ <t>
+ An endpoint can use the <x:ref>SETTINGS_MAX_HEADER_LIST_SIZE</x:ref> to advise peers of
+ limits that might apply on the size of header blocks. This setting is only advisory, so
+ endpoints MAY choose to send header blocks that exceed this limit and risk having the
+          request or response being treated as malformed. This setting is specific to a connection,
+ so any request or response could encounter a hop with a lower, unknown limit. An
+ intermediary can attempt to avoid this problem by passing on values presented by
+ different peers, but they are not obligated to do so.
+ </t>
+ <t>
+ A server that receives a larger header block than it is willing to handle can send an
+ HTTP 431 (Request Header Fields Too Large) status code <xref target="RFC6585"/>. A
+ client can discard responses that it cannot process. The header block MUST be processed
+ to ensure a consistent connection state, unless the connection is closed.
+ </t>
+ </section>
+ </section>
+
+ <section title="Use of Compression">
+ <t>
+ HTTP/2 enables greater use of compression for both header fields (<xref
+ target="HeaderBlock"/>) and entity bodies. Compression can allow an attacker to recover
+ secret data when it is compressed in the same context as data under attacker control.
+ </t>
+ <t>
+ There are demonstrable attacks on compression that exploit the characteristics of the web
+ (e.g., <xref target="BREACH"/>). The attacker induces multiple requests containing
+ varying plaintext, observing the length of the resulting ciphertext in each, which
+ reveals a shorter length when a guess about the secret is correct.
+ </t>
+ <t>
+ Implementations communicating on a secure channel MUST NOT compress content that includes
+ both confidential and attacker-controlled data unless separate compression dictionaries
+ are used for each source of data. Compression MUST NOT be used if the source of data
+ cannot be reliably determined. Generic stream compression, such as that provided by TLS
+ MUST NOT be used with HTTP/2 (<xref target="TLSFeatures"/>).
+ </t>
+ <t>
+ Further considerations regarding the compression of header fields are described in <xref
+ target="COMPRESSION"/>.
+ </t>
+ </section>
+
+ <section title="Use of Padding" anchor="padding">
+ <t>
+ Padding within HTTP/2 is not intended as a replacement for general purpose padding, such
+ as might be provided by <xref target="TLS12">TLS</xref>. Redundant padding could even be
+ counterproductive. Correct application can depend on having specific knowledge of the
+ data that is being padded.
+ </t>
+ <t>
+ To mitigate attacks that rely on compression, disabling or limiting compression might be
+ preferable to padding as a countermeasure.
+ </t>
+ <t>
+ Padding can be used to obscure the exact size of frame content, and is provided to
+ mitigate specific attacks within HTTP. For example, attacks where compressed content
+ includes both attacker-controlled plaintext and secret data (see for example, <xref
+ target="BREACH"/>).
+ </t>
+ <t>
+ Use of padding can result in less protection than might seem immediately obvious. At
+ best, padding only makes it more difficult for an attacker to infer length information by
+ increasing the number of frames an attacker has to observe. Incorrectly implemented
+ padding schemes can be easily defeated. In particular, randomized padding with a
+ predictable distribution provides very little protection; similarly, padding payloads to a
+ fixed size exposes information as payload sizes cross the fixed size boundary, which could
+ be possible if an attacker can control plaintext.
+ </t>
+ <t>
+ Intermediaries SHOULD retain padding for <x:ref>DATA</x:ref> frames, but MAY drop padding
+ for <x:ref>HEADERS</x:ref> and <x:ref>PUSH_PROMISE</x:ref> frames. A valid reason for an
+ intermediary to change the amount of padding of frames is to improve the protections that
+ padding provides.
+ </t>
+ </section>
+
+ <section title="Privacy Considerations">
+ <t>
+ Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions
+ of a single client or server over time. This includes the value of settings, the manner
+ in which flow control windows are managed, the way priorities are allocated to streams,
+ timing of reactions to stimulus, and handling of any optional features.
+ </t>
+ <t>
+ As far as this creates observable differences in behavior, they could be used as a basis
+ for fingerprinting a specific client, as defined in <xref target="HTML5" x:fmt="of"
+ x:sec="1.8" x:rel="introduction.html#fingerprint"/>.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="iana" title="IANA Considerations">
+ <t>
+ A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation
+ (ALPN) Protocol IDs" registry established in <xref target="TLS-ALPN"/>.
+ </t>
+ <t>
+ This document establishes a registry for frame types, settings, and error codes. These new
+ registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section.
+ </t>
+ <t>
+ This document registers the <spanx style="verb">HTTP2-Settings</spanx> header field for
+ use in HTTP; and the 421 (Misdirected Request) status code.
+ </t>
+ <t>
+ This document registers the <spanx style="verb">PRI</spanx> method for use in HTTP, to avoid
+ collisions with the <xref target="ConnectionHeader">connection preface</xref>.
+ </t>
+
+ <section anchor="iana-alpn" title="Registration of HTTP/2 Identification Strings">
+ <t>
+ This document creates two registrations for the identification of HTTP/2 in the
+ "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in <xref
+ target="TLS-ALPN"/>.
+ </t>
+ <t>
+ The "h2" string identifies HTTP/2 when used over TLS:
+ <list style="hanging">
+ <t hangText="Protocol:">HTTP/2 over TLS</t>
+ <t hangText="Identification Sequence:">0x68 0x32 ("h2")</t>
+ <t hangText="Specification:">This document</t>
+ </list>
+ </t>
+ <t>
+ The "h2c" string identifies HTTP/2 when used over cleartext TCP:
+ <list style="hanging">
+ <t hangText="Protocol:">HTTP/2 over TCP</t>
+ <t hangText="Identification Sequence:">0x68 0x32 0x63 ("h2c")</t>
+ <t hangText="Specification:">This document</t>
+ </list>
+ </t>
+ </section>
+
+ <section anchor="iana-frames" title="Frame Type Registry">
+ <t>
+ This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame
+ Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under
+ either of the <xref target="RFC5226">"IETF Review" or "IESG Approval" policies</xref> for
+ values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for
+ experimental use.
+ </t>
+ <t>
+ New entries in this registry require the following information:
+ <list style="hanging">
+ <t hangText="Frame Type:">
+ A name or label for the frame type.
+ </t>
+ <t hangText="Code:">
+ The 8-bit code assigned to the frame type.
+ </t>
+ <t hangText="Specification:">
+ A reference to a specification that includes a description of the frame layout,
+              its semantics and flags that the frame type uses, including any parts of the frame
+ that are conditionally present based on the value of flags.
+ </t>
+ </list>
+ </t>
+ <t>
+ The entries in the following table are registered by this document.
+ </t>
+ <texttable align="left" suppress-title="true">
+ <ttcol>Frame Type</ttcol>
+ <ttcol>Code</ttcol>
+ <ttcol>Section</ttcol>
+ <c>DATA</c><c>0x0</c><c><xref target="DATA"/></c>
+ <c>HEADERS</c><c>0x1</c><c><xref target="HEADERS"/></c>
+ <c>PRIORITY</c><c>0x2</c><c><xref target="PRIORITY"/></c>
+ <c>RST_STREAM</c><c>0x3</c><c><xref target="RST_STREAM"/></c>
+ <c>SETTINGS</c><c>0x4</c><c><xref target="SETTINGS"/></c>
+ <c>PUSH_PROMISE</c><c>0x5</c><c><xref target="PUSH_PROMISE"/></c>
+ <c>PING</c><c>0x6</c><c><xref target="PING"/></c>
+ <c>GOAWAY</c><c>0x7</c><c><xref target="GOAWAY"/></c>
+ <c>WINDOW_UPDATE</c><c>0x8</c><c><xref target="WINDOW_UPDATE"/></c>
+ <c>CONTINUATION</c><c>0x9</c><c><xref target="CONTINUATION"/></c>
+ </texttable>
+ </section>
+
+ <section anchor="iana-settings" title="Settings Registry">
+ <t>
+ This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings" registry
+ manages a 16-bit space. The "HTTP/2 Settings" registry operates under the <xref
+ target="RFC5226">"Expert Review" policy</xref> for values in the range from 0x0000 to
+          0xefff, with values between 0xf000 and 0xffff being reserved for experimental use.
+ </t>
+ <t>
+ New registrations are advised to provide the following information:
+ <list style="hanging">
+ <t hangText="Name:">
+ A symbolic name for the setting. Specifying a setting name is optional.
+ </t>
+ <t hangText="Code:">
+ The 16-bit code assigned to the setting.
+ </t>
+ <t hangText="Initial Value:">
+ An initial value for the setting.
+ </t>
+ <t hangText="Specification:">
+ An optional reference to a specification that describes the use of the setting.
+ </t>
+ </list>
+ </t>
+ <t>
+ An initial set of setting registrations can be found in <xref target="SettingValues"/>.
+ </t>
+ <texttable align="left" suppress-title="true">
+ <ttcol>Name</ttcol>
+ <ttcol>Code</ttcol>
+ <ttcol>Initial Value</ttcol>
+ <ttcol>Specification</ttcol>
+ <c>HEADER_TABLE_SIZE</c>
+ <c>0x1</c><c>4096</c><c><xref target="SettingValues"/></c>
+ <c>ENABLE_PUSH</c>
+ <c>0x2</c><c>1</c><c><xref target="SettingValues"/></c>
+ <c>MAX_CONCURRENT_STREAMS</c>
+ <c>0x3</c><c>(infinite)</c><c><xref target="SettingValues"/></c>
+ <c>INITIAL_WINDOW_SIZE</c>
+ <c>0x4</c><c>65535</c><c><xref target="SettingValues"/></c>
+ <c>MAX_FRAME_SIZE</c>
+ <c>0x5</c><c>16384</c><c><xref target="SettingValues"/></c>
+ <c>MAX_HEADER_LIST_SIZE</c>
+ <c>0x6</c><c>(infinite)</c><c><xref target="SettingValues"/></c>
+ </texttable>
+
+ </section>
+
+ <section anchor="iana-errors" title="Error Code Registry">
+ <t>
+ This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code"
+ registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the
+ <xref target="RFC5226">"Expert Review" policy</xref>.
+ </t>
+ <t>
+ Registrations for error codes are required to include a description of the error code. An
+ expert reviewer is advised to examine new registrations for possible duplication with
+ existing error codes. Use of existing registrations is to be encouraged, but not
+ mandated.
+ </t>
+ <t>
+ New registrations are advised to provide the following information:
+ <list style="hanging">
+ <t hangText="Name:">
+ A name for the error code. Specifying an error code name is optional.
+ </t>
+ <t hangText="Code:">
+ The 32-bit error code value.
+ </t>
+ <t hangText="Description:">
+ A brief description of the error code semantics, longer if no detailed specification
+ is provided.
+ </t>
+ <t hangText="Specification:">
+ An optional reference for a specification that defines the error code.
+ </t>
+ </list>
+ </t>
+ <t>
+ The entries in the following table are registered by this document.
+ </t>
+ <texttable align="left" suppress-title="true">
+ <ttcol>Name</ttcol>
+ <ttcol>Code</ttcol>
+ <ttcol>Description</ttcol>
+ <ttcol>Specification</ttcol>
+ <c>NO_ERROR</c><c>0x0</c>
+ <c>Graceful shutdown</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>PROTOCOL_ERROR</c><c>0x1</c>
+ <c>Protocol error detected</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>INTERNAL_ERROR</c><c>0x2</c>
+ <c>Implementation fault</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>FLOW_CONTROL_ERROR</c><c>0x3</c>
+ <c>Flow control limits exceeded</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>SETTINGS_TIMEOUT</c><c>0x4</c>
+ <c>Settings not acknowledged</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>STREAM_CLOSED</c><c>0x5</c>
+ <c>Frame received for closed stream</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>FRAME_SIZE_ERROR</c><c>0x6</c>
+ <c>Frame size incorrect</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>REFUSED_STREAM</c><c>0x7</c>
+ <c>Stream not processed</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>CANCEL</c><c>0x8</c>
+ <c>Stream cancelled</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>COMPRESSION_ERROR</c><c>0x9</c>
+ <c>Compression state not updated</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>CONNECT_ERROR</c><c>0xa</c>
+ <c>TCP connection error for CONNECT method</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>ENHANCE_YOUR_CALM</c><c>0xb</c>
+ <c>Processing capacity exceeded</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>INADEQUATE_SECURITY</c><c>0xc</c>
+ <c>Negotiated TLS parameters not acceptable</c>
+ <c><xref target="ErrorCodes"/></c>
+ </texttable>
+
+ </section>
+
+ <section title="HTTP2-Settings Header Field Registration">
+ <t>
+ This section registers the <spanx style="verb">HTTP2-Settings</spanx> header field in the
+ <xref target="BCP90">Permanent Message Header Field Registry</xref>.
+ <list style="hanging">
+ <t hangText="Header field name:">
+ HTTP2-Settings
+ </t>
+ <t hangText="Applicable protocol:">
+ http
+ </t>
+ <t hangText="Status:">
+ standard
+ </t>
+ <t hangText="Author/Change controller:">
+ IETF
+ </t>
+ <t hangText="Specification document(s):">
+ <xref target="Http2SettingsHeader"/> of this document
+ </t>
+ <t hangText="Related information:">
+ This header field is only used by an HTTP/2 client for Upgrade-based negotiation.
+ </t>
+ </list>
+ </t>
+ </section>
+
+ <section title="PRI Method Registration">
+ <t>
+ This section registers the <spanx style="verb">PRI</spanx> method in the HTTP Method
+ Registry (<xref target="RFC7231" x:fmt="," x:rel="#method.registry"/>).
+ <list style="hanging">
+ <t hangText="Method Name:">
+ PRI
+ </t>
+            <t hangText="Safe:">
+              No
+            </t>
+            <t hangText="Idempotent:">
+              No
+            </t>
+            <t hangText="Specification document(s):">
+ <xref target="ConnectionHeader"/> of this document
+ </t>
+ <t hangText="Related information:">
+ This method is never used by an actual client. This method will appear to be used
+ when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection
+ preface.
+ </t>
+ </list>
+ </t>
+ </section>
+
+ <section title="The 421 (Misdirected Request) HTTP Status Code"
+ anchor="iana-MisdirectedRequest">
+ <t>
+ This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext
+ Transfer Protocol (HTTP) Status Code Registry (<xref target="RFC7231" x:fmt=","
+ x:rel="#status.code.registry"/>).
+ </t>
+ <t>
+ <list style="hanging">
+ <t hangText="Status Code:">
+ 421
+ </t>
+ <t hangText="Short Description:">
+ Misdirected Request
+ </t>
+ <t hangText="Specification:">
+ <xref target="MisdirectedRequest"/> of this document
+ </t>
+ </list>
+ </t>
+ </section>
+
+ </section>
+
+ <section title="Acknowledgements">
+ <t>
+ This document includes substantial input from the following individuals:
+ <list style="symbols">
+ <t>
+ Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin
+ Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin
+ Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY
+ contributors).
+ </t>
+ <t>
+ Gabriel Montenegro and Willy Tarreau (Upgrade mechanism).
+ </t>
+ <t>
+ William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto
+ Peon, Rob Trace (Flow control).
+ </t>
+ <t>
+ Mike Bishop (Extensibility).
+ </t>
+ <t>
+ Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan
+ (Substantial editorial contributions).
+ </t>
+ <t>
+ Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp.
+ </t>
+ <t>
+ Alexey Melnikov was an editor of this document during 2013.
+ </t>
+ <t>
+ A substantial proportion of Martin's contribution was supported by Microsoft during his
+ employment there.
+ </t>
+ </list>
+ </t>
+ </section>
+ </middle>
+
+ <back>
+ <references title="Normative References">
+ <reference anchor="COMPRESSION">
+ <front>
+ <title>HPACK - Header Compression for HTTP/2</title>
+ <author initials="H." surname="Ruellan" fullname="Herve Ruellan"/>
+ <author initials="R." surname="Peon" fullname="Roberto Peon"/>
+ <date month="July" year="2014" />
+ </front>
+ <seriesInfo name="Internet-Draft" value="draft-ietf-httpbis-header-compression-09" />
+ <x:source href="refs/draft-ietf-httpbis-header-compression-09.xml"/>
+ </reference>
+
+ <reference anchor="TCP">
+ <front>
+ <title abbrev="Transmission Control Protocol">
+ Transmission Control Protocol
+ </title>
+ <author initials="J." surname="Postel" fullname="Jon Postel">
+ <organization>University of Southern California (USC)/Information Sciences
+ Institute</organization>
+ </author>
+ <date year="1981" month="September" />
+ </front>
+ <seriesInfo name="STD" value="7" />
+ <seriesInfo name="RFC" value="793" />
+ </reference>
+
+ <reference anchor="RFC2119">
+ <front>
+ <title>
+ Key words for use in RFCs to Indicate Requirement Levels
+ </title>
+ <author initials="S." surname="Bradner" fullname="Scott Bradner">
+ <organization>Harvard University</organization>
+ <address><email>sob@harvard.edu</email></address>
+ </author>
+ <date month="March" year="1997"/>
+ </front>
+ <seriesInfo name="BCP" value="14"/>
+ <seriesInfo name="RFC" value="2119"/>
+ </reference>
+
+ <reference anchor="RFC2818">
+ <front>
+ <title>
+ HTTP Over TLS
+ </title>
+ <author initials="E." surname="Rescorla" fullname="Eric Rescorla"/>
+ <date month="May" year="2000"/>
+ </front>
+ <seriesInfo name="RFC" value="2818"/>
+ </reference>
+
+ <reference anchor="RFC3986">
+ <front>
+ <title abbrev="URI Generic Syntax">Uniform Resource Identifier (URI): Generic
+ Syntax</title>
+ <author initials="T." surname="Berners-Lee" fullname="Tim Berners-Lee"></author>
+ <author initials="R." surname="Fielding" fullname="Roy T. Fielding"></author>
+ <author initials="L." surname="Masinter" fullname="Larry Masinter"></author>
+ <date year="2005" month="January" />
+ </front>
+ <seriesInfo name="STD" value="66" />
+ <seriesInfo name="RFC" value="3986" />
+ </reference>
+
+ <reference anchor="RFC4648">
+ <front>
+ <title>The Base16, Base32, and Base64 Data Encodings</title>
+ <author fullname="S. Josefsson" initials="S." surname="Josefsson"/>
+ <date year="2006" month="October"/>
+ </front>
+ <seriesInfo value="4648" name="RFC"/>
+ </reference>
+
+ <reference anchor="RFC5226">
+ <front>
+ <title>Guidelines for Writing an IANA Considerations Section in RFCs</title>
+ <author initials="T." surname="Narten" fullname="T. Narten"/>
+ <author initials="H." surname="Alvestrand" fullname="H. Alvestrand"/>
+ <date year="2008" month="May" />
+ </front>
+ <seriesInfo name="BCP" value="26" />
+ <seriesInfo name="RFC" value="5226" />
+ </reference>
+
+ <reference anchor="RFC5234">
+ <front>
+ <title>Augmented BNF for Syntax Specifications: ABNF</title>
+ <author initials="D." surname="Crocker" fullname="D. Crocker"/>
+ <author initials="P." surname="Overell" fullname="P. Overell"/>
+ <date year="2008" month="January" />
+ </front>
+ <seriesInfo name="STD" value="68" />
+ <seriesInfo name="RFC" value="5234" />
+ </reference>
+
+ <reference anchor="TLS12">
+ <front>
+ <title>The Transport Layer Security (TLS) Protocol Version 1.2</title>
+ <author initials="T." surname="Dierks" fullname="Tim Dierks"/>
+ <author initials="E." surname="Rescorla" fullname="Eric Rescorla"/>
+ <date year="2008" month="August" />
+ </front>
+ <seriesInfo name="RFC" value="5246" />
+ </reference>
+
+ <reference anchor="TLS-EXT">
+ <front>
+ <title>
+ Transport Layer Security (TLS) Extensions: Extension Definitions
+ </title>
+ <author initials="D." surname="Eastlake" fullname="D. Eastlake"/>
+ <date year="2011" month="January"/>
+ </front>
+ <seriesInfo name="RFC" value="6066"/>
+ </reference>
+
+ <reference anchor="TLS-ALPN">
+ <front>
+ <title>Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension</title>
+ <author initials="S." surname="Friedl" fullname="Stephan Friedl"></author>
+ <author initials="A." surname="Popov" fullname="Andrei Popov"></author>
+ <author initials="A." surname="Langley" fullname="Adam Langley"></author>
+ <author initials="E." surname="Stephan" fullname="Emile Stephan"></author>
+ <date month="July" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7301" />
+ </reference>
+
+ <reference anchor="TLS-ECDHE">
+ <front>
+ <title>
+ TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois
+ Counter Mode (GCM)
+ </title>
+ <author initials="E." surname="Rescorla" fullname="E. Rescorla"/>
+ <date year="2008" month="August" />
+ </front>
+ <seriesInfo name="RFC" value="5289" />
+ </reference>
+
+ <reference anchor="FIPS186">
+ <front>
+ <title>
+ Digital Signature Standard (DSS)
+ </title>
+ <author><organization>NIST</organization></author>
+ <date year="2013" month="July" />
+ </front>
+ <seriesInfo name="FIPS" value="PUB 186-4" />
+ </reference>
+
+ <reference anchor="RFC7230">
+ <front>
+ <title>
+ Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing</title>
+ <author fullname="Roy T. Fielding" initials="R." role="editor" surname="Fielding">
+ <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+ <address><email>fielding@gbiv.com</email></address>
+ </author>
+ <author fullname="Julian F. Reschke" initials="J. F." role="editor" surname="Reschke">
+ <organization abbrev="greenbytes">greenbytes GmbH</organization>
+ <address><email>julian.reschke@greenbytes.de</email></address>
+ </author>
+ <date month="June" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7230" />
+ <x:source href="refs/rfc7230.xml"
+ basename="https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230"/>
+ </reference>
+ <reference anchor="RFC7231">
+ <front>
+ <title>
+ Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content</title>
+ <author fullname="Roy T. Fielding" initials="R." role="editor" surname="Fielding">
+ <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+ <address><email>fielding@gbiv.com</email></address>
+ </author>
+ <author fullname="Julian F. Reschke" initials="J. F." role="editor" surname="Reschke">
+ <organization abbrev="greenbytes">greenbytes GmbH</organization>
+ <address><email>julian.reschke@greenbytes.de</email></address>
+ </author>
+ <date month="June" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7231" />
+ <x:source href="refs/rfc7231.xml"
+ basename="https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7231"/>
+ </reference>
+ <reference anchor="RFC7232">
+ <front>
+ <title>Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests</title>
+ <author fullname="Roy T. Fielding" initials="R." role="editor" surname="Fielding">
+ <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+ <address><email>fielding@gbiv.com</email></address>
+ </author>
+ <author fullname="Julian F. Reschke" initials="J. F." role="editor" surname="Reschke">
+ <organization abbrev="greenbytes">greenbytes GmbH</organization>
+ <address><email>julian.reschke@greenbytes.de</email></address>
+ </author>
+ <date month="June" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7232" />
+ </reference>
+ <reference anchor="RFC7233">
+ <front>
+ <title>Hypertext Transfer Protocol (HTTP/1.1): Range Requests</title>
+ <author initials="R." surname="Fielding" fullname="Roy T. Fielding" role="editor">
+ <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+ <address><email>fielding@gbiv.com</email></address>
+ </author>
+ <author initials="Y." surname="Lafon" fullname="Yves Lafon" role="editor">
+ <organization abbrev="W3C">World Wide Web Consortium</organization>
+ <address><email>ylafon@w3.org</email></address>
+ </author>
+ <author initials="J. F." surname="Reschke" fullname="Julian F. Reschke" role="editor">
+ <organization abbrev="greenbytes">greenbytes GmbH</organization>
+ <address><email>julian.reschke@greenbytes.de</email></address>
+ </author>
+ <date month="June" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7233" />
+ </reference>
+ <reference anchor="RFC7234">
+ <front>
+ <title>Hypertext Transfer Protocol (HTTP/1.1): Caching</title>
+ <author initials="R." surname="Fielding" fullname="Roy T. Fielding" role="editor">
+ <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+ <address><email>fielding@gbiv.com</email></address>
+ </author>
+ <author fullname="Mark Nottingham" initials="M." role="editor" surname="Nottingham">
+ <organization>Akamai</organization>
+ <address><email>mnot@mnot.net</email></address>
+ </author>
+ <author initials="J. F." surname="Reschke" fullname="Julian F. Reschke" role="editor">
+ <organization abbrev="greenbytes">greenbytes GmbH</organization>
+ <address><email>julian.reschke@greenbytes.de</email></address>
+ </author>
+ <date month="June" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7234"/>
+ <x:source href="refs/rfc7234.xml"
+ basename="https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7234"/>
+ </reference>
+ <reference anchor="RFC7235">
+ <front>
+ <title>Hypertext Transfer Protocol (HTTP/1.1): Authentication</title>
+ <author initials="R." surname="Fielding" fullname="Roy T. Fielding" role="editor">
+ <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+ <address><email>fielding@gbiv.com</email></address>
+ </author>
+ <author initials="J. F." surname="Reschke" fullname="Julian F. Reschke" role="editor">
+ <organization abbrev="greenbytes">greenbytes GmbH</organization>
+ <address><email>julian.reschke@greenbytes.de</email></address>
+ </author>
+ <date month="June" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7235"/>
+ <x:source href="refs/rfc7235.xml"
+ basename="https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7235"/>
+ </reference>
+
+ <reference anchor="COOKIE">
+ <front>
+ <title>HTTP State Management Mechanism</title>
+ <author initials="A." surname="Barth" fullname="A. Barth"/>
+ <date year="2011" month="April" />
+ </front>
+ <seriesInfo name="RFC" value="6265" />
+ </reference>
+ </references>
+
+ <references title="Informative References">
+ <reference anchor="RFC1323">
+ <front>
+ <title>
+ TCP Extensions for High Performance
+ </title>
+ <author initials="V." surname="Jacobson" fullname="Van Jacobson"></author>
+ <author initials="B." surname="Braden" fullname="Bob Braden"></author>
+ <author initials="D." surname="Borman" fullname="Dave Borman"></author>
+ <date year="1992" month="May" />
+ </front>
+ <seriesInfo name="RFC" value="1323" />
+ </reference>
+
+ <reference anchor="RFC3749">
+ <front>
+ <title>Transport Layer Security Protocol Compression Methods</title>
+ <author initials="S." surname="Hollenbeck" fullname="S. Hollenbeck"/>
+ <date year="2004" month="May" />
+ </front>
+ <seriesInfo name="RFC" value="3749" />
+ </reference>
+
+ <reference anchor="RFC6585">
+ <front>
+ <title>Additional HTTP Status Codes</title>
+ <author initials="M." surname="Nottingham" fullname="Mark Nottingham"/>
+ <author initials="R." surname="Fielding" fullname="Roy Fielding"/>
+ <date year="2012" month="April" />
+ </front>
+ <seriesInfo name="RFC" value="6585" />
+ </reference>
+
+ <reference anchor="RFC4492">
+ <front>
+ <title>
+ Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS)
+ </title>
+ <author initials="S." surname="Blake-Wilson" fullname="S. Blake-Wilson"/>
+ <author initials="N." surname="Bolyard" fullname="N. Bolyard"/>
+ <author initials="V." surname="Gupta" fullname="V. Gupta"/>
+ <author initials="C." surname="Hawk" fullname="C. Hawk"/>
+ <author initials="B." surname="Moeller" fullname="B. Moeller"/>
+ <date year="2006" month="May" />
+ </front>
+ <seriesInfo name="RFC" value="4492" />
+ </reference>
+
+ <reference anchor="RFC5288">
+ <front>
+ <title>
+ AES Galois Counter Mode (GCM) Cipher Suites for TLS
+ </title>
+ <author initials="J." surname="Salowey" fullname="J. Salowey"/>
+ <author initials="A." surname="Choudhury" fullname="A. Choudhury"/>
+ <author initials="D." surname="McGrew" fullname="D. McGrew"/>
+ <date year="2008" month="August" />
+ </front>
+ <seriesInfo name="RFC" value="5288" />
+ </reference>
+
+ <reference anchor='HTML5'
+ target='http://www.w3.org/TR/2014/CR-html5-20140731/'>
+ <front>
+ <title>HTML5</title>
+ <author fullname='Robin Berjon' surname='Berjon' initials='R.'/>
+ <author fullname='Steve Faulkner' surname='Faulkner' initials='S.'/>
+ <author fullname='Travis Leithead' surname='Leithead' initials='T.'/>
+ <author fullname='Erika Doyle Navara' surname='Doyle Navara' initials='E.'/>
+ <author fullname='Edward O&apos;Connor' surname='O&apos;Connor' initials='E.'/>
+ <author fullname='Silvia Pfeiffer' surname='Pfeiffer' initials='S.'/>
+ <date year='2014' month='July' day='31'/>
+ </front>
+ <seriesInfo name='W3C Candidate Recommendation' value='CR-html5-20140731'/>
+ <annotation>
+ Latest version available at
+ <eref target='http://www.w3.org/TR/html5/'/>.
+ </annotation>
+ </reference>
+
+ <reference anchor="TALKING" target="http://w2spconf.com/2011/papers/websocket.pdf">
+ <front>
+ <title>
+ Talking to Yourself for Fun and Profit
+ </title>
+ <author initials="L-S." surname="Huang"/>
+ <author initials="E." surname="Chen"/>
+ <author initials="A." surname="Barth"/>
+ <author initials="E." surname="Rescorla"/>
+ <author initials="C." surname="Jackson"/>
+ <date year="2011" />
+ </front>
+ </reference>
+
+ <reference anchor="BREACH"
+ target="http://breachattack.com/resources/BREACH%20-%20SSL,%20gone%20in%2030%20seconds.pdf">
+ <front>
+ <title>
+ BREACH: Reviving the CRIME Attack
+ </title>
+ <author initials="Y." surname="Gluck"/>
+ <author initials="N." surname="Harris"/>
+ <author initials="A." surname="Prado"/>
+ <date year="2013" month="July" day="12"/>
+ </front>
+ </reference>
+
+ <reference anchor="BCP90">
+ <front>
+ <title>Registration Procedures for Message Header Fields</title>
+ <author initials="G." surname="Klyne" fullname="G. Klyne">
+ <organization>Nine by Nine</organization>
+ <address><email>GK-IETF@ninebynine.org</email></address>
+ </author>
+ <author initials="M." surname="Nottingham" fullname="M. Nottingham">
+ <organization>BEA Systems</organization>
+ <address><email>mnot@pobox.com</email></address>
+ </author>
+ <author initials="J." surname="Mogul" fullname="J. Mogul">
+ <organization>HP Labs</organization>
+ <address><email>JeffMogul@acm.org</email></address>
+ </author>
+ <date year="2004" month="September" />
+ </front>
+ <seriesInfo name="BCP" value="90" />
+ <seriesInfo name="RFC" value="3864" />
+ </reference>
+
+ <reference anchor="TLSBCP">
+ <front>
+ <title>Recommendations for Secure Use of TLS and DTLS</title>
+ <author initials="Y" surname="Sheffer" fullname="Yaron Sheffer">
+ <organization />
+ </author>
+ <author initials="R" surname="Holz" fullname="Ralph Holz">
+ <organization />
+ </author>
+ <author initials="P" surname="Saint-Andre" fullname="Peter Saint-Andre">
+ <organization />
+ </author>
+ <date month="June" day="23" year="2014" />
+ </front>
+ <seriesInfo name="Internet-Draft" value="draft-ietf-uta-tls-bcp-01" />
+ </reference>
+
+ <reference anchor="ALT-SVC">
+ <front>
+ <title>
+ HTTP Alternative Services
+ </title>
+ <author initials="M." surname="Nottingham" fullname="Mark Nottingham">
+ <organization>Akamai</organization>
+ </author>
+ <author initials="P." surname="McManus" fullname="Patrick McManus">
+ <organization>Mozilla</organization>
+ </author>
+ <author initials="J." surname="Reschke" fullname="Julian Reschke">
+ <organization>greenbytes</organization>
+ </author>
+ <date year="2014" month="April"/>
+ </front>
+ <seriesInfo name="Internet-Draft" value="draft-ietf-httpbis-alt-svc-02"/>
+ <x:source href="refs/draft-ietf-httpbis-alt-svc-02.xml"/>
+ </reference>
+ </references>
+
+ <section title="Change Log" anchor="change.log">
+ <t>
+ This section is to be removed by RFC Editor before publication.
+ </t>
+
+ <section title="Since draft-ietf-httpbis-http2-14" anchor="changes.since.draft-ietf-httpbis-http2-14">
+ <t>
+ Renamed Not Authoritative status code to Misdirected Request.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-13" anchor="changes.since.draft-ietf-httpbis-http2-13">
+ <t>
+ Pseudo-header fields are now required to appear strictly before regular ones.
+ </t>
+ <t>
+ Restored 1xx series status codes, except 101.
+ </t>
+ <t>
+ Changed frame length field 24-bits. Expanded frame header to 9 octets. Added a setting
+ to limit the damage.
+ </t>
+ <t>
+ Added a setting to advise peers of header set size limits.
+ </t>
+ <t>
+ Removed segments.
+ </t>
+ <t>
+ Made non-semantic-bearing <x:ref>HEADERS</x:ref> frames illegal in the HTTP mapping.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-12" anchor="changes.since.draft-ietf-httpbis-http2-12">
+ <t>
+ Restored extensibility options.
+ </t>
+ <t>
+ Restricting TLS cipher suites to AEAD only.
+ </t>
+ <t>
+ Removing Content-Encoding requirements.
+ </t>
+ <t>
+ Permitting the use of <x:ref>PRIORITY</x:ref> after stream close.
+ </t>
+ <t>
+ Removed ALTSVC frame.
+ </t>
+ <t>
+ Removed BLOCKED frame.
+ </t>
+ <t>
+ Reducing the maximum padding size to 256 octets; removing padding from
+ <x:ref>CONTINUATION</x:ref> frames.
+ </t>
+ <t>
+ Removed per-frame GZIP compression.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-11" anchor="changes.since.draft-ietf-httpbis-http2-11">
+ <t>
+ Added BLOCKED frame (at risk).
+ </t>
+ <t>
+ Simplified priority scheme.
+ </t>
+ <t>
+ Added <x:ref>DATA</x:ref> per-frame GZIP compression.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-10" anchor="changes.since.draft-ietf-httpbis-http2-10">
+ <t>
+ Changed "connection header" to "connection preface" to avoid confusion.
+ </t>
+ <t>
+ Added dependency-based stream prioritization.
+ </t>
+ <t>
+ Added "h2c" identifier to distinguish between cleartext and secured HTTP/2.
+ </t>
+ <t>
+ Adding missing padding to <x:ref>PUSH_PROMISE</x:ref>.
+ </t>
+ <t>
+ Integrate ALTSVC frame and supporting text.
+ </t>
+ <t>
+ Dropping requirement on "deflate" Content-Encoding.
+ </t>
+ <t>
+ Improving security considerations around use of compression.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-09" anchor="changes.since.draft-ietf-httpbis-http2-09">
+ <t>
+ Adding padding for data frames.
+ </t>
+ <t>
+ Renumbering frame types, error codes, and settings.
+ </t>
+ <t>
+ Adding INADEQUATE_SECURITY error code.
+ </t>
+ <t>
+ Updating TLS usage requirements to 1.2; forbidding TLS compression.
+ </t>
+ <t>
+ Removing extensibility for frames and settings.
+ </t>
+ <t>
+ Changing setting identifier size.
+ </t>
+ <t>
+ Removing the ability to disable flow control.
+ </t>
+ <t>
+ Changing the protocol identification token to "h2".
+ </t>
+ <t>
+ Changing the use of :authority to make it optional and to allow userinfo in non-HTTP
+ cases.
+ </t>
+ <t>
+ Allowing split on 0x0 for Cookie.
+ </t>
+ <t>
+ Reserved PRI method in HTTP/1.1 to avoid possible future collisions.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-08" anchor="changes.since.draft-ietf-httpbis-http2-08">
+ <t>
+ Added cookie crumbling for more efficient header compression.
+ </t>
+ <t>
+ Added header field ordering with the value-concatenation mechanism.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-07" anchor="changes.since.draft-ietf-httpbis-http2-07">
+ <t>
+ Marked draft for implementation.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-06" anchor="changes.since.draft-ietf-httpbis-http2-06">
+ <t>
+ Adding definition for CONNECT method.
+ </t>
+ <t>
+ Constraining the use of push to safe, cacheable methods with no request body.
+ </t>
+ <t>
+ Changing from :host to :authority to remove any potential confusion.
+ </t>
+ <t>
+ Adding setting for header compression table size.
+ </t>
+ <t>
+ Adding settings acknowledgement.
+ </t>
+ <t>
+ Removing unnecessary and potentially problematic flags from CONTINUATION.
+ </t>
+ <t>
+ Added denial of service considerations.
+ </t>
+ </section>
+ <section title="Since draft-ietf-httpbis-http2-05" anchor="changes.since.draft-ietf-httpbis-http2-05">
+ <t>
+ Marking the draft ready for implementation.
+ </t>
+ <t>
+ Renumbering END_PUSH_PROMISE flag.
+ </t>
+ <t>
+ Editorial clarifications and changes.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-04" anchor="changes.since.draft-ietf-httpbis-http2-04">
+ <t>
+ Added CONTINUATION frame for HEADERS and PUSH_PROMISE.
+ </t>
+ <t>
+ PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is
+ zero.
+ </t>
+ <t>
+ Push expanded to allow all safe methods without a request body.
+ </t>
+ <t>
+ Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1
+ hop-by-hop header fields.
+ </t>
+ <t>
+ Requiring that intermediaries not forward requests with missing or illegal routing
+ :-headers.
+ </t>
+ <t>
+ Clarified requirements around handling different frames after stream close, stream reset
+ and <x:ref>GOAWAY</x:ref>.
+ </t>
+ <t>
+ Added more specific prohibitions for sending of different frame types in various stream
+ states.
+ </t>
+ <t>
+ Making the last received setting value the effective value.
+ </t>
+ <t>
+ Clarified requirements on TLS version, extension and ciphers.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-03" anchor="changes.since.draft-ietf-httpbis-http2-03">
+ <t>
+ Committed major restructuring atrocities.
+ </t>
+ <t>
+ Added reference to first header compression draft.
+ </t>
+ <t>
+ Added more formal description of frame lifecycle.
+ </t>
+ <t>
+ Moved END_STREAM (renamed from FINAL) back to <x:ref>HEADERS</x:ref>/<x:ref>DATA</x:ref>.
+ </t>
+ <t>
+ Removed HEADERS+PRIORITY, added optional priority to <x:ref>HEADERS</x:ref> frame.
+ </t>
+ <t>
+ Added <x:ref>PRIORITY</x:ref> frame.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-02" anchor="changes.since.draft-ietf-httpbis-http2-02">
+ <t>
+ Added continuations to frames carrying header blocks.
+ </t>
+ <t>
+ Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful
+ concepts, like cookies.
+ </t>
+ <t>
+ Removed "message".
+ </t>
+ <t>
+ Switched to TLS ALPN from NPN.
+ </t>
+ <t>
+ Editorial changes.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-01" anchor="changes.since.draft-ietf-httpbis-http2-01">
+ <t>
+ Added IANA considerations section for frame types, error codes and settings.
+ </t>
+ <t>
+ Removed data frame compression.
+ </t>
+ <t>
+ Added <x:ref>PUSH_PROMISE</x:ref>.
+ </t>
+ <t>
+ Added globally applicable flags to framing.
+ </t>
+ <t>
+ Removed zlib-based header compression mechanism.
+ </t>
+ <t>
+ Updated references.
+ </t>
+ <t>
+ Clarified stream identifier reuse.
+ </t>
+ <t>
+ Removed CREDENTIALS frame and associated mechanisms.
+ </t>
+ <t>
+ Added advice against naive implementation of flow control.
+ </t>
+ <t>
+ Added session header section.
+ </t>
+ <t>
+ Restructured frame header. Removed distinction between data and control frames.
+ </t>
+ <t>
+ Altered flow control properties to include session-level limits.
+ </t>
+ <t>
+ Added note on cacheability of pushed resources and multiple tenant servers.
+ </t>
+ <t>
+ Changed protocol label form based on discussions.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-00" anchor="changes.since.draft-ietf-httpbis-http2-00">
+ <t>
+ Changed title throughout.
+ </t>
+ <t>
+ Removed section on Incompatibilities with SPDY draft#2.
+ </t>
+ <t>
+ Changed <x:ref>INTERNAL_ERROR</x:ref> on <x:ref>GOAWAY</x:ref> to have a value of 2 <eref
+ target="https://groups.google.com/forum/?fromgroups#!topic/spdy-dev/cfUef2gL3iU"/>.
+ </t>
+ <t>
+ Replaced abstract and introduction.
+ </t>
+ <t>
+ Added section on starting HTTP/2.0, including upgrade mechanism.
+ </t>
+ <t>
+ Removed unused references.
+ </t>
+ <t>
+ Added <xref target="fc-principles">flow control principles</xref> based on <eref
+ target="https://tools.ietf.org/html/draft-montenegro-httpbis-http2-fc-principles-01"/>.
+ </t>
+ </section>
+
+ <section title="Since draft-mbelshe-httpbis-spdy-00" anchor="changes.since.draft-mbelshe-httpbis-spdy-00">
+ <t>
+ Adopted as base for draft-ietf-httpbis-http2.
+ </t>
+ <t>
+ Updated authors/editors list.
+ </t>
+ <t>
+ Added status note.
+ </t>
+ </section>
+ </section>
+
+ </back>
+</rfc>
+<!--
+ vim:et:tw=100:sw=2:
+ -->
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
new file mode 100644
index 0000000..4392a09
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -0,0 +1,2284 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "crypto/rand"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math"
+ mathrand "math/rand"
+ "net"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/idna"
+ "golang.org/x/net/lex/httplex"
+)
+
+const (
+ // transportDefaultConnFlow is how many connection-level flow control
+ // tokens we give the server at start-up, past the default 64k.
+ transportDefaultConnFlow = 1 << 30
+
+ // transportDefaultStreamFlow is how many stream-level flow
+ // control tokens we announce to the peer, and how many bytes
+ // we buffer per stream.
+ transportDefaultStreamFlow = 4 << 20
+
+ // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
+ // a stream-level WINDOW_UPDATE for at a time.
+ transportDefaultStreamMinRefresh = 4 << 10
+
+ defaultUserAgent = "Go-http-client/2.0"
+)
+
+// Transport is an HTTP/2 Transport.
+//
+// A Transport internally caches connections to servers. It is safe
+// for concurrent use by multiple goroutines.
+type Transport struct {
+ // DialTLS specifies an optional dial function for creating
+ // TLS connections for requests.
+ //
+ // If DialTLS is nil, tls.Dial is used.
+ //
+ // If the returned net.Conn has a ConnectionState method like tls.Conn,
+ // it will be used to set http.Response.TLS.
+ DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client. If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // ConnPool optionally specifies an alternate connection pool to use.
+ // If nil, the default is used.
+ ConnPool ClientConnPool
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // AllowHTTP, if true, permits HTTP/2 requests using the insecure,
+ // plain-text "http" scheme. Note that this does not enable h2c support.
+ AllowHTTP bool
+
+ // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+ // send in the initial settings frame. It is how many bytes
+ // of response headers are allowed. Unlike the http2 spec, zero here
+ // means to use a default limit (currently 10MB). If you actually
+ // want to advertise an ulimited value to the peer, Transport
+ // interprets the highest possible value here (0xffffffff or 1<<32-1)
+ // to mean no limit.
+ MaxHeaderListSize uint32
+
+ // t1, if non-nil, is the standard library Transport using
+ // this transport. Its settings are used (but not its
+ // RoundTrip method, etc).
+ t1 *http.Transport
+
+ connPoolOnce sync.Once
+ connPoolOrDef ClientConnPool // non-nil version of ConnPool
+}
+
+func (t *Transport) maxHeaderListSize() uint32 {
+ if t.MaxHeaderListSize == 0 {
+ return 10 << 20
+ }
+ if t.MaxHeaderListSize == 0xffffffff {
+ return 0
+ }
+ return t.MaxHeaderListSize
+}
+
+func (t *Transport) disableCompression() bool {
+ return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
+}
+
+var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")
+
+// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
+// It requires Go 1.6 or later and returns an error if the net/http package is too old
+// or if t1 has already been HTTP/2-enabled.
+func ConfigureTransport(t1 *http.Transport) error {
+ _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
+ return err
+}
+
+func (t *Transport) connPool() ClientConnPool {
+ t.connPoolOnce.Do(t.initConnPool)
+ return t.connPoolOrDef
+}
+
+func (t *Transport) initConnPool() {
+ if t.ConnPool != nil {
+ t.connPoolOrDef = t.ConnPool
+ } else {
+ t.connPoolOrDef = &clientConnPool{t: t}
+ }
+}
+
+// ClientConn is the state of a single HTTP/2 client connection to an
+// HTTP/2 server.
+type ClientConn struct {
+ t *Transport
+ tconn net.Conn // usually *tls.Conn, except specialized impls
+ tlsState *tls.ConnectionState // nil only for specialized impls
+ singleUse bool // whether being used for a single http.Request
+
+ // readLoop goroutine fields:
+ readerDone chan struct{} // closed on error
+ readerErr error // set before readerDone is closed
+
+ idleTimeout time.Duration // or 0 for never
+ idleTimer *time.Timer
+
+ mu sync.Mutex // guards following
+ cond *sync.Cond // hold mu; broadcast on flow/closed changes
+ flow flow // our conn-level flow control quota (cs.flow is per stream)
+ inflow flow // peer's conn-level flow control
+ closed bool
+ wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
+ goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
+ goAwayDebug string // goAway frame's debug data, retained as a string
+ streams map[uint32]*clientStream // client-initiated
+ nextStreamID uint32
+ pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
+ pings map[[8]byte]chan struct{} // in flight ping data to notification channel
+ bw *bufio.Writer
+ br *bufio.Reader
+ fr *Framer
+ lastActive time.Time
+ // Settings from peer: (also guarded by mu)
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ peerMaxHeaderListSize uint64
+ initialWindowSize uint32
+
+ hbuf bytes.Buffer // HPACK encoder writes into this
+ henc *hpack.Encoder
+ freeBuf [][]byte
+
+ wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
+ werr error // first write error that has occurred
+}
+
+// clientStream is the state for a single HTTP/2 stream. One of these
+// is created for each Transport.RoundTrip call.
+type clientStream struct {
+ cc *ClientConn
+ req *http.Request
+ trace *clientTrace // or nil
+ ID uint32
+ resc chan resAndError
+ bufPipe pipe // buffered pipe with the flow-controlled response payload
+ startedWrite bool // started request body write; guarded by cc.mu
+ requestedGzip bool
+ on100 func() // optional code to run if get a 100 continue response
+
+ flow flow // guarded by cc.mu
+ inflow flow // guarded by cc.mu
+ bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+ readErr error // sticky read error; owned by transportResponseBody.Read
+ stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
+ didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu
+
+ peerReset chan struct{} // closed on peer reset
+ resetErr error // populated before peerReset is closed
+
+ done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu
+
+ // owned by clientConnReadLoop:
+ firstByte bool // got the first response byte
+ pastHeaders bool // got first MetaHeadersFrame (actual headers)
+ pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+
+ trailer http.Header // accumulated trailers
+ resTrailer *http.Header // client's Response.Trailer
+}
+
+// awaitRequestCancel waits for the user to cancel a request or for the done
+// channel to be signaled. A non-nil error is returned only if the request was
+// canceled.
+func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
+ ctx := reqContext(req)
+ if req.Cancel == nil && ctx.Done() == nil {
+ return nil
+ }
+ select {
+ case <-req.Cancel:
+ return errRequestCanceled
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-done:
+ return nil
+ }
+}
+
+// awaitRequestCancel waits for the user to cancel a request, its context to
+// expire, or for the request to be done (any way it might be removed from the
+// cc.streams map: peer reset, successful completion, TCP connection breakage,
+// etc). If the request is canceled, then cs will be canceled and closed.
+func (cs *clientStream) awaitRequestCancel(req *http.Request) {
+ if err := awaitRequestCancel(req, cs.done); err != nil {
+ cs.cancelStream()
+ cs.bufPipe.CloseWithError(err)
+ }
+}
+
+func (cs *clientStream) cancelStream() {
+ cc := cs.cc
+ cc.mu.Lock()
+ didReset := cs.didReset
+ cs.didReset = true
+ cc.mu.Unlock()
+
+ if !didReset {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ cc.forgetStreamID(cs.ID)
+ }
+}
+
+// checkResetOrDone reports any error sent in a RST_STREAM frame by the
+// server, or errStreamClosed if the stream is complete.
+func (cs *clientStream) checkResetOrDone() error {
+ select {
+ case <-cs.peerReset:
+ return cs.resetErr
+ case <-cs.done:
+ return errStreamClosed
+ default:
+ return nil
+ }
+}
+
+func (cs *clientStream) getStartedWrite() bool {
+ cc := cs.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cs.startedWrite
+}
+
+func (cs *clientStream) abortRequestBodyWrite(err error) {
+ if err == nil {
+ panic("nil error")
+ }
+ cc := cs.cc
+ cc.mu.Lock()
+ cs.stopReqBody = err
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+}
+
+type stickyErrWriter struct {
+ w io.Writer
+ err *error
+}
+
+func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
+ if *sew.err != nil {
+ return 0, *sew.err
+ }
+ n, err = sew.w.Write(p)
+ *sew.err = err
+ return
+}
+
+var ErrNoCachedConn = errors.New("http2: no cached connection was available")
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type RoundTripOpt struct {
+ // OnlyCachedConn controls whether RoundTripOpt may
+ // create a new TCP connection. If set true and
+ // no cached connection is available, RoundTripOpt
+ // will return ErrNoCachedConn.
+ OnlyCachedConn bool
+}
+
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return t.RoundTripOpt(req, RoundTripOpt{})
+}
+
+// authorityAddr returns a given authority (a host/IP, or host:port / ip:port)
+// and returns a host:port. The port 443 is added if needed.
+func authorityAddr(scheme string, authority string) (addr string) {
+ host, port, err := net.SplitHostPort(authority)
+ if err != nil { // authority didn't have a port
+ port = "443"
+ if scheme == "http" {
+ port = "80"
+ }
+ host = authority
+ }
+ if a, err := idna.ToASCII(host); err == nil {
+ host = a
+ }
+ // IPv6 address literal, without a port:
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+ return host + ":" + port
+ }
+ return net.JoinHostPort(host, port)
+}
+
+// RoundTripOpt is like RoundTrip, but takes options.
+func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
+ if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
+ return nil, errors.New("http2: unsupported scheme")
+ }
+
+ addr := authorityAddr(req.URL.Scheme, req.URL.Host)
+ for retry := 0; ; retry++ {
+ cc, err := t.connPool().GetClientConn(req, addr)
+ if err != nil {
+ t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
+ return nil, err
+ }
+ traceGotConn(req, cc)
+ res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req)
+ if err != nil && retry <= 6 {
+ if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil {
+ // After the first retry, do exponential backoff with 10% jitter.
+ if retry == 0 {
+ continue
+ }
+ backoff := float64(uint(1) << (uint(retry) - 1))
+ backoff += backoff * (0.1 * mathrand.Float64())
+ select {
+ case <-time.After(time.Second * time.Duration(backoff)):
+ continue
+ case <-reqContext(req).Done():
+ return nil, reqContext(req).Err()
+ }
+ }
+ }
+ if err != nil {
+ t.vlogf("RoundTrip failure: %v", err)
+ return nil, err
+ }
+ return res, nil
+ }
+}
+
+// CloseIdleConnections closes any connections which were previously
+// connected from previous requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *Transport) CloseIdleConnections() {
+ if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
+ cp.closeIdleConnections()
+ }
+}
+
+var (
+ errClientConnClosed = errors.New("http2: client conn is closed")
+ errClientConnUnusable = errors.New("http2: client conn not usable")
+ errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+)
+
+// shouldRetryRequest is called by RoundTrip when a request fails to get
+// response headers. It is always called with a non-nil error.
+// It returns either a request to retry (either the same request, or a
+// modified clone), or an error if the request can't be replayed.
+func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {
+ if !canRetryError(err) {
+ return nil, err
+ }
+ if !afterBodyWrite {
+ return req, nil
+ }
+ // If the Body is nil (or http.NoBody), it's safe to reuse
+ // this request and its Body.
+ if req.Body == nil || reqBodyIsNoBody(req.Body) {
+ return req, nil
+ }
+ // Otherwise we depend on the Request having its GetBody
+ // func defined.
+ getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody
+ if getBody == nil {
+ return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
+ }
+ body, err := getBody()
+ if err != nil {
+ return nil, err
+ }
+ newReq := *req
+ newReq.Body = body
+ return &newReq, nil
+}
+
+func canRetryError(err error) bool {
+ if err == errClientConnUnusable || err == errClientConnGotGoAway {
+ return true
+ }
+ if se, ok := err.(StreamError); ok {
+ return se.Code == ErrCodeRefusedStream
+ }
+ return false
+}
+
+func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
+ if err != nil {
+ return nil, err
+ }
+ return t.newClientConn(tconn, singleUse)
+}
+
+func (t *Transport) newTLSConfig(host string) *tls.Config {
+ cfg := new(tls.Config)
+ if t.TLSClientConfig != nil {
+ *cfg = *cloneTLSConfig(t.TLSClientConfig)
+ }
+ if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
+ cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
+ }
+ if cfg.ServerName == "" {
+ cfg.ServerName = host
+ }
+ return cfg
+}
+
+func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {
+ if t.DialTLS != nil {
+ return t.DialTLS
+ }
+ return t.dialTLSDefault
+}
+
// dialTLSDefault is the default TLS dialer: it dials, completes the
// handshake, verifies the hostname (unless InsecureSkipVerify), and
// requires that ALPN negotiated the "h2" protocol.
func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {
	cn, err := tls.Dial(network, addr, cfg)
	if err != nil {
		return nil, err
	}
	// Force the handshake now so the ALPN result can be checked below.
	if err := cn.Handshake(); err != nil {
		return nil, err
	}
	if !cfg.InsecureSkipVerify {
		if err := cn.VerifyHostname(cfg.ServerName); err != nil {
			return nil, err
		}
	}
	state := cn.ConnectionState()
	if p := state.NegotiatedProtocol; p != NextProtoTLS {
		return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
	}
	if !state.NegotiatedProtocolIsMutual {
		return nil, errors.New("http2: could not negotiate protocol mutually")
	}
	return cn, nil
}
+
+// disableKeepAlives reports whether connections should be closed as
+// soon as possible after handling the first request.
+func (t *Transport) disableKeepAlives() bool {
+ return t.t1 != nil && t.t1.DisableKeepAlives
+}
+
+func (t *Transport) expectContinueTimeout() time.Duration {
+ if t.t1 == nil {
+ return 0
+ }
+ return transportExpectContinueTimeout(t.t1)
+}
+
// NewClientConn creates a new, reusable ClientConn on c, which must
// already be speaking (or ready to speak) HTTP/2.
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
	return t.newClientConn(c, false)
}
+
// newClientConn initializes a ClientConn on c with spec-default
// settings, writes the client preface plus initial SETTINGS and a
// connection-level WINDOW_UPDATE, and starts the frame-reading
// goroutine.
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
	cc := &ClientConn{
		t:                     t,
		tconn:                 c,
		readerDone:            make(chan struct{}),
		nextStreamID:          1,
		maxFrameSize:          16 << 10,           // spec default
		initialWindowSize:     65535,              // spec default
		maxConcurrentStreams:  1000,               // "infinite", per spec. 1000 seems good enough.
		peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
		streams:               make(map[uint32]*clientStream),
		singleUse:             singleUse,
		wantSettingsAck:       true,
		pings:                 make(map[[8]byte]chan struct{}),
	}
	if d := t.idleConnTimeout(); d != 0 {
		cc.idleTimeout = d
		cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
	}
	if VerboseLogs {
		t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
	}

	cc.cond = sync.NewCond(&cc.mu)
	cc.flow.add(int32(initialWindowSize))

	// TODO: adjust this writer size to account for frame size +
	// MTU + crypto/tls record padding.
	cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
	cc.br = bufio.NewReader(c)
	cc.fr = NewFramer(cc.bw, cc.br)
	cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
	cc.fr.MaxHeaderListSize = t.maxHeaderListSize()

	// TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
	// henc in response to SETTINGS frames?
	cc.henc = hpack.NewEncoder(&cc.hbuf)

	// Record the TLS state, if the underlying conn exposes it.
	if cs, ok := c.(connectionStater); ok {
		state := cs.ConnectionState()
		cc.tlsState = &state
	}

	initialSettings := []Setting{
		{ID: SettingEnablePush, Val: 0},
		{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
	}
	if max := t.maxHeaderListSize(); max != 0 {
		initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
	}

	// Client preface, then SETTINGS, then grow the connection receive
	// window — errors surface via the sticky writer in cc.werr.
	cc.bw.Write(clientPreface)
	cc.fr.WriteSettings(initialSettings...)
	cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
	cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
	cc.bw.Flush()
	if cc.werr != nil {
		return nil, cc.werr
	}

	go cc.readLoop()
	return cc, nil
}
+
// setGoAway records a received GOAWAY frame and notifies all streams
// with IDs above the frame's LastStreamID that they were not (and will
// not be) processed by the server, so they may be retried elsewhere.
func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
	cc.mu.Lock()
	defer cc.mu.Unlock()

	old := cc.goAway
	cc.goAway = f

	// Merge the previous and current GoAway error frames.
	if cc.goAwayDebug == "" {
		cc.goAwayDebug = string(f.DebugData())
	}
	if old != nil && old.ErrCode != ErrCodeNo {
		cc.goAway.ErrCode = old.ErrCode
	}
	last := f.LastStreamID
	for streamID, cs := range cc.streams {
		if streamID > last {
			// Non-blocking send: if the stream already has a
			// result pending, leave it be.
			select {
			case cs.resc <- resAndError{err: errClientConnGotGoAway}:
			default:
			}
		}
	}
}
+
// CanTakeNewRequest reports whether the connection can take a new request,
// meaning it has not been closed or received or sent a GOAWAY.
func (cc *ClientConn) CanTakeNewRequest() bool {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	return cc.canTakeNewRequestLocked()
}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+ if cc.singleUse && cc.nextStreamID > 1 {
+ return false
+ }
+ return cc.goAway == nil && !cc.closed &&
+ int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
+}
+
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
// so this simply calls the synchronized closeIfIdle to shut down this
// connection. The timer could just call closeIfIdle, but this is more
// clear.
func (cc *ClientConn) onIdleTimeout() {
	cc.closeIfIdle()
}
+
// closeIfIdle closes the underlying TCP connection, but only if there
// are no streams currently active on it.
func (cc *ClientConn) closeIfIdle() {
	cc.mu.Lock()
	if len(cc.streams) > 0 {
		cc.mu.Unlock()
		return
	}
	cc.closed = true
	nextID := cc.nextStreamID
	// TODO: do clients send GOAWAY too? maybe? Just Close:
	cc.mu.Unlock()

	if VerboseLogs {
		cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
	}
	cc.tconn.Close()
}
+
// maxAllocFrameSize caps scratch buffers for outgoing DATA frames.
const maxAllocFrameSize = 512 << 10

// frameBuffer returns a scratch buffer suitable for writing DATA frames.
// They're capped at the min of the peer's max frame size or 512KB
// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
// buffers.
func (cc *ClientConn) frameScratchBuffer() []byte {
	cc.mu.Lock()
	size := cc.maxFrameSize
	if size > maxAllocFrameSize {
		size = maxAllocFrameSize
	}
	// Reuse a buffer from the connection's free list if one is large enough.
	for i, buf := range cc.freeBuf {
		if len(buf) >= int(size) {
			cc.freeBuf[i] = nil
			cc.mu.Unlock()
			return buf[:size]
		}
	}
	cc.mu.Unlock()
	return make([]byte, size)
}
+
+func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
+ if len(cc.freeBuf) < maxBufs {
+ cc.freeBuf = append(cc.freeBuf, buf)
+ return
+ }
+ for i, old := range cc.freeBuf {
+ if old == nil {
+ cc.freeBuf[i] = buf
+ return
+ }
+ }
+ // forget about it.
+}
+
// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
var errRequestCanceled = errors.New("net/http: request canceled")
+
+func commaSeparatedTrailers(req *http.Request) (string, error) {
+ keys := make([]string, 0, len(req.Trailer))
+ for k := range req.Trailer {
+ k = http.CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return "", &badStringError{"invalid Trailer key", k}
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ return strings.Join(keys, ","), nil
+ }
+ return "", nil
+}
+
+func (cc *ClientConn) responseHeaderTimeout() time.Duration {
+ if cc.t.t1 != nil {
+ return cc.t.t1.ResponseHeaderTimeout
+ }
+ // No way to do this (yet?) with just an http2.Transport. Probably
+ // no need. Request.Cancel this is the new way. We only need to support
+ // this for compatibility with the old http.Transport fields when
+ // we're doing transparent http2.
+ return 0
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
+// Certain headers are special-cased as okay but not transmitted later.
+func checkConnHeaders(req *http.Request) error {
+ if v := req.Header.Get("Upgrade"); v != "" {
+ return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
+ }
+ if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+ return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
+ }
+ if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") {
+ return fmt.Errorf("http2: invalid Connection request header: %q", vv)
+ }
+ return nil
+}
+
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
+func actualContentLength(req *http.Request) int64 {
+ if req.Body == nil || reqBodyIsNoBody(req.Body) {
+ return 0
+ }
+ if req.ContentLength != 0 {
+ return req.ContentLength
+ }
+ return -1
+}
+
// RoundTrip sends req on this connection and returns the response,
// discarding the internal "body already written" retry signal.
func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, _, err := cc.roundTrip(req)
	return resp, err
}
+
// roundTrip sends req on this connection. gotErrAfterReqBodyWrite
// reports whether err (if non-nil) happened after part of the request
// body had been written, which the caller uses to decide whether the
// request may be retried (see shouldRetryRequest).
func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) {
	if err := checkConnHeaders(req); err != nil {
		return nil, false, err
	}
	if cc.idleTimer != nil {
		cc.idleTimer.Stop()
	}

	trailers, err := commaSeparatedTrailers(req)
	if err != nil {
		return nil, false, err
	}
	hasTrailers := trailers != ""

	cc.mu.Lock()
	if err := cc.awaitOpenSlotForRequest(req); err != nil {
		cc.mu.Unlock()
		return nil, false, err
	}

	body := req.Body
	contentLen := actualContentLength(req)
	hasBody := contentLen != 0

	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
	var requestedGzip bool
	if !cc.t.disableCompression() &&
		req.Header.Get("Accept-Encoding") == "" &&
		req.Header.Get("Range") == "" &&
		req.Method != "HEAD" {
		// Request gzip only, not deflate. Deflate is ambiguous and
		// not as universally supported anyway.
		// See: http://www.gzip.org/zlib/zlib_faq.html#faq38
		//
		// Note that we don't request this for HEAD requests,
		// due to a bug in nginx:
		//   http://trac.nginx.org/nginx/ticket/358
		//   https://golang.org/issue/5522
		//
		// We don't request gzip if the request is for a range, since
		// auto-decoding a portion of a gzipped document will just fail
		// anyway. See https://golang.org/issue/8923
		requestedGzip = true
	}

	// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
	// sent by writeRequestBody below, along with any Trailers,
	// again in form HEADERS{1}, CONTINUATION{0,})
	hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
	if err != nil {
		cc.mu.Unlock()
		return nil, false, err
	}

	cs := cc.newStream()
	cs.req = req
	cs.trace = requestTrace(req)
	cs.requestedGzip = requestedGzip
	bodyWriter := cc.t.getBodyWriterState(cs, body)
	cs.on100 = bodyWriter.on100

	cc.wmu.Lock()
	endStream := !hasBody && !hasTrailers
	werr := cc.writeHeaders(cs.ID, endStream, hdrs)
	cc.wmu.Unlock()
	traceWroteHeaders(cs.trace)
	cc.mu.Unlock()

	if werr != nil {
		if hasBody {
			req.Body.Close() // per RoundTripper contract
			bodyWriter.cancel()
		}
		cc.forgetStreamID(cs.ID)
		// Don't bother sending a RST_STREAM (our write already failed;
		// no need to keep writing)
		traceWroteRequest(cs.trace, werr)
		return nil, false, werr
	}

	var respHeaderTimer <-chan time.Time
	if hasBody {
		bodyWriter.scheduleBodyWrite()
	} else {
		traceWroteRequest(cs.trace, nil)
		if d := cc.responseHeaderTimeout(); d != 0 {
			timer := time.NewTimer(d)
			defer timer.Stop()
			respHeaderTimer = timer.C
		}
	}

	readLoopResCh := cs.resc
	bodyWritten := false
	ctx := reqContext(req)

	handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) {
		res := re.res
		if re.err != nil || res.StatusCode > 299 {
			// On error or status code 3xx, 4xx, 5xx, etc abort any
			// ongoing write, assuming that the server doesn't care
			// about our request body. If the server replied with 1xx or
			// 2xx, however, then assume the server DOES potentially
			// want our body (e.g. full-duplex streaming:
			// golang.org/issue/13444). If it turns out the server
			// doesn't, they'll RST_STREAM us soon enough. This is a
			// heuristic to avoid adding knobs to Transport. Hopefully
			// we can keep it.
			bodyWriter.cancel()
			cs.abortRequestBodyWrite(errStopReqBodyWrite)
		}
		if re.err != nil {
			cc.forgetStreamID(cs.ID)
			return nil, cs.getStartedWrite(), re.err
		}
		res.Request = req
		res.TLS = cc.tlsState
		return res, false, nil
	}

	// Wait for the response headers, a timeout, cancellation, a peer
	// reset, or completion of the request body write.
	for {
		select {
		case re := <-readLoopResCh:
			return handleReadLoopResponse(re)
		case <-respHeaderTimer:
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
			}
			cc.forgetStreamID(cs.ID)
			return nil, cs.getStartedWrite(), errTimeout
		case <-ctx.Done():
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
			}
			cc.forgetStreamID(cs.ID)
			return nil, cs.getStartedWrite(), ctx.Err()
		case <-req.Cancel:
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
			}
			cc.forgetStreamID(cs.ID)
			return nil, cs.getStartedWrite(), errRequestCanceled
		case <-cs.peerReset:
			// processResetStream already removed the
			// stream from the streams map; no need for
			// forgetStreamID.
			return nil, cs.getStartedWrite(), cs.resetErr
		case err := <-bodyWriter.resc:
			// Prefer the read loop's response, if available. Issue 16102.
			select {
			case re := <-readLoopResCh:
				return handleReadLoopResponse(re)
			default:
			}
			if err != nil {
				return nil, cs.getStartedWrite(), err
			}
			bodyWritten = true
			// Body fully sent; start the response-header timer now.
			if d := cc.responseHeaderTimeout(); d != 0 {
				timer := time.NewTimer(d)
				defer timer.Stop()
				respHeaderTimer = timer.C
			}
		}
	}
}
+
// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams.
// Must hold cc.mu.
func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
	var waitingForConn chan struct{}
	var waitingForConnErr error // guarded by cc.mu
	for {
		cc.lastActive = time.Now()
		if cc.closed || !cc.canTakeNewRequestLocked() {
			return errClientConnUnusable
		}
		if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
			// A slot is free; stop the cancellation watcher, if any.
			if waitingForConn != nil {
				close(waitingForConn)
			}
			return nil
		}
		// Unfortunately, we cannot wait on a condition variable and channel at
		// the same time, so instead, we spin up a goroutine to check if the
		// request is canceled while we wait for a slot to open in the connection.
		if waitingForConn == nil {
			waitingForConn = make(chan struct{})
			go func() {
				if err := awaitRequestCancel(req, waitingForConn); err != nil {
					cc.mu.Lock()
					waitingForConnErr = err
					cc.cond.Broadcast()
					cc.mu.Unlock()
				}
			}()
		}
		cc.pendingRequests++
		cc.cond.Wait() // releases cc.mu while blocked
		cc.pendingRequests--
		if waitingForConnErr != nil {
			return waitingForConnErr
		}
	}
}
+
// writeHeaders writes the encoded header block hdrs for streamID as one
// HEADERS frame followed by zero or more CONTINUATION frames, splitting
// at the connection's max frame size.
// requires cc.wmu be held
func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
	first := true // first frame written (HEADERS is first, then CONTINUATION)
	frameSize := int(cc.maxFrameSize)
	for len(hdrs) > 0 && cc.werr == nil {
		chunk := hdrs
		if len(chunk) > frameSize {
			chunk = chunk[:frameSize]
		}
		hdrs = hdrs[len(chunk):]
		endHeaders := len(hdrs) == 0
		if first {
			cc.fr.WriteHeaders(HeadersFrameParam{
				StreamID:      streamID,
				BlockFragment: chunk,
				EndStream:     endStream,
				EndHeaders:    endHeaders,
			})
			first = false
		} else {
			cc.fr.WriteContinuation(streamID, endHeaders, chunk)
		}
	}
	// TODO(bradfitz): this Flush could potentially block (as
	// could the WriteHeaders call(s) above), which means they
	// wouldn't respond to Request.Cancel being readable. That's
	// rare, but this should probably be in a goroutine.
	cc.bw.Flush()
	return cc.werr
}
+
// internal error values; they don't escape to callers
var (
	// abort request body write; don't send cancel
	errStopReqBodyWrite = errors.New("http2: aborting request body write")

	// abort request body write, but also send a RST_STREAM of CANCEL
	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
)
+
// writeRequestBody streams body to the peer as DATA frames on cs,
// respecting both stream- and connection-level flow control, then ends
// the stream with either trailers or an empty DATA frame. bodyCloser is
// always closed before returning.
func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
	cc := cs.cc
	sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
	buf := cc.frameScratchBuffer()
	defer cc.putFrameScratchBuffer(buf)

	defer func() {
		traceWroteRequest(cs.trace, err)
		// TODO: write h12Compare test showing whether
		// Request.Body is closed by the Transport,
		// and in multiple cases: server replies <=299 and >299
		// while still writing request body
		cerr := bodyCloser.Close()
		if err == nil {
			err = cerr
		}
	}()

	req := cs.req
	hasTrailers := req.Trailer != nil

	var sawEOF bool
	for !sawEOF {
		n, err := body.Read(buf)
		if err == io.EOF {
			sawEOF = true
			err = nil
		} else if err != nil {
			return err
		}

		// Send the chunk read above in allowed-sized pieces, each
		// gated by flow-control tokens.
		remain := buf[:n]
		for len(remain) > 0 && err == nil {
			var allowed int32
			allowed, err = cs.awaitFlowControl(len(remain))
			switch {
			case err == errStopReqBodyWrite:
				return err
			case err == errStopReqBodyWriteAndCancel:
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
				return err
			case err != nil:
				return err
			}
			cc.wmu.Lock()
			data := remain[:allowed]
			remain = remain[allowed:]
			sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
			err = cc.fr.WriteData(cs.ID, sentEnd, data)
			if err == nil {
				// TODO(bradfitz): this flush is for latency, not bandwidth.
				// Most requests won't need this. Make this opt-in or
				// opt-out?  Use some heuristic on the body type? Nagel-like
				// timers?  Based on 'n'? Only last chunk of this for loop,
				// unless flow control tokens are low? For now, always.
				// If we change this, see comment below.
				err = cc.bw.Flush()
			}
			cc.wmu.Unlock()
		}
		if err != nil {
			return err
		}
	}

	if sentEnd {
		// Already sent END_STREAM (which implies we have no
		// trailers) and flushed, because currently all
		// WriteData frames above get a flush. So we're done.
		return nil
	}

	var trls []byte
	if hasTrailers {
		cc.mu.Lock()
		trls, err = cc.encodeTrailers(req)
		cc.mu.Unlock()
		if err != nil {
			cc.writeStreamReset(cs.ID, ErrCodeInternal, err)
			cc.forgetStreamID(cs.ID)
			return err
		}
	}

	cc.wmu.Lock()
	defer cc.wmu.Unlock()

	// Two ways to send END_STREAM: either with trailers, or
	// with an empty DATA frame.
	if len(trls) > 0 {
		err = cc.writeHeaders(cs.ID, true, trls)
	} else {
		err = cc.fr.WriteData(cs.ID, true, nil)
	}
	if ferr := cc.bw.Flush(); ferr != nil && err == nil {
		err = ferr
	}
	return err
}
+
// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow
// control tokens from the server.
// It returns either the non-zero number of tokens taken or an error
// if the stream is dead.
func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
	cc := cs.cc
	cc.mu.Lock()
	defer cc.mu.Unlock()
	for {
		if cc.closed {
			return 0, errClientConnClosed
		}
		if cs.stopReqBody != nil {
			return 0, cs.stopReqBody
		}
		if err := cs.checkResetOrDone(); err != nil {
			return 0, err
		}
		if a := cs.flow.available(); a > 0 {
			take := a
			if int(take) > maxBytes {
				take = int32(maxBytes) // can't truncate int; take is int32
			}
			if take > int32(cc.maxFrameSize) {
				take = int32(cc.maxFrameSize)
			}
			cs.flow.take(take)
			return take, nil
		}
		// No tokens available yet; woken by WINDOW_UPDATE processing
		// (or connection teardown) via cc.cond.Broadcast.
		cc.cond.Wait()
	}
}
+
// badStringError describes an invalid string value, e.g. a forbidden
// Trailer key.
type badStringError struct {
	what string
	str  string
}

// Error formats the description followed by the quoted offending value.
func (e *badStringError) Error() string {
	return fmt.Sprintf("%s %q", e.what, e.str)
}
+
// encodeHeaders HPACK-encodes the request headers (pseudo-headers
// first, then regular fields), enforcing the peer's advertised max
// header list size before touching the shared hpack encoder state.
// requires cc.mu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
	cc.hbuf.Reset()

	host := req.Host
	if host == "" {
		host = req.URL.Host
	}
	host, err := httplex.PunycodeHostPort(host)
	if err != nil {
		return nil, err
	}

	var path string
	if req.Method != "CONNECT" {
		path = req.URL.RequestURI()
		if !validPseudoPath(path) {
			orig := path
			path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
			if !validPseudoPath(path) {
				if req.URL.Opaque != "" {
					return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
				} else {
					return nil, fmt.Errorf("invalid request :path %q", orig)
				}
			}
		}
	}

	// Check for any invalid headers and return an error before we
	// potentially pollute our hpack state. (We want to be able to
	// continue to reuse the hpack encoder for future requests)
	for k, vv := range req.Header {
		if !httplex.ValidHeaderFieldName(k) {
			return nil, fmt.Errorf("invalid HTTP header name %q", k)
		}
		for _, v := range vv {
			if !httplex.ValidHeaderFieldValue(v) {
				return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
			}
		}
	}

	// enumerateHeaders walks every header pair that would be sent, in
	// order, so it can be used both to size and to encode the block.
	enumerateHeaders := func(f func(name, value string)) {
		// 8.1.2.3 Request Pseudo-Header Fields
		// The :path pseudo-header field includes the path and query parts of the
		// target URI (the path-absolute production and optionally a '?' character
		// followed by the query production (see Sections 3.3 and 3.4 of
		// [RFC3986]).
		f(":authority", host)
		f(":method", req.Method)
		if req.Method != "CONNECT" {
			f(":path", path)
			f(":scheme", req.URL.Scheme)
		}
		if trailers != "" {
			f("trailer", trailers)
		}

		var didUA bool
		for k, vv := range req.Header {
			if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") {
				// Host is :authority, already sent.
				// Content-Length is automatic, set below.
				continue
			} else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") ||
				strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") ||
				strings.EqualFold(k, "keep-alive") {
				// Per 8.1.2.2 Connection-Specific Header
				// Fields, don't send connection-specific
				// fields. We have already checked if any
				// are error-worthy so just ignore the rest.
				continue
			} else if strings.EqualFold(k, "user-agent") {
				// Match Go's http1 behavior: at most one
				// User-Agent. If set to nil or empty string,
				// then omit it. Otherwise if not mentioned,
				// include the default (below).
				didUA = true
				if len(vv) < 1 {
					continue
				}
				vv = vv[:1]
				if vv[0] == "" {
					continue
				}

			}

			for _, v := range vv {
				f(k, v)
			}
		}
		if shouldSendReqContentLength(req.Method, contentLength) {
			f("content-length", strconv.FormatInt(contentLength, 10))
		}
		if addGzipHeader {
			f("accept-encoding", "gzip")
		}
		if !didUA {
			f("user-agent", defaultUserAgent)
		}
	}

	// Do a first pass over the headers counting bytes to ensure
	// we don't exceed cc.peerMaxHeaderListSize. This is done as a
	// separate pass before encoding the headers to prevent
	// modifying the hpack state.
	hlSize := uint64(0)
	enumerateHeaders(func(name, value string) {
		hf := hpack.HeaderField{Name: name, Value: value}
		hlSize += uint64(hf.Size())
	})

	if hlSize > cc.peerMaxHeaderListSize {
		return nil, errRequestHeaderListSize
	}

	// Header list size is ok. Write the headers.
	enumerateHeaders(func(name, value string) {
		cc.writeHeader(strings.ToLower(name), value)
	})

	return cc.hbuf.Bytes(), nil
}
+
// shouldSendReqContentLength reports whether the http2.Transport should send
// a "content-length" request header. This logic is basically a copy of the
// net/http transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0,
// not unknown); -1 means unknown.
func shouldSendReqContentLength(method string, contentLength int64) bool {
	switch {
	case contentLength > 0:
		return true
	case contentLength < 0:
		return false
	}
	// For zero bodies, whether we send a content-length depends on the
	// method. It also kinda doesn't matter for http2 either way, with
	// END_STREAM.
	switch method {
	case "POST", "PUT", "PATCH":
		return true
	}
	return false
}
+
// encodeTrailers HPACK-encodes req.Trailer, first checking the total
// size against the peer's advertised max header list size so the shared
// hpack encoder state is not polluted on failure.
// requires cc.mu be held.
func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) {
	cc.hbuf.Reset()

	hlSize := uint64(0)
	for k, vv := range req.Trailer {
		for _, v := range vv {
			hf := hpack.HeaderField{Name: k, Value: v}
			hlSize += uint64(hf.Size())
		}
	}
	if hlSize > cc.peerMaxHeaderListSize {
		return nil, errRequestHeaderListSize
	}

	for k, vv := range req.Trailer {
		// Transfer-Encoding, etc.. have already been filtered at the
		// start of RoundTrip
		lowKey := strings.ToLower(k)
		for _, v := range vv {
			cc.writeHeader(lowKey, v)
		}
	}
	return cc.hbuf.Bytes(), nil
}
+
// writeHeader HPACK-encodes one header field into cc.hbuf.
func (cc *ClientConn) writeHeader(name, value string) {
	if VerboseLogs {
		log.Printf("http2: Transport encoding header %q = %q", name, value)
	}
	cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
}
+
// resAndError is the read loop's result for a request: either a
// response or an error, delivered to RoundTrip over clientStream.resc.
type resAndError struct {
	res *http.Response
	err error
}
+
// newStream allocates the next client-initiated (odd-numbered) stream,
// initializes its flow-control windows, and registers it in cc.streams.
// requires cc.mu be held.
func (cc *ClientConn) newStream() *clientStream {
	cs := &clientStream{
		cc:        cc,
		ID:        cc.nextStreamID,
		resc:      make(chan resAndError, 1),
		peerReset: make(chan struct{}),
		done:      make(chan struct{}),
	}
	cs.flow.add(int32(cc.initialWindowSize))
	cs.flow.setConnFlow(&cc.flow)
	cs.inflow.add(transportDefaultStreamFlow)
	cs.inflow.setConnFlow(&cc.inflow)
	cc.nextStreamID += 2 // client streams are odd-numbered
	cc.streams[cs.ID] = cs
	return cs
}
+
// forgetStreamID removes the stream with the given id from the
// connection's stream map, waking up any waiters.
func (cc *ClientConn) forgetStreamID(id uint32) {
	cc.streamByID(id, true)
}
+
// streamByID returns the stream with the given id, or nil if none.
// If andRemove is true and the stream exists (and the conn is not yet
// closed), the stream is removed from the map, its done channel is
// closed, and waiters on the connection are broadcast. Acquires cc.mu.
func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	cs := cc.streams[id]
	if andRemove && cs != nil && !cc.closed {
		cc.lastActive = time.Now()
		delete(cc.streams, id)
		// Restart the idle timer once the connection goes idle.
		if len(cc.streams) == 0 && cc.idleTimer != nil {
			cc.idleTimer.Reset(cc.idleTimeout)
		}
		close(cs.done)
		// Wake up checkResetOrDone via clientStream.awaitFlowControl and
		// wake up RoundTrip if there is a pending request.
		cc.cond.Broadcast()
	}
	return cs
}
+
// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct {
	cc        *ClientConn
	activeRes map[uint32]*clientStream // keyed by streamID
	// closeWhenIdle directs the loop to close the conn once no
	// streams remain (keep-alives disabled or single-use conn).
	closeWhenIdle bool
}
+
// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
	rl := &clientConnReadLoop{
		cc:        cc,
		activeRes: make(map[uint32]*clientStream),
	}

	defer rl.cleanup()
	cc.readerErr = rl.run()
	// On a connection-level error, tell the peer why we're going away
	// before cleanup closes the conn.
	if ce, ok := cc.readerErr.(ConnectionError); ok {
		cc.wmu.Lock()
		cc.fr.WriteGoAway(0, ErrCode(ce), nil)
		cc.wmu.Unlock()
	}
}
+
// GoAwayError is returned by the Transport when the server closes the
// TCP connection after sending a GOAWAY frame.
type GoAwayError struct {
	LastStreamID uint32
	ErrCode      ErrCode
	DebugData    string
}

// Error implements the error interface.
func (e GoAwayError) Error() string {
	return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
		e.LastStreamID, e.ErrCode, e.DebugData)
}
+
+func isEOFOrNetReadError(err error) bool {
+ if err == io.EOF {
+ return true
+ }
+ ne, ok := err.(*net.OpError)
+ return ok && ne.Op == "read"
+}
+
// cleanup tears down the connection when the read loop exits: it closes
// the TCP conn, marks the conn dead in the pool, stops the idle timer,
// and fails every remaining stream with the reader's error (upgraded to
// GoAwayError or ErrUnexpectedEOF where appropriate).
func (rl *clientConnReadLoop) cleanup() {
	cc := rl.cc
	defer cc.tconn.Close()
	defer cc.t.connPool().MarkDead(cc)
	defer close(cc.readerDone)

	if cc.idleTimer != nil {
		cc.idleTimer.Stop()
	}

	// Close any response bodies if the server closes prematurely.
	// TODO: also do this if we've written the headers but not
	// gotten a response yet.
	err := cc.readerErr
	cc.mu.Lock()
	if cc.goAway != nil && isEOFOrNetReadError(err) {
		err = GoAwayError{
			LastStreamID: cc.goAway.LastStreamID,
			ErrCode:      cc.goAway.ErrCode,
			DebugData:    cc.goAwayDebug,
		}
	} else if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	for _, cs := range rl.activeRes {
		cs.bufPipe.CloseWithError(err)
	}
	for _, cs := range cc.streams {
		// Non-blocking send: RoundTrip may already have a result.
		select {
		case cs.resc <- resAndError{err: err}:
		default:
		}
		close(cs.done)
	}
	cc.closed = true
	cc.cond.Broadcast()
	cc.mu.Unlock()
}
+
// run is the read loop proper: it reads frames until a fatal error
// occurs, dispatching each frame to the appropriate process* handler.
// Its return value becomes cc.readerErr.
func (rl *clientConnReadLoop) run() error {
	cc := rl.cc
	rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
	gotReply := false // ever saw a HEADERS reply
	gotSettings := false
	for {
		f, err := cc.fr.ReadFrame()
		if err != nil {
			cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
		}
		if se, ok := err.(StreamError); ok {
			// Stream-level error: reset just that stream and keep
			// the connection alive.
			if cs := cc.streamByID(se.StreamID, false); cs != nil {
				cs.cc.writeStreamReset(cs.ID, se.Code, err)
				cs.cc.forgetStreamID(cs.ID)
				if se.Cause == nil {
					se.Cause = cc.fr.errDetail
				}
				rl.endStreamError(cs, se)
			}
			continue
		} else if err != nil {
			return err
		}
		if VerboseLogs {
			cc.vlogf("http2: Transport received %s", summarizeFrame(f))
		}
		// The server must open with a SETTINGS frame.
		if !gotSettings {
			if _, ok := f.(*SettingsFrame); !ok {
				cc.logf("protocol error: received %T before a SETTINGS frame", f)
				return ConnectionError(ErrCodeProtocol)
			}
			gotSettings = true
		}
		maybeIdle := false // whether frame might transition us to idle

		switch f := f.(type) {
		case *MetaHeadersFrame:
			err = rl.processHeaders(f)
			maybeIdle = true
			gotReply = true
		case *DataFrame:
			err = rl.processData(f)
			maybeIdle = true
		case *GoAwayFrame:
			err = rl.processGoAway(f)
			maybeIdle = true
		case *RSTStreamFrame:
			err = rl.processResetStream(f)
			maybeIdle = true
		case *SettingsFrame:
			err = rl.processSettings(f)
		case *PushPromiseFrame:
			err = rl.processPushPromise(f)
		case *WindowUpdateFrame:
			err = rl.processWindowUpdate(f)
		case *PingFrame:
			err = rl.processPing(f)
		default:
			cc.logf("Transport: unhandled response frame type %T", f)
		}
		if err != nil {
			if VerboseLogs {
				cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
			}
			return err
		}
		if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
			cc.closeIfIdle()
		}
	}
}
+
// processHeaders handles a decoded HEADERS block from the server: the
// response headers for a stream, a 100-continue (handled inside
// handleResponse), or trailers for an already-started response.
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
	cc := rl.cc
	if f.StreamEnded() {
		// Issue 20521: If the stream has ended, streamByID() causes
		// clientStream.done to be closed, which causes the request's bodyWriter
		// to be closed with an errStreamClosed, which may be received by
		// clientConn.RoundTrip before the result of processing these headers.
		// Deferring stream closure allows the header processing to occur first.
		// clientConn.RoundTrip may still receive the bodyWriter error first, but
		// the fix for issue 16102 prioritises any response.
		defer cc.streamByID(f.StreamID, true)
	}
	cs := cc.streamByID(f.StreamID, false)
	if cs == nil {
		// We'd get here if we canceled a request while the
		// server had its response still in flight. So if this
		// was just something we canceled, ignore it.
		return nil
	}
	if !cs.firstByte {
		if cs.trace != nil {
			// TODO(bradfitz): move first response byte earlier,
			// when we first read the 9 byte header, not waiting
			// until all the HEADERS+CONTINUATION frames have been
			// merged. This works for now.
			traceFirstResponseByte(cs.trace)
		}
		cs.firstByte = true
	}
	if !cs.pastHeaders {
		cs.pastHeaders = true
	} else {
		// A second HEADERS block on the stream is trailers.
		return rl.processTrailers(cs, f)
	}

	res, err := rl.handleResponse(cs, f)
	if err != nil {
		if _, ok := err.(ConnectionError); ok {
			return err
		}
		// Any other error type is a stream error.
		cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
		cs.resc <- resAndError{err: err}
		return nil // return nil from process* funcs to keep conn alive
	}
	if res == nil {
		// (nil, nil) special case. See handleResponse docs.
		return nil
	}
	if res.Body != noBody {
		rl.activeRes[cs.ID] = cs
	}
	cs.resTrailer = &res.Trailer
	cs.resc <- resAndError{res: res}
	return nil
}
+
+// may return error types nil, or ConnectionError. Any other error value
+// is a StreamError of type ErrCodeProtocol. The returned error in that case
+// is the detail.
+//
+// As a special case, handleResponse may return (nil, nil) to skip the
+// frame (currently only used for 100 expect continue). This special
+// case is going away after Issue 13851 is fixed.
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
+	if f.Truncated {
+		// The peer's header block exceeded the advertised limit and the
+		// frame reader truncated it; report that rather than a partial response.
+		return nil, errResponseHeaderListSize
+	}
+
+	status := f.PseudoValue("status")
+	if status == "" {
+		return nil, errors.New("missing status pseudo header")
+	}
+	statusCode, err := strconv.Atoi(status)
+	if err != nil {
+		return nil, errors.New("malformed non-numeric status pseudo header")
+	}
+
+	if statusCode == 100 {
+		// Interim 100 Continue: notify the body writer (releasing any
+		// delayed write) and arrange for the next HEADERS frame on this
+		// stream to be processed as the real response headers.
+		traceGot100Continue(cs.trace)
+		if cs.on100 != nil {
+			cs.on100() // forces any write delay timer to fire
+		}
+		cs.pastHeaders = false // do it all again
+		return nil, nil
+	}
+
+	header := make(http.Header)
+	res := &http.Response{
+		Proto:      "HTTP/2.0",
+		ProtoMajor: 2,
+		Header:     header,
+		StatusCode: statusCode,
+		Status:     status + " " + http.StatusText(statusCode),
+	}
+	for _, hf := range f.RegularFields() {
+		key := http.CanonicalHeaderKey(hf.Name)
+		if key == "Trailer" {
+			// A Trailer header pre-declares trailer field names; record
+			// them with nil values, to be filled in by copyTrailers when
+			// the trailers HEADERS frame arrives.
+			t := res.Trailer
+			if t == nil {
+				t = make(http.Header)
+				res.Trailer = t
+			}
+			foreachHeaderElement(hf.Value, func(v string) {
+				t[http.CanonicalHeaderKey(v)] = nil
+			})
+		} else {
+			header[key] = append(header[key], hf.Value)
+		}
+	}
+
+	streamEnded := f.StreamEnded()
+	isHead := cs.req.Method == "HEAD"
+	if !streamEnded || isHead {
+		res.ContentLength = -1
+		if clens := res.Header["Content-Length"]; len(clens) == 1 {
+			if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
+				res.ContentLength = clen64
+			} else {
+				// TODO: care? unlike http/1, it won't mess up our framing, so it's
+				// more safe smuggling-wise to ignore.
+			}
+		} else if len(clens) > 1 {
+			// TODO: care? unlike http/1, it won't mess up our framing, so it's
+			// more safe smuggling-wise to ignore.
+		}
+	}
+
+	if streamEnded || isHead {
+		// No DATA frames will follow (or we must ignore them for HEAD);
+		// hand back an empty body.
+		res.Body = noBody
+		return res, nil
+	}
+
+	cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
+	cs.bytesRemain = res.ContentLength
+	res.Body = transportResponseBody{cs}
+	go cs.awaitRequestCancel(cs.req)
+
+	if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
+		// We transparently asked for gzip, so transparently decode it and
+		// hide the encoding-related headers from the caller.
+		res.Header.Del("Content-Encoding")
+		res.Header.Del("Content-Length")
+		res.ContentLength = -1
+		res.Body = &gzipReader{body: res.Body}
+		setResponseUncompressed(res)
+	}
+	return res, nil
+}
+
+// processTrailers handles a second HEADERS frame on a stream, which
+// carries the response trailers. It validates the frame (exactly one
+// trailers block, END_STREAM set, no pseudo headers), records the
+// trailer fields on cs, and ends the stream.
+func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
+	if cs.pastTrailers {
+		// Too many HEADERS frames for this stream.
+		return ConnectionError(ErrCodeProtocol)
+	}
+	cs.pastTrailers = true
+	if !f.StreamEnded() {
+		// We expect that any HEADERS frame carrying trailers also
+		// has END_STREAM set.
+		return ConnectionError(ErrCodeProtocol)
+	}
+	if len(f.PseudoFields()) > 0 {
+		// No pseudo header fields are defined for trailers.
+		// TODO: ConnectionError might be overly harsh? Check.
+		return ConnectionError(ErrCodeProtocol)
+	}
+
+	trailer := make(http.Header)
+	for _, hf := range f.RegularFields() {
+		key := http.CanonicalHeaderKey(hf.Name)
+		trailer[key] = append(trailer[key], hf.Value)
+	}
+	cs.trailer = trailer
+
+	// endStream will copy cs.trailer into the response's Trailer map.
+	rl.endStream(cs)
+	return nil
+}
+
+// transportResponseBody is the concrete type of Transport.RoundTrip's
+// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body.
+// On Close it sends RST_STREAM if EOF wasn't already seen.
+//
+// It is a value type wrapping only the stream pointer, so it is cheap
+// to copy into the http.Response.
+type transportResponseBody struct {
+	cs *clientStream
+}
+
+// Read reads buffered response data from the stream's pipe, enforces the
+// declared Content-Length (when known), and replenishes connection- and
+// stream-level inbound flow control with WINDOW_UPDATE frames as the
+// caller consumes data.
+func (b transportResponseBody) Read(p []byte) (n int, err error) {
+	cs := b.cs
+	cc := cs.cc
+
+	if cs.readErr != nil {
+		// Sticky error from a previous Read.
+		return 0, cs.readErr
+	}
+	n, err = b.cs.bufPipe.Read(p)
+	if cs.bytesRemain != -1 {
+		// A Content-Length was declared; enforce it in both directions.
+		if int64(n) > cs.bytesRemain {
+			// Server sent more than it declared: truncate and reset the stream.
+			n = int(cs.bytesRemain)
+			if err == nil {
+				err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
+				cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
+			}
+			cs.readErr = err
+			return int(cs.bytesRemain), err
+		}
+		cs.bytesRemain -= int64(n)
+		if err == io.EOF && cs.bytesRemain > 0 {
+			// Stream ended before the declared length was delivered.
+			err = io.ErrUnexpectedEOF
+			cs.readErr = err
+			return n, err
+		}
+	}
+	if n == 0 {
+		// No flow control tokens to send back.
+		return
+	}
+
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	var connAdd, streamAdd int32
+	// Check the conn-level first, before the stream-level.
+	if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
+		connAdd = transportDefaultConnFlow - v
+		cc.inflow.add(connAdd)
+	}
+	if err == nil { // No need to refresh if the stream is over or failed.
+		// Consider any buffered body data (read from the conn but not
+		// consumed by the client) when computing flow control for this
+		// stream.
+		v := int(cs.inflow.available()) + cs.bufPipe.Len()
+		if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
+			streamAdd = int32(transportDefaultStreamFlow - v)
+			cs.inflow.add(streamAdd)
+		}
+	}
+	if connAdd != 0 || streamAdd != 0 {
+		// Write the WINDOW_UPDATE frame(s) under the write mutex,
+		// acquired while still holding cc.mu (the lock order used
+		// throughout this file).
+		cc.wmu.Lock()
+		defer cc.wmu.Unlock()
+		if connAdd != 0 {
+			cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
+		}
+		if streamAdd != 0 {
+			cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
+		}
+		cc.bw.Flush()
+	}
+	return
+}
+
+// errClosedResponseBody is the sticky error installed on a stream's pipe
+// once the caller has closed the response body.
+var errClosedResponseBody = errors.New("http2: response body closed")
+
+// Close closes the response body. If the server has not already ended
+// the stream, it sends RST_STREAM(CANCEL); it also returns any unread
+// buffered bytes to the connection-level flow control window, then
+// breaks the pipe and forgets the stream. It always returns nil.
+func (b transportResponseBody) Close() error {
+	cs := b.cs
+	cc := cs.cc
+
+	serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
+	unread := cs.bufPipe.Len()
+
+	if unread > 0 || !serverSentStreamEnd {
+		cc.mu.Lock()
+		cc.wmu.Lock()
+		if !serverSentStreamEnd {
+			cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
+			cs.didReset = true
+		}
+		// Return connection-level flow control.
+		if unread > 0 {
+			cc.inflow.add(int32(unread))
+			cc.fr.WriteWindowUpdate(0, uint32(unread))
+		}
+		cc.bw.Flush()
+		cc.wmu.Unlock()
+		cc.mu.Unlock()
+	}
+
+	cs.bufPipe.BreakWithError(errClosedResponseBody)
+	cc.forgetStreamID(cs.ID)
+	return nil
+}
+
+// processData handles an incoming DATA frame: it validates the frame
+// against the stream's state, accounts for stream- and connection-level
+// flow control (refunding padding and data on already-reset streams),
+// and writes the payload into the stream's buffered pipe.
+func (rl *clientConnReadLoop) processData(f *DataFrame) error {
+	cc := rl.cc
+	cs := cc.streamByID(f.StreamID, f.StreamEnded())
+	data := f.Data()
+	if cs == nil {
+		cc.mu.Lock()
+		neverSent := cc.nextStreamID
+		cc.mu.Unlock()
+		if f.StreamID >= neverSent {
+			// We never asked for this.
+			cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
+			return ConnectionError(ErrCodeProtocol)
+		}
+		// We probably did ask for this, but canceled. Just ignore it.
+		// TODO: be stricter here? only silently ignore things which
+		// we canceled, but not things which were closed normally
+		// by the peer? Tough without accumulating too much state.
+
+		// But at least return their flow control:
+		if f.Length > 0 {
+			cc.mu.Lock()
+			cc.inflow.add(int32(f.Length))
+			cc.mu.Unlock()
+
+			cc.wmu.Lock()
+			cc.fr.WriteWindowUpdate(0, uint32(f.Length))
+			cc.bw.Flush()
+			cc.wmu.Unlock()
+		}
+		return nil
+	}
+	if !cs.firstByte {
+		// DATA before any HEADERS frame is a protocol violation.
+		cc.logf("protocol error: received DATA before a HEADERS frame")
+		rl.endStreamError(cs, StreamError{
+			StreamID: f.StreamID,
+			Code:     ErrCodeProtocol,
+		})
+		return nil
+	}
+	if f.Length > 0 {
+		if cs.req.Method == "HEAD" && len(data) > 0 {
+			cc.logf("protocol error: received DATA on a HEAD request")
+			rl.endStreamError(cs, StreamError{
+				StreamID: f.StreamID,
+				Code:     ErrCodeProtocol,
+			})
+			return nil
+		}
+		// Check connection-level flow control.
+		cc.mu.Lock()
+		if cs.inflow.available() >= int32(f.Length) {
+			cs.inflow.take(int32(f.Length))
+		} else {
+			cc.mu.Unlock()
+			return ConnectionError(ErrCodeFlowControl)
+		}
+		// Return any padded flow control now, since we won't
+		// refund it later on body reads.
+		var refund int
+		if pad := int(f.Length) - len(data); pad > 0 {
+			refund += pad
+		}
+		// Return len(data) now if the stream is already closed,
+		// since data will never be read.
+		didReset := cs.didReset
+		if didReset {
+			refund += len(data)
+		}
+		if refund > 0 {
+			cc.inflow.add(int32(refund))
+			cc.wmu.Lock()
+			cc.fr.WriteWindowUpdate(0, uint32(refund))
+			if !didReset {
+				// Only refund the stream-level window if the stream
+				// is still alive; a reset stream's window is moot.
+				cs.inflow.add(int32(refund))
+				cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
+			}
+			cc.bw.Flush()
+			cc.wmu.Unlock()
+		}
+		cc.mu.Unlock()
+
+		if len(data) > 0 && !didReset {
+			if _, err := cs.bufPipe.Write(data); err != nil {
+				rl.endStreamError(cs, err)
+				return err
+			}
+		}
+	}
+
+	if f.StreamEnded() {
+		rl.endStream(cs)
+	}
+	return nil
+}
+
+// errInvalidTrailers reports malformed response trailers.
+var errInvalidTrailers = errors.New("http2: invalid trailers")
+
+// endStream ends cs normally (with io.EOF and trailer copying), via
+// endStreamError with a nil error.
+func (rl *clientConnReadLoop) endStream(cs *clientStream) {
+	// TODO: check that any declared content-length matches, like
+	// server.go's (*stream).endStream method.
+	rl.endStreamError(cs, nil)
+}
+
+// endStreamError terminates cs with err. A nil err means a normal end:
+// the pipe is closed with io.EOF and cs.copyTrailers runs to publish any
+// received trailers. The result is also offered (non-blocking) to any
+// RoundTrip goroutine still waiting on cs.resc.
+func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
+	var code func()
+	if err == nil {
+		err = io.EOF
+		code = cs.copyTrailers
+	}
+	if isConnectionCloseRequest(cs.req) {
+		// The request asked for "Connection: close"; retire this
+		// connection once it goes idle.
+		rl.closeWhenIdle = true
+	}
+	cs.bufPipe.closeWithErrorAndCode(err, code)
+	delete(rl.activeRes, cs.ID)
+
+	select {
+	case cs.resc <- resAndError{err: err}:
+	default:
+	}
+}
+
+// copyTrailers copies the trailers received on the wire (cs.trailer)
+// into the response's Trailer map (*cs.resTrailer), allocating the map
+// lazily if no trailers were pre-declared.
+func (cs *clientStream) copyTrailers() {
+	for k, vv := range cs.trailer {
+		t := cs.resTrailer
+		if *t == nil {
+			*t = make(http.Header)
+		}
+		(*t)[k] = vv
+	}
+}
+
+// processGoAway handles a GOAWAY frame: the connection is marked dead in
+// the pool so no new requests use it, and the frame is recorded on cc.
+func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
+	cc := rl.cc
+	cc.t.connPool().MarkDead(cc)
+	if f.ErrCode != 0 {
+		// TODO: deal with GOAWAY more. particularly the error code
+		cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
+	}
+	cc.setGoAway(f)
+	return nil
+}
+
+// processSettings handles a SETTINGS frame. An ACK is only valid if we
+// are waiting for one. For a non-ACK frame it applies each recognized
+// setting to the connection state and then writes a SETTINGS ACK.
+func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
+	cc := rl.cc
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	if f.IsAck() {
+		if cc.wantSettingsAck {
+			cc.wantSettingsAck = false
+			return nil
+		}
+		// Unsolicited SETTINGS ACK.
+		return ConnectionError(ErrCodeProtocol)
+	}
+
+	err := f.ForeachSetting(func(s Setting) error {
+		switch s.ID {
+		case SettingMaxFrameSize:
+			cc.maxFrameSize = s.Val
+		case SettingMaxConcurrentStreams:
+			cc.maxConcurrentStreams = s.Val
+		case SettingMaxHeaderListSize:
+			cc.peerMaxHeaderListSize = uint64(s.Val)
+		case SettingInitialWindowSize:
+			// Values above the maximum flow-control
+			// window size of 2^31-1 MUST be treated as a
+			// connection error (Section 5.4.1) of type
+			// FLOW_CONTROL_ERROR.
+			if s.Val > math.MaxInt32 {
+				return ConnectionError(ErrCodeFlowControl)
+			}
+
+			// Adjust flow control of currently-open
+			// frames by the difference of the old initial
+			// window size and this one.
+			delta := int32(s.Val) - int32(cc.initialWindowSize)
+			for _, cs := range cc.streams {
+				cs.flow.add(delta)
+			}
+			// Wake any writers blocked waiting for window.
+			cc.cond.Broadcast()
+
+			cc.initialWindowSize = s.Val
+		default:
+			// TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
+			cc.vlogf("Unhandled Setting: %v", s)
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	cc.wmu.Lock()
+	defer cc.wmu.Unlock()
+
+	cc.fr.WriteSettingsAck()
+	cc.bw.Flush()
+	return cc.werr
+}
+
+// processWindowUpdate handles a WINDOW_UPDATE frame, crediting either
+// the connection-level flow (stream 0) or the named stream's flow, and
+// waking any writers blocked on window space.
+func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
+	cc := rl.cc
+	cs := cc.streamByID(f.StreamID, false)
+	if f.StreamID != 0 && cs == nil {
+		// Update for a stream we no longer track; ignore.
+		return nil
+	}
+
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	fl := &cc.flow
+	if cs != nil {
+		fl = &cs.flow
+	}
+	if !fl.add(int32(f.Increment)) {
+		// Window overflow is a flow-control connection error.
+		return ConnectionError(ErrCodeFlowControl)
+	}
+	cc.cond.Broadcast()
+	return nil
+}
+
+// processResetStream handles an RST_STREAM frame from the peer: it
+// records the stream error, signals cs.peerReset (exactly once), and
+// closes the stream's pipe with the error.
+func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
+	cs := rl.cc.streamByID(f.StreamID, true)
+	if cs == nil {
+		// TODO: return error if server tries to RST_STREAM an idle stream
+		return nil
+	}
+	select {
+	case <-cs.peerReset:
+		// Already reset.
+		// This is the only goroutine
+		// which closes this, so there
+		// isn't a race.
+	default:
+		err := streamError(cs.ID, f.ErrCode)
+		cs.resetErr = err
+		close(cs.peerReset)
+		cs.bufPipe.CloseWithError(err)
+		cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+	}
+	delete(rl.activeRes, cs.ID)
+	return nil
+}
+
+// Ping sends a PING frame to the server and waits for the ack.
+// Public implementation is in go17.go and not_go17.go
+//
+// It generates a random 8-byte payload, retrying until the payload is
+// not already an outstanding ping, then waits for the matching ack,
+// context cancellation, or connection teardown.
+func (cc *ClientConn) ping(ctx contextContext) error {
+	c := make(chan struct{})
+	// Generate a random payload
+	var p [8]byte
+	for {
+		if _, err := rand.Read(p[:]); err != nil {
+			return err
+		}
+		cc.mu.Lock()
+		// check for dup before insert
+		if _, found := cc.pings[p]; !found {
+			cc.pings[p] = c
+			cc.mu.Unlock()
+			break
+		}
+		cc.mu.Unlock()
+	}
+	cc.wmu.Lock()
+	if err := cc.fr.WritePing(false, p); err != nil {
+		cc.wmu.Unlock()
+		return err
+	}
+	if err := cc.bw.Flush(); err != nil {
+		cc.wmu.Unlock()
+		return err
+	}
+	cc.wmu.Unlock()
+	select {
+	case <-c:
+		// processPing closed c on receipt of the matching ack.
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-cc.readerDone:
+		// connection closed
+		return cc.readerErr
+	}
+}
+
+// processPing handles a PING frame. An ack is matched against any
+// outstanding ping by payload and its waiter is notified; a non-ack is
+// answered with a PING ack carrying the same payload.
+func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
+	if f.IsAck() {
+		cc := rl.cc
+		cc.mu.Lock()
+		defer cc.mu.Unlock()
+		// If ack, notify listener if any
+		if c, ok := cc.pings[f.Data]; ok {
+			close(c)
+			delete(cc.pings, f.Data)
+		}
+		return nil
+	}
+	cc := rl.cc
+	cc.wmu.Lock()
+	defer cc.wmu.Unlock()
+	if err := cc.fr.WritePing(true, f.Data); err != nil {
+		return err
+	}
+	return cc.bw.Flush()
+}
+
+// processPushPromise always rejects PUSH_PROMISE with a connection
+// error, since this client disables server push.
+func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
+	// We told the peer we don't want them.
+	// Spec says:
+	// "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+	// setting of the peer endpoint is set to 0. An endpoint that
+	// has set this setting and has received acknowledgement MUST
+	// treat the receipt of a PUSH_PROMISE frame as a connection
+	// error (Section 5.4.1) of type PROTOCOL_ERROR."
+	return ConnectionError(ErrCodeProtocol)
+}
+
+// writeStreamReset sends an RST_STREAM frame for streamID with the given
+// code and flushes it. err is currently unused (see TODO below).
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+	// TODO: map err to more interesting error codes, once the
+	// HTTP community comes up with some. But currently for
+	// RST_STREAM there's no equivalent to GOAWAY frame's debug
+	// data, and the error codes are all pretty vague ("cancel").
+	cc.wmu.Lock()
+	cc.fr.WriteRSTStream(streamID, code)
+	cc.bw.Flush()
+	cc.wmu.Unlock()
+}
+
+// Sentinel errors for oversized header lists and invalid trailers.
+var (
+	errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
+	errRequestHeaderListSize  = errors.New("http2: request header list larger than peer's advertised limit")
+	errPseudoTrailers         = errors.New("http2: invalid pseudo header in trailers")
+)
+
+// logf logs via the owning Transport's logger.
+func (cc *ClientConn) logf(format string, args ...interface{}) {
+	cc.t.logf(format, args...)
+}
+
+// vlogf logs via the owning Transport's verbose logger.
+func (cc *ClientConn) vlogf(format string, args ...interface{}) {
+	cc.t.vlogf(format, args...)
+}
+
+// vlogf logs only when the package-level VerboseLogs flag is enabled.
+func (t *Transport) vlogf(format string, args ...interface{}) {
+	if VerboseLogs {
+		t.logf(format, args...)
+	}
+}
+
+// logf logs unconditionally to the standard logger.
+func (t *Transport) logf(format string, args ...interface{}) {
+	log.Printf(format, args...)
+}
+
+// noBody is a reusable, always-empty response body.
+var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
+
+// strSliceContains reports whether ss contains s.
+func strSliceContains(ss []string, s string) bool {
+	for _, v := range ss {
+		if v == s {
+			return true
+		}
+	}
+	return false
+}
+
+// erringRoundTripper is an http.RoundTripper that always fails with a
+// fixed error.
+type erringRoundTripper struct{ err error }
+
+func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read
+type gzipReader struct {
+	body io.ReadCloser // underlying Response.Body
+	zr   *gzip.Reader  // lazily-initialized gzip reader
+	zerr error         // sticky error
+}
+
+// Read decompresses from the underlying body, creating the gzip reader
+// on first use. Any initialization error is sticky and returned on all
+// subsequent calls.
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+	if gz.zerr != nil {
+		return 0, gz.zerr
+	}
+	if gz.zr == nil {
+		gz.zr, err = gzip.NewReader(gz.body)
+		if err != nil {
+			gz.zerr = err
+			return 0, err
+		}
+	}
+	return gz.zr.Read(p)
+}
+
+// Close closes the underlying body; the gzip reader itself needs no
+// explicit close here.
+func (gz *gzipReader) Close() error {
+	return gz.body.Close()
+}
+
+// errorReader is an io.Reader that always fails with a fixed error.
+type errorReader struct{ err error }
+
+func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
+
+// bodyWriterState encapsulates various state around the Transport's writing
+// of the request body, particularly regarding doing delayed writes of the body
+// when the request contains "Expect: 100-continue".
+type bodyWriterState struct {
+	cs     *clientStream
+	timer  *time.Timer   // if non-nil, we're doing a delayed write
+	fnonce *sync.Once    // to call fn with
+	fn     func()        // the code to run in the goroutine, writing the body
+	resc   chan error    // result of fn's execution
+	delay  time.Duration // how long we should delay a delayed write for
+}
+
+// getBodyWriterState builds the bodyWriterState for cs. With no body it
+// returns a zero state. Otherwise it prepares fn to write the body and
+// report the result on resc; if the request carries
+// "Expect: 100-continue" and the transport has a nonzero
+// expectContinueTimeout, the write is deferred behind a timer that
+// scheduleBodyWrite later arms with the real delay.
+func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {
+	s.cs = cs
+	if body == nil {
+		return
+	}
+	resc := make(chan error, 1)
+	s.resc = resc
+	s.fn = func() {
+		cs.cc.mu.Lock()
+		cs.startedWrite = true
+		cs.cc.mu.Unlock()
+		resc <- cs.writeRequestBody(body, cs.req.Body)
+	}
+	s.delay = t.expectContinueTimeout()
+	if s.delay == 0 ||
+		!httplex.HeaderValuesContainsToken(
+			cs.req.Header["Expect"],
+			"100-continue") {
+		return
+	}
+	// fnonce guarantees fn runs at most once, whether triggered by the
+	// timer or by an early 100 Continue (see on100).
+	s.fnonce = new(sync.Once)
+
+	// Arm the timer with a very large duration, which we'll
+	// intentionally lower later. It has to be large now because
+	// we need a handle to it before writing the headers, but the
+	// s.delay value is defined to not start until after the
+	// request headers were written.
+	const hugeDuration = 365 * 24 * time.Hour
+	s.timer = time.AfterFunc(hugeDuration, func() {
+		s.fnonce.Do(s.fn)
+	})
+	return
+}
+
+// cancel stops any pending delayed body write.
+func (s bodyWriterState) cancel() {
+	if s.timer != nil {
+		s.timer.Stop()
+	}
+}
+
+// on100 is called when a 100 Continue response is received. If a delayed
+// write is pending, it stops the timer and starts the body write now.
+func (s bodyWriterState) on100() {
+	if s.timer == nil {
+		// If we didn't do a delayed write, ignore the server's
+		// bogus 100 continue response.
+		return
+	}
+	s.timer.Stop()
+	go func() { s.fnonce.Do(s.fn) }()
+}
+
+// scheduleBodyWrite starts writing the body, either immediately (in
+// the common case) or after the delay timeout. It should not be
+// called until after the headers have been written.
+func (s bodyWriterState) scheduleBodyWrite() {
+	if s.timer == nil {
+		// We're not doing a delayed write (see
+		// getBodyWriterState), so just start the writing
+		// goroutine immediately.
+		go s.fn()
+		return
+	}
+	traceWait100Continue(s.cs.trace)
+	// Re-arm the placeholder timer with the real delay; if Stop fails
+	// the timer already fired and the write is underway.
+	if s.timer.Stop() {
+		s.timer.Reset(s.delay)
+	}
+}
+
+// isConnectionCloseRequest reports whether req should use its own
+// connection for a single request and then close the connection.
+// That is the case when req.Close is set or the request carries a
+// "Connection: close" header token.
+func isConnectionCloseRequest(req *http.Request) bool {
+	return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close")
+}
diff --git a/vendor/golang.org/x/net/http2/transport_test.go b/vendor/golang.org/x/net/http2/transport_test.go
new file mode 100644
index 0000000..30d7b5d
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/transport_test.go
@@ -0,0 +1,3796 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+// Command-line flags controlling optional external-network behavior of
+// the transport tests, plus a shared insecure TLS config for talking to
+// the self-signed test servers.
+var (
+	extNet        = flag.Bool("extnet", false, "do external network tests")
+	transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport")
+	insecure      = flag.Bool("insecure", false, "insecure TLS dials") // TODO: dead code. remove?
+)
+
+var tlsConfigInsecure = &tls.Config{InsecureSkipVerify: true}
+
+// testContext is a minimal never-canceled context implementation for
+// tests (Done never fires; Err must not be called).
+type testContext struct{}
+
+func (testContext) Done() <-chan struct{}                   { return make(chan struct{}) }
+func (testContext) Err() error                              { panic("should not be called") }
+func (testContext) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false }
+func (testContext) Value(key interface{}) interface{}       { return nil }
+
+// TestTransportExternal performs a real GET against -transporthost over
+// the internet; it only runs when the -extnet flag is set.
+func TestTransportExternal(t *testing.T) {
+	if !*extNet {
+		t.Skip("skipping external network test")
+	}
+	req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil)
+	rt := &Transport{TLSClientConfig: tlsConfigInsecure}
+	res, err := rt.RoundTrip(req)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+	res.Write(os.Stdout)
+}
+
+// fakeTLSConn wraps a plain net.Conn but reports a fixed TLS 1.2
+// connection state, letting the server code treat it as TLS.
+type fakeTLSConn struct {
+	net.Conn
+}
+
+func (c *fakeTLSConn) ConnectionState() tls.ConnectionState {
+	return tls.ConnectionState{
+		Version:     tls.VersionTLS12,
+		CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+	}
+}
+
+// startH2cServer starts a local listener that serves a single h2c
+// (HTTP/2 over cleartext, via fakeTLSConn) connection echoing the
+// request path. The caller must Close the returned listener.
+func startH2cServer(t *testing.T) net.Listener {
+	h2Server := &Server{}
+	l := newLocalListener(t)
+	go func() {
+		conn, err := l.Accept()
+		if err != nil {
+			t.Error(err)
+			return
+		}
+		h2Server.ServeConn(&fakeTLSConn{conn}, &ServeConnOpts{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			fmt.Fprintf(w, "Hello, %v, http: %v", r.URL.Path, r.TLS == nil)
+		})})
+	}()
+	return l
+}
+
+// TestTransportH2c verifies the Transport can speak HTTP/2 over a plain
+// (non-TLS) connection when AllowHTTP is set with a custom DialTLS.
+func TestTransportH2c(t *testing.T) {
+	l := startH2cServer(t)
+	defer l.Close()
+	req, err := http.NewRequest("GET", "http://"+l.Addr().String()+"/foobar", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tr := &Transport{
+		AllowHTTP: true,
+		DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
+			return net.Dial(network, addr)
+		},
+	}
+	res, err := tr.RoundTrip(req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.ProtoMajor != 2 {
+		t.Fatal("proto not h2c")
+	}
+	body, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := string(body), "Hello, /foobar, http: true"; got != want {
+		t.Fatalf("response got %v, want %v", got, want)
+	}
+}
+
+// TestTransport exercises a basic round trip against a local test
+// server, checking status, headers, TLS state, and body contents.
+func TestTransport(t *testing.T) {
+	const body = "sup"
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		io.WriteString(w, body)
+	}, optOnlyServer)
+	defer st.Close()
+
+	tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+	defer tr.CloseIdleConnections()
+
+	req, err := http.NewRequest("GET", st.ts.URL, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	res, err := tr.RoundTrip(req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer res.Body.Close()
+
+	t.Logf("Got res: %+v", res)
+	if g, w := res.StatusCode, 200; g != w {
+		t.Errorf("StatusCode = %v; want %v", g, w)
+	}
+	if g, w := res.Status, "200 OK"; g != w {
+		t.Errorf("Status = %q; want %q", g, w)
+	}
+	wantHeader := http.Header{
+		"Content-Length": []string{"3"},
+		"Content-Type":   []string{"text/plain; charset=utf-8"},
+		"Date":           []string{"XXX"}, // see cleanDate
+	}
+	cleanDate(res)
+	if !reflect.DeepEqual(res.Header, wantHeader) {
+		t.Errorf("res Header = %v; want %v", res.Header, wantHeader)
+	}
+	if res.Request != req {
+		t.Errorf("Response.Request = %p; want %p", res.Request, req)
+	}
+	if res.TLS == nil {
+		t.Error("Response.TLS = nil; want non-nil")
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		t.Errorf("Body read: %v", err)
+	} else if string(slurp) != body {
+		t.Errorf("Body = %q; want %q", slurp, body)
+	}
+}
+
+// onSameConn issues two GETs (each modified by modReq) against a server
+// that echoes the client's remote address, and reports whether both
+// requests arrived on the same underlying connection.
+func onSameConn(t *testing.T, modReq func(*http.Request)) bool {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		io.WriteString(w, r.RemoteAddr)
+	}, optOnlyServer, func(c net.Conn, st http.ConnState) {
+		t.Logf("conn %v is now state %v", c.RemoteAddr(), st)
+	})
+	defer st.Close()
+	tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+	defer tr.CloseIdleConnections()
+	get := func() string {
+		req, err := http.NewRequest("GET", st.ts.URL, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		modReq(req)
+		res, err := tr.RoundTrip(req)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer res.Body.Close()
+		slurp, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			t.Fatalf("Body read: %v", err)
+		}
+		addr := strings.TrimSpace(string(slurp))
+		if addr == "" {
+			t.Fatalf("didn't get an addr in response")
+		}
+		return addr
+	}
+	first := get()
+	second := get()
+	return first == second
+}
+
+// TestTransportReusesConns verifies that unmodified requests share one
+// connection.
+func TestTransportReusesConns(t *testing.T) {
+	if !onSameConn(t, func(*http.Request) {}) {
+		t.Errorf("first and second responses were on different connections")
+	}
+}
+
+// TestTransportReusesConn_RequestClose verifies that req.Close forces a
+// fresh connection per request.
+func TestTransportReusesConn_RequestClose(t *testing.T) {
+	if onSameConn(t, func(r *http.Request) { r.Close = true }) {
+		t.Errorf("first and second responses were not on different connections")
+	}
+}
+
+// TestTransportReusesConn_ConnClose verifies that a "Connection: close"
+// header forces a fresh connection per request.
+func TestTransportReusesConn_ConnClose(t *testing.T) {
+	if onSameConn(t, func(r *http.Request) { r.Header.Set("Connection", "close") }) {
+		t.Errorf("first and second responses were not on different connections")
+	}
+}
+
+// Tests that the Transport only keeps one pending dial open per destination address.
+// https://golang.org/issue/13397
+//
+// It fires 10 concurrent requests and asserts that all of them landed
+// on a single server connection, then checks that CloseIdleConnections
+// fully empties the pool's dialing/conns/keys maps.
+func TestTransportGroupsPendingDials(t *testing.T) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		io.WriteString(w, r.RemoteAddr)
+	}, optOnlyServer)
+	defer st.Close()
+	tr := &Transport{
+		TLSClientConfig: tlsConfigInsecure,
+	}
+	defer tr.CloseIdleConnections()
+	var (
+		mu    sync.Mutex
+		dials = map[string]int{}
+	)
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			req, err := http.NewRequest("GET", st.ts.URL, nil)
+			if err != nil {
+				t.Error(err)
+				return
+			}
+			res, err := tr.RoundTrip(req)
+			if err != nil {
+				t.Error(err)
+				return
+			}
+			defer res.Body.Close()
+			slurp, err := ioutil.ReadAll(res.Body)
+			if err != nil {
+				t.Errorf("Body read: %v", err)
+			}
+			addr := strings.TrimSpace(string(slurp))
+			if addr == "" {
+				t.Errorf("didn't get an addr in response")
+			}
+			mu.Lock()
+			dials[addr]++
+			mu.Unlock()
+		}()
+	}
+	wg.Wait()
+	if len(dials) != 1 {
+		t.Errorf("saw %d dials; want 1: %v", len(dials), dials)
+	}
+	tr.CloseIdleConnections()
+	if err := retry(50, 10*time.Millisecond, func() error {
+		cp, ok := tr.connPool().(*clientConnPool)
+		if !ok {
+			return fmt.Errorf("Conn pool is %T; want *clientConnPool", tr.connPool())
+		}
+		cp.mu.Lock()
+		defer cp.mu.Unlock()
+		if len(cp.dialing) != 0 {
+			return fmt.Errorf("dialing map = %v; want empty", cp.dialing)
+		}
+		if len(cp.conns) != 0 {
+			return fmt.Errorf("conns = %v; want empty", cp.conns)
+		}
+		if len(cp.keys) != 0 {
+			return fmt.Errorf("keys = %v; want empty", cp.keys)
+		}
+		return nil
+	}); err != nil {
+		t.Errorf("State of pool after CloseIdleConnections: %v", err)
+	}
+}
+
+// retry calls fn up to tries times, sleeping delay between attempts,
+// and returns nil on the first success or the last error otherwise.
+func retry(tries int, delay time.Duration, fn func() error) error {
+	var err error
+	for i := 0; i < tries; i++ {
+		err = fn()
+		if err == nil {
+			return nil
+		}
+		time.Sleep(delay)
+	}
+	return err
+}
+
+// TestTransportAbortClosesPipes verifies that when the server connection
+// is torn down mid-response, a blocked body read returns an error rather
+// than deadlocking.
+func TestTransportAbortClosesPipes(t *testing.T) {
+	shutdown := make(chan struct{})
+	st := newServerTester(t,
+		func(w http.ResponseWriter, r *http.Request) {
+			w.(http.Flusher).Flush()
+			<-shutdown
+		},
+		optOnlyServer,
+	)
+	defer st.Close()
+	defer close(shutdown) // we must shutdown before st.Close() to avoid hanging
+
+	done := make(chan struct{})
+	requestMade := make(chan struct{})
+	go func() {
+		defer close(done)
+		tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+		req, err := http.NewRequest("GET", st.ts.URL, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		res, err := tr.RoundTrip(req)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer res.Body.Close()
+		close(requestMade)
+		_, err = ioutil.ReadAll(res.Body)
+		if err == nil {
+			t.Error("expected error from res.Body.Read")
+		}
+	}()
+
+	<-requestMade
+	// Now force the serve loop to end, via closing the connection.
+	st.closeConn()
+	// deadlock? that's a bug.
+	select {
+	case <-done:
+	case <-time.After(3 * time.Second):
+		t.Fatal("timeout")
+	}
+}
+
+// TODO: merge this with TestTransportBody to make TestTransportRequest? This
+// could be a table-driven test with extra goodies.
+//
+// TestTransportPath checks that the request path and query string
+// arrive at the handler intact.
+func TestTransportPath(t *testing.T) {
+	gotc := make(chan *url.URL, 1)
+	st := newServerTester(t,
+		func(w http.ResponseWriter, r *http.Request) {
+			gotc <- r.URL
+		},
+		optOnlyServer,
+	)
+	defer st.Close()
+
+	tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+	defer tr.CloseIdleConnections()
+	const (
+		path  = "/testpath"
+		query = "q=1"
+	)
+	surl := st.ts.URL + path + "?" + query
+	req, err := http.NewRequest("POST", surl, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	c := &http.Client{Transport: tr}
+	res, err := c.Do(req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer res.Body.Close()
+	got := <-gotc
+	if got.Path != path {
+		t.Errorf("Read Path = %q; want %q", got.Path, path)
+	}
+	if got.RawQuery != query {
+		t.Errorf("Read RawQuery = %q; want %q", got.RawQuery, query)
+	}
+}
+
+// randString returns a deterministic pseudo-random string of n bytes,
+// seeded by n so the same length always yields the same content.
+func randString(n int) string {
+	rnd := rand.New(rand.NewSource(int64(n)))
+	b := make([]byte, n)
+	for i := range b {
+		b[i] = byte(rnd.Intn(256))
+	}
+	return string(b)
+}
+
+// panicReader is an io.ReadCloser that panics if touched, used to prove
+// a code path never reads a request body.
+type panicReader struct{}
+
+func (panicReader) Read([]byte) (int, error) { panic("unexpected Read") }
+func (panicReader) Close() error             { panic("unexpected Close") }
+
+// TestActualContentLength is a table test of actualContentLength's
+// handling of nil bodies, explicit ContentLength, and http.NoBody.
+func TestActualContentLength(t *testing.T) {
+	tests := []struct {
+		req  *http.Request
+		want int64
+	}{
+		// Verify we don't read from Body:
+		0: {
+			req:  &http.Request{Body: panicReader{}},
+			want: -1,
+		},
+		// nil Body means 0, regardless of ContentLength:
+		1: {
+			req:  &http.Request{Body: nil, ContentLength: 5},
+			want: 0,
+		},
+		// ContentLength is used if set.
+		2: {
+			req:  &http.Request{Body: panicReader{}, ContentLength: 5},
+			want: 5,
+		},
+		// http.NoBody means 0, not -1.
+		3: {
+			req:  &http.Request{Body: go18httpNoBody()},
+			want: 0,
+		},
+	}
+	for i, tt := range tests {
+		got := actualContentLength(tt.req)
+		if got != tt.want {
+			t.Errorf("test[%d]: got %d; want %d", i, got, tt.want)
+		}
+	}
+}
+
+// TestTransportBody posts bodies of varying sizes (with and without a
+// declared Content-Length) and checks the handler receives the exact
+// bytes and the expected ContentLength (-1 when hidden and non-empty).
+func TestTransportBody(t *testing.T) {
+	bodyTests := []struct {
+		body         string
+		noContentLen bool
+	}{
+		{body: "some message"},
+		{body: "some message", noContentLen: true},
+		{body: strings.Repeat("a", 1<<20), noContentLen: true},
+		{body: strings.Repeat("a", 1<<20)},
+		{body: randString(16<<10 - 1)},
+		{body: randString(16 << 10)},
+		{body: randString(16<<10 + 1)},
+		{body: randString(512<<10 - 1)},
+		{body: randString(512 << 10)},
+		{body: randString(512<<10 + 1)},
+		{body: randString(1<<20 - 1)},
+		{body: randString(1 << 20)},
+		{body: randString(1<<20 + 2)},
+	}
+
+	type reqInfo struct {
+		req   *http.Request
+		slurp []byte
+		err   error
+	}
+	gotc := make(chan reqInfo, 1)
+	st := newServerTester(t,
+		func(w http.ResponseWriter, r *http.Request) {
+			slurp, err := ioutil.ReadAll(r.Body)
+			if err != nil {
+				gotc <- reqInfo{err: err}
+			} else {
+				gotc <- reqInfo{req: r, slurp: slurp}
+			}
+		},
+		optOnlyServer,
+	)
+	defer st.Close()
+
+	for i, tt := range bodyTests {
+		tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+		defer tr.CloseIdleConnections()
+
+		var body io.Reader = strings.NewReader(tt.body)
+		if tt.noContentLen {
+			body = struct{ io.Reader }{body} // just a Reader, hiding concrete type and other methods
+		}
+		req, err := http.NewRequest("POST", st.ts.URL, body)
+		if err != nil {
+			t.Fatalf("#%d: %v", i, err)
+		}
+		c := &http.Client{Transport: tr}
+		res, err := c.Do(req)
+		if err != nil {
+			t.Fatalf("#%d: %v", i, err)
+		}
+		defer res.Body.Close()
+		ri := <-gotc
+		if ri.err != nil {
+			t.Errorf("#%d: read error: %v", i, ri.err)
+			continue
+		}
+		if got := string(ri.slurp); got != tt.body {
+			t.Errorf("#%d: Read body mismatch.\n got: %q (len %d)\nwant: %q (len %d)", i, shortString(got), len(got), shortString(tt.body), len(tt.body))
+		}
+		wantLen := int64(len(tt.body))
+		if tt.noContentLen && tt.body != "" {
+			wantLen = -1
+		}
+		if ri.req.ContentLength != wantLen {
+			t.Errorf("#%d. handler got ContentLength = %v; want %v", i, ri.req.ContentLength, wantLen)
+		}
+	}
+}
+
+// shortString elides the middle of strings longer than 100 bytes so
+// failure messages stay readable.
+func shortString(v string) string {
+	const maxLen = 100
+	if len(v) <= maxLen {
+		return v
+	}
+	return fmt.Sprintf("%v[...%d bytes omitted...]%v", v[:maxLen/2], len(v)-maxLen, v[len(v)-maxLen/2:])
+}
+
+// TestTransportDialTLS checks that a custom DialTLS hook is invoked and
+// that its connection successfully carries a request.
+func TestTransportDialTLS(t *testing.T) {
+	var mu sync.Mutex // guards following
+	var gotReq, didDial bool
+
+	ts := newServerTester(t,
+		func(w http.ResponseWriter, r *http.Request) {
+			mu.Lock()
+			gotReq = true
+			mu.Unlock()
+		},
+		optOnlyServer,
+	)
+	defer ts.Close()
+	tr := &Transport{
+		DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) {
+			mu.Lock()
+			didDial = true
+			mu.Unlock()
+			cfg.InsecureSkipVerify = true
+			c, err := tls.Dial(netw, addr, cfg)
+			if err != nil {
+				return nil, err
+			}
+			return c, c.Handshake()
+		},
+	}
+	defer tr.CloseIdleConnections()
+	client := &http.Client{Transport: tr}
+	res, err := client.Get(ts.ts.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	res.Body.Close()
+	mu.Lock()
+	if !gotReq {
+		t.Error("didn't get request")
+	}
+	if !didDial {
+		t.Error("didn't use dial hook")
+	}
+}
+
+// TestConfigureTransport verifies ConfigureTransport wires "h2" into an
+// http.Transport's NextProtos, rejects being applied twice, and that
+// the configured transport then actually speaks HTTP/2.
+func TestConfigureTransport(t *testing.T) {
+	t1 := &http.Transport{}
+	err := ConfigureTransport(t1)
+	if err == errTransportVersion {
+		t.Skip(err)
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got := fmt.Sprintf("%#v", t1); !strings.Contains(got, `"h2"`) {
+		// Laziness, to avoid buildtags.
+		t.Errorf("stringification of HTTP/1 transport didn't contain \"h2\": %v", got)
+	}
+	wantNextProtos := []string{"h2", "http/1.1"}
+	if t1.TLSClientConfig == nil {
+		t.Errorf("nil t1.TLSClientConfig")
+	} else if !reflect.DeepEqual(t1.TLSClientConfig.NextProtos, wantNextProtos) {
+		t.Errorf("TLSClientConfig.NextProtos = %q; want %q", t1.TLSClientConfig.NextProtos, wantNextProtos)
+	}
+	if err := ConfigureTransport(t1); err == nil {
+		t.Error("unexpected success on second call to ConfigureTransport")
+	}
+
+	// And does it work?
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		io.WriteString(w, r.Proto)
+	}, optOnlyServer)
+	defer st.Close()
+
+	t1.TLSClientConfig.InsecureSkipVerify = true
+	c := &http.Client{Transport: t1}
+	res, err := c.Get(st.ts.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := string(slurp), "HTTP/2.0"; got != want {
+		t.Errorf("body = %q; want %q", got, want)
+	}
+}
+
+// capitalizeReader wraps an io.Reader and upper-cases ASCII letters
+// 'a'-'z' in the bytes it reads; all other bytes pass through unchanged.
+type capitalizeReader struct {
+ r io.Reader
+}
+
+// Read reads from the underlying reader and upper-cases ASCII
+// lowercase letters in place in p[:n].
+func (cr capitalizeReader) Read(p []byte) (n int, err error) {
+ n, err = cr.r.Read(p)
+ for i, b := range p[:n] {
+ if b >= 'a' && b <= 'z' {
+ p[i] = b - ('a' - 'A')
+ }
+ }
+ return
+}
+
+// flushWriter wraps an io.Writer and flushes after every Write when the
+// underlying writer implements http.Flusher, so each write is pushed to
+// the peer immediately (used by the full-duplex test).
+type flushWriter struct {
+ w io.Writer
+}
+
+// Write writes p to the underlying writer, then flushes if supported.
+func (fw flushWriter) Write(p []byte) (n int, err error) {
+ n, err = fw.w.Write(p)
+ if f, ok := fw.w.(http.Flusher); ok {
+ f.Flush()
+ }
+ return
+}
+
+// clientTester runs a Transport (client) against a hand-rolled fake
+// server over a single in-process TCP connection. Tests assign the
+// client and server funcs and then call run, which executes both
+// concurrently and reports the first error from either side.
+type clientTester struct {
+ t *testing.T
+ tr *Transport
+ sc, cc net.Conn // server and client conn
+ fr *Framer // server's framer
+ client func() error
+ server func() error
+}
+
+// newClientTester creates a clientTester whose Transport dials exactly
+// once, returning the pre-established client half of a local TCP pair.
+// The listener is closed immediately after the single Accept, since
+// only one connection is ever needed.
+func newClientTester(t *testing.T) *clientTester {
+ var dialOnce struct {
+ sync.Mutex
+ dialed bool
+ }
+ ct := &clientTester{
+ t: t,
+ }
+ ct.tr = &Transport{
+ TLSClientConfig: tlsConfigInsecure,
+ DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
+ dialOnce.Lock()
+ defer dialOnce.Unlock()
+ if dialOnce.dialed {
+ return nil, errors.New("only one dial allowed in test mode")
+ }
+ dialOnce.dialed = true
+ return ct.cc, nil
+ },
+ }
+
+ ln := newLocalListener(t)
+ cc, err := net.Dial("tcp", ln.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+
+ }
+ sc, err := ln.Accept()
+ if err != nil {
+ t.Fatal(err)
+ }
+ ln.Close()
+ ct.cc = cc
+ ct.sc = sc
+ ct.fr = NewFramer(sc, sc)
+ return ct
+}
+
+// newLocalListener returns a loopback TCP listener, preferring IPv4 and
+// falling back to IPv6 on hosts without IPv4 loopback. Fails the test
+// if neither is available.
+func newLocalListener(t *testing.T) net.Listener {
+ ln, err := net.Listen("tcp4", "127.0.0.1:0")
+ if err == nil {
+ return ln
+ }
+ ln, err = net.Listen("tcp6", "[::1]:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ return ln
+}
+
+// greet performs the server side of the HTTP/2 connection opening:
+// it consumes the client preface and the client's SETTINGS frame, then
+// writes the server's SETTINGS (with any provided settings) followed by
+// a SETTINGS ack. Fatal on any protocol deviation.
+func (ct *clientTester) greet(settings ...Setting) {
+ buf := make([]byte, len(ClientPreface))
+ _, err := io.ReadFull(ct.sc, buf)
+ if err != nil {
+ ct.t.Fatalf("reading client preface: %v", err)
+ }
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ ct.t.Fatalf("Reading client settings frame: %v", err)
+ }
+ if sf, ok := f.(*SettingsFrame); !ok {
+ ct.t.Fatalf("Wanted client settings frame; got %v", f)
+ _ = sf // stash it away?
+ }
+ if err := ct.fr.WriteSettings(settings...); err != nil {
+ ct.t.Fatal(err)
+ }
+ if err := ct.fr.WriteSettingsAck(); err != nil {
+ ct.t.Fatal(err)
+ }
+}
+
+// readNonSettingsFrame reads frames from the client, silently skipping
+// SETTINGS frames, and returns the first frame of any other type.
+func (ct *clientTester) readNonSettingsFrame() (Frame, error) {
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := f.(*SettingsFrame); ok {
+ continue
+ }
+ return f, nil
+ }
+}
+
+// cleanup releases the Transport's idle connections after a test run.
+func (ct *clientTester) cleanup() {
+ ct.tr.CloseIdleConnections()
+}
+
+// run starts the client and server funcs concurrently and waits for
+// both to finish, failing the test on the first error reported by
+// either goroutine.
+func (ct *clientTester) run() {
+ errc := make(chan error, 2)
+ ct.start("client", errc, ct.client)
+ ct.start("server", errc, ct.server)
+ defer ct.cleanup()
+ for i := 0; i < 2; i++ {
+ if err := <-errc; err != nil {
+ ct.t.Error(err)
+ return
+ }
+ }
+}
+
+// start runs fn in a goroutine and sends its result on errc, tagged
+// with which ("client" or "server"). If fn panics, the deferred func
+// still reports a "didn't finish" error rather than losing the result.
+func (ct *clientTester) start(which string, errc chan<- error, fn func() error) {
+ go func() {
+ finished := false
+ var err error
+ defer func() {
+ if !finished {
+ err = fmt.Errorf("%s goroutine didn't finish.", which)
+ } else if err != nil {
+ err = fmt.Errorf("%s: %v", which, err)
+ }
+ errc <- err
+ }()
+ err = fn()
+ finished = true
+ }()
+}
+
+// readFrame reads the next frame from the client with a 2s timeout.
+func (ct *clientTester) readFrame() (Frame, error) {
+ return readFrameTimeout(ct.fr, 2*time.Second)
+}
+
+// firstHeaders reads frames until it sees a HEADERS frame, skipping
+// WINDOW_UPDATE and SETTINGS frames. Any other frame type is an error.
+func (ct *clientTester) firstHeaders() (*HeadersFrame, error) {
+ for {
+ f, err := ct.readFrame()
+ if err != nil {
+ return nil, fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
+ }
+ switch f.(type) {
+ case *WindowUpdateFrame, *SettingsFrame:
+ continue
+ }
+ hf, ok := f.(*HeadersFrame)
+ if !ok {
+ return nil, fmt.Errorf("Got %T; want HeadersFrame", f)
+ }
+ return hf, nil
+ }
+}
+
+// countingReader is an endless Reader that fills p with a deterministic
+// byte pattern and atomically accumulates the total number of bytes
+// read into *n. It never returns io.EOF; callers bound it with
+// io.LimitReader.
+type countingReader struct {
+ n *int64
+}
+
+// Read fills p with byte(i) values and adds len(p) to the counter.
+func (r countingReader) Read(p []byte) (n int, err error) {
+ for i := range p {
+ p[i] = byte(i)
+ }
+ atomic.AddInt64(r.n, int64(len(p)))
+ return len(p), err
+}
+
+// Exercise request-body handling after a response arrives, for both a
+// success (200) and an early-error (403) response status.
+func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) }
+func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) }
+
+// testTransportReqBodyAfterResponse checks how much of a 10 MB request
+// body the Transport uploads once the server has already replied with
+// the given status: a 200 response should let the full body be sent,
+// while a non-200 (here 403) should cause the Transport to stop the
+// upload early (more than 0 but fewer than bodySize bytes written).
+func testTransportReqBodyAfterResponse(t *testing.T, status int) {
+ const bodySize = 10 << 20
+ clientDone := make(chan struct{})
+ ct := newClientTester(t)
+ ct.client = func() error {
+ defer ct.cc.(*net.TCPConn).CloseWrite()
+ defer close(clientDone)
+
+ var n int64 // atomic
+ req, err := http.NewRequest("PUT", "https://dummy.tld/", io.LimitReader(countingReader{&n}, bodySize))
+ if err != nil {
+ return err
+ }
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return fmt.Errorf("RoundTrip: %v", err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != status {
+ return fmt.Errorf("status code = %v; want %v", res.StatusCode, status)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return fmt.Errorf("Slurp: %v", err)
+ }
+ if len(slurp) > 0 {
+ return fmt.Errorf("unexpected body: %q", slurp)
+ }
+ if status == 200 {
+ if got := atomic.LoadInt64(&n); got != bodySize {
+ return fmt.Errorf("For 200 response, Transport wrote %d bytes; want %d", got, bodySize)
+ }
+ } else {
+ if got := atomic.LoadInt64(&n); got == 0 || got >= bodySize {
+ return fmt.Errorf("For %d response, Transport wrote %d bytes; want (0,%d) exclusive", status, got, bodySize)
+ }
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ var dataRecv int64
+ var closed bool
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ select {
+ case <-clientDone:
+ // If the client's done, it
+ // will have reported any
+ // errors on its side.
+ return nil
+ default:
+ return err
+ }
+ }
+ //println(fmt.Sprintf("server got frame: %v", f))
+ switch f := f.(type) {
+ case *WindowUpdateFrame, *SettingsFrame:
+ case *HeadersFrame:
+ if !f.HeadersEnded() {
+ return fmt.Errorf("headers should have END_HEADERS be ended: %v", f)
+ }
+ if f.StreamEnded() {
+ return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f)
+ }
+ case *DataFrame:
+ dataLen := len(f.Data())
+ if dataLen > 0 {
+ // On the first DATA byte, send the response headers
+ // (so the response arrives while upload is in flight).
+ if dataRecv == 0 {
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.StreamID,
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: buf.Bytes(),
+ })
+ }
+ // Grant back flow-control tokens on both the connection
+ // (stream 0) and the request stream so the upload can proceed.
+ if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil {
+ return err
+ }
+ if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil {
+ return err
+ }
+ }
+ dataRecv += int64(dataLen)
+
+ if !closed && ((status != 200 && dataRecv > 0) ||
+ (status == 200 && dataRecv == bodySize)) {
+ closed = true
+ if err := ct.fr.WriteData(f.StreamID, true, nil); err != nil {
+ return err
+ }
+ }
+ default:
+ return fmt.Errorf("Unexpected client frame %v", f)
+ }
+ }
+ }
+ ct.run()
+}
+
+// TestTransportFullDuplex verifies bidirectional streaming on a single
+// request: the client writes lines into the request body via a pipe and
+// reads each upper-cased echo back from the response before writing the
+// next line. See golang.org/issue/13444
+func TestTransportFullDuplex(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(200) // redundant but for clarity
+ w.(http.Flusher).Flush()
+ io.Copy(flushWriter{w}, capitalizeReader{r.Body})
+ fmt.Fprintf(w, "bye.\n")
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+ c := &http.Client{Transport: tr}
+
+ pr, pw := io.Pipe()
+ req, err := http.NewRequest("PUT", st.ts.URL, ioutil.NopCloser(pr))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Unknown length: forces a streaming (chunk-by-chunk) upload.
+ req.ContentLength = -1
+ res, err := c.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ t.Fatalf("StatusCode = %v; want %v", res.StatusCode, 200)
+ }
+ bs := bufio.NewScanner(res.Body)
+ want := func(v string) {
+ if !bs.Scan() {
+ t.Fatalf("wanted to read %q but Scan() = false, err = %v", v, bs.Err())
+ }
+ }
+ write := func(v string) {
+ _, err := io.WriteString(pw, v)
+ if err != nil {
+ t.Fatalf("pipe write: %v", err)
+ }
+ }
+ write("foo\n")
+ want("FOO")
+ write("bar\n")
+ want("BAR")
+ pw.Close()
+ want("bye.")
+ if err := bs.Err(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// TestTransportConnectRequest checks that CONNECT requests carry the
+// correct authority: the URL host by default, or the explicit
+// Request.Host override when set, as observed by the server handler.
+func TestTransportConnectRequest(t *testing.T) {
+ gotc := make(chan *http.Request, 1)
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ gotc <- r
+ }, optOnlyServer)
+ defer st.Close()
+
+ u, err := url.Parse(st.ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+ c := &http.Client{Transport: tr}
+
+ tests := []struct {
+ req *http.Request
+ want string
+ }{
+ {
+ req: &http.Request{
+ Method: "CONNECT",
+ Header: http.Header{},
+ URL: u,
+ },
+ want: u.Host,
+ },
+ {
+ req: &http.Request{
+ Method: "CONNECT",
+ Header: http.Header{},
+ URL: u,
+ Host: "example.com:123",
+ },
+ want: "example.com:123",
+ },
+ }
+
+ for i, tt := range tests {
+ res, err := c.Do(tt.req)
+ if err != nil {
+ t.Errorf("%d. RoundTrip = %v", i, err)
+ continue
+ }
+ res.Body.Close()
+ req := <-gotc
+ if req.Method != "CONNECT" {
+ t.Errorf("method = %q; want CONNECT", req.Method)
+ }
+ if req.Host != tt.want {
+ t.Errorf("Host = %q; want %q", req.Host, tt.want)
+ }
+ if req.URL.Host != tt.want {
+ t.Errorf("URL.Host = %q; want %q", req.URL.Host, tt.want)
+ }
+ }
+}
+
+// headerType describes how (or whether) a header block is sent in the
+// response-pattern tests below.
+type headerType int
+
+const (
+ noHeader headerType = iota // omitted
+ oneHeader
+ splitHeader // broken into continuation on purpose
+)
+
+// Short aliases matching the generated TestTransportResPattern_* names:
+// f* picks a headerType for a header block; d* picks whether a DATA
+// frame is sent.
+const (
+ f0 = noHeader
+ f1 = oneHeader
+ f2 = splitHeader
+ d0 = false
+ d1 = true
+)
+
+// Test all 36 combinations of response frame orders:
+// (3 ways of 100-continue) * (2 ways of headers) * (2 ways of data) * (3 ways of trailers)
+// Generated by http://play.golang.org/p/SScqYKJYXd
+// Each generated test name encodes its arguments:
+// c = 100-continue mode, h = response-header mode, d = data frame
+// present, t = trailers mode.
+func TestTransportResPattern_c0h1d0t0(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f0) }
+func TestTransportResPattern_c0h1d0t1(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f1) }
+func TestTransportResPattern_c0h1d0t2(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f2) }
+func TestTransportResPattern_c0h1d1t0(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f0) }
+func TestTransportResPattern_c0h1d1t1(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f1) }
+func TestTransportResPattern_c0h1d1t2(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f2) }
+func TestTransportResPattern_c0h2d0t0(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f0) }
+func TestTransportResPattern_c0h2d0t1(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f1) }
+func TestTransportResPattern_c0h2d0t2(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f2) }
+func TestTransportResPattern_c0h2d1t0(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f0) }
+func TestTransportResPattern_c0h2d1t1(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f1) }
+func TestTransportResPattern_c0h2d1t2(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f2) }
+func TestTransportResPattern_c1h1d0t0(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f0) }
+func TestTransportResPattern_c1h1d0t1(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f1) }
+func TestTransportResPattern_c1h1d0t2(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f2) }
+func TestTransportResPattern_c1h1d1t0(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f0) }
+func TestTransportResPattern_c1h1d1t1(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f1) }
+func TestTransportResPattern_c1h1d1t2(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f2) }
+func TestTransportResPattern_c1h2d0t0(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f0) }
+func TestTransportResPattern_c1h2d0t1(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f1) }
+func TestTransportResPattern_c1h2d0t2(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f2) }
+func TestTransportResPattern_c1h2d1t0(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f0) }
+func TestTransportResPattern_c1h2d1t1(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f1) }
+func TestTransportResPattern_c1h2d1t2(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f2) }
+func TestTransportResPattern_c2h1d0t0(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f0) }
+func TestTransportResPattern_c2h1d0t1(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f1) }
+func TestTransportResPattern_c2h1d0t2(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f2) }
+func TestTransportResPattern_c2h1d1t0(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f0) }
+func TestTransportResPattern_c2h1d1t1(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f1) }
+func TestTransportResPattern_c2h1d1t2(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f2) }
+func TestTransportResPattern_c2h2d0t0(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f0) }
+func TestTransportResPattern_c2h2d0t1(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f1) }
+func TestTransportResPattern_c2h2d0t2(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f2) }
+func TestTransportResPattern_c2h2d1t0(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f0) }
+func TestTransportResPattern_c2h2d1t1(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f1) }
+func TestTransportResPattern_c2h2d1t2(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f2) }
+
+// testTransportResPattern drives one combination of response frame
+// ordering: an optional 100-continue interim response, response headers
+// sent whole or split across a CONTINUATION, an optional DATA frame,
+// and optional trailers (whole or split). The client verifies status,
+// body, and trailers against what the pattern implies.
+func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerType, withData bool, trailers headerType) {
+ const reqBody = "some request body"
+ const resBody = "some response body"
+
+ if resHeader == noHeader {
+ // TODO: test 100-continue followed by immediate
+ // server stream reset, without headers in the middle?
+ panic("invalid combination")
+ }
+
+ ct := newClientTester(t)
+ ct.client = func() error {
+ req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody))
+ if expect100Continue != noHeader {
+ req.Header.Set("Expect", "100-continue")
+ }
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return fmt.Errorf("RoundTrip: %v", err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ return fmt.Errorf("status code = %v; want 200", res.StatusCode)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return fmt.Errorf("Slurp: %v", err)
+ }
+ wantBody := resBody
+ if !withData {
+ wantBody = ""
+ }
+ if string(slurp) != wantBody {
+ return fmt.Errorf("body = %q; want %q", slurp, wantBody)
+ }
+ if trailers == noHeader {
+ if len(res.Trailer) > 0 {
+ t.Errorf("Trailer = %v; want none", res.Trailer)
+ }
+ } else {
+ want := http.Header{"Some-Trailer": {"some-value"}}
+ if !reflect.DeepEqual(res.Trailer, want) {
+ t.Errorf("Trailer = %v; want %v", res.Trailer, want)
+ }
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return err
+ }
+ endStream := false
+ // send writes the current hpack buffer as a header block,
+ // either as one HEADERS frame or split HEADERS+CONTINUATION.
+ send := func(mode headerType) {
+ hbf := buf.Bytes()
+ switch mode {
+ case oneHeader:
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.Header().StreamID,
+ EndHeaders: true,
+ EndStream: endStream,
+ BlockFragment: hbf,
+ })
+ case splitHeader:
+ if len(hbf) < 2 {
+ panic("too small")
+ }
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.Header().StreamID,
+ EndHeaders: false,
+ EndStream: endStream,
+ BlockFragment: hbf[:1],
+ })
+ ct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:])
+ default:
+ panic("bogus mode")
+ }
+ }
+ switch f := f.(type) {
+ case *WindowUpdateFrame, *SettingsFrame:
+ case *DataFrame:
+ if !f.StreamEnded() {
+ // No need to send flow control tokens. The test request body is tiny.
+ continue
+ }
+ // Response headers (1+ frames; 1 or 2 in this test, but never 0)
+ {
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ enc.WriteField(hpack.HeaderField{Name: "x-foo", Value: "blah"})
+ enc.WriteField(hpack.HeaderField{Name: "x-bar", Value: "more"})
+ if trailers != noHeader {
+ enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "some-trailer"})
+ }
+ endStream = withData == false && trailers == noHeader
+ send(resHeader)
+ }
+ if withData {
+ endStream = trailers == noHeader
+ ct.fr.WriteData(f.StreamID, endStream, []byte(resBody))
+ }
+ if trailers != noHeader {
+ endStream = true
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"})
+ send(trailers)
+ }
+ if endStream {
+ return nil
+ }
+ case *HeadersFrame:
+ if expect100Continue != noHeader {
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"})
+ send(expect100Continue)
+ }
+ }
+ }
+ }
+ ct.run()
+}
+
+// TestTransportReceiveUndeclaredTrailer verifies that a trailer the
+// server never announced in a "Trailer" response header still shows up
+// in res.Trailer on the client.
+func TestTransportReceiveUndeclaredTrailer(t *testing.T) {
+ ct := newClientTester(t)
+ ct.client = func() error {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return fmt.Errorf("RoundTrip: %v", err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ return fmt.Errorf("status code = %v; want 200", res.StatusCode)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, nil)
+ }
+ if len(slurp) > 0 {
+ return fmt.Errorf("body = %q; want nothing", slurp)
+ }
+ if _, ok := res.Trailer["Some-Trailer"]; !ok {
+ return fmt.Errorf("expected Some-Trailer")
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+
+ // Skip non-HEADERS frames (bounded at 10 to avoid spinning forever).
+ var n int
+ var hf *HeadersFrame
+ for hf == nil && n < 10 {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return err
+ }
+ hf, _ = f.(*HeadersFrame)
+ n++
+ }
+
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+
+ // send headers without Trailer header
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: buf.Bytes(),
+ })
+
+ // send trailers
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "I'm an undeclared Trailer!"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: buf.Bytes(),
+ })
+ return nil
+ }
+ ct.run()
+}
+
+// Trailers containing a pseudo-header (":colon") are invalid; verify
+// the Transport rejects them whether sent in one HEADERS frame or split
+// across a CONTINUATION.
+func TestTransportInvalidTrailer_Pseudo1(t *testing.T) {
+ testTransportInvalidTrailer_Pseudo(t, oneHeader)
+}
+func TestTransportInvalidTrailer_Pseudo2(t *testing.T) {
+ testTransportInvalidTrailer_Pseudo(t, splitHeader)
+}
+func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) {
+ testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) {
+ enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"})
+ enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"})
+ })
+}
+
+// HTTP/2 header field names must be lowercase; verify the Transport
+// rejects trailers with capitalized, empty, or binary-valued fields.
+func TestTransportInvalidTrailer_Capital1(t *testing.T) {
+ testTransportInvalidTrailer_Capital(t, oneHeader)
+}
+func TestTransportInvalidTrailer_Capital2(t *testing.T) {
+ testTransportInvalidTrailer_Capital(t, splitHeader)
+}
+func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) {
+ testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) {
+ enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"})
+ enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"})
+ })
+}
+func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) {
+ testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) {
+ enc.WriteField(hpack.HeaderField{Name: "", Value: "bad"})
+ })
+}
+func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) {
+ testInvalidTrailer(t, oneHeader, headerFieldValueError("has\nnewline"), func(enc *hpack.Encoder) {
+ enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"})
+ })
+}
+
+// testInvalidTrailer has the fake server send valid response headers
+// followed by trailers written by writeTrailer; the client must see a
+// StreamError whose Cause equals wantErr when reading the body.
+func testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeTrailer func(*hpack.Encoder)) {
+ ct := newClientTester(t)
+ ct.client = func() error {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return fmt.Errorf("RoundTrip: %v", err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ return fmt.Errorf("status code = %v; want 200", res.StatusCode)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ se, ok := err.(StreamError)
+ if !ok || se.Cause != wantErr {
+ return fmt.Errorf("res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v", slurp, err, wantErr, wantErr)
+ }
+ if len(slurp) > 0 {
+ return fmt.Errorf("body = %q; want nothing", slurp)
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return err
+ }
+ switch f := f.(type) {
+ case *HeadersFrame:
+ var endStream bool
+ // send writes the current hpack buffer, whole or split
+ // across HEADERS+CONTINUATION per mode.
+ send := func(mode headerType) {
+ hbf := buf.Bytes()
+ switch mode {
+ case oneHeader:
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.StreamID,
+ EndHeaders: true,
+ EndStream: endStream,
+ BlockFragment: hbf,
+ })
+ case splitHeader:
+ if len(hbf) < 2 {
+ panic("too small")
+ }
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.StreamID,
+ EndHeaders: false,
+ EndStream: endStream,
+ BlockFragment: hbf[:1],
+ })
+ ct.fr.WriteContinuation(f.StreamID, true, hbf[1:])
+ default:
+ panic("bogus mode")
+ }
+ }
+ // Response headers (1+ frames; 1 or 2 in this test, but never 0)
+ {
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "declared"})
+ endStream = false
+ send(oneHeader)
+ }
+ // Trailers:
+ {
+ endStream = true
+ buf.Reset()
+ writeTrailer(enc)
+ send(trailers)
+ }
+ return nil
+ }
+ }
+ }
+ ct.run()
+}
+
+// headerListSize returns the HTTP2 header list size of h.
+// Per the spec, each field counts as name length + value length + 32
+// (hpack.HeaderField.Size implements that formula).
+// http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE
+// http://httpwg.org/specs/rfc7540.html#MaxHeaderBlock
+func headerListSize(h http.Header) (size uint32) {
+ for k, vv := range h {
+ for _, v := range vv {
+ hf := hpack.HeaderField{Name: k, Value: v}
+ size += hf.Size()
+ }
+ }
+ return size
+}
+
+// padHeaders adds data to an http.Header until headerListSize(h) ==
+// limit. Due to the way header list sizes are calculated, padHeaders
+// cannot add fewer than len("Pad-Headers") + 32 bytes to h, and will
+// call t.Fatal if asked to do so. PadHeaders first reserves enough
+// space for an empty "Pad-Headers" key, then adds as many copies of
+// filler as possible. Any remaining bytes necessary to push the
+// header list size up to limit are added to h["Pad-Headers"].
+func padHeaders(t *testing.T, h http.Header, limit uint64, filler string) {
+ if limit > 0xffffffff {
+ t.Fatalf("padHeaders: refusing to pad to more than 2^32-1 bytes. limit = %v", limit)
+ }
+ // Minimum possible padding: an empty "Pad-Headers" field still
+ // costs its name length plus the spec's 32-byte overhead.
+ hf := hpack.HeaderField{Name: "Pad-Headers", Value: ""}
+ minPadding := uint64(hf.Size())
+ size := uint64(headerListSize(h))
+
+ minlimit := size + minPadding
+ if limit < minlimit {
+ t.Fatalf("padHeaders: limit %v < %v", limit, minlimit)
+ }
+
+ // Use a fixed-width format for name so that fieldSize
+ // remains constant.
+ nameFmt := "Pad-Headers-%06d"
+ hf = hpack.HeaderField{Name: fmt.Sprintf(nameFmt, 1), Value: filler}
+ fieldSize := uint64(hf.Size())
+
+ // Add as many complete filler values as possible, leaving
+ // room for at least one empty "Pad-Headers" key.
+ limit = limit - minPadding
+ for i := 0; size+fieldSize < limit; i++ {
+ name := fmt.Sprintf(nameFmt, i)
+ h.Add(name, filler)
+ size += fieldSize
+ }
+
+ // Add enough bytes to reach limit.
+ remain := limit - size
+ lastValue := strings.Repeat("*", int(remain))
+ h.Add("Pad-Headers", lastValue)
+}
+
+// TestPadHeaders validates the padHeaders test helper itself: for many
+// (limit, filler length) combinations, the padded header set must have
+// exactly the requested header list size.
+func TestPadHeaders(t *testing.T) {
+ check := func(h http.Header, limit uint32, fillerLen int) {
+ if h == nil {
+ h = make(http.Header)
+ }
+ filler := strings.Repeat("f", fillerLen)
+ padHeaders(t, h, uint64(limit), filler)
+ gotSize := headerListSize(h)
+ if gotSize != limit {
+ t.Errorf("Got size = %v; want %v", gotSize, limit)
+ }
+ }
+ // Try all possible combinations for small fillerLen and limit.
+ hf := hpack.HeaderField{Name: "Pad-Headers", Value: ""}
+ minLimit := hf.Size()
+ for limit := minLimit; limit <= 128; limit++ {
+ for fillerLen := 0; uint32(fillerLen) <= limit; fillerLen++ {
+ check(nil, limit, fillerLen)
+ }
+ }
+
+ // Try a few tests with larger limits, plus cumulative
+ // tests. Since these tests are cumulative, tests[i+1].limit
+ // must be >= tests[i].limit + minLimit. See the comment on
+ // padHeaders for more info on why the limit arg has this
+ // restriction.
+ tests := []struct {
+ fillerLen int
+ limit uint32
+ }{
+ {
+ fillerLen: 64,
+ limit: 1024,
+ },
+ {
+ fillerLen: 1024,
+ limit: 1286,
+ },
+ {
+ fillerLen: 256,
+ limit: 2048,
+ },
+ {
+ fillerLen: 1024,
+ limit: 10 * 1024,
+ },
+ {
+ fillerLen: 1023,
+ limit: 11 * 1024,
+ },
+ }
+ // h is reused across iterations for the cumulative case;
+ // check(nil, ...) covers the fresh-header case each time.
+ h := make(http.Header)
+ for _, tc := range tests {
+ check(nil, tc.limit, tc.fillerLen)
+ check(h, tc.limit, tc.fillerLen)
+ }
+}
+
+// TestTransportChecksRequestHeaderListSize verifies the Transport
+// enforces the server's advertised SETTINGS_MAX_HEADER_LIST_SIZE:
+// requests whose encoded headers or trailers stay under the limit
+// succeed, while requests over the limit fail with
+// errRequestHeaderListSize before being sent.
+func TestTransportChecksRequestHeaderListSize(t *testing.T) {
+ st := newServerTester(t,
+ func(w http.ResponseWriter, r *http.Request) {
+ // Consume body & force client to send
+ // trailers before writing response.
+ // ioutil.ReadAll returns non-nil err for
+ // requests that attempt to send greater than
+ // maxHeaderListSize bytes of trailers, since
+ // those requests generate a stream reset.
+ ioutil.ReadAll(r.Body)
+ r.Body.Close()
+ },
+ func(ts *httptest.Server) {
+ ts.Config.MaxHeaderBytes = 16 << 10
+ },
+ optOnlyServer,
+ optQuiet,
+ )
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ // checkRoundTrip performs one request and asserts that the error
+ // (or success + 200 status) matches expectations.
+ checkRoundTrip := func(req *http.Request, wantErr error, desc string) {
+ res, err := tr.RoundTrip(req)
+ if err != wantErr {
+ if res != nil {
+ res.Body.Close()
+ }
+ t.Errorf("%v: RoundTrip err = %v; want %v", desc, err, wantErr)
+ return
+ }
+ if err == nil {
+ if res == nil {
+ t.Errorf("%v: response nil; want non-nil.", desc)
+ return
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ t.Errorf("%v: response status = %v; want %v", desc, res.StatusCode, http.StatusOK)
+ }
+ return
+ }
+ if res != nil {
+ t.Errorf("%v: RoundTrip err = %v but response non-nil", desc, err)
+ }
+ }
+ // headerListSizeForRequest computes the header list size the
+ // Transport would actually encode for req (including the default
+ // pseudo-headers it adds), by encoding and then hpack-decoding.
+ headerListSizeForRequest := func(req *http.Request) (size uint64) {
+ contentLen := actualContentLength(req)
+ trailers, err := commaSeparatedTrailers(req)
+ if err != nil {
+ t.Fatalf("headerListSizeForRequest: %v", err)
+ }
+ cc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff}
+ cc.henc = hpack.NewEncoder(&cc.hbuf)
+ cc.mu.Lock()
+ hdrs, err := cc.encodeHeaders(req, true, trailers, contentLen)
+ cc.mu.Unlock()
+ if err != nil {
+ t.Fatalf("headerListSizeForRequest: %v", err)
+ }
+ hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(hf hpack.HeaderField) {
+ size += uint64(hf.Size())
+ })
+ if len(hdrs) > 0 {
+ if _, err := hpackDec.Write(hdrs); err != nil {
+ t.Fatalf("headerListSizeForRequest: %v", err)
+ }
+ }
+ return size
+ }
+ // Create a new Request for each test, rather than reusing the
+ // same Request, to avoid a race when modifying req.Headers.
+ // See https://github.com/golang/go/issues/21316
+ newRequest := func() *http.Request {
+ // Body must be non-nil to enable writing trailers.
+ body := strings.NewReader("hello")
+ req, err := http.NewRequest("POST", st.ts.URL, body)
+ if err != nil {
+ t.Fatalf("newRequest: NewRequest: %v", err)
+ }
+ return req
+ }
+
+ // Make an arbitrary request to ensure we get the server's
+ // settings frame and initialize peerMaxHeaderListSize.
+ req := newRequest()
+ checkRoundTrip(req, nil, "Initial request")
+
+ // Get the ClientConn associated with the request and validate
+ // peerMaxHeaderListSize.
+ addr := authorityAddr(req.URL.Scheme, req.URL.Host)
+ cc, err := tr.connPool().GetClientConn(req, addr)
+ if err != nil {
+ t.Fatalf("GetClientConn: %v", err)
+ }
+ cc.mu.Lock()
+ peerSize := cc.peerMaxHeaderListSize
+ cc.mu.Unlock()
+ st.scMu.Lock()
+ wantSize := uint64(st.sc.maxHeaderListSize())
+ st.scMu.Unlock()
+ if peerSize != wantSize {
+ t.Errorf("peerMaxHeaderListSize = %v; want %v", peerSize, wantSize)
+ }
+
+ // Sanity check peerSize. (*serverConn) maxHeaderListSize adds
+ // 320 bytes of padding.
+ wantHeaderBytes := uint64(st.ts.Config.MaxHeaderBytes) + 320
+ if peerSize != wantHeaderBytes {
+ t.Errorf("peerMaxHeaderListSize = %v; want %v.", peerSize, wantHeaderBytes)
+ }
+
+ // Pad headers & trailers, but stay under peerSize.
+ req = newRequest()
+ req.Header = make(http.Header)
+ req.Trailer = make(http.Header)
+ filler := strings.Repeat("*", 1024)
+ padHeaders(t, req.Trailer, peerSize, filler)
+ // cc.encodeHeaders adds some default headers to the request,
+ // so we need to leave room for those.
+ defaultBytes := headerListSizeForRequest(req)
+ padHeaders(t, req.Header, peerSize-defaultBytes, filler)
+ checkRoundTrip(req, nil, "Headers & Trailers under limit")
+
+ // Add enough header bytes to push us over peerSize.
+ req = newRequest()
+ req.Header = make(http.Header)
+ padHeaders(t, req.Header, peerSize, filler)
+ checkRoundTrip(req, errRequestHeaderListSize, "Headers over limit")
+
+ // Push trailers over the limit.
+ req = newRequest()
+ req.Trailer = make(http.Header)
+ padHeaders(t, req.Trailer, peerSize+1, filler)
+ checkRoundTrip(req, errRequestHeaderListSize, "Trailers over limit")
+
+ // Send headers with a single large value.
+ req = newRequest()
+ filler = strings.Repeat("*", int(peerSize))
+ req.Header = make(http.Header)
+ req.Header.Set("Big", filler)
+ checkRoundTrip(req, errRequestHeaderListSize, "Single large header")
+
+ // Send trailers with a single large value.
+ req = newRequest()
+ req.Trailer = make(http.Header)
+ req.Trailer.Set("Big", filler)
+ checkRoundTrip(req, errRequestHeaderListSize, "Single large trailer")
+}
+
+// TestTransportChecksResponseHeaderListSize has the fake server send a
+// response whose decoded header list (over 10 MB of duplicate fields,
+// compressed by hpack into one small header block) exceeds the client's
+// limit; RoundTrip must fail with errResponseHeaderListSize.
+func TestTransportChecksResponseHeaderListSize(t *testing.T) {
+ ct := newClientTester(t)
+ ct.client = func() error {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ res, err := ct.tr.RoundTrip(req)
+ if err != errResponseHeaderListSize {
+ if res != nil {
+ res.Body.Close()
+ }
+ // NOTE(review): this failure path ranges over res.Header;
+ // if RoundTrip returned a nil res with an unexpected error,
+ // this would panic rather than report — confirm upstream.
+ size := int64(0)
+ for k, vv := range res.Header {
+ for _, v := range vv {
+ size += int64(len(k)) + int64(len(v)) + 32
+ }
+ }
+ return fmt.Errorf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size)
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return err
+ }
+ switch f := f.(type) {
+ case *HeadersFrame:
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ large := strings.Repeat("a", 1<<10)
+ for i := 0; i < 5042; i++ {
+ enc.WriteField(hpack.HeaderField{Name: large, Value: large})
+ }
+ if size, want := buf.Len(), 6329; size != want {
+ // Note: this number might change if
+ // our hpack implementation
+ // changes. That's fine. This is
+ // just a sanity check that our
+ // response can fit in a single
+ // header block fragment frame.
+ return fmt.Errorf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want)
+ }
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.StreamID,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: buf.Bytes(),
+ })
+ return nil
+ }
+ }
+ }
+ ct.run()
+}
+
// Test that the Transport returns a typed error from Response.Body.Read calls
// when the server sends an error. (here we use a panic, since that should generate
// a stream error, but others like cancel should be similar)
func TestTransportBodyReadErrorType(t *testing.T) {
	doPanic := make(chan bool, 1)
	st := newServerTester(t,
		func(w http.ResponseWriter, r *http.Request) {
			w.(http.Flusher).Flush() // force headers out
			<-doPanic
			panic("boom")
		},
		optOnlyServer,
		optQuiet,
	)
	defer st.Close()

	tr := &Transport{TLSClientConfig: tlsConfigInsecure}
	defer tr.CloseIdleConnections()
	c := &http.Client{Transport: tr}

	res, err := c.Get(st.ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	defer res.Body.Close()
	doPanic <- true
	buf := make([]byte, 100)
	n, err := res.Body.Read(buf)
	// Code 0x2 is INTERNAL_ERROR (RFC 7540 §7), which the server sends
	// after the handler panics mid-response.
	want := StreamError{StreamID: 0x1, Code: 0x2}
	if !reflect.DeepEqual(want, err) {
		t.Errorf("Read = %v, %#v; want error %#v", n, err, want)
	}
}
+
// golang.org/issue/13924
// This used to fail after many iterations, especially with -race:
// go test -v -run=TestTransportDoubleCloseOnWriteError -count=500 -race
//
// The handler closes the client's TCP connection out from under the
// Transport, exercising the double-Close race on write error.
func TestTransportDoubleCloseOnWriteError(t *testing.T) {
	var (
		mu   sync.Mutex
		conn net.Conn // to close if set
	)

	st := newServerTester(t,
		func(w http.ResponseWriter, r *http.Request) {
			mu.Lock()
			defer mu.Unlock()
			if conn != nil {
				conn.Close()
			}
		},
		optOnlyServer,
	)
	defer st.Close()

	tr := &Transport{
		TLSClientConfig: tlsConfigInsecure,
		// Capture the dialed conn so the handler above can close it.
		DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
			tc, err := tls.Dial(network, addr, cfg)
			if err != nil {
				return nil, err
			}
			mu.Lock()
			defer mu.Unlock()
			conn = tc
			return tc, nil
		},
	}
	defer tr.CloseIdleConnections()
	c := &http.Client{Transport: tr}
	// The Get's error (if any) is irrelevant; the test passes if nothing
	// panics or races.
	c.Get(st.ts.URL)
}
+
// Test that the http1 Transport.DisableKeepAlives option is respected
// and connections are closed as soon as idle.
// See golang.org/issue/14008
func TestTransportDisableKeepAlives(t *testing.T) {
	st := newServerTester(t,
		func(w http.ResponseWriter, r *http.Request) {
			io.WriteString(w, "hi")
		},
		optOnlyServer,
	)
	defer st.Close()

	connClosed := make(chan struct{}) // closed on tls.Conn.Close
	tr := &Transport{
		t1: &http.Transport{
			DisableKeepAlives: true,
		},
		TLSClientConfig: tlsConfigInsecure,
		// Wrap the conn so we can observe when the Transport closes it.
		DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
			tc, err := tls.Dial(network, addr, cfg)
			if err != nil {
				return nil, err
			}
			return &noteCloseConn{Conn: tc, closefn: func() { close(connClosed) }}, nil
		},
	}
	c := &http.Client{Transport: tr}
	res, err := c.Get(st.ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ioutil.ReadAll(res.Body); err != nil {
		t.Fatal(err)
	}
	defer res.Body.Close()

	// Once the sole request is done, the idle conn must be closed promptly.
	select {
	case <-connClosed:
	case <-time.After(1 * time.Second):
		t.Errorf("timeout")
	}
}
+
// Test concurrent requests with Transport.DisableKeepAlives. We can share connections,
// but when things are totally idle, it still needs to close.
func TestTransportDisableKeepAlives_Concurrency(t *testing.T) {
	const D = 25 * time.Millisecond
	st := newServerTester(t,
		func(w http.ResponseWriter, r *http.Request) {
			time.Sleep(D)
			io.WriteString(w, "hi")
		},
		optOnlyServer,
	)
	defer st.Close()

	var dials int32         // number of TLS dials, updated atomically
	var conns sync.WaitGroup // tracks open conns; Done on each Close
	tr := &Transport{
		t1: &http.Transport{
			DisableKeepAlives: true,
		},
		TLSClientConfig: tlsConfigInsecure,
		DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
			tc, err := tls.Dial(network, addr, cfg)
			if err != nil {
				return nil, err
			}
			atomic.AddInt32(&dials, 1)
			conns.Add(1)
			return &noteCloseConn{Conn: tc, closefn: func() { conns.Done() }}, nil
		},
	}
	c := &http.Client{Transport: tr}
	var reqs sync.WaitGroup
	const N = 20
	for i := 0; i < N; i++ {
		reqs.Add(1)
		if i == N-1 {
			// For the final request, try to make all the
			// others close. This isn't verified in the
			// count, other than the Log statement, since
			// it's so timing dependent. This test is
			// really to make sure we don't interrupt a
			// valid request.
			time.Sleep(D * 2)
		}
		go func() {
			defer reqs.Done()
			res, err := c.Get(st.ts.URL)
			if err != nil {
				t.Error(err)
				return
			}
			if _, err := ioutil.ReadAll(res.Body); err != nil {
				t.Error(err)
				return
			}
			res.Body.Close()
		}()
	}
	reqs.Wait()
	// If DisableKeepAlives is honored, every dialed conn eventually closes.
	conns.Wait()
	t.Logf("did %d dials, %d requests", atomic.LoadInt32(&dials), N)
}
+
// noteCloseConn wraps a net.Conn and invokes closefn exactly once on the
// first Close call, letting tests observe connection teardown.
type noteCloseConn struct {
	net.Conn
	onceClose sync.Once // guards closefn
	closefn   func()    // called once, before closing the underlying Conn
}

// Close runs the notification callback (once) and then closes the
// underlying connection.
func (c *noteCloseConn) Close() error {
	c.onceClose.Do(c.closefn)
	return c.Conn.Close()
}
+
// isTimeout reports whether err is (or wraps, via *url.Error) a network
// timeout. A nil error is not a timeout.
func isTimeout(err error) bool {
	switch err := err.(type) {
	case nil:
		return false
	case *url.Error:
		// Unwrap the error the Client wraps around transport errors.
		return isTimeout(err.Err)
	case net.Error:
		return err.Timeout()
	}
	return false
}
+
// Test that the http1 Transport.ResponseHeaderTimeout option and cancel is sent.
func TestTransportResponseHeaderTimeout_NoBody(t *testing.T) {
	testTransportResponseHeaderTimeout(t, false)
}
func TestTransportResponseHeaderTimeout_Body(t *testing.T) {
	testTransportResponseHeaderTimeout(t, true)
}

// testTransportResponseHeaderTimeout drives a request against a server that
// never sends response headers, and asserts that the client times out and,
// when a body is supplied, that the body was fully consumed first.
func testTransportResponseHeaderTimeout(t *testing.T, body bool) {
	ct := newClientTester(t)
	ct.tr.t1 = &http.Transport{
		ResponseHeaderTimeout: 5 * time.Millisecond,
	}
	ct.client = func() error {
		c := &http.Client{Transport: ct.tr}
		var err error
		var n int64 // bytes read from the request body by the Transport
		const bodySize = 4 << 20
		if body {
			_, err = c.Post("https://dummy.tld/", "text/foo", io.LimitReader(countingReader{&n}, bodySize))
		} else {
			_, err = c.Get("https://dummy.tld/")
		}
		if !isTimeout(err) {
			t.Errorf("client expected timeout error; got %#v", err)
		}
		if body && n != bodySize {
			t.Errorf("only read %d bytes of body; want %d", n, bodySize)
		}
		return nil
	}
	ct.server = func() error {
		ct.greet()
		for {
			f, err := ct.fr.ReadFrame()
			if err != nil {
				t.Logf("ReadFrame: %v", err)
				return nil
			}
			switch f := f.(type) {
			case *DataFrame:
				// Keep granting flow control (conn + stream) so the
				// client can finish uploading the whole body.
				dataLen := len(f.Data())
				if dataLen > 0 {
					if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil {
						return err
					}
					if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil {
						return err
					}
				}
			case *RSTStreamFrame:
				// The timeout should surface to the peer as a CANCEL.
				if f.StreamID == 1 && f.ErrCode == ErrCodeCancel {
					return nil
				}
			}
		}
	}
	ct.run()
}
+
// TestTransportDisableCompression verifies that when the underlying http1
// Transport sets DisableCompression, no Accept-Encoding header is added:
// the server must see only the default User-Agent.
func TestTransportDisableCompression(t *testing.T) {
	const body = "sup"
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		want := http.Header{
			"User-Agent": []string{"Go-http-client/2.0"},
		}
		if !reflect.DeepEqual(r.Header, want) {
			t.Errorf("request headers = %v; want %v", r.Header, want)
		}
	}, optOnlyServer)
	defer st.Close()

	tr := &Transport{
		TLSClientConfig: tlsConfigInsecure,
		t1: &http.Transport{
			DisableCompression: true,
		},
	}
	defer tr.CloseIdleConnections()

	req, err := http.NewRequest("GET", st.ts.URL, nil)
	if err != nil {
		t.Fatal(err)
	}
	res, err := tr.RoundTrip(req)
	if err != nil {
		t.Fatal(err)
	}
	defer res.Body.Close()
}
+
// RFC 7540 section 8.1.2.2
//
// TestTransportRejectsConnHeaders checks which connection-specific request
// headers the Transport rejects outright, strips silently, or allows
// through. The echo handler reports the header names the server received.
func TestTransportRejectsConnHeaders(t *testing.T) {
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		var got []string
		for k := range r.Header {
			got = append(got, k)
		}
		sort.Strings(got)
		w.Header().Set("Got-Header", strings.Join(got, ","))
	}, optOnlyServer)
	defer st.Close()

	tr := &Transport{TLSClientConfig: tlsConfigInsecure}
	defer tr.CloseIdleConnections()

	tests := []struct {
		key   string
		value []string
		want  string // "ERROR: ..." or the comma-joined header names seen
	}{
		{
			key:   "Upgrade",
			value: []string{"anything"},
			want:  "ERROR: http2: invalid Upgrade request header: [\"anything\"]",
		},
		{
			key:   "Connection",
			value: []string{"foo"},
			want:  "ERROR: http2: invalid Connection request header: [\"foo\"]",
		},
		{
			key:   "Connection",
			value: []string{"close"},
			want:  "Accept-Encoding,User-Agent",
		},
		{
			key:   "Connection",
			value: []string{"close", "something-else"},
			want:  "ERROR: http2: invalid Connection request header: [\"close\" \"something-else\"]",
		},
		{
			key:   "Connection",
			value: []string{"keep-alive"},
			want:  "Accept-Encoding,User-Agent",
		},
		{
			key:   "Proxy-Connection", // just deleted and ignored
			value: []string{"keep-alive"},
			want:  "Accept-Encoding,User-Agent",
		},
		{
			key:   "Transfer-Encoding",
			value: []string{""},
			want:  "Accept-Encoding,User-Agent",
		},
		{
			key:   "Transfer-Encoding",
			value: []string{"foo"},
			want:  "ERROR: http2: invalid Transfer-Encoding request header: [\"foo\"]",
		},
		{
			key:   "Transfer-Encoding",
			value: []string{"chunked"},
			want:  "Accept-Encoding,User-Agent",
		},
		{
			key:   "Transfer-Encoding",
			value: []string{"chunked", "other"},
			want:  "ERROR: http2: invalid Transfer-Encoding request header: [\"chunked\" \"other\"]",
		},
		{
			key:   "Content-Length",
			value: []string{"123"},
			want:  "Accept-Encoding,User-Agent",
		},
		{
			key:   "Keep-Alive",
			value: []string{"doop"},
			want:  "Accept-Encoding,User-Agent",
		},
	}

	for _, tt := range tests {
		req, _ := http.NewRequest("GET", st.ts.URL, nil)
		// Set directly to bypass Header.Set's canonicalization/validation.
		req.Header[tt.key] = tt.value
		res, err := tr.RoundTrip(req)
		var got string
		if err != nil {
			got = fmt.Sprintf("ERROR: %v", err)
		} else {
			got = res.Header.Get("Got-Header")
			res.Body.Close()
		}
		if got != tt.want {
			t.Errorf("For key %q, value %q, got = %q; want %q", tt.key, tt.value, got, tt.want)
		}
	}
}
+
// golang.org/issue/14048
//
// TestTransportFailsOnInvalidHeaders verifies that RoundTrip rejects
// requests carrying invalid header field names (spaces, non-ASCII) or
// invalid header values (control bytes), while permitting non-ASCII values.
func TestTransportFailsOnInvalidHeaders(t *testing.T) {
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		var got []string
		for k := range r.Header {
			got = append(got, k)
		}
		sort.Strings(got)
		w.Header().Set("Got-Header", strings.Join(got, ","))
	}, optOnlyServer)
	defer st.Close()

	tests := [...]struct {
		h       http.Header
		wantErr string // substring of the expected error; "" means success
	}{
		0: {
			h:       http.Header{"with space": {"foo"}},
			wantErr: `invalid HTTP header name "with space"`,
		},
		1: {
			h:       http.Header{"name": {"Брэд"}},
			wantErr: "", // okay
		},
		2: {
			h:       http.Header{"имя": {"Brad"}},
			wantErr: `invalid HTTP header name "имя"`,
		},
		3: {
			h:       http.Header{"foo": {"foo\x01bar"}},
			wantErr: `invalid HTTP header value "foo\x01bar" for header "foo"`,
		},
	}

	tr := &Transport{TLSClientConfig: tlsConfigInsecure}
	defer tr.CloseIdleConnections()

	for i, tt := range tests {
		req, _ := http.NewRequest("GET", st.ts.URL, nil)
		req.Header = tt.h
		res, err := tr.RoundTrip(req)
		var bad bool
		if tt.wantErr == "" {
			if err != nil {
				bad = true
				t.Errorf("case %d: error = %v; want no error", i, err)
			}
		} else {
			if !strings.Contains(fmt.Sprint(err), tt.wantErr) {
				bad = true
				t.Errorf("case %d: error = %v; want error %q", i, err, tt.wantErr)
			}
		}
		if err == nil {
			if bad {
				// Extra context for the failure: what the server saw.
				t.Logf("case %d: server got headers %q", i, res.Header.Get("Got-Header"))
			}
			res.Body.Close()
		}
	}
}
+
// Tests that gzipReader doesn't crash on a second Read call following
// the first Read call's gzip.NewReader returning an error.
func TestGzipReader_DoubleReadCrash(t *testing.T) {
	gz := &gzipReader{
		// Plain ASCII digits are not a valid gzip stream, so the first
		// Read fails while constructing the gzip.Reader.
		body: ioutil.NopCloser(strings.NewReader("0123456789")),
	}
	var buf [1]byte
	n, err1 := gz.Read(buf[:])
	if n != 0 || !strings.Contains(fmt.Sprint(err1), "invalid header") {
		t.Fatalf("Read = %v, %v; want 0, invalid header", n, err1)
	}
	// The second Read must return the same sticky error, not panic.
	n, err2 := gz.Read(buf[:])
	if n != 0 || err2 != err1 {
		t.Fatalf("second Read = %v, %v; want 0, %v", n, err2, err1)
	}
}
+
// TestTransportNewTLSConfig checks how Transport.newTLSConfig derives the
// per-host TLS configuration: ServerName defaulting, user overrides, and
// h2 ALPN (NextProtoTLS) insertion without duplication.
func TestTransportNewTLSConfig(t *testing.T) {
	tests := [...]struct {
		conf *tls.Config // Transport.TLSClientConfig input (may be nil)
		host string      // host passed to newTLSConfig
		want *tls.Config // expected derived config
	}{
		// Normal case.
		0: {
			conf: nil,
			host: "foo.com",
			want: &tls.Config{
				ServerName: "foo.com",
				NextProtos: []string{NextProtoTLS},
			},
		},

		// User-provided name (bar.com) takes precedence:
		1: {
			conf: &tls.Config{
				ServerName: "bar.com",
			},
			host: "foo.com",
			want: &tls.Config{
				ServerName: "bar.com",
				NextProtos: []string{NextProtoTLS},
			},
		},

		// NextProto is prepended:
		2: {
			conf: &tls.Config{
				NextProtos: []string{"foo", "bar"},
			},
			host: "example.com",
			want: &tls.Config{
				ServerName: "example.com",
				NextProtos: []string{NextProtoTLS, "foo", "bar"},
			},
		},

		// NextProto is not duplicated:
		3: {
			conf: &tls.Config{
				NextProtos: []string{"foo", "bar", NextProtoTLS},
			},
			host: "example.com",
			want: &tls.Config{
				ServerName: "example.com",
				NextProtos: []string{"foo", "bar", NextProtoTLS},
			},
		},
	}
	for i, tt := range tests {
		// Ignore the session ticket keys part, which ends up populating
		// unexported fields in the Config:
		if tt.conf != nil {
			tt.conf.SessionTicketsDisabled = true
		}

		tr := &Transport{TLSClientConfig: tt.conf}
		got := tr.newTLSConfig(tt.host)

		// Undo the knob set above so DeepEqual compares only the
		// fields this test cares about.
		got.SessionTicketsDisabled = false

		if !reflect.DeepEqual(got, tt.want) {
			t.Errorf("%d. got %#v; want %#v", i, got, tt.want)
		}
	}
}
+
// The Google GFE responds to HEAD requests with a HEADERS frame
// without END_STREAM, followed by a 0-length DATA frame with
// END_STREAM. Make sure we don't get confused by that. (We did.)
func TestTransportReadHeadResponse(t *testing.T) {
	ct := newClientTester(t)
	clientDone := make(chan struct{})
	ct.client = func() error {
		defer close(clientDone)
		req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil)
		res, err := ct.tr.RoundTrip(req)
		if err != nil {
			return err
		}
		// Content-Length is advertised but, per HEAD semantics, no body
		// bytes should be readable.
		if res.ContentLength != 123 {
			return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength)
		}
		slurp, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("ReadAll: %v", err)
		}
		if len(slurp) > 0 {
			return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp)
		}
		return nil
	}
	ct.server = func() error {
		ct.greet()
		for {
			f, err := ct.fr.ReadFrame()
			if err != nil {
				t.Logf("ReadFrame: %v", err)
				return nil
			}
			hf, ok := f.(*HeadersFrame)
			if !ok {
				continue
			}
			var buf bytes.Buffer
			enc := hpack.NewEncoder(&buf)
			enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
			enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"})
			ct.fr.WriteHeaders(HeadersFrameParam{
				StreamID:      hf.StreamID,
				EndHeaders:    true,
				EndStream:     false, // as the GFE does
				BlockFragment: buf.Bytes(),
			})
			// Empty DATA frame carrying the END_STREAM flag.
			ct.fr.WriteData(hf.StreamID, true, nil)

			<-clientDone
			return nil
		}
	}
	ct.run()
}
+
// TestTransportReadHeadResponseWithBody is like TestTransportReadHeadResponse,
// but the server (incorrectly) sends actual body bytes for the HEAD request.
// The client must still report the advertised Content-Length and read an
// empty body.
func TestTransportReadHeadResponseWithBody(t *testing.T) {
	response := "redirecting to /elsewhere"
	ct := newClientTester(t)
	clientDone := make(chan struct{})
	ct.client = func() error {
		defer close(clientDone)
		req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil)
		res, err := ct.tr.RoundTrip(req)
		if err != nil {
			return err
		}
		if res.ContentLength != int64(len(response)) {
			return fmt.Errorf("Content-Length = %d; want %d", res.ContentLength, len(response))
		}
		slurp, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("ReadAll: %v", err)
		}
		if len(slurp) > 0 {
			return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp)
		}
		return nil
	}
	ct.server = func() error {
		ct.greet()
		for {
			f, err := ct.fr.ReadFrame()
			if err != nil {
				t.Logf("ReadFrame: %v", err)
				return nil
			}
			hf, ok := f.(*HeadersFrame)
			if !ok {
				continue
			}
			var buf bytes.Buffer
			enc := hpack.NewEncoder(&buf)
			enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
			enc.WriteField(hpack.HeaderField{Name: "content-length", Value: strconv.Itoa(len(response))})
			ct.fr.WriteHeaders(HeadersFrameParam{
				StreamID:      hf.StreamID,
				EndHeaders:    true,
				EndStream:     false,
				BlockFragment: buf.Bytes(),
			})
			// Misbehaving server: real payload on a HEAD response.
			ct.fr.WriteData(hf.StreamID, true, []byte(response))

			<-clientDone
			return nil
		}
	}
	ct.run()
}
+
// neverEnding is an io.Reader that yields an infinite stream of its
// underlying byte value; Read always fills p completely and never errors.
type neverEnding byte

// Read fills p with the receiver's byte and reports success.
func (b neverEnding) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = byte(b)
	}
	return len(p), nil
}
+
// golang.org/issue/15425: test that a handler closing the request
// body doesn't terminate the stream to the peer. (It just stops
// readability from the handler's side, and eventually the client
// runs out of flow control tokens)
func TestTransportHandlerBodyClose(t *testing.T) {
	const bodySize = 10 << 20
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		r.Body.Close()
		io.Copy(w, io.LimitReader(neverEnding('A'), bodySize))
	}, optOnlyServer)
	defer st.Close()

	tr := &Transport{TLSClientConfig: tlsConfigInsecure}
	defer tr.CloseIdleConnections()

	// Baseline goroutine count for the leak check at the bottom.
	g0 := runtime.NumGoroutine()

	const numReq = 10
	for i := 0; i < numReq; i++ {
		req, err := http.NewRequest("POST", st.ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)})
		if err != nil {
			t.Fatal(err)
		}
		res, err := tr.RoundTrip(req)
		if err != nil {
			t.Fatal(err)
		}
		n, err := io.Copy(ioutil.Discard, res.Body)
		res.Body.Close()
		if n != bodySize || err != nil {
			t.Fatalf("req#%d: Copy = %d, %v; want %d, nil", i, n, err, bodySize)
		}
	}
	tr.CloseIdleConnections()

	// Rough leak detector: goroutine growth should be well under one
	// per request once all conns are closed.
	gd := runtime.NumGoroutine() - g0
	if gd > numReq/2 {
		t.Errorf("appeared to leak goroutines")
	}
}
+
// https://golang.org/issue/15930
//
// TestTransportFlowControl streams a large response through a slow reader
// and checks that the amount of data in flight never exceeds the
// transport's per-stream flow-control window.
func TestTransportFlowControl(t *testing.T) {
	const bufLen = 64 << 10
	var total int64 = 100 << 20 // 100MB
	if testing.Short() {
		total = 10 << 20
	}

	var wrote int64 // updated atomically
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		b := make([]byte, bufLen)
		for wrote < total {
			n, err := w.Write(b)
			atomic.AddInt64(&wrote, int64(n))
			if err != nil {
				t.Errorf("ResponseWriter.Write error: %v", err)
				break
			}
			w.(http.Flusher).Flush()
		}
	}, optOnlyServer)

	tr := &Transport{TLSClientConfig: tlsConfigInsecure}
	defer tr.CloseIdleConnections()
	req, err := http.NewRequest("GET", st.ts.URL, nil)
	if err != nil {
		t.Fatal("NewRequest error:", err)
	}
	resp, err := tr.RoundTrip(req)
	if err != nil {
		t.Fatal("RoundTrip error:", err)
	}
	defer resp.Body.Close()

	var read int64
	b := make([]byte, bufLen)
	for {
		n, err := resp.Body.Read(b)
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal("Read error:", err)
		}
		read += int64(n)

		// The gap between bytes written and bytes read must stay
		// within the stream flow-control window.
		const max = transportDefaultStreamFlow
		if w := atomic.LoadInt64(&wrote); -max > read-w || read-w > max {
			t.Fatalf("Too much data inflight: server wrote %v bytes but client only received %v", w, read)
		}

		// Let the server get ahead of the client.
		time.Sleep(1 * time.Millisecond)
	}
}
+
// golang.org/issue/14627 -- if the server sends a GOAWAY frame, make
// the Transport remember it and return it back to users (via
// RoundTrip or request body reads) if needed (e.g. if the server
// proceeds to close the TCP connection before the client gets its
// response)
func TestTransportUsesGoAwayDebugError_RoundTrip(t *testing.T) {
	testTransportUsesGoAwayDebugError(t, false)
}

func TestTransportUsesGoAwayDebugError_Body(t *testing.T) {
	testTransportUsesGoAwayDebugError(t, true)
}

// testTransportUsesGoAwayDebugError runs the GOAWAY scenario either against
// RoundTrip itself (failMidBody=false) or against a body read after headers
// have already arrived (failMidBody=true).
func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) {
	ct := newClientTester(t)
	clientDone := make(chan struct{})

	const goAwayErrCode = ErrCodeHTTP11Required // arbitrary
	const goAwayDebugData = "some debug data"

	ct.client = func() error {
		defer close(clientDone)
		req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
		res, err := ct.tr.RoundTrip(req)
		if failMidBody {
			if err != nil {
				return fmt.Errorf("unexpected client RoundTrip error: %v", err)
			}
			_, err = io.Copy(ioutil.Discard, res.Body)
			res.Body.Close()
		}
		// Expect debug data from the first GOAWAY and the error code
		// from the second (see server below).
		want := GoAwayError{
			LastStreamID: 5,
			ErrCode:      goAwayErrCode,
			DebugData:    goAwayDebugData,
		}
		if !reflect.DeepEqual(err, want) {
			t.Errorf("RoundTrip error = %T: %#v, want %T (%#v)", err, err, want, want)
		}
		return nil
	}
	ct.server = func() error {
		ct.greet()
		for {
			f, err := ct.fr.ReadFrame()
			if err != nil {
				t.Logf("ReadFrame: %v", err)
				return nil
			}
			hf, ok := f.(*HeadersFrame)
			if !ok {
				continue
			}
			if failMidBody {
				// Send response headers so RoundTrip succeeds and
				// the failure happens during the body read instead.
				var buf bytes.Buffer
				enc := hpack.NewEncoder(&buf)
				enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
				enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"})
				ct.fr.WriteHeaders(HeadersFrameParam{
					StreamID:      hf.StreamID,
					EndHeaders:    true,
					EndStream:     false,
					BlockFragment: buf.Bytes(),
				})
			}
			// Write two GOAWAY frames, to test that the Transport takes
			// the interesting parts of both.
			ct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData))
			ct.fr.WriteGoAway(5, goAwayErrCode, nil)
			ct.sc.(*net.TCPConn).CloseWrite()
			<-clientDone
			return nil
		}
	}
	ct.run()
}
+
// testTransportReturnsUnusedFlowControl checks that when a client closes a
// response body early, the Transport sends RST_STREAM(CANCEL) and refunds
// the unread bytes' flow-control via a connection-level WINDOW_UPDATE.
func testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) {
	ct := newClientTester(t)

	clientClosed := make(chan struct{})
	serverWroteFirstByte := make(chan struct{})

	ct.client = func() error {
		req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
		res, err := ct.tr.RoundTrip(req)
		if err != nil {
			return err
		}
		<-serverWroteFirstByte

		if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 {
			return fmt.Errorf("body read = %v, %v; want 1, nil", n, err)
		}
		res.Body.Close() // leaving 4999 bytes unread
		close(clientClosed)

		return nil
	}
	ct.server = func() error {
		ct.greet()

		// Skip frames until the request HEADERS arrives.
		var hf *HeadersFrame
		for {
			f, err := ct.fr.ReadFrame()
			if err != nil {
				return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
			}
			switch f.(type) {
			case *WindowUpdateFrame, *SettingsFrame:
				continue
			}
			var ok bool
			hf, ok = f.(*HeadersFrame)
			if !ok {
				return fmt.Errorf("Got %T; want HeadersFrame", f)
			}
			break
		}

		var buf bytes.Buffer
		enc := hpack.NewEncoder(&buf)
		enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
		enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"})
		ct.fr.WriteHeaders(HeadersFrameParam{
			StreamID:      hf.StreamID,
			EndHeaders:    true,
			EndStream:     false,
			BlockFragment: buf.Bytes(),
		})

		// Two cases:
		// - Send one DATA frame with 5000 bytes.
		// - Send two DATA frames with 1 and 4999 bytes each.
		//
		// In both cases, the client should consume one byte of data,
		// refund that byte, then refund the following 4999 bytes.
		//
		// In the second case, the server waits for the client connection to
		// close before sending the second DATA frame. This tests the case
		// where the client receives a DATA frame after it has reset the stream.
		if oneDataFrame {
			ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 5000))
			close(serverWroteFirstByte)
			<-clientClosed
		} else {
			ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 1))
			close(serverWroteFirstByte)
			<-clientClosed
			ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 4999))
		}

		// Expect, in order: RST_STREAM(CANCEL), then a conn-level
		// WINDOW_UPDATE refunding the 4999 unread bytes.
		waitingFor := "RSTStreamFrame"
		for {
			f, err := ct.fr.ReadFrame()
			if err != nil {
				return fmt.Errorf("ReadFrame while waiting for %s: %v", waitingFor, err)
			}
			if _, ok := f.(*SettingsFrame); ok {
				continue
			}
			switch waitingFor {
			case "RSTStreamFrame":
				if rf, ok := f.(*RSTStreamFrame); !ok || rf.ErrCode != ErrCodeCancel {
					return fmt.Errorf("Expected a RSTStreamFrame with code cancel; got %v", summarizeFrame(f))
				}
				waitingFor = "WindowUpdateFrame"
			case "WindowUpdateFrame":
				if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != 4999 {
					return fmt.Errorf("Expected WindowUpdateFrame for 4999 bytes; got %v", summarizeFrame(f))
				}
				return nil
			}
		}
	}
	ct.run()
}

// See golang.org/issue/16481
func TestTransportReturnsUnusedFlowControlSingleWrite(t *testing.T) {
	testTransportReturnsUnusedFlowControl(t, true)
}

// See golang.org/issue/20469
func TestTransportReturnsUnusedFlowControlMultipleWrites(t *testing.T) {
	testTransportReturnsUnusedFlowControl(t, false)
}
+
// Issue 16612: adjust flow control on open streams when transport
// receives SETTINGS with INITIAL_WINDOW_SIZE from server.
func TestTransportAdjustsFlowControl(t *testing.T) {
	ct := newClientTester(t)
	clientDone := make(chan struct{})

	const bodySize = 1 << 20

	ct.client = func() error {
		defer ct.cc.(*net.TCPConn).CloseWrite()
		defer close(clientDone)

		req, _ := http.NewRequest("POST", "https://dummy.tld/", struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)})
		res, err := ct.tr.RoundTrip(req)
		if err != nil {
			return err
		}
		res.Body.Close()
		return nil
	}
	ct.server = func() error {
		// Deliberately skip ct.greet(): this server sends no initial
		// SETTINGS, so the client starts with the default window and
		// must stall until the mid-stream SETTINGS below arrives.
		_, err := io.ReadFull(ct.sc, make([]byte, len(ClientPreface)))
		if err != nil {
			return fmt.Errorf("reading client preface: %v", err)
		}

		var gotBytes int64
		var sentSettings bool
		for {
			f, err := ct.fr.ReadFrame()
			if err != nil {
				select {
				case <-clientDone:
					return nil
				default:
					return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
				}
			}
			switch f := f.(type) {
			case *DataFrame:
				gotBytes += int64(len(f.Data()))
				// After we've got half the client's
				// initial flow control window's worth
				// of request body data, give it just
				// enough flow control to finish.
				if gotBytes >= initialWindowSize/2 && !sentSettings {
					sentSettings = true

					ct.fr.WriteSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize})
					ct.fr.WriteWindowUpdate(0, bodySize)
					ct.fr.WriteSettingsAck()
				}

				if f.StreamEnded() {
					var buf bytes.Buffer
					enc := hpack.NewEncoder(&buf)
					enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
					ct.fr.WriteHeaders(HeadersFrameParam{
						StreamID:      f.StreamID,
						EndHeaders:    true,
						EndStream:     true,
						BlockFragment: buf.Bytes(),
					})
				}
			}
		}
	}
	ct.run()
}
+
// See golang.org/issue/16556
//
// TestTransportReturnsDataPaddingFlowControl checks that the Transport
// immediately refunds flow control consumed by DATA frame padding (the
// pad bytes plus the one-byte pad length), at both conn and stream level.
func TestTransportReturnsDataPaddingFlowControl(t *testing.T) {
	ct := newClientTester(t)

	unblockClient := make(chan bool, 1)

	ct.client = func() error {
		req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
		res, err := ct.tr.RoundTrip(req)
		if err != nil {
			return err
		}
		defer res.Body.Close()
		// Note: the body is intentionally never read; only the padding
		// should be refunded while data sits unconsumed.
		<-unblockClient
		return nil
	}
	ct.server = func() error {
		ct.greet()

		var hf *HeadersFrame
		for {
			f, err := ct.fr.ReadFrame()
			if err != nil {
				return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
			}
			switch f.(type) {
			case *WindowUpdateFrame, *SettingsFrame:
				continue
			}
			var ok bool
			hf, ok = f.(*HeadersFrame)
			if !ok {
				return fmt.Errorf("Got %T; want HeadersFrame", f)
			}
			break
		}

		var buf bytes.Buffer
		enc := hpack.NewEncoder(&buf)
		enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
		enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"})
		ct.fr.WriteHeaders(HeadersFrameParam{
			StreamID:      hf.StreamID,
			EndHeaders:    true,
			EndStream:     false,
			BlockFragment: buf.Bytes(),
		})
		pad := make([]byte, 5)
		ct.fr.WriteDataPadded(hf.StreamID, false, make([]byte, 5000), pad) // without ending stream

		f, err := ct.readNonSettingsFrame()
		if err != nil {
			return fmt.Errorf("ReadFrame while waiting for first WindowUpdateFrame: %v", err)
		}
		wantBack := uint32(len(pad)) + 1 // one byte for the length of the padding
		if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID != 0 {
			return fmt.Errorf("Expected conn WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f))
		}

		f, err = ct.readNonSettingsFrame()
		if err != nil {
			return fmt.Errorf("ReadFrame while waiting for second WindowUpdateFrame: %v", err)
		}
		if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID == 0 {
			return fmt.Errorf("Expected stream WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f))
		}
		unblockClient <- true
		return nil
	}
	ct.run()
}
+
// golang.org/issue/16572 -- RoundTrip shouldn't hang when it gets a
// StreamError as a result of the response HEADERS
func TestTransportReturnsErrorOnBadResponseHeaders(t *testing.T) {
	ct := newClientTester(t)

	ct.client = func() error {
		req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
		res, err := ct.tr.RoundTrip(req)
		if err == nil {
			res.Body.Close()
			return errors.New("unexpected successful GET")
		}
		// The leading space makes the header name invalid; expect a
		// typed StreamError carrying that name.
		want := StreamError{1, ErrCodeProtocol, headerFieldNameError(" content-type")}
		if !reflect.DeepEqual(want, err) {
			t.Errorf("RoundTrip error = %#v; want %#v", err, want)
		}
		return nil
	}
	ct.server = func() error {
		ct.greet()

		hf, err := ct.firstHeaders()
		if err != nil {
			return err
		}

		var buf bytes.Buffer
		enc := hpack.NewEncoder(&buf)
		enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
		enc.WriteField(hpack.HeaderField{Name: " content-type", Value: "bogus"}) // bogus spaces
		ct.fr.WriteHeaders(HeadersFrameParam{
			StreamID:      hf.StreamID,
			EndHeaders:    true,
			EndStream:     false,
			BlockFragment: buf.Bytes(),
		})

		// The client must reset the stream with PROTOCOL_ERROR.
		for {
			fr, err := ct.readFrame()
			if err != nil {
				return fmt.Errorf("error waiting for RST_STREAM from client: %v", err)
			}
			if _, ok := fr.(*SettingsFrame); ok {
				continue
			}
			if rst, ok := fr.(*RSTStreamFrame); !ok || rst.StreamID != 1 || rst.ErrCode != ErrCodeProtocol {
				t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr))
			}
			break
		}

		return nil
	}
	ct.run()
}
+
// byteAndEOFReader is an io.Reader that returns its one underlying byte
// together with io.EOF in a single Read call (n=1, err=io.EOF).
type byteAndEOFReader byte

// Read writes the receiver's byte into p[0] and reports io.EOF in the
// same call. It panics if called with an empty buffer.
func (b byteAndEOFReader) Read(p []byte) (n int, err error) {
	if len(p) == 0 {
		panic("unexpected useless call")
	}
	p[0] = byte(b)
	return 1, io.EOF
}
+
// Issue 16788: the Transport had a regression where it started
// sending a spurious DATA frame with a duplicate END_STREAM bit after
// the request body writer goroutine had already read an EOF from the
// Request.Body and included the END_STREAM on a data-carrying DATA
// frame.
//
// Notably, to trigger this, the requests need to use a Request.Body
// which returns (non-0, io.EOF) and also needs to set the ContentLength
// explicitly.
func TestTransportBodyDoubleEndStream(t *testing.T) {
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		// Nothing.
	}, optOnlyServer)
	defer st.Close()

	tr := &Transport{TLSClientConfig: tlsConfigInsecure}
	defer tr.CloseIdleConnections()

	// Two iterations so the second request rides the same conn; the
	// spurious duplicate END_STREAM would have corrupted it.
	for i := 0; i < 2; i++ {
		req, _ := http.NewRequest("POST", st.ts.URL, byteAndEOFReader('a'))
		req.ContentLength = 1
		res, err := tr.RoundTrip(req)
		if err != nil {
			t.Fatalf("failure on req %d: %v", i+1, err)
		}
		defer res.Body.Close()
	}
}
+
// golang.org/issue/16847, golang.org/issue/19103
//
// TestTransportRequestPathPseudo checks the :path pseudo-header the
// Transport derives from various Request.URL shapes (Path, Opaque,
// CONNECT), by encoding the headers and decoding them back with hpack.
func TestTransportRequestPathPseudo(t *testing.T) {
	type result struct {
		path string // decoded :path value, if any
		err  string // encodeHeaders error, if any
	}
	tests := []struct {
		req  *http.Request
		want result
	}{
		0: {
			req: &http.Request{
				Method: "GET",
				URL: &url.URL{
					Host: "foo.com",
					Path: "/foo",
				},
			},
			want: result{path: "/foo"},
		},
		// In Go 1.7, we accepted paths of "//foo".
		// In Go 1.8, we rejected it (issue 16847).
		// In Go 1.9, we accepted it again (issue 19103).
		1: {
			req: &http.Request{
				Method: "GET",
				URL: &url.URL{
					Host: "foo.com",
					Path: "//foo",
				},
			},
			want: result{path: "//foo"},
		},

		// Opaque with //$Matching_Hostname/path
		2: {
			req: &http.Request{
				Method: "GET",
				URL: &url.URL{
					Scheme: "https",
					Opaque: "//foo.com/path",
					Host:   "foo.com",
					Path:   "/ignored",
				},
			},
			want: result{path: "/path"},
		},

		// Opaque with some other Request.Host instead:
		3: {
			req: &http.Request{
				Method: "GET",
				Host:   "bar.com",
				URL: &url.URL{
					Scheme: "https",
					Opaque: "//bar.com/path",
					Host:   "foo.com",
					Path:   "/ignored",
				},
			},
			want: result{path: "/path"},
		},

		// Opaque without the leading "//":
		4: {
			req: &http.Request{
				Method: "GET",
				URL: &url.URL{
					Opaque: "/path",
					Host:   "foo.com",
					Path:   "/ignored",
				},
			},
			want: result{path: "/path"},
		},

		// Opaque we can't handle:
		5: {
			req: &http.Request{
				Method: "GET",
				URL: &url.URL{
					Scheme: "https",
					Opaque: "//unknown_host/path",
					Host:   "foo.com",
					Path:   "/ignored",
				},
			},
			want: result{err: `invalid request :path "https://unknown_host/path" from URL.Opaque = "//unknown_host/path"`},
		},

		// A CONNECT request:
		6: {
			req: &http.Request{
				Method: "CONNECT",
				URL: &url.URL{
					Host: "foo.com",
				},
			},
			// CONNECT requests carry no :path pseudo-header.
			want: result{},
		},
	}
	for i, tt := range tests {
		// Max peerMaxHeaderListSize so the size check never interferes.
		cc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff}
		cc.henc = hpack.NewEncoder(&cc.hbuf)
		cc.mu.Lock()
		hdrs, err := cc.encodeHeaders(tt.req, false, "", -1)
		cc.mu.Unlock()
		var got result
		hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) {
			if f.Name == ":path" {
				got.path = f.Value
			}
		})
		if err != nil {
			got.err = err.Error()
		} else if len(hdrs) > 0 {
			if _, err := hpackDec.Write(hdrs); err != nil {
				t.Errorf("%d. bogus hpack: %v", i, err)
				continue
			}
		}
		if got != tt.want {
			t.Errorf("%d. got %+v; want %+v", i, got, tt.want)
		}
	}
}
+
+// golang.org/issue/17071 -- don't sniff the first byte of the request body
+// before we've determined that the ClientConn is usable.
+func TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) {
+ const body = "foo"
+ req, _ := http.NewRequest("POST", "http://foo.com/", ioutil.NopCloser(strings.NewReader(body)))
+ cc := &ClientConn{
+ closed: true,
+ }
+ _, err := cc.RoundTrip(req)
+ if err != errClientConnUnusable {
+ t.Fatalf("RoundTrip = %v; want errClientConnUnusable", err)
+ }
+ slurp, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ t.Errorf("ReadAll = %v", err)
+ }
+ if string(slurp) != body {
+ t.Errorf("Body = %q; want %q", slurp, body)
+ }
+}
+
+func TestClientConnPing(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optOnlyServer)
+ defer st.Close()
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+ cc, err := tr.dialClientConn(st.ts.Listener.Addr().String(), false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = cc.Ping(testContext{}); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Issue 16974: if the server sent a DATA frame after the user
+// canceled the Transport's Request, the Transport previously wrote to a
+// closed pipe, got an error, and ended up closing the whole TCP
+// connection.
+func TestTransportCancelDataResponseRace(t *testing.T) {
+ cancel := make(chan struct{})
+ clientGotError := make(chan bool, 1)
+
+ const msg = "Hello."
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ if strings.Contains(r.URL.Path, "/hello") {
+ time.Sleep(50 * time.Millisecond)
+ io.WriteString(w, msg)
+ return
+ }
+ for i := 0; i < 50; i++ {
+ io.WriteString(w, "Some data.")
+ w.(http.Flusher).Flush()
+ if i == 2 {
+ close(cancel)
+ <-clientGotError
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ c := &http.Client{Transport: tr}
+ req, _ := http.NewRequest("GET", st.ts.URL, nil)
+ req.Cancel = cancel
+ res, err := c.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err = io.Copy(ioutil.Discard, res.Body); err == nil {
+ t.Fatal("unexpected success")
+ }
+ clientGotError <- true
+
+ res, err = c.Get(st.ts.URL + "/hello")
+ if err != nil {
+ t.Fatal(err)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(slurp) != msg {
+ t.Errorf("Got = %q; want %q", slurp, msg)
+ }
+}
+
+// Issue 21316: It should be safe to reuse an http.Request after the
+// request has completed.
+func TestTransportNoRaceOnRequestObjectAfterRequestComplete(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(200)
+ io.WriteString(w, "body")
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ req, _ := http.NewRequest("GET", st.ts.URL, nil)
+ resp, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil {
+ t.Fatalf("error reading response body: %v", err)
+ }
+ if err := resp.Body.Close(); err != nil {
+ t.Fatalf("error closing response body: %v", err)
+ }
+
+ // This access of req.Header should not race with code in the transport.
+ req.Header = http.Header{}
+}
+
+func TestTransportRetryAfterGOAWAY(t *testing.T) {
+ var dialer struct {
+ sync.Mutex
+ count int
+ }
+ ct1 := make(chan *clientTester)
+ ct2 := make(chan *clientTester)
+
+ ln := newLocalListener(t)
+ defer ln.Close()
+
+ tr := &Transport{
+ TLSClientConfig: tlsConfigInsecure,
+ }
+ tr.DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) {
+ dialer.Lock()
+ defer dialer.Unlock()
+ dialer.count++
+ if dialer.count == 3 {
+ return nil, errors.New("unexpected number of dials")
+ }
+ cc, err := net.Dial("tcp", ln.Addr().String())
+ if err != nil {
+ return nil, fmt.Errorf("dial error: %v", err)
+ }
+ sc, err := ln.Accept()
+ if err != nil {
+ return nil, fmt.Errorf("accept error: %v", err)
+ }
+ ct := &clientTester{
+ t: t,
+ tr: tr,
+ cc: cc,
+ sc: sc,
+ fr: NewFramer(sc, sc),
+ }
+ switch dialer.count {
+ case 1:
+ ct1 <- ct
+ case 2:
+ ct2 <- ct
+ }
+ return cc, nil
+ }
+
+ errs := make(chan error, 3)
+ done := make(chan struct{})
+ defer close(done)
+
+ // Client.
+ go func() {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ res, err := tr.RoundTrip(req)
+ if res != nil {
+ res.Body.Close()
+ if got := res.Header.Get("Foo"); got != "bar" {
+ err = fmt.Errorf("foo header = %q; want bar", got)
+ }
+ }
+ if err != nil {
+ err = fmt.Errorf("RoundTrip: %v", err)
+ }
+ errs <- err
+ }()
+
+ connToClose := make(chan io.Closer, 2)
+
+ // Server for the first request.
+ go func() {
+ var ct *clientTester
+ select {
+ case ct = <-ct1:
+ case <-done:
+ return
+ }
+
+ connToClose <- ct.cc
+ ct.greet()
+ hf, err := ct.firstHeaders()
+ if err != nil {
+ errs <- fmt.Errorf("server1 failed reading HEADERS: %v", err)
+ return
+ }
+ t.Logf("server1 got %v", hf)
+ if err := ct.fr.WriteGoAway(0 /*max id*/, ErrCodeNo, nil); err != nil {
+ errs <- fmt.Errorf("server1 failed writing GOAWAY: %v", err)
+ return
+ }
+ errs <- nil
+ }()
+
+ // Server for the second request.
+ go func() {
+ var ct *clientTester
+ select {
+ case ct = <-ct2:
+ case <-done:
+ return
+ }
+
+ connToClose <- ct.cc
+ ct.greet()
+ hf, err := ct.firstHeaders()
+ if err != nil {
+ errs <- fmt.Errorf("server2 failed reading HEADERS: %v", err)
+ return
+ }
+ t.Logf("server2 got %v", hf)
+
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"})
+ err = ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: buf.Bytes(),
+ })
+ if err != nil {
+ errs <- fmt.Errorf("server2 failed writing response HEADERS: %v", err)
+ } else {
+ errs <- nil
+ }
+ }()
+
+ for k := 0; k < 3; k++ {
+ select {
+ case err := <-errs:
+ if err != nil {
+ t.Error(err)
+ }
+ case <-time.After(1 * time.Second):
+ t.Errorf("timed out")
+ }
+ }
+
+ for {
+ select {
+ case c := <-connToClose:
+ c.Close()
+ default:
+ return
+ }
+ }
+}
+
+func TestTransportRetryAfterRefusedStream(t *testing.T) {
+ clientDone := make(chan struct{})
+ ct := newClientTester(t)
+ ct.client = func() error {
+ defer ct.cc.(*net.TCPConn).CloseWrite()
+ defer close(clientDone)
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ resp, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return fmt.Errorf("RoundTrip: %v", err)
+ }
+ resp.Body.Close()
+ if resp.StatusCode != 204 {
+ return fmt.Errorf("Status = %v; want 204", resp.StatusCode)
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ nreq := 0
+
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ select {
+ case <-clientDone:
+ // If the client's done, it
+ // will have reported any
+ // errors on its side.
+ return nil
+ default:
+ return err
+ }
+ }
+ switch f := f.(type) {
+ case *WindowUpdateFrame, *SettingsFrame:
+ case *HeadersFrame:
+ if !f.HeadersEnded() {
+ return fmt.Errorf("headers should have END_HEADERS be ended: %v", f)
+ }
+ nreq++
+ if nreq == 1 {
+ ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream)
+ } else {
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.StreamID,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: buf.Bytes(),
+ })
+ }
+ default:
+ return fmt.Errorf("Unexpected client frame %v", f)
+ }
+ }
+ }
+ ct.run()
+}
+
+func TestTransportRetryHasLimit(t *testing.T) {
+ // Skip in short mode because the total expected delay is 1s+2s+4s+8s+16s=29s.
+ if testing.Short() {
+ t.Skip("skipping long test in short mode")
+ }
+ clientDone := make(chan struct{})
+ ct := newClientTester(t)
+ ct.client = func() error {
+ defer ct.cc.(*net.TCPConn).CloseWrite()
+ defer close(clientDone)
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ resp, err := ct.tr.RoundTrip(req)
+ if err == nil {
+ return fmt.Errorf("RoundTrip expected error, got response: %+v", resp)
+ }
+ t.Logf("expected error, got: %v", err)
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ select {
+ case <-clientDone:
+ // If the client's done, it
+ // will have reported any
+ // errors on its side.
+ return nil
+ default:
+ return err
+ }
+ }
+ switch f := f.(type) {
+ case *WindowUpdateFrame, *SettingsFrame:
+ case *HeadersFrame:
+ if !f.HeadersEnded() {
+ return fmt.Errorf("headers should have END_HEADERS be ended: %v", f)
+ }
+ ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream)
+ default:
+ return fmt.Errorf("Unexpected client frame %v", f)
+ }
+ }
+ }
+ ct.run()
+}
+
+func TestTransportResponseDataBeforeHeaders(t *testing.T) {
+ ct := newClientTester(t)
+ ct.client = func() error {
+ defer ct.cc.(*net.TCPConn).CloseWrite()
+ req := httptest.NewRequest("GET", "https://dummy.tld/", nil)
+ // First request is normal to ensure the check is per stream and not per connection.
+ _, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return fmt.Errorf("RoundTrip expected no error, got: %v", err)
+ }
+ // Second request returns a DATA frame with no HEADERS.
+ resp, err := ct.tr.RoundTrip(req)
+ if err == nil {
+ return fmt.Errorf("RoundTrip expected error, got response: %+v", resp)
+ }
+ if err, ok := err.(StreamError); !ok || err.Code != ErrCodeProtocol {
+ return fmt.Errorf("expected stream PROTOCOL_ERROR, got: %v", err)
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err == io.EOF {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ switch f := f.(type) {
+ case *WindowUpdateFrame, *SettingsFrame:
+ case *HeadersFrame:
+ switch f.StreamID {
+ case 1:
+ // Send a valid response to first request.
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.StreamID,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: buf.Bytes(),
+ })
+ case 3:
+ ct.fr.WriteData(f.StreamID, true, []byte("payload"))
+ }
+ default:
+ return fmt.Errorf("Unexpected client frame %v", f)
+ }
+ }
+ }
+ ct.run()
+}
+func TestTransportRequestsStallAtServerLimit(t *testing.T) {
+ const maxConcurrent = 2
+
+ greet := make(chan struct{}) // server sends initial SETTINGS frame
+ gotRequest := make(chan struct{}) // server received a request
+ clientDone := make(chan struct{})
+
+ // Collect errors from goroutines.
+ var wg sync.WaitGroup
+ errs := make(chan error, 100)
+ defer func() {
+ wg.Wait()
+ close(errs)
+ for err := range errs {
+ t.Error(err)
+ }
+ }()
+
+ // We will send maxConcurrent+2 requests. This checker goroutine waits for the
+ // following stages:
+ // 1. The first maxConcurrent requests are received by the server.
+ // 2. The client will cancel the next request
+ // 3. The server is unblocked so it can service the first maxConcurrent requests
+ // 4. The client will send the final request
+ wg.Add(1)
+ unblockClient := make(chan struct{})
+ clientRequestCancelled := make(chan struct{})
+ unblockServer := make(chan struct{})
+ go func() {
+ defer wg.Done()
+ // Stage 1.
+ for k := 0; k < maxConcurrent; k++ {
+ <-gotRequest
+ }
+ // Stage 2.
+ close(unblockClient)
+ <-clientRequestCancelled
+ // Stage 3: give some time for the final RoundTrip call to be scheduled and
+ // verify that the final request is not sent.
+ time.Sleep(50 * time.Millisecond)
+ select {
+ case <-gotRequest:
+ errs <- errors.New("last request did not stall")
+ close(unblockServer)
+ return
+ default:
+ }
+ close(unblockServer)
+ // Stage 4.
+ <-gotRequest
+ }()
+
+ ct := newClientTester(t)
+ ct.client = func() error {
+ var wg sync.WaitGroup
+ defer func() {
+ wg.Wait()
+ close(clientDone)
+ ct.cc.(*net.TCPConn).CloseWrite()
+ }()
+ for k := 0; k < maxConcurrent+2; k++ {
+ wg.Add(1)
+ go func(k int) {
+ defer wg.Done()
+ // Don't send the second request until after receiving SETTINGS from the server
+ // to avoid a race where we use the default SettingMaxConcurrentStreams, which
+ // is much larger than maxConcurrent. We have to send the first request before
+ // waiting because the first request triggers the dial and greet.
+ if k > 0 {
+ <-greet
+ }
+ // Block until maxConcurrent requests are sent before sending any more.
+ if k >= maxConcurrent {
+ <-unblockClient
+ }
+ req, _ := http.NewRequest("GET", fmt.Sprintf("https://dummy.tld/%d", k), nil)
+ if k == maxConcurrent {
+ // This request will be canceled.
+ cancel := make(chan struct{})
+ req.Cancel = cancel
+ close(cancel)
+ _, err := ct.tr.RoundTrip(req)
+ close(clientRequestCancelled)
+ if err == nil {
+ errs <- fmt.Errorf("RoundTrip(%d) should have failed due to cancel", k)
+ return
+ }
+ } else {
+ resp, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ errs <- fmt.Errorf("RoundTrip(%d): %v", k, err)
+ return
+ }
+ ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if resp.StatusCode != 204 {
+ errs <- fmt.Errorf("Status = %v; want 204", resp.StatusCode)
+ return
+ }
+ }
+ }(k)
+ }
+ return nil
+ }
+
+ ct.server = func() error {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ ct.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent})
+
+ // Server write loop.
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ writeResp := make(chan uint32, maxConcurrent+1)
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ <-unblockServer
+ for id := range writeResp {
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: id,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: buf.Bytes(),
+ })
+ }
+ }()
+
+ // Server read loop.
+ var nreq int
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ select {
+ case <-clientDone:
+ // If the client's done, it will have reported any errors on its side.
+ return nil
+ default:
+ return err
+ }
+ }
+ switch f := f.(type) {
+ case *WindowUpdateFrame:
+ case *SettingsFrame:
+ // Wait for the client SETTINGS ack until ending the greet.
+ close(greet)
+ case *HeadersFrame:
+ if !f.HeadersEnded() {
+ return fmt.Errorf("headers should have END_HEADERS be ended: %v", f)
+ }
+ gotRequest <- struct{}{}
+ nreq++
+ writeResp <- f.StreamID
+ if nreq == maxConcurrent+1 {
+ close(writeResp)
+ }
+ default:
+ return fmt.Errorf("Unexpected client frame %v", f)
+ }
+ }
+ }
+
+ ct.run()
+}
+
+func TestAuthorityAddr(t *testing.T) {
+ tests := []struct {
+ scheme, authority string
+ want string
+ }{
+ {"http", "foo.com", "foo.com:80"},
+ {"https", "foo.com", "foo.com:443"},
+ {"https", "foo.com:1234", "foo.com:1234"},
+ {"https", "1.2.3.4:1234", "1.2.3.4:1234"},
+ {"https", "1.2.3.4", "1.2.3.4:443"},
+ {"https", "[::1]:1234", "[::1]:1234"},
+ {"https", "[::1]", "[::1]:443"},
+ }
+ for _, tt := range tests {
+ got := authorityAddr(tt.scheme, tt.authority)
+ if got != tt.want {
+ t.Errorf("authorityAddr(%q, %q) = %q; want %q", tt.scheme, tt.authority, got, tt.want)
+ }
+ }
+}
+
+// Issue 20448: stop allocating for DATA frames' payload after
+// Response.Body.Close is called.
+func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) {
+ megabyteZero := make([]byte, 1<<20)
+
+ writeErr := make(chan error, 1)
+
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ w.(http.Flusher).Flush()
+ var sum int64
+ for i := 0; i < 100; i++ {
+ n, err := w.Write(megabyteZero)
+ sum += int64(n)
+ if err != nil {
+ writeErr <- err
+ return
+ }
+ }
+ t.Logf("wrote all %d bytes", sum)
+ writeErr <- nil
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+ c := &http.Client{Transport: tr}
+ res, err := c.Get(st.ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var buf [1]byte
+ if _, err := res.Body.Read(buf[:]); err != nil {
+ t.Error(err)
+ }
+ if err := res.Body.Close(); err != nil {
+ t.Error(err)
+ }
+
+ trb, ok := res.Body.(transportResponseBody)
+ if !ok {
+ t.Fatalf("res.Body = %T; want transportResponseBody", res.Body)
+ }
+ if trb.cs.bufPipe.b != nil {
+ t.Errorf("response body pipe is still open")
+ }
+
+ gotErr := <-writeErr
+ if gotErr == nil {
+ t.Errorf("Handler unexpectedly managed to write its entire response without getting an error")
+ } else if gotErr != errStreamClosed {
+ t.Errorf("Handler Write err = %v; want errStreamClosed", gotErr)
+ }
+}
+
+// Issue 18891: make sure Request.Body == NoBody means no DATA frame
+// is ever sent, even if empty.
+func TestTransportNoBodyMeansNoDATA(t *testing.T) {
+ ct := newClientTester(t)
+
+ unblockClient := make(chan bool)
+
+ ct.client = func() error {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", go18httpNoBody())
+ ct.tr.RoundTrip(req)
+ <-unblockClient
+ return nil
+ }
+ ct.server = func() error {
+ defer close(unblockClient)
+ defer ct.cc.(*net.TCPConn).Close()
+ ct.greet()
+
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
+ }
+ switch f := f.(type) {
+ default:
+ return fmt.Errorf("Got %T; want HeadersFrame", f)
+ case *WindowUpdateFrame, *SettingsFrame:
+ continue
+ case *HeadersFrame:
+ if !f.StreamEnded() {
+ return fmt.Errorf("got headers frame without END_STREAM")
+ }
+ return nil
+ }
+ }
+ }
+ ct.run()
+}
+
+func benchSimpleRoundTrip(b *testing.B, nHeaders int) {
+ defer disableGoroutineTracking()()
+ b.ReportAllocs()
+ st := newServerTester(b,
+ func(w http.ResponseWriter, r *http.Request) {
+ },
+ optOnlyServer,
+ optQuiet,
+ )
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ req, err := http.NewRequest("GET", st.ts.URL, nil)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ for i := 0; i < nHeaders; i++ {
+ name := fmt.Sprint("A-", i)
+ req.Header.Set(name, "*")
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ if res != nil {
+ res.Body.Close()
+ }
+ b.Fatalf("RoundTrip err = %v; want nil", err)
+ }
+ res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ b.Fatalf("Response code = %v; want %v", res.StatusCode, http.StatusOK)
+ }
+ }
+}
+
+type infiniteReader struct{}
+
+func (r infiniteReader) Read(b []byte) (int, error) {
+ return len(b), nil
+}
+
+// Issue 20521: it is not an error to receive a response and end stream
+// from the server without the body being consumed.
+func TestTransportResponseAndResetWithoutConsumingBodyRace(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ // The request body needs to be big enough to trigger flow control.
+ req, _ := http.NewRequest("PUT", st.ts.URL, infiniteReader{})
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.StatusCode != http.StatusOK {
+ t.Fatalf("Response code = %v; want %v", res.StatusCode, http.StatusOK)
+ }
+}
+
+func BenchmarkClientRequestHeaders(b *testing.B) {
+ b.Run(" 0 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 0) })
+ b.Run(" 10 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 10) })
+ b.Run(" 100 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 100) })
+ b.Run("1000 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 1000) })
+}
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
new file mode 100644
index 0000000..54ab4a8
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -0,0 +1,365 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
+)
+
+// writeFramer is implemented by any type that is used to write frames.
+type writeFramer interface {
+ writeFrame(writeContext) error
+
+ // staysWithinBuffer reports whether this writer promises that
+ // it will only write less than or equal to size bytes, and it
+ // won't Flush the write context.
+ staysWithinBuffer(size int) bool
+}
+
+// writeContext is the interface needed by the various frame writer
+// types below. All the writeFrame methods below are scheduled via the
+// frame writing scheduler (see writeScheduler in writesched.go).
+//
+// This interface is implemented by *serverConn.
+//
+// TODO: decide whether to a) use this in the client code (which didn't
+// end up using this yet, because it has a simpler design, not
+// currently implementing priorities), or b) delete this and
+// make the server code a bit more concrete.
+type writeContext interface {
+ Framer() *Framer
+ Flush() error
+ CloseConn() error
+ // HeaderEncoder returns an HPACK encoder that writes to the
+ // returned buffer.
+ HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
+}
+
+// writeEndsStream reports whether w writes a frame that will transition
+// the stream to a half-closed local state. This returns false for RST_STREAM,
+// which closes the entire stream (not just the local half).
+func writeEndsStream(w writeFramer) bool {
+ switch v := w.(type) {
+ case *writeData:
+ return v.endStream
+ case *writeResHeaders:
+ return v.endStream
+ case nil:
+ // This can only happen if the caller reuses w after it's
+ // been intentionally nil'ed out to prevent use. Keep this
+ // here to catch future refactoring breaking it.
+ panic("writeEndsStream called on nil writeFramer")
+ }
+ return false
+}
+
+type flushFrameWriter struct{}
+
+func (flushFrameWriter) writeFrame(ctx writeContext) error {
+ return ctx.Flush()
+}
+
+func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
+
+type writeSettings []Setting
+
+func (s writeSettings) staysWithinBuffer(max int) bool {
+ const settingSize = 6 // uint16 + uint32
+ return frameHeaderLen+settingSize*len(s) <= max
+
+}
+
+func (s writeSettings) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettings([]Setting(s)...)
+}
+
+type writeGoAway struct {
+ maxStreamID uint32
+ code ErrCode
+}
+
+func (p *writeGoAway) writeFrame(ctx writeContext) error {
+ err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
+ ctx.Flush() // ignore error: we're hanging up on them anyway
+ return err
+}
+
+func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
+
+type writeData struct {
+ streamID uint32
+ p []byte
+ endStream bool
+}
+
+func (w *writeData) String() string {
+ return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
+}
+
+func (w *writeData) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
+}
+
+func (w *writeData) staysWithinBuffer(max int) bool {
+ return frameHeaderLen+len(w.p) <= max
+}
+
+// handlerPanicRST is the message sent from handler goroutines when
+// the handler panics.
+type handlerPanicRST struct {
+ StreamID uint32
+}
+
+func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
+}
+
+func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
+func (se StreamError) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
+}
+
+func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
+type writePingAck struct{ pf *PingFrame }
+
+func (w writePingAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WritePing(true, w.pf.Data)
+}
+
+func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
+
+type writeSettingsAck struct{}
+
+func (writeSettingsAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettingsAck()
+}
+
+func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
+
+// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
+// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
+// for the first/last fragment, respectively.
+func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
+ // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+ // that all peers must support (16KB). Later we could care
+ // more and send larger frames if the peer advertised it, but
+ // there's little point. Most headers are small anyway (so we
+ // generally won't have CONTINUATION frames), and extra frames
+ // only waste 9 bytes anyway.
+ const maxFrameSize = 16384
+
+ first := true
+ for len(headerBlock) > 0 {
+ frag := headerBlock
+ if len(frag) > maxFrameSize {
+ frag = frag[:maxFrameSize]
+ }
+ headerBlock = headerBlock[len(frag):]
+ if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
+ return err
+ }
+ first = false
+ }
+ return nil
+}
+
+// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
+// for HTTP response headers or trailers from a server handler.
+type writeResHeaders struct {
+ streamID uint32
+ httpResCode int // 0 means no ":status" line
+ h http.Header // may be nil
+ trailers []string // if non-nil, which keys of h to write. nil means all.
+ endStream bool
+
+ date string
+ contentType string
+ contentLength string
+}
+
+func encKV(enc *hpack.Encoder, k, v string) {
+ if VerboseLogs {
+ log.Printf("http2: server encoding header %q = %q", k, v)
+ }
+ enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+}
+
+func (w *writeResHeaders) staysWithinBuffer(max int) bool {
+ // TODO: this is a common one. It'd be nice to return true
+ // here and get into the fast path if we could be clever and
+ // calculate the size fast enough, or at least a conservative
+	// upper bound that usually fires. (Maybe if w.h and
+ // w.trailers are nil, so we don't need to enumerate it.)
+ // Otherwise I'm afraid that just calculating the length to
+ // answer this question would be slower than the ~2µs benefit.
+ return false
+}
+
+func (w *writeResHeaders) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ if w.httpResCode != 0 {
+ encKV(enc, ":status", httpCodeString(w.httpResCode))
+ }
+
+ encodeHeaders(enc, w.h, w.trailers)
+
+ if w.contentType != "" {
+ encKV(enc, "content-type", w.contentType)
+ }
+ if w.contentLength != "" {
+ encKV(enc, "content-length", w.contentLength)
+ }
+ if w.date != "" {
+ encKV(enc, "date", w.date)
+ }
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 && w.trailers == nil {
+ panic("unexpected empty hpack")
+ }
+
+ return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+ if firstFrag {
+ return ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: frag,
+ EndStream: w.endStream,
+ EndHeaders: lastFrag,
+ })
+ } else {
+ return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+ }
+}
+
+// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
+type writePushPromise struct {
+ streamID uint32 // pusher stream
+ method string // for :method
+ url *url.URL // for :scheme, :authority, :path
+ h http.Header
+
+ // Creates an ID for a pushed stream. This runs on serveG just before
+ // the frame is written. The returned ID is copied to promisedID.
+ allocatePromisedID func() (uint32, error)
+ promisedID uint32
+}
+
+func (w *writePushPromise) staysWithinBuffer(max int) bool {
+ // TODO: see writeResHeaders.staysWithinBuffer
+ return false
+}
+
+func (w *writePushPromise) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ encKV(enc, ":method", w.method)
+ encKV(enc, ":scheme", w.url.Scheme)
+ encKV(enc, ":authority", w.url.Host)
+ encKV(enc, ":path", w.url.RequestURI())
+ encodeHeaders(enc, w.h, nil)
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 {
+ panic("unexpected empty hpack")
+ }
+
+ return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+ if firstFrag {
+ return ctx.Framer().WritePushPromise(PushPromiseParam{
+ StreamID: w.streamID,
+ PromiseID: w.promisedID,
+ BlockFragment: frag,
+ EndHeaders: lastFrag,
+ })
+ } else {
+ return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+ }
+}
+
+type write100ContinueHeadersFrame struct {
+ streamID uint32
+}
+
+func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+ encKV(enc, ":status", "100")
+ return ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: buf.Bytes(),
+ EndStream: false,
+ EndHeaders: true,
+ })
+}
+
+func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
+ // Sloppy but conservative:
+ return 9+2*(len(":status")+len("100")) <= max
+}
+
+type writeWindowUpdate struct {
+ streamID uint32 // or 0 for conn-level
+ n uint32
+}
+
+func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
+func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}
+
+// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
+// is encoded only if k is in keys.
+func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
+ if keys == nil {
+ sorter := sorterPool.Get().(*sorter)
+ // Using defer here, since the returned keys from the
+ // sorter.Keys method is only valid until the sorter
+ // is returned:
+ defer sorterPool.Put(sorter)
+ keys = sorter.Keys(h)
+ }
+ for _, k := range keys {
+ vv := h[k]
+ k = lowerHeader(k)
+ if !validWireHeaderFieldName(k) {
+ // Skip it as backup paranoia. Per
+ // golang.org/issue/14048, these should
+ // already be rejected at a higher level.
+ continue
+ }
+ isTE := k == "transfer-encoding"
+ for _, v := range vv {
+ if !httplex.ValidHeaderFieldValue(v) {
+ // TODO: return an error? golang.org/issue/14048
+ // For now just omit it.
+ continue
+ }
+ // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+ if isTE && v != "trailers" {
+ continue
+ }
+ encKV(enc, k, v)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
new file mode 100644
index 0000000..4fe3073
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -0,0 +1,242 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "fmt"
+
+// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
+// Methods are never called concurrently.
+type WriteScheduler interface {
+ // OpenStream opens a new stream in the write scheduler.
+ // It is illegal to call this with streamID=0 or with a streamID that is
+ // already open -- the call may panic.
+ OpenStream(streamID uint32, options OpenStreamOptions)
+
+ // CloseStream closes a stream in the write scheduler. Any frames queued on
+ // this stream should be discarded. It is illegal to call this on a stream
+ // that is not open -- the call may panic.
+ CloseStream(streamID uint32)
+
+ // AdjustStream adjusts the priority of the given stream. This may be called
+ // on a stream that has not yet been opened or has been closed. Note that
+ // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
+ // https://tools.ietf.org/html/rfc7540#section-5.1
+ AdjustStream(streamID uint32, priority PriorityParam)
+
+ // Push queues a frame in the scheduler. In most cases, this will not be
+ // called with wr.StreamID()!=0 unless that stream is currently open. The one
+ // exception is RST_STREAM frames, which may be sent on idle or closed streams.
+ Push(wr FrameWriteRequest)
+
+ // Pop dequeues the next frame to write. Returns false if no frames can
+ // be written. Frames with a given wr.StreamID() are Pop'd in the same
+ // order they are Push'd.
+ Pop() (wr FrameWriteRequest, ok bool)
+}
+
+// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
+type OpenStreamOptions struct {
+ // PusherID is zero if the stream was initiated by the client. Otherwise,
+ // PusherID names the stream that pushed the newly opened stream.
+ PusherID uint32
+}
+
+// FrameWriteRequest is a request to write a frame.
+type FrameWriteRequest struct {
+ // write is the interface value that does the writing, once the
+ // WriteScheduler has selected this frame to write. The write
+ // functions are all defined in write.go.
+ write writeFramer
+
+ // stream is the stream on which this frame will be written.
+ // nil for non-stream frames like PING and SETTINGS.
+ stream *stream
+
+ // done, if non-nil, must be a buffered channel with space for
+ // 1 message and is sent the return value from write (or an
+ // earlier error) when the frame has been written.
+ done chan error
+}
+
+// StreamID returns the id of the stream this frame will be written to.
+// 0 is used for non-stream frames such as PING and SETTINGS.
+func (wr FrameWriteRequest) StreamID() uint32 {
+ if wr.stream == nil {
+ if se, ok := wr.write.(StreamError); ok {
+ // (*serverConn).resetStream doesn't set
+ // stream because it doesn't necessarily have
+ // one. So special case this type of write
+ // message.
+ return se.StreamID
+ }
+ return 0
+ }
+ return wr.stream.id
+}
+
+// DataSize returns the number of flow control bytes that must be consumed
+// to write this entire frame. This is 0 for non-DATA frames.
+func (wr FrameWriteRequest) DataSize() int {
+ if wd, ok := wr.write.(*writeData); ok {
+ return len(wd.p)
+ }
+ return 0
+}
+
+// Consume consumes min(n, available) bytes from this frame, where available
+// is the number of flow control bytes available on the stream. Consume returns
+// 0, 1, or 2 frames, where the integer return value gives the number of frames
+// returned.
+//
+// If flow control prevents consuming any bytes, this returns (_, _, 0). If
+// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
+// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
+// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
+// underlying stream's flow control budget.
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
+ var empty FrameWriteRequest
+
+ // Non-DATA frames are always consumed whole.
+ wd, ok := wr.write.(*writeData)
+ if !ok || len(wd.p) == 0 {
+ return wr, empty, 1
+ }
+
+ // Might need to split after applying limits.
+ allowed := wr.stream.flow.available()
+ if n < allowed {
+ allowed = n
+ }
+ if wr.stream.sc.maxFrameSize < allowed {
+ allowed = wr.stream.sc.maxFrameSize
+ }
+ if allowed <= 0 {
+ return empty, empty, 0
+ }
+ if len(wd.p) > int(allowed) {
+ wr.stream.flow.take(allowed)
+ consumed := FrameWriteRequest{
+ stream: wr.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: wd.p[:allowed],
+ // Even if the original had endStream set, there
+ // are bytes remaining because len(wd.p) > allowed,
+ // so we know endStream is false.
+ endStream: false,
+ },
+ // Our caller is blocking on the final DATA frame, not
+ // this intermediate frame, so no need to wait.
+ done: nil,
+ }
+ rest := FrameWriteRequest{
+ stream: wr.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: wd.p[allowed:],
+ endStream: wd.endStream,
+ },
+ done: wr.done,
+ }
+ return consumed, rest, 2
+ }
+
+ // The frame is consumed whole.
+ // NB: This cast cannot overflow because allowed is <= math.MaxInt32.
+ wr.stream.flow.take(int32(len(wd.p)))
+ return wr, empty, 1
+}
+
+// String is for debugging only.
+func (wr FrameWriteRequest) String() string {
+ var des string
+ if s, ok := wr.write.(fmt.Stringer); ok {
+ des = s.String()
+ } else {
+ des = fmt.Sprintf("%T", wr.write)
+ }
+ return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
+}
+
+// replyToWriter sends err to wr.done and panics if the send must block.
+// This does nothing if wr.done is nil.
+func (wr *FrameWriteRequest) replyToWriter(err error) {
+ if wr.done == nil {
+ return
+ }
+ select {
+ case wr.done <- err:
+ default:
+ panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
+ }
+ wr.write = nil // prevent use (assume it's tainted after wr.done send)
+}
+
+// writeQueue is used by implementations of WriteScheduler.
+type writeQueue struct {
+ s []FrameWriteRequest
+}
+
+func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *writeQueue) push(wr FrameWriteRequest) {
+ q.s = append(q.s, wr)
+}
+
+func (q *writeQueue) shift() FrameWriteRequest {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ wr := q.s[0]
+ // TODO: less copy-happy queue.
+ copy(q.s, q.s[1:])
+ q.s[len(q.s)-1] = FrameWriteRequest{}
+ q.s = q.s[:len(q.s)-1]
+ return wr
+}
+
+// consume consumes up to n bytes from q.s[0]. If the frame is
+// entirely consumed, it is removed from the queue. If the frame
+// is partially consumed, the frame is kept with the consumed
+// bytes removed. Returns true iff any bytes were consumed.
+func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
+ if len(q.s) == 0 {
+ return FrameWriteRequest{}, false
+ }
+ consumed, rest, numresult := q.s[0].Consume(n)
+ switch numresult {
+ case 0:
+ return FrameWriteRequest{}, false
+ case 1:
+ q.shift()
+ case 2:
+ q.s[0] = rest
+ }
+ return consumed, true
+}
+
+type writeQueuePool []*writeQueue
+
+// put inserts an unused writeQueue into the pool.
+func (p *writeQueuePool) put(q *writeQueue) {
+ for i := range q.s {
+ q.s[i] = FrameWriteRequest{}
+ }
+ q.s = q.s[:0]
+ *p = append(*p, q)
+}
+
+// get returns an empty writeQueue.
+func (p *writeQueuePool) get() *writeQueue {
+ ln := len(*p)
+ if ln == 0 {
+ return new(writeQueue)
+ }
+ x := ln - 1
+ q := (*p)[x]
+ (*p)[x] = nil
+ *p = (*p)[:x]
+ return q
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
new file mode 100644
index 0000000..848fed6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority.go
@@ -0,0 +1,452 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+ "sort"
+)
+
+// RFC 7540, Section 5.3.5: the default weight is 16.
+const priorityDefaultWeight = 15 // 16 = 15 + 1
+
+// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
+type PriorityWriteSchedulerConfig struct {
+ // MaxClosedNodesInTree controls the maximum number of closed streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // "It is possible for a stream to become closed while prioritization
+ // information ... is in transit. ... This potentially creates suboptimal
+ // prioritization, since the stream could be given a priority that is
+ // different from what is intended. To avoid these problems, an endpoint
+ // SHOULD retain stream prioritization state for a period after streams
+ // become closed. The longer state is retained, the lower the chance that
+ // streams are assigned incorrect or default priority values."
+ MaxClosedNodesInTree int
+
+ // MaxIdleNodesInTree controls the maximum number of idle streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // Similarly, streams that are in the "idle" state can be assigned
+ // priority or become a parent of other streams. This allows for the
+ // creation of a grouping node in the dependency tree, which enables
+ // more flexible expressions of priority. Idle streams begin with a
+ // default priority (Section 5.3.5).
+ MaxIdleNodesInTree int
+
+ // ThrottleOutOfOrderWrites enables write throttling to help ensure that
+ // data is delivered in priority order. This works around a race where
+ // stream B depends on stream A and both streams are about to call Write
+ // to queue DATA frames. If B wins the race, a naive scheduler would eagerly
+ // write as much data from B as possible, but this is suboptimal because A
+ // is a higher-priority stream. With throttling enabled, we write a small
+ // amount of data from B to minimize the amount of bandwidth that B can
+ // steal from A.
+ ThrottleOutOfOrderWrites bool
+}
+
+// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
+// If cfg is nil, default options are used.
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
+ if cfg == nil {
+ // For justification of these defaults, see:
+ // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+ cfg = &PriorityWriteSchedulerConfig{
+ MaxClosedNodesInTree: 10,
+ MaxIdleNodesInTree: 10,
+ ThrottleOutOfOrderWrites: false,
+ }
+ }
+
+ ws := &priorityWriteScheduler{
+ nodes: make(map[uint32]*priorityNode),
+ maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+ maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
+ enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
+ }
+ ws.nodes[0] = &ws.root
+ if cfg.ThrottleOutOfOrderWrites {
+ ws.writeThrottleLimit = 1024
+ } else {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ return ws
+}
+
+type priorityNodeState int
+
+const (
+ priorityNodeOpen priorityNodeState = iota
+ priorityNodeClosed
+ priorityNodeIdle
+)
+
+// priorityNode is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type priorityNode struct {
+ q writeQueue // queue of pending frames to write
+ id uint32 // id of the stream, or 0 for the root of the tree
+ weight uint8 // the actual weight is weight+1, so the value is in [1,256]
+ state priorityNodeState // open | closed | idle
+ bytes int64 // number of bytes written by this node, or 0 if closed
+ subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
+
+ // These links form the priority tree.
+ parent *priorityNode
+ kids *priorityNode // start of the kids list
+ prev, next *priorityNode // doubly-linked list of siblings
+}
+
+func (n *priorityNode) setParent(parent *priorityNode) {
+ if n == parent {
+ panic("setParent to self")
+ }
+ if n.parent == parent {
+ return
+ }
+ // Unlink from current parent.
+ if parent := n.parent; parent != nil {
+ if n.prev == nil {
+ parent.kids = n.next
+ } else {
+ n.prev.next = n.next
+ }
+ if n.next != nil {
+ n.next.prev = n.prev
+ }
+ }
+ // Link to new parent.
+ // If parent=nil, remove n from the tree.
+ // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+ n.parent = parent
+ if parent == nil {
+ n.next = nil
+ n.prev = nil
+ } else {
+ n.next = parent.kids
+ n.prev = nil
+ if n.next != nil {
+ n.next.prev = n
+ }
+ parent.kids = n
+ }
+}
+
+func (n *priorityNode) addBytes(b int64) {
+ n.bytes += b
+ for ; n != nil; n = n.parent {
+ n.subtreeBytes += b
+ }
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this function returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+ if !n.q.empty() && f(n, openParent) {
+ return true
+ }
+ if n.kids == nil {
+ return false
+ }
+
+ // Don't consider the root "open" when updating openParent since
+ // we can't send data frames on the root stream (only control frames).
+ if n.id != 0 {
+ openParent = openParent || (n.state == priorityNodeOpen)
+ }
+
+ // Common case: only one kid or all kids have the same weight.
+ // Some clients don't use weights; other clients (like web browsers)
+ // use mostly-linear priority trees.
+ w := n.kids.weight
+ needSort := false
+ for k := n.kids.next; k != nil; k = k.next {
+ if k.weight != w {
+ needSort = true
+ break
+ }
+ }
+ if !needSort {
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+ }
+
+ // Uncommon case: sort the child nodes. We remove the kids from the parent,
+ // then re-insert after sorting so we can reuse tmp for future sort calls.
+ *tmp = (*tmp)[:0]
+ for n.kids != nil {
+ *tmp = append(*tmp, n.kids)
+ n.kids.setParent(nil)
+ }
+ sort.Sort(sortPriorityNodeSiblings(*tmp))
+ for i := len(*tmp) - 1; i >= 0; i-- {
+ (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
+ }
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+}
+
+type sortPriorityNodeSiblings []*priorityNode
+
+func (z sortPriorityNodeSiblings) Len() int { return len(z) }
+func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblings) Less(i, k int) bool {
+ // Prefer the subtree that has sent fewer bytes relative to its weight.
+ // See sections 5.3.2 and 5.3.4.
+ wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
+ wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+ if bi == 0 && bk == 0 {
+ return wi >= wk
+ }
+ if bk == 0 {
+ return false
+ }
+ return bi/bk <= wi/wk
+}
+
+type priorityWriteScheduler struct {
+ // root is the root of the priority tree, where root.id = 0.
+ // The root queues control frames that are not associated with any stream.
+ root priorityNode
+
+ // nodes maps stream ids to priority tree nodes.
+ nodes map[uint32]*priorityNode
+
+ // maxID is the maximum stream id in nodes.
+ maxID uint32
+
+ // lists of nodes that have been closed or are idle, but are kept in
+ // the tree for improved prioritization. When the lengths exceed either
+ // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
+ closedNodes, idleNodes []*priorityNode
+
+ // From the config.
+ maxClosedNodesInTree int
+ maxIdleNodesInTree int
+ writeThrottleLimit int32
+ enableWriteThrottle bool
+
+ // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
+ tmp []*priorityNode
+
+ // pool of empty queues for reuse.
+ queuePool writeQueuePool
+}
+
+func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+ // The stream may be currently idle but cannot be opened or closed.
+ if curr := ws.nodes[streamID]; curr != nil {
+ if curr.state != priorityNodeIdle {
+ panic(fmt.Sprintf("stream %d already opened", streamID))
+ }
+ curr.state = priorityNodeOpen
+ return
+ }
+
+ // RFC 7540, Section 5.3.5:
+ // "All streams are initially assigned a non-exclusive dependency on stream 0x0.
+ // Pushed streams initially depend on their associated stream. In both cases,
+ // streams are assigned a default weight of 16."
+ parent := ws.nodes[options.PusherID]
+ if parent == nil {
+ parent = &ws.root
+ }
+ n := &priorityNode{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: priorityDefaultWeight,
+ state: priorityNodeOpen,
+ }
+ n.setParent(parent)
+ ws.nodes[streamID] = n
+ if streamID > ws.maxID {
+ ws.maxID = streamID
+ }
+}
+
+func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
+ if streamID == 0 {
+ panic("violation of WriteScheduler interface: cannot close stream 0")
+ }
+ if ws.nodes[streamID] == nil {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
+ }
+ if ws.nodes[streamID].state != priorityNodeOpen {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
+ }
+
+ n := ws.nodes[streamID]
+ n.state = priorityNodeClosed
+ n.addBytes(-n.bytes)
+
+ q := n.q
+ ws.queuePool.put(&q)
+ n.q.s = nil
+ if ws.maxClosedNodesInTree > 0 {
+ ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
+ } else {
+ ws.removeNode(n)
+ }
+}
+
+func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+ if streamID == 0 {
+ panic("adjustPriority on root")
+ }
+
+ // If streamID does not exist, there are two cases:
+ // - A closed stream that has been removed (this will have ID <= maxID)
+ // - An idle stream that is being used for "grouping" (this will have ID > maxID)
+ n := ws.nodes[streamID]
+ if n == nil {
+ if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
+ return
+ }
+ ws.maxID = streamID
+ n = &priorityNode{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: priorityDefaultWeight,
+ state: priorityNodeIdle,
+ }
+ n.setParent(&ws.root)
+ ws.nodes[streamID] = n
+ ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
+ }
+
+ // Section 5.3.1: A dependency on a stream that is not currently in the tree
+ // results in that stream being given a default priority (Section 5.3.5).
+ parent := ws.nodes[priority.StreamDep]
+ if parent == nil {
+ n.setParent(&ws.root)
+ n.weight = priorityDefaultWeight
+ return
+ }
+
+ // Ignore if the client tries to make a node its own parent.
+ if n == parent {
+ return
+ }
+
+ // Section 5.3.3:
+ // "If a stream is made dependent on one of its own dependencies, the
+ // formerly dependent stream is first moved to be dependent on the
+ // reprioritized stream's previous parent. The moved dependency retains
+ // its weight."
+ //
+ // That is: if parent depends on n, move parent to depend on n.parent.
+ for x := parent.parent; x != nil; x = x.parent {
+ if x == n {
+ parent.setParent(n.parent)
+ break
+ }
+ }
+
+ // Section 5.3.3: The exclusive flag causes the stream to become the sole
+ // dependency of its parent stream, causing other dependencies to become
+ // dependent on the exclusive stream.
+ if priority.Exclusive {
+ k := parent.kids
+ for k != nil {
+ next := k.next
+ if k != n {
+ k.setParent(n)
+ }
+ k = next
+ }
+ }
+
+ n.setParent(parent)
+ n.weight = priority.Weight
+}
+
+func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
+ var n *priorityNode
+ if id := wr.StreamID(); id == 0 {
+ n = &ws.root
+ } else {
+ n = ws.nodes[id]
+ if n == nil {
+ // id is an idle or closed stream. wr should not be a HEADERS or
+ // DATA frame. However, wr can be a RST_STREAM. In this case, we
+ // push wr onto the root, rather than creating a new priorityNode,
+ // since RST_STREAM is tiny and the stream's priority is unknown
+ // anyway. See issue #17919.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ n = &ws.root
+ }
+ }
+ n.q.push(wr)
+}
+
+func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
+ ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
+ limit := int32(math.MaxInt32)
+ if openParent {
+ limit = ws.writeThrottleLimit
+ }
+ wr, ok = n.q.consume(limit)
+ if !ok {
+ return false
+ }
+ n.addBytes(int64(wr.DataSize()))
+ // If B depends on A and B continuously has data available but A
+ // does not, gradually increase the throttling limit to allow B to
+ // steal more and more bandwidth from A.
+ if openParent {
+ ws.writeThrottleLimit += 1024
+ if ws.writeThrottleLimit < 0 {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ } else if ws.enableWriteThrottle {
+ ws.writeThrottleLimit = 1024
+ }
+ return true
+ })
+ return wr, ok
+}
+
+func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
+ if maxSize == 0 {
+ return
+ }
+ if len(*list) == maxSize {
+ // Remove the oldest node, then shift left.
+ ws.removeNode((*list)[0])
+ x := (*list)[1:]
+ copy(*list, x)
+ *list = (*list)[:len(x)]
+ }
+ *list = append(*list, n)
+}
+
+func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
+ for k := n.kids; k != nil; k = k.next {
+ k.setParent(n.parent)
+ }
+ n.setParent(nil)
+ delete(ws.nodes, n.id)
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority_test.go b/vendor/golang.org/x/net/http2/writesched_priority_test.go
new file mode 100644
index 0000000..f2b535a
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority_test.go
@@ -0,0 +1,541 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "testing"
+)
+
+func defaultPriorityWriteScheduler() *priorityWriteScheduler {
+ return NewPriorityWriteScheduler(nil).(*priorityWriteScheduler)
+}
+
+func checkPriorityWellFormed(ws *priorityWriteScheduler) error {
+ for id, n := range ws.nodes {
+ if id != n.id {
+ return fmt.Errorf("bad ws.nodes: ws.nodes[%d] = %d", id, n.id)
+ }
+ if n.parent == nil {
+ if n.next != nil || n.prev != nil {
+ return fmt.Errorf("bad node %d: nil parent but prev/next not nil", id)
+ }
+ continue
+ }
+ found := false
+ for k := n.parent.kids; k != nil; k = k.next {
+ if k.id == id {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf("bad node %d: not found in parent %d kids list", id, n.parent.id)
+ }
+ }
+ return nil
+}
+
+func fmtTree(ws *priorityWriteScheduler, fmtNode func(*priorityNode) string) string {
+ var ids []int
+ for _, n := range ws.nodes {
+ ids = append(ids, int(n.id))
+ }
+ sort.Ints(ids)
+
+ var buf bytes.Buffer
+ for _, id := range ids {
+ if buf.Len() != 0 {
+ buf.WriteString(" ")
+ }
+ if id == 0 {
+ buf.WriteString(fmtNode(&ws.root))
+ } else {
+ buf.WriteString(fmtNode(ws.nodes[uint32(id)]))
+ }
+ }
+ return buf.String()
+}
+
+func fmtNodeParentSkipRoot(n *priorityNode) string {
+ switch {
+ case n.id == 0:
+ return ""
+ case n.parent == nil:
+ return fmt.Sprintf("%d{parent:nil}", n.id)
+ default:
+ return fmt.Sprintf("%d{parent:%d}", n.id, n.parent.id)
+ }
+}
+
+func fmtNodeWeightParentSkipRoot(n *priorityNode) string {
+ switch {
+ case n.id == 0:
+ return ""
+ case n.parent == nil:
+ return fmt.Sprintf("%d{weight:%d,parent:nil}", n.id, n.weight)
+ default:
+ return fmt.Sprintf("%d{weight:%d,parent:%d}", n.id, n.weight, n.parent.id)
+ }
+}
+
+func TestPriorityTwoStreams(t *testing.T) {
+ ws := defaultPriorityWriteScheduler()
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{})
+
+ want := "1{weight:15,parent:0} 2{weight:15,parent:0}"
+ if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
+ t.Errorf("After open\ngot %q\nwant %q", got, want)
+ }
+
+ // Move 1's parent to 2.
+ ws.AdjustStream(1, PriorityParam{
+ StreamDep: 2,
+ Weight: 32,
+ Exclusive: false,
+ })
+ want = "1{weight:32,parent:2} 2{weight:15,parent:0}"
+ if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
+ t.Errorf("After adjust\ngot %q\nwant %q", got, want)
+ }
+
+ if err := checkPriorityWellFormed(ws); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPriorityAdjustExclusiveZero(t *testing.T) {
+ // 1, 2, and 3 are all children of the 0 stream.
+ // Exclusive reprioritization to any of the streams should bring
+ // the rest of the streams under the reprioritized stream.
+ ws := defaultPriorityWriteScheduler()
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{})
+ ws.OpenStream(3, OpenStreamOptions{})
+
+ want := "1{weight:15,parent:0} 2{weight:15,parent:0} 3{weight:15,parent:0}"
+ if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
+ t.Errorf("After open\ngot %q\nwant %q", got, want)
+ }
+
+ ws.AdjustStream(2, PriorityParam{
+ StreamDep: 0,
+ Weight: 20,
+ Exclusive: true,
+ })
+ want = "1{weight:15,parent:2} 2{weight:20,parent:0} 3{weight:15,parent:2}"
+ if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
+ t.Errorf("After adjust\ngot %q\nwant %q", got, want)
+ }
+
+ if err := checkPriorityWellFormed(ws); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPriorityAdjustOwnParent(t *testing.T) {
+ // Assigning a node as its own parent should have no effect.
+ ws := defaultPriorityWriteScheduler()
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{})
+ ws.AdjustStream(2, PriorityParam{
+ StreamDep: 2,
+ Weight: 20,
+ Exclusive: true,
+ })
+ want := "1{weight:15,parent:0} 2{weight:15,parent:0}"
+ if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
+ t.Errorf("After adjust\ngot %q\nwant %q", got, want)
+ }
+ if err := checkPriorityWellFormed(ws); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPriorityClosedStreams(t *testing.T) {
+ ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxClosedNodesInTree: 2}).(*priorityWriteScheduler)
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(3, OpenStreamOptions{PusherID: 2})
+ ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
+
+ // Close the first three streams. We lose 1, but keep 2 and 3.
+ ws.CloseStream(1)
+ ws.CloseStream(2)
+ ws.CloseStream(3)
+
+ want := "2{weight:15,parent:0} 3{weight:15,parent:2} 4{weight:15,parent:3}"
+ if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
+ t.Errorf("After close\ngot %q\nwant %q", got, want)
+ }
+ if err := checkPriorityWellFormed(ws); err != nil {
+ t.Error(err)
+ }
+
+ // Adding a stream as an exclusive child of 1 gives it default
+ // priorities, since 1 is gone.
+ ws.OpenStream(5, OpenStreamOptions{})
+ ws.AdjustStream(5, PriorityParam{StreamDep: 1, Weight: 15, Exclusive: true})
+
+ // Adding a stream as an exclusive child of 2 should work, since 2 is not gone.
+ ws.OpenStream(6, OpenStreamOptions{})
+ ws.AdjustStream(6, PriorityParam{StreamDep: 2, Weight: 15, Exclusive: true})
+
+ want = "2{weight:15,parent:0} 3{weight:15,parent:6} 4{weight:15,parent:3} 5{weight:15,parent:0} 6{weight:15,parent:2}"
+ if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
+ t.Errorf("After add streams\ngot %q\nwant %q", got, want)
+ }
+ if err := checkPriorityWellFormed(ws); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPriorityClosedStreamsDisabled(t *testing.T) {
+ ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler)
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(3, OpenStreamOptions{PusherID: 2})
+
+ // Close the first two streams. We keep only 3.
+ ws.CloseStream(1)
+ ws.CloseStream(2)
+
+ want := "3{weight:15,parent:0}"
+ if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
+ t.Errorf("After close\ngot %q\nwant %q", got, want)
+ }
+ if err := checkPriorityWellFormed(ws); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPriorityIdleStreams(t *testing.T) {
+ ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxIdleNodesInTree: 2}).(*priorityWriteScheduler)
+ ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle
+ ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle
+ ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle
+ ws.OpenStream(4, OpenStreamOptions{})
+ ws.OpenStream(5, OpenStreamOptions{})
+ ws.OpenStream(6, OpenStreamOptions{})
+ ws.AdjustStream(4, PriorityParam{StreamDep: 1, Weight: 15})
+ ws.AdjustStream(5, PriorityParam{StreamDep: 2, Weight: 15})
+ ws.AdjustStream(6, PriorityParam{StreamDep: 3, Weight: 15})
+
+ want := "2{weight:15,parent:0} 3{weight:20,parent:2} 4{weight:15,parent:0} 5{weight:15,parent:2} 6{weight:15,parent:3}"
+ if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
+ t.Errorf("After open\ngot %q\nwant %q", got, want)
+ }
+ if err := checkPriorityWellFormed(ws); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPriorityIdleStreamsDisabled(t *testing.T) {
+ ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler)
+ ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle
+ ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle
+ ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle
+ ws.OpenStream(4, OpenStreamOptions{})
+
+ want := "4{weight:15,parent:0}"
+ if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
+ t.Errorf("After open\ngot %q\nwant %q", got, want)
+ }
+ if err := checkPriorityWellFormed(ws); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPrioritySection531NonExclusive(t *testing.T) {
+ // Example from RFC 7540 Section 5.3.1.
+ // A,B,C,D = 1,2,3,4
+ ws := defaultPriorityWriteScheduler()
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(4, OpenStreamOptions{})
+ ws.AdjustStream(4, PriorityParam{
+ StreamDep: 1,
+ Weight: 15,
+ Exclusive: false,
+ })
+ want := "1{parent:0} 2{parent:1} 3{parent:1} 4{parent:1}"
+ if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
+ t.Errorf("After adjust\ngot %q\nwant %q", got, want)
+ }
+ if err := checkPriorityWellFormed(ws); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPrioritySection531Exclusive(t *testing.T) {
+ // Example from RFC 7540 Section 5.3.1.
+ // A,B,C,D = 1,2,3,4
+ ws := defaultPriorityWriteScheduler()
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(4, OpenStreamOptions{})
+ ws.AdjustStream(4, PriorityParam{
+ StreamDep: 1,
+ Weight: 15,
+ Exclusive: true,
+ })
+ want := "1{parent:0} 2{parent:4} 3{parent:4} 4{parent:1}"
+ if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
+ t.Errorf("After adjust\ngot %q\nwant %q", got, want)
+ }
+ if err := checkPriorityWellFormed(ws); err != nil {
+ t.Error(err)
+ }
+}
+
+func makeSection533Tree() *priorityWriteScheduler {
+ // Initial tree from RFC 7540 Section 5.3.3.
+ // A,B,C,D,E,F = 1,2,3,4,5,6
+ ws := defaultPriorityWriteScheduler()
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
+ ws.OpenStream(5, OpenStreamOptions{PusherID: 3})
+ ws.OpenStream(6, OpenStreamOptions{PusherID: 4})
+ return ws
+}
+
+func TestPrioritySection533NonExclusive(t *testing.T) {
+ // Example from RFC 7540 Section 5.3.3.
+ // A,B,C,D,E,F = 1,2,3,4,5,6
+ ws := defaultPriorityWriteScheduler()
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
+ ws.OpenStream(5, OpenStreamOptions{PusherID: 3})
+ ws.OpenStream(6, OpenStreamOptions{PusherID: 4})
+ ws.AdjustStream(1, PriorityParam{
+ StreamDep: 4,
+ Weight: 15,
+ Exclusive: false,
+ })
+ want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:4}"
+ if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
+ t.Errorf("After adjust\ngot %q\nwant %q", got, want)
+ }
+ if err := checkPriorityWellFormed(ws); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPrioritySection533Exclusive(t *testing.T) {
+ // Example from RFC 7540 Section 5.3.3.
+ // A,B,C,D,E,F = 1,2,3,4,5,6
+ ws := defaultPriorityWriteScheduler()
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
+ ws.OpenStream(5, OpenStreamOptions{PusherID: 3})
+ ws.OpenStream(6, OpenStreamOptions{PusherID: 4})
+ ws.AdjustStream(1, PriorityParam{
+ StreamDep: 4,
+ Weight: 15,
+ Exclusive: true,
+ })
+ want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:1}"
+ if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
+ t.Errorf("After adjust\ngot %q\nwant %q", got, want)
+ }
+ if err := checkPriorityWellFormed(ws); err != nil {
+ t.Error(err)
+ }
+}
+
+func checkPopAll(ws WriteScheduler, order []uint32) error {
+ for k, id := range order {
+ wr, ok := ws.Pop()
+ if !ok {
+ return fmt.Errorf("Pop[%d]: got ok=false, want %d (order=%v)", k, id, order)
+ }
+ if got := wr.StreamID(); got != id {
+ return fmt.Errorf("Pop[%d]: got %v, want %d (order=%v)", k, got, id, order)
+ }
+ }
+ wr, ok := ws.Pop()
+ if ok {
+ return fmt.Errorf("Pop[%d]: got %v, want ok=false (order=%v)", len(order), wr.StreamID(), order)
+ }
+ return nil
+}
+
// TestPriorityPopFrom533Tree queues writes on streams A(1), C(3), E(5) of the
// RFC 7540 Section 5.3.3 tree plus one control frame, and verifies Pop order:
// control frames first, then streams by dependency depth (A before its
// descendants C and E).
func TestPriorityPopFrom533Tree(t *testing.T) {
	ws := makeSection533Tree()

	// Push in deliberately scrambled order; the scheduler must reorder.
	ws.Push(makeWriteHeadersRequest(3 /*C*/))
	ws.Push(makeWriteNonStreamRequest())
	ws.Push(makeWriteHeadersRequest(5 /*E*/))
	ws.Push(makeWriteHeadersRequest(1 /*A*/))
	t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot))

	if err := checkPopAll(ws, []uint32{0 /*NonStream*/, 1, 3, 5}); err != nil {
		t.Error(err)
	}
}
+
+func TestPriorityPopFromLinearTree(t *testing.T) {
+ ws := defaultPriorityWriteScheduler()
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
+ ws.OpenStream(3, OpenStreamOptions{PusherID: 2})
+ ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
+
+ ws.Push(makeWriteHeadersRequest(3))
+ ws.Push(makeWriteHeadersRequest(4))
+ ws.Push(makeWriteHeadersRequest(1))
+ ws.Push(makeWriteHeadersRequest(2))
+ ws.Push(makeWriteNonStreamRequest())
+ ws.Push(makeWriteNonStreamRequest())
+ t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot))
+
+ if err := checkPopAll(ws, []uint32{0, 0 /*NonStreams*/, 1, 2, 3, 4}); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPriorityFlowControl(t *testing.T) {
+ ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: false})
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
+
+ sc := &serverConn{maxFrameSize: 16}
+ st1 := &stream{id: 1, sc: sc}
+ st2 := &stream{id: 2, sc: sc}
+
+ ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil})
+ ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 16), false}, st2, nil})
+ ws.AdjustStream(2, PriorityParam{StreamDep: 1})
+
+ // No flow-control bytes available.
+ if wr, ok := ws.Pop(); ok {
+ t.Fatalf("Pop(limited by flow control)=%v,true, want false", wr)
+ }
+
+ // Add enough flow-control bytes to write st2 in two Pop calls.
+ // Should write data from st2 even though it's lower priority than st1.
+ for i := 1; i <= 2; i++ {
+ st2.flow.add(8)
+ wr, ok := ws.Pop()
+ if !ok {
+ t.Fatalf("Pop(%d)=false, want true", i)
+ }
+ if got, want := wr.DataSize(), 8; got != want {
+ t.Fatalf("Pop(%d)=%d bytes, want %d bytes", i, got, want)
+ }
+ }
+}
+
+func TestPriorityThrottleOutOfOrderWrites(t *testing.T) {
+ ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: true})
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
+
+ sc := &serverConn{maxFrameSize: 4096}
+ st1 := &stream{id: 1, sc: sc}
+ st2 := &stream{id: 2, sc: sc}
+ st1.flow.add(4096)
+ st2.flow.add(4096)
+ ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 4096), false}, st2, nil})
+ ws.AdjustStream(2, PriorityParam{StreamDep: 1})
+
+ // We have enough flow-control bytes to write st2 in a single Pop call.
+ // However, due to out-of-order write throttling, the first call should
+ // only write 1KB.
+ wr, ok := ws.Pop()
+ if !ok {
+ t.Fatalf("Pop(st2.first)=false, want true")
+ }
+ if got, want := wr.StreamID(), uint32(2); got != want {
+ t.Fatalf("Pop(st2.first)=stream %d, want stream %d", got, want)
+ }
+ if got, want := wr.DataSize(), 1024; got != want {
+ t.Fatalf("Pop(st2.first)=%d bytes, want %d bytes", got, want)
+ }
+
+ // Now add data on st1. This should take precedence.
+ ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 4096), false}, st1, nil})
+ wr, ok = ws.Pop()
+ if !ok {
+ t.Fatalf("Pop(st1)=false, want true")
+ }
+ if got, want := wr.StreamID(), uint32(1); got != want {
+ t.Fatalf("Pop(st1)=stream %d, want stream %d", got, want)
+ }
+ if got, want := wr.DataSize(), 4096; got != want {
+ t.Fatalf("Pop(st1)=%d bytes, want %d bytes", got, want)
+ }
+
+ // Should go back to writing 1KB from st2.
+ wr, ok = ws.Pop()
+ if !ok {
+ t.Fatalf("Pop(st2.last)=false, want true")
+ }
+ if got, want := wr.StreamID(), uint32(2); got != want {
+ t.Fatalf("Pop(st2.last)=stream %d, want stream %d", got, want)
+ }
+ if got, want := wr.DataSize(), 1024; got != want {
+ t.Fatalf("Pop(st2.last)=%d bytes, want %d bytes", got, want)
+ }
+}
+
// TestPriorityWeights verifies that two sibling streams share bandwidth in
// proportion to their HTTP/2 weights (weight+1 per RFC 7540 Section 5.3.2).
func TestPriorityWeights(t *testing.T) {
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{})

	sc := &serverConn{maxFrameSize: 8}
	st1 := &stream{id: 1, sc: sc}
	st2 := &stream{id: 2, sc: sc}
	st1.flow.add(40)
	st2.flow.add(40)

	ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 40), false}, st1, nil})
	ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 40), false}, st2, nil})
	ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 34})
	ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 9})

	// st1 gets 3.5x the bandwidth of st2 (3.5 = (34+1)/(9+1)).
	// The maximum frame size is 8 bytes. The write sequence should be:
	// st1, total bytes so far is (st1=8, st2=0)
	// st2, total bytes so far is (st1=8, st2=8)
	// st1, total bytes so far is (st1=16, st2=8)
	// st1, total bytes so far is (st1=24, st2=8) // 3x bandwidth
	// st1, total bytes so far is (st1=32, st2=8) // 4x bandwidth
	// st2, total bytes so far is (st1=32, st2=16) // 2x bandwidth
	// st1, total bytes so far is (st1=40, st2=16)
	// st2, total bytes so far is (st1=40, st2=24)
	// st2, total bytes so far is (st1=40, st2=32)
	// st2, total bytes so far is (st1=40, st2=40)
	if err := checkPopAll(ws, []uint32{1, 2, 1, 1, 1, 2, 1, 2, 2, 2}); err != nil {
		t.Error(err)
	}
}
+
+func TestPriorityRstStreamOnNonOpenStreams(t *testing.T) {
+ ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{
+ MaxClosedNodesInTree: 0,
+ MaxIdleNodesInTree: 0,
+ })
+ ws.OpenStream(1, OpenStreamOptions{})
+ ws.CloseStream(1)
+ ws.Push(FrameWriteRequest{write: streamError(1, ErrCodeProtocol)})
+ ws.Push(FrameWriteRequest{write: streamError(2, ErrCodeProtocol)})
+
+ if err := checkPopAll(ws, []uint32{1, 2}); err != nil {
+ t.Error(err)
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go
new file mode 100644
index 0000000..36d7919
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_random.go
@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "math"
+
+// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
+// priorities. Control frames like SETTINGS and PING are written before DATA
+// frames, but if no control frames are queued and multiple streams have queued
+// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
+func NewRandomWriteScheduler() WriteScheduler {
+ return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
+}
+
// randomWriteScheduler implements WriteScheduler without regard to stream
// priority: control frames are preferred, and otherwise any ready stream may
// be chosen.
type randomWriteScheduler struct {
	// zero are frames not associated with a specific stream.
	zero writeQueue

	// sq contains the stream-specific queues, keyed by stream ID.
	// When a stream is idle or closed, it's deleted from the map.
	sq map[uint32]*writeQueue

	// pool of empty queues for reuse, refilled by CloseStream and
	// drawn from lazily by Push.
	queuePool writeQueuePool
}
+
// OpenStream implements WriteScheduler.
func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
	// no-op: idle streams are not tracked; Push creates the queue lazily.
}
+
+func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
+ q, ok := ws.sq[streamID]
+ if !ok {
+ return
+ }
+ delete(ws.sq, streamID)
+ ws.queuePool.put(q)
+}
+
// AdjustStream implements WriteScheduler.
func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
	// no-op: this scheduler deliberately ignores PRIORITY information.
}
+
+func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
+ id := wr.StreamID()
+ if id == 0 {
+ ws.zero.push(wr)
+ return
+ }
+ q, ok := ws.sq[id]
+ if !ok {
+ q = ws.queuePool.get()
+ ws.sq[id] = q
+ }
+ q.push(wr)
+}
+
// Pop implements WriteScheduler. Control frames are always served first;
// otherwise it returns a frame from any stream whose queue can be consumed.
// Go's map iteration order is unspecified, which is the source of the
// "random" stream selection this scheduler is named for.
func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
	// Control frames first.
	if !ws.zero.empty() {
		return ws.zero.shift(), true
	}
	// Iterate over all non-idle streams until finding one that can be consumed.
	for _, q := range ws.sq {
		if wr, ok := q.consume(math.MaxInt32); ok {
			return wr, true
		}
	}
	return FrameWriteRequest{}, false
}
diff --git a/vendor/golang.org/x/net/http2/writesched_random_test.go b/vendor/golang.org/x/net/http2/writesched_random_test.go
new file mode 100644
index 0000000..3bf4aa3
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_random_test.go
@@ -0,0 +1,44 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "testing"
+
+func TestRandomScheduler(t *testing.T) {
+ ws := NewRandomWriteScheduler()
+ ws.Push(makeWriteHeadersRequest(3))
+ ws.Push(makeWriteHeadersRequest(4))
+ ws.Push(makeWriteHeadersRequest(1))
+ ws.Push(makeWriteHeadersRequest(2))
+ ws.Push(makeWriteNonStreamRequest())
+ ws.Push(makeWriteNonStreamRequest())
+
+ // Pop all frames. Should get the non-stream requests first,
+ // followed by the stream requests in any order.
+ var order []FrameWriteRequest
+ for {
+ wr, ok := ws.Pop()
+ if !ok {
+ break
+ }
+ order = append(order, wr)
+ }
+ t.Logf("got frames: %v", order)
+ if len(order) != 6 {
+ t.Fatalf("got %d frames, expected 6", len(order))
+ }
+ if order[0].StreamID() != 0 || order[1].StreamID() != 0 {
+ t.Fatal("expected non-stream frames first", order[0], order[1])
+ }
+ got := make(map[uint32]bool)
+ for _, wr := range order[2:] {
+ got[wr.StreamID()] = true
+ }
+ for id := uint32(1); id <= 4; id++ {
+ if !got[id] {
+ t.Errorf("frame not found for stream %d", id)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_test.go b/vendor/golang.org/x/net/http2/writesched_test.go
new file mode 100644
index 0000000..0807056
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_test.go
@@ -0,0 +1,125 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "testing"
+)
+
+func makeWriteNonStreamRequest() FrameWriteRequest {
+ return FrameWriteRequest{writeSettingsAck{}, nil, nil}
+}
+
+func makeWriteHeadersRequest(streamID uint32) FrameWriteRequest {
+ st := &stream{id: streamID}
+ return FrameWriteRequest{&writeResHeaders{streamID: streamID, httpResCode: 200}, st, nil}
+}
+
+func checkConsume(wr FrameWriteRequest, nbytes int32, want []FrameWriteRequest) error {
+ consumed, rest, n := wr.Consume(nbytes)
+ var wantConsumed, wantRest FrameWriteRequest
+ switch len(want) {
+ case 0:
+ case 1:
+ wantConsumed = want[0]
+ case 2:
+ wantConsumed = want[0]
+ wantRest = want[1]
+ }
+ if !reflect.DeepEqual(consumed, wantConsumed) || !reflect.DeepEqual(rest, wantRest) || n != len(want) {
+ return fmt.Errorf("got %v, %v, %v\nwant %v, %v, %v", consumed, rest, n, wantConsumed, wantRest, len(want))
+ }
+ return nil
+}
+
+func TestFrameWriteRequestNonData(t *testing.T) {
+ wr := makeWriteNonStreamRequest()
+ if got, want := wr.DataSize(), 0; got != want {
+ t.Errorf("DataSize: got %v, want %v", got, want)
+ }
+
+ // Non-DATA frames are always consumed whole.
+ if err := checkConsume(wr, 0, []FrameWriteRequest{wr}); err != nil {
+ t.Errorf("Consume:\n%v", err)
+ }
+}
+
// TestFrameWriteRequestData steps a 32-byte DATA frame through Consume,
// verifying that consumption is limited first by the stream's flow-control
// window and then by the connection's maxFrameSize (16), and that the
// endStream flag and done channel travel only with the final fragment.
func TestFrameWriteRequestData(t *testing.T) {
	st := &stream{
		id: 1,
		sc: &serverConn{maxFrameSize: 16},
	}
	const size = 32
	wr := FrameWriteRequest{&writeData{st.id, make([]byte, size), true}, st, make(chan error)}
	if got, want := wr.DataSize(), size; got != want {
		t.Errorf("DataSize: got %v, want %v", got, want)
	}

	// No flow-control bytes available: cannot consume anything.
	if err := checkConsume(wr, math.MaxInt32, []FrameWriteRequest{}); err != nil {
		t.Errorf("Consume(limited by flow control):\n%v", err)
	}

	// Add enough flow-control bytes to consume the entire frame,
	// but we're now restricted by st.sc.maxFrameSize.
	// The consumed half has endStream=false and done=nil; only the final
	// fragment keeps the end flag and the completion channel.
	st.flow.add(size)
	want := []FrameWriteRequest{
		{
			write:  &writeData{st.id, make([]byte, st.sc.maxFrameSize), false},
			stream: st,
			done:   nil,
		},
		{
			write:  &writeData{st.id, make([]byte, size-st.sc.maxFrameSize), true},
			stream: st,
			done:   wr.done,
		},
	}
	if err := checkConsume(wr, math.MaxInt32, want); err != nil {
		t.Errorf("Consume(limited by maxFrameSize):\n%v", err)
	}
	rest := want[1]

	// Consume 8 bytes from the remaining frame.
	want = []FrameWriteRequest{
		{
			write:  &writeData{st.id, make([]byte, 8), false},
			stream: st,
			done:   nil,
		},
		{
			write:  &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true},
			stream: st,
			done:   wr.done,
		},
	}
	if err := checkConsume(rest, 8, want); err != nil {
		t.Errorf("Consume(8):\n%v", err)
	}
	rest = want[1]

	// Consume all remaining bytes.
	want = []FrameWriteRequest{
		{
			write:  &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true},
			stream: st,
			done:   wr.done,
		},
	}
	if err := checkConsume(rest, math.MaxInt32, want); err != nil {
		t.Errorf("Consume(remainder):\n%v", err)
	}
}
+
+func TestFrameWriteRequest_StreamID(t *testing.T) {
+ const streamID = 123
+ wr := FrameWriteRequest{write: streamError(streamID, ErrCodeNo)}
+ if got := wr.StreamID(); got != streamID {
+ t.Errorf("FrameWriteRequest(StreamError) = %v; want %v", got, streamID)
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/z_spec_test.go b/vendor/golang.org/x/net/http2/z_spec_test.go
new file mode 100644
index 0000000..610b2cd
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/z_spec_test.go
@@ -0,0 +1,356 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "encoding/xml"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+)
+
// coverSpec gates TestSpecCoverage; enable with "go test -coverspec".
var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests")

// The global map of sentence coverage for the http2 spec.
var defaultSpecCoverage specCoverage

// loadSpecOnce guards the lazy, one-time parse of the spec XML into
// defaultSpecCoverage.
var loadSpecOnce sync.Once
+
+func loadSpec() {
+ if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil {
+ panic(err)
+ } else {
+ defaultSpecCoverage = readSpecCov(f)
+ f.Close()
+ }
+}
+
// covers marks all sentences for section sec in defaultSpecCoverage.
// Sentences that are never covered are included in the report produced by
// TestSpecCoverage (when run with -coverspec).
func covers(sec, sentences string) {
	loadSpecOnce.Do(loadSpec)
	defaultSpecCoverage.cover(sec, sentences)
}
+
// specPart identifies one sentence of the spec by its dotted section number.
type specPart struct {
	section  string
	sentence string
}

// Less orders specParts numerically by dotted section path, e.g.
// "6.2" < "6.10" and a section sorts before its own subsections.
// It panics if a path component is not an integer.
func (ss specPart) Less(oo specPart) bool {
	a := strings.Split(ss.section, ".")
	b := strings.Split(oo.section, ".")
	for i := 0; i < len(a) && i < len(b); i++ {
		x, err := strconv.Atoi(a[i])
		if err != nil {
			panic(err)
		}
		y, err := strconv.Atoi(b[i])
		if err != nil {
			panic(err)
		}
		if x != y {
			return x < y
		}
	}
	// Shared prefix: the shorter path (the ancestor section) sorts first.
	return len(a) < len(b)
}
+
// bySpecSection implements sort.Interface, ordering specParts by their
// numeric section path (see specPart.Less).
type bySpecSection []specPart

func (a bySpecSection) Len() int           { return len(a) }
func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }
func (a bySpecSection) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
// specCoverage tracks, per spec sentence, whether a test has claimed to
// cover it. It also holds the XML decoder used while reading the spec.
type specCoverage struct {
	coverage map[specPart]bool
	d        *xml.Decoder
}
+
// joinSection renders a numeric section path as a dotted string,
// e.g. [5 3 1] -> "5.3.1". sec must be non-empty.
func joinSection(sec []int) string {
	parts := []string{strconv.Itoa(sec[0])}
	for _, n := range sec[1:] {
		parts = append(parts, strconv.Itoa(n))
	}
	return strings.Join(parts, ".")
}
+
// readSection consumes XML tokens for one <section> element (and,
// recursively, its subsections), accumulating the section's text and
// recording each sentence in sc.coverage. sec is the numeric path of the
// current section (e.g. [5 3 1]); a nil/empty sec is the document root,
// whose own character data is discarded.
func (sc specCoverage) readSection(sec []int) {
	var (
		buf = new(bytes.Buffer)
		sub = 0 // count of child <section> elements seen so far
	)
	for {
		tk, err := sc.d.Token()
		if err != nil {
			if err == io.EOF {
				return
			}
			panic(err)
		}
		switch v := tk.(type) {
		case xml.StartElement:
			if skipElement(v) {
				if err := sc.d.Skip(); err != nil {
					panic(err)
				}
				if v.Name.Local == "section" {
					// Skipped sections still consume a section number.
					sub++
				}
				break // breaks the switch, not the for loop
			}
			switch v.Name.Local {
			case "section":
				sub++
				sc.readSection(append(sec, sub))
			case "xref":
				// Inline cross-references contribute text to the sentence.
				buf.Write(sc.readXRef(v))
			}
		case xml.CharData:
			if len(sec) == 0 {
				break // text outside any numbered section is ignored
			}
			buf.Write(v)
		case xml.EndElement:
			if v.Name.Local == "section" {
				sc.addSentences(joinSection(sec), buf.String())
				return
			}
		}
	}
}
+
// readXRef consumes tokens through the matching </xref> and returns the text
// to substitute for the reference: the element's own character data if
// present, otherwise a bracketed citation synthesized from its attributes.
// It panics on any element shape it does not recognize.
func (sc specCoverage) readXRef(se xml.StartElement) []byte {
	var b []byte
	for {
		tk, err := sc.d.Token()
		if err != nil {
			panic(err)
		}
		switch v := tk.(type) {
		case xml.CharData:
			if b != nil {
				panic("unexpected CharData")
			}
			// Copy: the slice returned by Token is only valid until the
			// next Token call.
			b = []byte(string(v))
		case xml.EndElement:
			if v.Name.Local != "xref" {
				panic("expected </xref>")
			}
			if b != nil {
				return b
			}
			// Empty <xref/>: synthesize the citation text based on which
			// attributes are present (attrSig is a sorted signature).
			sig := attrSig(se)
			switch sig {
			case "target":
				return []byte(fmt.Sprintf("[%s]", attrValue(se, "target")))
			case "fmt-of,rel,target", "fmt-,,rel,target":
				return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel")))
			case "fmt-of,sec,target", "fmt-,,sec,target":
				return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target")))
			case "fmt-of,rel,sec,target":
				return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel")))
			default:
				panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se)))
			}
		default:
			panic(fmt.Sprintf("unexpected tag %q", v))
		}
	}
}
+
// skipAnchor lists section anchors whose subtrees are excluded from
// coverage (non-normative boilerplate).
var skipAnchor = map[string]bool{
	"intro": true,
	"Overview": true,
}

// skipTitle lists section titles excluded from coverage for the same reason.
var skipTitle = map[string]bool{
	"Acknowledgements": true,
	"Change Log": true,
	"Document Organization": true,
	"Conventions and Terminology": true,
}
+
+func skipElement(s xml.StartElement) bool {
+ switch s.Name.Local {
+ case "artwork":
+ return true
+ case "section":
+ for _, attr := range s.Attr {
+ switch attr.Name.Local {
+ case "anchor":
+ if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") {
+ return true
+ }
+ case "title":
+ if skipTitle[attr.Value] {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func readSpecCov(r io.Reader) specCoverage {
+ sc := specCoverage{
+ coverage: map[specPart]bool{},
+ d: xml.NewDecoder(r)}
+ sc.readSection(nil)
+ return sc
+}
+
+func (sc specCoverage) addSentences(sec string, sentence string) {
+ for _, s := range parseSentences(sentence) {
+ sc.coverage[specPart{sec, s}] = false
+ }
+}
+
+func (sc specCoverage) cover(sec string, sentence string) {
+ for _, s := range parseSentences(sentence) {
+ p := specPart{sec, s}
+ if _, ok := sc.coverage[p]; !ok {
+ panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s))
+ }
+ sc.coverage[specPart{sec, s}] = true
+ }
+
+}
+
// whitespaceRx matches any run of whitespace, collapsed to a single space
// during sentence normalization.
var whitespaceRx = regexp.MustCompile(`\s+`)

// parseSentences splits prose into normalized sentences: whitespace runs are
// collapsed, sentences are split on ". ", and each result ends with a period.
// Blank input yields nil.
func parseSentences(sens string) []string {
	sens = strings.TrimSpace(sens)
	if sens == "" {
		return nil
	}
	normalized := whitespaceRx.ReplaceAllString(sens, " ")
	out := strings.Split(normalized, ". ")
	for i, s := range out {
		s = strings.TrimSpace(s)
		if !strings.HasSuffix(s, ".") {
			s += "."
		}
		out[i] = s
	}
	return out
}
+
+func TestSpecParseSentences(t *testing.T) {
+ tests := []struct {
+ ss string
+ want []string
+ }{
+ {"Sentence 1. Sentence 2.",
+ []string{
+ "Sentence 1.",
+ "Sentence 2.",
+ }},
+ {"Sentence 1. \nSentence 2.\tSentence 3.",
+ []string{
+ "Sentence 1.",
+ "Sentence 2.",
+ "Sentence 3.",
+ }},
+ }
+
+ for i, tt := range tests {
+ got := parseSentences(tt.ss)
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("%d: got = %q, want %q", i, got, tt.want)
+ }
+ }
+}
+
+func TestSpecCoverage(t *testing.T) {
+ if !*coverSpec {
+ t.Skip()
+ }
+
+ loadSpecOnce.Do(loadSpec)
+
+ var (
+ list []specPart
+ cv = defaultSpecCoverage.coverage
+ total = len(cv)
+ complete = 0
+ )
+
+ for sp, touched := range defaultSpecCoverage.coverage {
+ if touched {
+ complete++
+ } else {
+ list = append(list, sp)
+ }
+ }
+ sort.Stable(bySpecSection(list))
+
+ if testing.Short() && len(list) > 5 {
+ list = list[:5]
+ }
+
+ for _, p := range list {
+ t.Errorf("\tSECTION %s: %s", p.section, p.sentence)
+ }
+
+ t.Logf("%d/%d (%d%%) sentences covered", complete, total, (complete/total)*100)
+}
+
// attrSig returns a canonical, sorted signature of se's attribute names,
// joined by commas. The "fmt" attribute contributes its value too
// ("fmt-<value>") so callers can dispatch on citation format.
func attrSig(se xml.StartElement) string {
	names := make([]string, 0, len(se.Attr))
	for _, attr := range se.Attr {
		name := attr.Name.Local
		if name == "fmt" {
			name = "fmt-" + attr.Value
		}
		names = append(names, name)
	}
	sort.Strings(names)
	return strings.Join(names, ",")
}
+
// attrValue returns the value of the named attribute on se, panicking if the
// attribute is absent (callers dispatch on attrSig first, so absence is a bug).
func attrValue(se xml.StartElement, attr string) string {
	for _, a := range se.Attr {
		if a.Name.Local != attr {
			continue
		}
		return a.Value
	}
	panic("unknown attribute " + attr)
}
+
// TestSpecPartLess exercises the numeric dotted-section ordering used to
// sort the coverage report (e.g. "6.2" < "6.10", prefix sorts first).
func TestSpecPartLess(t *testing.T) {
	tests := []struct {
		sec1, sec2 string
		want       bool
	}{
		{"6.2.1", "6.2", false},
		{"6.2", "6.2.1", true},
		{"6.10", "6.10.1", true},
		{"6.10", "6.1.1", false}, // 10, not 1
		{"6.1", "6.1", false},    // equal, so not less
	}
	for _, tt := range tests {
		got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"})
		if got != tt.want {
			t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want)
		}
	}
}