From 3b32bd43220a46742575803f32ec1893b0c390ab Mon Sep 17 00:00:00 2001
From: Erik Wilson
Date: Tue, 24 Sep 2019 22:00:42 -0700
Subject: [PATCH] Downgrade grpc 1.20.1 -> 1.13.0

---
 go.mod | 1 +
 go.sum | 16 +-
 .../googleapis/rpc/status/status.pb.go | 63 +-
 vendor/google.golang.org/grpc/.travis.yml | 41 +-
 vendor/google.golang.org/grpc/CONTRIBUTING.md | 1 -
 vendor/google.golang.org/grpc/Makefile | 60 +-
 vendor/google.golang.org/grpc/README.md | 26 +-
 vendor/google.golang.org/grpc/backoff.go | 2 +-
 vendor/google.golang.org/grpc/balancer.go | 27 +-
 .../grpc/balancer/balancer.go | 86 +-
 .../grpc/balancer/base/balancer.go | 74 +-
 .../grpc/balancer/base/base.go | 12 -
 .../grpc/balancer/roundrobin/roundrobin.go | 16 +-
 .../grpc/balancer_conn_wrappers.go | 55 +-
 .../grpc/balancer_v1_wrapper.go | 69 +-
 .../grpc_binarylog_v1/binarylog.pb.go | 900 ----------
 vendor/google.golang.org/grpc/call.go | 37 +-
 vendor/google.golang.org/grpc/clientconn.go | 1482 ++++++++++-------
 .../grpc/connectivity/connectivity.go | 5 +-
 .../grpc/credentials/credentials.go | 132 +-
 .../grpc/credentials/credentials_util_go17.go | 60 +
 .../{tls13.go => credentials_util_go18.go} | 24 +-
 .../credentials/credentials_util_pre_go17.go | 57 +
 .../grpc/credentials/internal/syscallconn.go | 61 -
 vendor/google.golang.org/grpc/dialoptions.go | 532 ------
 .../grpc/encoding/encoding.go | 6 +-
 .../channelz/util_linux.go => envconfig.go} | 30 +-
 vendor/google.golang.org/grpc/go.mod | 19 -
 vendor/google.golang.org/grpc/go.sum | 33 -
 vendor/google.golang.org/grpc/go16.go | 70 +
 vendor/google.golang.org/grpc/go17.go | 71 +
 .../google.golang.org/grpc/grpclog/grpclog.go | 2 +-
 .../google.golang.org/grpc/health/client.go | 107 --
 .../grpc/health/grpc_health_v1/health.pb.go | 144 +-
 .../google.golang.org/grpc/health/health.go | 72 +
 .../google.golang.org/grpc/health/server.go | 165 --
 vendor/google.golang.org/grpc/install_gae.sh | 6 -
 vendor/google.golang.org/grpc/interceptor.go | 2 +-
 .../grpc/internal/balancerload/load.go | 46 -
 .../grpc/internal/binarylog/binarylog.go | 167 --
 .../internal/binarylog/binarylog_testutil.go | 42 -
 .../grpc/internal/binarylog/env_config.go | 210 ---
 .../grpc/internal/binarylog/method_logger.go | 423 -----
 .../grpc/internal/binarylog/regenerate.sh | 33 -
 .../grpc/internal/binarylog/sink.go | 162 --
 .../grpc/internal/channelz/funcs.go | 202 +--
 .../grpc/internal/channelz/types.go | 318 +---
 .../grpc/internal/channelz/types_linux.go | 53 -
 .../grpc/internal/channelz/types_nonlinux.go | 44 -
 .../grpc/internal/envconfig/envconfig.go | 64 -
 .../grpc/internal/grpcsync/event.go | 61 -
 .../grpc/internal/internal.go | 40 +-
 .../grpc/internal/syscall/syscall_linux.go | 114 --
 .../grpc/internal/syscall/syscall_nonlinux.go | 73 -
 .../grpc/internal/transport/defaults.go | 49 -
 .../grpc/keepalive/keepalive.go | 64 +-
 .../grpc/metadata/metadata.go | 3 +-
 .../grpc/naming/dns_resolver.go | 11 +-
 vendor/google.golang.org/grpc/naming/go17.go | 34 +
 .../util_nonlinux.go => naming/go18.go} | 16 +-
 .../google.golang.org/grpc/naming/naming.go | 2 +-
 vendor/google.golang.org/grpc/peer/peer.go | 2 +-
 .../google.golang.org/grpc/picker_wrapper.go | 185 +-
 vendor/google.golang.org/grpc/pickfirst.go | 4 +-
 vendor/google.golang.org/grpc/proxy.go | 52 +-
 .../grpc/resolver/dns/dns_resolver.go | 135 +-
 .../grpc/resolver/dns/go17.go | 35 +
 .../dns/go18.go} | 19 +-
 .../grpc/resolver/passthrough/passthrough.go | 2 +-
 .../grpc/resolver/resolver.go | 23 +-
 .../grpc/resolver_conn_wrapper.go | 111 +-
 vendor/google.golang.org/grpc/rpc_util.go | 192 +--
 vendor/google.golang.org/grpc/server.go | 434 +++--
 .../google.golang.org/grpc/service_config.go | 174 +-
 .../google.golang.org/grpc/stats/handlers.go | 3 +-
 vendor/google.golang.org/grpc/stats/stats.go | 6 +-
 .../binarylog/util.go => status/go16.go} | 35 +-
 vendor/google.golang.org/grpc/status/go17.go | 44 +
 .../google.golang.org/grpc/status/status.go | 25 +-
 .../grpc/stickiness_linkedmap.go | 97 ++
 vendor/google.golang.org/grpc/stream.go | 1223 +++-----------
 vendor/google.golang.org/grpc/tap/tap.go | 2 +-
 vendor/google.golang.org/grpc/trace.go | 13 -
 .../{internal => }/transport/bdp_estimator.go | 7 +-
 .../{internal => }/transport/controlbuf.go | 92 +-
 .../{internal => }/transport/flowcontrol.go | 24 +
 .../google.golang.org/grpc/transport/go16.go | 51 +
 .../google.golang.org/grpc/transport/go17.go | 52 +
 .../transport/handler_server.go | 55 +-
 .../{internal => }/transport/http2_client.go | 353 ++--
 .../{internal => }/transport/http2_server.go | 338 ++--
 .../{internal => }/transport/http_util.go | 206 +--
 .../grpc/{internal => }/transport/log.go | 6 +
 .../{internal => }/transport/transport.go | 204 +--
 vendor/google.golang.org/grpc/version.go | 2 +-
 vendor/google.golang.org/grpc/vet.sh | 131 +-
 vendor/modules.txt | 15 +-
 97 files changed, 3290 insertions(+), 7859 deletions(-)
 delete mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
 rename vendor/google.golang.org/grpc/credentials/{tls13.go => credentials_util_go18.go} (59%)
 create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
 delete mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
 delete mode 100644 vendor/google.golang.org/grpc/dialoptions.go
 rename vendor/google.golang.org/grpc/{internal/channelz/util_linux.go => envconfig.go} (62%)
 delete mode 100644 vendor/google.golang.org/grpc/go.mod
 delete mode 100644 vendor/google.golang.org/grpc/go.sum
 create mode 100644 vendor/google.golang.org/grpc/go16.go
 create mode 100644 vendor/google.golang.org/grpc/go17.go
 delete mode 100644 vendor/google.golang.org/grpc/health/client.go
 create mode 100644 vendor/google.golang.org/grpc/health/health.go
 delete mode 100644 vendor/google.golang.org/grpc/health/server.go
 delete mode 100644 vendor/google.golang.org/grpc/install_gae.sh
 delete mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go
 delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
 delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
 delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go
 delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
 delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
 delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go
 delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go
 delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
 delete mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
 delete mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go
 delete mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
 delete mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
 delete mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go
 create mode 100644 vendor/google.golang.org/grpc/naming/go17.go
 rename vendor/google.golang.org/grpc/{internal/channelz/util_nonlinux.go => naming/go18.go} (73%)
 create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go17.go
 rename vendor/google.golang.org/grpc/{credentials/internal/syscallconn_appengine.go => resolver/dns/go18.go} (73%)
 rename vendor/google.golang.org/grpc/{internal/binarylog/util.go => status/go16.go} (51%)
 create mode 100644 vendor/google.golang.org/grpc/status/go17.go
 create mode 100644 vendor/google.golang.org/grpc/stickiness_linkedmap.go
 rename vendor/google.golang.org/grpc/{internal => }/transport/bdp_estimator.go (94%)
 rename vendor/google.golang.org/grpc/{internal => }/transport/controlbuf.go (81%)
 rename vendor/google.golang.org/grpc/{internal => }/transport/flowcontrol.go (86%)
 create mode 100644 vendor/google.golang.org/grpc/transport/go16.go
 create mode 100644 vendor/google.golang.org/grpc/transport/go17.go
 rename vendor/google.golang.org/grpc/{internal => }/transport/handler_server.go (90%)
 rename vendor/google.golang.org/grpc/{internal => }/transport/http2_client.go (79%)
 rename vendor/google.golang.org/grpc/{internal => }/transport/http2_server.go (80%)
 rename vendor/google.golang.org/grpc/{internal => }/transport/http_util.go (71%)
 rename vendor/google.golang.org/grpc/{internal => }/transport/log.go (90%)
 rename vendor/google.golang.org/grpc/{internal => }/transport/transport.go (80%)

diff --git a/go.mod b/go.mod
index 8e569dd735..97bac2b4bf 100644
--- a/go.mod
+++ b/go.mod
@@ -12,6 +12,7 @@ replace (
 	github.com/opencontainers/runtime-spec => github.com/opencontainers/runtime-spec v0.0.0-20180911193056-5684b8af48c1
 	github.com/rancher/dynamiclistener => github.com/erikwilson/rancher-dynamiclistener v0.0.0-20190717164634-c08b499d1719
 	github.com/rancher/kine => github.com/ibuildthecloud/kine v0.1.0
+	google.golang.org/grpc => google.golang.org/grpc v1.13.0
 	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.15.4-k3s.1
 	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.15.4-k3s.1
 	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.15.4-k3s.1
diff --git a/go.sum b/go.sum
index 8240a4f0e3..d8bde1ef75 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,4 @@
 bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM=
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 github.com/Azure/azure-sdk-for-go v21.4.0+incompatible h1:rsdM2HqR64WhJv7YqMAjWOLAebbx4c9/gzcLC7yoDDE=
@@ -55,7 +54,6 @@ github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6Z
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
 github.com/client9/misspell v0.0.0-20170928000206-9ce5d979ffda/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf h1:eOyFuj3h/Vj5e4voOM16NNrHsUR3jhD0duh76LHMj6Y=
 github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf/go.mod
h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= @@ -202,9 +200,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v0.0.0-20160127222235-bd3c8e81be01 h1:rtnkE5nwSSDcZrbt4gcVsFPeSXXe7Nq2vCn9DBb0Y8I= github.com/golang/mock v0.0.0-20160127222235-bd3c8e81be01/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -558,7 +555,6 @@ golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHaw golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -570,7 +566,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68= golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -599,7 +594,6 @@ golang.org/x/tools v0.0.0-20170824195420-5d2fd3ccab98/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools 
v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -610,16 +604,13 @@ gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGB gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= google.golang.org/api v0.0.0-20181220000619-583d854617af h1:iQMS7JKv/0w/iiWf1M49Cg3dmOkBoBZT5KheqPDpaac= google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6 h1:72GtwBPfq6av9X0Ru2HtAopsPW+d+vh1K1zaxanTdE8= google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc= google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -646,7 +637,6 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a h1:QoHVuRquf80YZ+/bovwxoMO3Q/A3nt3yTgS0/0nejuk= k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 7bfe37a3db..40e79375bf 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,12 +1,21 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/rpc/status.proto -package status // import "google.golang.org/genproto/googleapis/rpc/status" +/* +Package status is a generated protocol buffer package. 
+ +It is generated from these files: + google/rpc/status.proto + +It has these top-level messages: + Status +*/ +package status import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import any "github.com/golang/protobuf/ptypes/any" +import google_protobuf "github.com/golang/protobuf/ptypes/any" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -36,7 +45,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // error message is needed, put the localized message in the error details or // localize it in the client. The optional error details may contain arbitrary // information about the error. There is a predefined set of error detail types -// in the package `google.rpc` that can be used for common error conditions. +// in the package `google.rpc` which can be used for common error conditions. // // # Language mapping // @@ -59,7 +68,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // errors. // // - Workflow errors. A typical workflow has multiple steps. Each step may -// have a `Status` message for error reporting. +// have a `Status` message for error reporting purpose. // // - Batch operations. If a client uses batch request and batch response, the // `Status` message should be used directly inside batch response, one for @@ -73,42 +82,20 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // be used directly after any stripping needed for security/privacy reasons. type Status struct { // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. - Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // A list of messages that carry the error details. There is a common set of - // message types for APIs to use. - Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + // A list of messages that carry the error details. There will be a + // common set of message types for APIs to use. 
+ Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` } -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_status_c6e4de62dcdf2edf, []int{0} -} -func (m *Status) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Status.Unmarshal(m, b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) -} -func (dst *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(dst, src) -} -func (m *Status) XXX_Size() int { - return xxx_messageInfo_Status.Size(m) -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} - -var xxx_messageInfo_Status proto.InternalMessageInfo +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *Status) GetCode() int32 { if m != nil { @@ -124,7 +111,7 @@ func (m *Status) GetMessage() string { return "" } -func (m *Status) GetDetails() []*any.Any { +func (m *Status) GetDetails() []*google_protobuf.Any { if m != nil { return m.Details } @@ -135,9 +122,9 @@ func init() { proto.RegisterType((*Status)(nil), "google.rpc.Status") } -func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_status_c6e4de62dcdf2edf) } +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) } -var fileDescriptor_status_c6e4de62dcdf2edf = []byte{ +var fileDescriptor0 = []byte{ // 209 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index 591343edfb..3c2621ab75 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -1,39 +1,24 @@ language: go +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + matrix: include: - - go: 1.12.x - env: GO111MODULE=on - - go: 1.11.x - env: VET=1 GO111MODULE=on - - go: 1.11.x - env: RACE=1 GO111MODULE=on - - go: 1.11.x - env: RUN386=1 - - go: 1.11.x - env: GRPC_GO_RETRY=on - go: 1.10.x - - go: 1.9.x - - go: 1.9.x - env: GAE=1 + env: RUN386=1 go_import_path: google.golang.org/grpc before_install: - - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi - - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then VET_SKIP_PROTO=1; fi - -install: - - try3() { eval "$*" || eval "$*" || eval "$*"; } - - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - - if [[ "${GAE}" = 1 ]]; then source ./install_gae.sh; make testappenginedeps; fi - - if [[ "${VET}" = 1 ]]; then ./vet.sh -install; fi + - if [[ -n "$RUN386" ]]; then export GOARCH=386; fi + - if [[ "$TRAVIS_GO_VERSION" = 1.10* && "$GOARCH" != "386" ]]; then ./vet.sh -install || exit 1; fi script: - - set -e - - if [[ "${VET}" = 1 ]]; then ./vet.sh; fi - - if [[ "${GAE}" = 1 ]]; then make testappengine; exit 0; fi - - if [[ "${RACE}" = 
1 ]]; then make testrace; exit 0; fi - - make test + - if [[ "$TRAVIS_GO_VERSION" = 1.10* && "$GOARCH" != "386" ]]; then ./vet.sh || exit 1; fi + - make test || exit 1 + - if [[ "$GOARCH" != "386" ]]; then make testrace; fi diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index ca34e8aa0d..0863eb26b6 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -31,7 +31,6 @@ How to get your contributions merged smoothly and quickly. - `make vet` to catch vet errors - `make test` to run the tests - `make testrace` to run tests in race mode - - optional `make testappengine` to run tests with appengine - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index db982aabde..6f393a808d 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -1,14 +1,20 @@ all: vet test testrace -build: deps - go build google.golang.org/grpc/... - -clean: - go clean -i google.golang.org/grpc/... - deps: go get -d -v google.golang.org/grpc/... +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... + +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... + +build: deps + go build google.golang.org/grpc/... + proto: @ if ! which protoc > /dev/null; then \ echo "error: protoc not installed" >&2; \ @@ -16,45 +22,27 @@ proto: fi go generate google.golang.org/grpc/... +vet: + ./vet.sh + test: testdeps - go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... - -testappengine: testappenginedeps - goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... - -testappenginedeps: - goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... + go test -cpu 1,4 -timeout 5m google.golang.org/grpc/... testrace: testdeps go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... - -vet: vetdeps - ./vet.sh - -vetdeps: - ./vet.sh -install +clean: + go clean -i google.golang.org/grpc/... .PHONY: \ all \ - build \ - clean \ deps \ - proto \ - test \ - testappengine \ - testappenginedeps \ - testdeps \ - testrace \ updatedeps \ + testdeps \ updatetestdeps \ + build \ + proto \ vet \ - vetdeps + test \ + testrace \ + clean diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index f5eec6717f..789adfd653 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -16,11 +16,11 @@ $ go get -u google.golang.org/grpc Prerequisites ------------- -gRPC-Go requires Go 1.9 or later. +This requires Go 1.6 or later. Go 1.7 will be required soon. Constraints ----------- -The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. +The grpc package should only depend on standard Go packages and a small number of exceptions. 
If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. Documentation ------------- @@ -43,25 +43,3 @@ Please update proto package, gRPC package and rebuild the proto files: - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` - `go get -u google.golang.org/grpc` - `protoc --go_out=plugins=grpc:. *.proto` - -#### How to turn on logging - -The default logger is controlled by the environment variables. Turn everything -on by setting: - -``` -GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info -``` - -#### The RPC failed with error `"code = Unavailable desc = transport is closing"` - -This error means the connection the RPC is using was closed, and there are many -possible reasons, including: - 1. mis-configured transport credentials, connection failed on handshaking - 1. bytes disrupted, possibly by a proxy in between - 1. server shutdown - -It can be tricky to debug this because the error happens on the client side but -the root cause of the connection being closed is on the server side. Turn on -logging on __both client and server__, and see if there are any transport -errors. diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 97c6e2568f..fa31565fd2 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -17,7 +17,7 @@ */ // See internal/backoff package for the backoff implementation. This file is -// kept for the exported types and API backward compatibility. +// kept for the exported types and API backward compatility. package grpc diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go index a78e702bae..e1730166cd 100644 --- a/vendor/google.golang.org/grpc/balancer.go +++ b/vendor/google.golang.org/grpc/balancer.go @@ -19,10 +19,11 @@ package grpc import ( - "context" + "fmt" "net" "sync" + "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" @@ -117,6 +118,26 @@ type Balancer interface { Close() error } +// downErr implements net.Error. It is constructed by gRPC internals and passed to the down +// call of Balancer. +type downErr struct { + timeout bool + temporary bool + desc string +} + +func (e downErr) Error() string { return e.desc } +func (e downErr) Timeout() bool { return e.timeout } +func (e downErr) Temporary() bool { return e.temporary } + +func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr { + return downErr{ + timeout: timeout, + temporary: temporary, + desc: fmt.Sprintf(format, a...), + } +} + // RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch // the name resolution updates and updates the addresses available correspondingly. 
// @@ -389,3 +410,7 @@ func (rr *roundRobin) Close() error { type pickFirst struct { *roundRobin } + +func pickFirstBalancerV1(r naming.Resolver) Balancer { + return &pickFirst{&roundRobin{r: r}} +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index fafede238c..f9d83c2f39 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -21,15 +21,13 @@ package balancer import ( - "context" "errors" "net" "strings" + "golang.org/x/net/context" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -48,20 +46,8 @@ func Register(b Builder) { m[strings.ToLower(b.Name())] = b } -// unregisterForTesting deletes the balancer with the given name from the -// balancer map. -// -// This function is not thread-safe. -func unregisterForTesting(name string) { - delete(m, name) -} - -func init() { - internal.BalancerUnregister = unregisterForTesting -} - // Get returns the resolver builder registered with the given name. -// Note that the compare is done in a case-insensitive fashion. +// Note that the compare is done in a case-insenstive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { if b, ok := m[strings.ToLower(name)]; ok { @@ -102,15 +88,7 @@ type SubConn interface { } // NewSubConnOptions contains options to create new SubConn. -type NewSubConnOptions struct { - // CredsBundle is the credentials bundle that will be used in the created - // SubConn. If it's nil, the original creds from grpc DialOptions will be - // used. - CredsBundle credentials.Bundle - // HealthCheckEnabled indicates whether health check service should be - // enabled on this SubConn - HealthCheckEnabled bool -} +type NewSubConnOptions struct{} // ClientConn represents a gRPC ClientConn. // @@ -127,7 +105,7 @@ type ClientConn interface { // The SubConn will be shutdown. RemoveSubConn(SubConn) - // UpdateBalancerState is called by balancer to notify gRPC that some internal + // UpdateBalancerState is called by balancer to nofity gRPC that some internal // state in balancer has changed. // // gRPC will update the connectivity state of the ClientConn, and will call pick @@ -147,8 +125,6 @@ type BuildOptions struct { // use to dial to a remote load balancer server. The Balancer implementations // can ignore this if it does not need to talk to another party securely. DialCreds credentials.TransportCredentials - // CredsBundle is the credentials bundle that the Balancer can use. - CredsBundle credentials.Bundle // Dialer is the custom dialer the Balancer implementation can use to dial // to a remote load balancer server. The Balancer implementations // can ignore this if it doesn't need to talk to remote balancer. @@ -167,27 +143,16 @@ type Builder interface { } // PickOptions contains addition information for the Pick operation. -type PickOptions struct { - // FullMethodName is the method name that NewClientStream() is called - // with. The canonical format is /service/Method. - FullMethodName string -} +type PickOptions struct{} // DoneInfo contains additional information for done. type DoneInfo struct { // Err is the rpc error the RPC finished with. It could be nil. Err error - // Trailer contains the metadata from the RPC's trailer, if present. 
- Trailer metadata.MD // BytesSent indicates if any bytes have been sent to the server. BytesSent bool // BytesReceived indicates if any byte has been received from the server. BytesReceived bool - // ServerLoad is the load received from server. It's usually sent as part of - // trailing metadata. - // - // The only supported type now is *orca_v1.LoadReport. - ServerLoad interface{} } var ( @@ -217,10 +182,8 @@ type Picker interface { // // If a SubConn is returned: // - If it is READY, gRPC will send the RPC on it; - // - If it is not ready, or becomes not ready after it's returned, gRPC will - // block until UpdateBalancerState() is called and will call pick on the - // new picker. The done function returned from Pick(), if not nil, will be - // called with nil error, no bytes sent and no bytes received. + // - If it is not ready, or becomes not ready after it's returned, gRPC will block + // until UpdateBalancerState() is called and will call pick on the new picker. // // If the returned error is not nil: // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() @@ -231,10 +194,9 @@ type Picker interface { // - Else (error is other non-nil error): // - The RPC will fail with unavailable error. // - // The returned done() function will be called once the rpc has finished, - // with the final status of that RPC. If the SubConn returned is not a - // valid SubConn type, done may not be called. done may be nil if balancer - // doesn't care about the RPC status. + // The returned done() function will be called once the rpc has finished, with the + // final status of that RPC. + // done may be nil if balancer doesn't care about the RPC status. Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) } @@ -253,46 +215,18 @@ type Balancer interface { // that back to gRPC. // Balancer should also generate and update Pickers when its internal state has // been changed by the new state. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateSubConnState will be called instead. HandleSubConnStateChange(sc SubConn, state connectivity.State) // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to // balancers. // Balancer can create new SubConn or remove SubConn with the addresses. // An empty address slice and a non-nil error will be passed if the resolver returns // non-nil error to gRPC. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateResolverState will be called instead. HandleResolvedAddrs([]resolver.Address, error) // Close closes the balancer. The balancer is not required to call // ClientConn.RemoveSubConn for its existing SubConns. Close() } -// SubConnState describes the state of a SubConn. -type SubConnState struct { - ConnectivityState connectivity.State - // TODO: add last connection error -} - -// V2Balancer is defined for documentation purposes. If a Balancer also -// implements V2Balancer, its UpdateResolverState method will be called instead -// of HandleResolvedAddrs and its UpdateSubConnState will be called instead of -// HandleSubConnStateChange. -type V2Balancer interface { - // UpdateResolverState is called by gRPC when the state of the resolver - // changes. - UpdateResolverState(resolver.State) - // UpdateSubConnState is called by gRPC when the state of a SubConn - // changes. - UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. 
The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. - Close() -} - // ConnectivityStateEvaluator takes the connectivity states of multiple SubConns // and returns one aggregated connectivity state. // diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index c5a51bd1d9..23d13511bb 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -19,8 +19,7 @@ package base import ( - "context" - + "golang.org/x/net/context" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" @@ -30,7 +29,6 @@ import ( type baseBuilder struct { name string pickerBuilder PickerBuilder - config Config } func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { @@ -40,12 +38,11 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) subConns: make(map[resolver.Address]balancer.SubConn), scStates: make(map[balancer.SubConn]connectivity.State), - csEvltr: &balancer.ConnectivityStateEvaluator{}, + csEvltr: &connectivityStateEvaluator{}, // Initialize picker to a picker that always return // ErrNoSubConnAvailable, because when state of a SubConn changes, we // may call UpdateBalancerState with this picker. picker: NewErrPicker(balancer.ErrNoSubConnAvailable), - config: bb.config, } } @@ -57,30 +54,27 @@ type baseBalancer struct { cc balancer.ClientConn pickerBuilder PickerBuilder - csEvltr *balancer.ConnectivityStateEvaluator + csEvltr *connectivityStateEvaluator state connectivity.State subConns map[resolver.Address]balancer.SubConn scStates map[balancer.SubConn]connectivity.State picker balancer.Picker - config Config } func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - panic("not implemented") -} - -func (b *baseBalancer) UpdateResolverState(s resolver.State) { - // TODO: handle s.Err (log if not nil) once implemented. - // TODO: handle s.ServiceConfig? - grpclog.Infoln("base.baseBalancer: got new resolver state: ", s) + if err != nil { + grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err) + return + } + grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs) // addrsSet is the set converted from addrs, it's used for quick lookup of an address. addrsSet := make(map[resolver.Address]struct{}) - for _, a := range s.Addresses { + for _, a := range addrs { addrsSet[a] = struct{}{} if _, ok := b.subConns[a]; !ok { // a is a new address (not existing in b.subConns). 
- sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) if err != nil { grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -122,11 +116,6 @@ func (b *baseBalancer) regeneratePicker() { } func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - panic("not implemented") -} - -func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - s := state.ConnectivityState grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) oldS, ok := b.scStates[sc] if !ok { @@ -144,7 +133,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } oldAggrState := b.state - b.state = b.csEvltr.RecordTransition(oldS, s) + b.state = b.csEvltr.recordTransition(oldS, s) // Regenerate picker when one of the following happens: // - this sc became ready from not-ready @@ -176,3 +165,44 @@ type errPicker struct { func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { return nil, nil, p.err } + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// +// recordTransition should only be called synchronously from the same goroutine. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go index 34b1f2994a..012ace2f2f 100644 --- a/vendor/google.golang.org/grpc/balancer/base/base.go +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -45,20 +45,8 @@ type PickerBuilder interface { // NewBalancerBuilder returns a balancer builder. The balancers // built by this builder will use the picker builder to build pickers. func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { - return NewBalancerBuilderWithConfig(name, pb, Config{}) -} - -// Config contains the config info about the base balancer builder. 
-type Config struct { - // HealthCheck indicates whether health checking should be enabled for this specific balancer. - HealthCheck bool -} - -// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. -func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { return &baseBuilder{ name: name, pickerBuilder: pb, - config: config, } } diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 29f7a4ddd6..2eda0a1c21 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,13 +22,12 @@ package roundrobin import ( - "context" "sync" + "golang.org/x/net/context" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" ) @@ -37,7 +36,7 @@ const Name = "round_robin" // newBuilder creates a new roundrobin balancer builder. func newBuilder() balancer.Builder { - return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}) } func init() { @@ -48,19 +47,12 @@ type rrPickerBuilder struct{} func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker { grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs) - if len(readySCs) == 0 { - return base.NewErrPicker(balancer.ErrNoSubConnAvailable) - } var scs []balancer.SubConn for _, sc := range readySCs { scs = append(scs, sc) } return &rrPicker{ subConns: scs, - // Start at a random index, as the same RR balancer rebuilds a new - // picker when SubConn states change, and we don't want to apply excess - // load to the first server in the list. - next: grpcrand.Intn(len(scs)), } } @@ -75,6 +67,10 @@ type rrPicker struct { } func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if len(p.subConns) <= 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + p.mu.Lock() sc := p.subConns[p.next] p.next = (p.next + 1) % len(p.subConns) diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index bc965f0aca..c23f81706f 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -82,13 +82,20 @@ func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { return b.c } +// resolverUpdate contains the new resolved addresses or error if there's +// any. +type resolverUpdate struct { + addrs []resolver.Address + err error +} + // ccBalancerWrapper is a wrapper on top of cc for balancers. // It implements balancer.ClientConn interface. 
type ccBalancerWrapper struct { cc *ClientConn balancer balancer.Balancer stateChangeQueue *scStateUpdateBuffer - resolverUpdateCh chan *resolver.State + resolverUpdateCh chan *resolverUpdate done chan struct{} mu sync.Mutex @@ -99,7 +106,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui ccb := &ccBalancerWrapper{ cc: cc, stateChangeQueue: newSCStateUpdateBuffer(), - resolverUpdateCh: make(chan *resolver.State, 1), + resolverUpdateCh: make(chan *resolverUpdate, 1), done: make(chan struct{}), subConns: make(map[*acBalancerWrapper]struct{}), } @@ -121,23 +128,15 @@ func (ccb *ccBalancerWrapper) watcher() { return default: } - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state}) - } else { - ccb.balancer.HandleSubConnStateChange(t.sc, t.state) - } - case s := <-ccb.resolverUpdateCh: + ccb.balancer.HandleSubConnStateChange(t.sc, t.state) + case t := <-ccb.resolverUpdateCh: select { case <-ccb.done: ccb.balancer.Close() return default: } - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateResolverState(*s) - } else { - ccb.balancer.HandleResolvedAddrs(s.Addresses, nil) - } + ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) case <-ccb.done: } @@ -178,23 +177,15 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co }) } -func (ccb *ccBalancerWrapper) updateResolverState(s resolver.State) { - if ccb.cc.curBalancerName != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } +func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { select { case <-ccb.resolverUpdateCh: default: } - ccb.resolverUpdateCh <- &s + ccb.resolverUpdateCh <- &resolverUpdate{ + addrs: addrs, + err: err, + } } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { @@ -206,7 +197,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer if ccb.subConns == nil { return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") } - ac, err := ccb.cc.newAddrConn(addrs, opts) + ac, err := ccb.cc.newAddrConn(addrs) if err != nil { return nil, err } @@ -238,13 +229,8 @@ func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balanc if ccb.subConns == nil { return } - // Update picker before updating state. Even though the ordering here does - // not matter, it can lead to multiple calls of Pick in the common start-up - // case where we wait for ready and then perform an RPC. If the picker is - // updated later, we could call the "connecting" picker when the state is - // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(p) ccb.cc.csMgr.updateState(s) + ccb.cc.blockingpicker.updatePicker(p) } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) { @@ -271,7 +257,6 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { } if !acbw.ac.tryUpdateAddrs(addrs) { cc := acbw.ac.cc - opts := acbw.ac.scopts acbw.ac.mu.Lock() // Set old ac.acbw to nil so the Shutdown state update will be ignored // by balancer. 
@@ -287,7 +272,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { return } - ac, err := cc.newAddrConn(addrs, opts) + ac, err := cc.newAddrConn(addrs) if err != nil { grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go index 29bda6353d..e0ce32cfb6 100644 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -19,14 +19,16 @@ package grpc import ( - "context" "strings" "sync" + "golang.org/x/net/context" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) type balancerWrapperBuilder struct { @@ -281,8 +283,9 @@ func (bw *balancerWrapper) Close() { } // The picker is the balancerWrapper itself. +// Pick should never return ErrNoSubConnAvailable. // It either blocks or returns error, consistent with v1 balancer Get(). -func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) { +func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { failfast := true // Default failfast is true. if ss, ok := rpcInfoFromContext(ctx); ok { failfast = ss.failfast @@ -291,51 +294,35 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) if err != nil { return nil, nil, err } + var done func(balancer.DoneInfo) if p != nil { - done = func(balancer.DoneInfo) { p() } - defer func() { - if err != nil { - p() - } - }() + done = func(i balancer.DoneInfo) { p() } } - + var sc balancer.SubConn bw.mu.Lock() defer bw.mu.Unlock() if bw.pickfirst { // Get the first sc in conns. - for _, sc := range bw.conns { - return sc, done, nil + for _, sc = range bw.conns { + break + } + } else { + var ok bool + sc, ok = bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + if !ok && failfast { + return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available") + } + if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) { + // If the returned sc is not ready and RPC is failfast, + // return error, and this RPC will fail. + return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available") } - return nil, nil, balancer.ErrNoSubConnAvailable - } - sc, ok1 := bw.conns[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, - ServerName: "", - Metadata: a.Metadata, - }] - s, ok2 := bw.connSt[sc] - if !ok1 || !ok2 { - // This can only happen due to a race where Get() returned an address - // that was subsequently removed by Notify. In this case we should - // retry always. - return nil, nil, balancer.ErrNoSubConnAvailable - } - switch s.s { - case connectivity.Ready, connectivity.Idle: - return sc, done, nil - case connectivity.Shutdown, connectivity.TransientFailure: - // If the returned sc has been shut down or is in transient failure, - // return error, and this RPC will fail or wait for another picker (if - // non-failfast). - return nil, nil, balancer.ErrTransientFailure - default: - // For other states (connecting or unknown), the v1 balancer would - // traditionally wait until ready and then issue the RPC. 
Returning - // ErrNoSubConnAvailable will be a slight improvement in that it will - // allow the balancer to choose another address in case others are - // connected. - return nil, nil, balancer.ErrNoSubConnAvailable } + + return sc, done, nil } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go deleted file mode 100644 index f393bb6618..0000000000 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ /dev/null @@ -1,900 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto - -package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import duration "github.com/golang/protobuf/ptypes/duration" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Enumerates the type of event -// Note the terminology is different from the RPC semantics -// definition, but the same meaning is expressed here. -type GrpcLogEntry_EventType int32 - -const ( - GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 - // Header sent from client to server - GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 - // Header sent from server to client - GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 - // Message sent from client to server - GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 - // Message sent from server to client - GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 - // A signal that client is done sending - GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 - // Trailer indicates the end of the RPC. - // On client side, this event means a trailer was either received - // from the network or the gRPC library locally generated a status - // to inform the application about a failure. - // On server side, this event means the server application requested - // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after - // this due to races on server side. - GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 - // A signal that the RPC is cancelled. On client side, this - // indicates the client application requests a cancellation. - // On server side, this indicates that cancellation was detected. - // Note: This marks the end of the RPC. Events may arrive after - // this due to races. For example, on client side a trailer - // may arrive even though the application requested to cancel the RPC. 
- GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 -) - -var GrpcLogEntry_EventType_name = map[int32]string{ - 0: "EVENT_TYPE_UNKNOWN", - 1: "EVENT_TYPE_CLIENT_HEADER", - 2: "EVENT_TYPE_SERVER_HEADER", - 3: "EVENT_TYPE_CLIENT_MESSAGE", - 4: "EVENT_TYPE_SERVER_MESSAGE", - 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", - 6: "EVENT_TYPE_SERVER_TRAILER", - 7: "EVENT_TYPE_CANCEL", -} -var GrpcLogEntry_EventType_value = map[string]int32{ - "EVENT_TYPE_UNKNOWN": 0, - "EVENT_TYPE_CLIENT_HEADER": 1, - "EVENT_TYPE_SERVER_HEADER": 2, - "EVENT_TYPE_CLIENT_MESSAGE": 3, - "EVENT_TYPE_SERVER_MESSAGE": 4, - "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, - "EVENT_TYPE_SERVER_TRAILER": 6, - "EVENT_TYPE_CANCEL": 7, -} - -func (x GrpcLogEntry_EventType) String() string { - return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) -} -func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} -} - -// Enumerates the entity that generates the log entry -type GrpcLogEntry_Logger int32 - -const ( - GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 - GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 - GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 -) - -var GrpcLogEntry_Logger_name = map[int32]string{ - 0: "LOGGER_UNKNOWN", - 1: "LOGGER_CLIENT", - 2: "LOGGER_SERVER", -} -var GrpcLogEntry_Logger_value = map[string]int32{ - "LOGGER_UNKNOWN": 0, - "LOGGER_CLIENT": 1, - "LOGGER_SERVER": 2, -} - -func (x GrpcLogEntry_Logger) String() string { - return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) -} -func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} -} - -type Address_Type int32 - -const ( - Address_TYPE_UNKNOWN Address_Type = 0 - // address is in 1.2.3.4 form - Address_TYPE_IPV4 Address_Type = 1 - // address is in IPv6 canonical form (RFC5952 section 4) - // The scope is NOT included in the address string. - Address_TYPE_IPV6 Address_Type = 2 - // address is UDS string - Address_TYPE_UNIX Address_Type = 3 -) - -var Address_Type_name = map[int32]string{ - 0: "TYPE_UNKNOWN", - 1: "TYPE_IPV4", - 2: "TYPE_IPV6", - 3: "TYPE_UNIX", -} -var Address_Type_value = map[string]int32{ - "TYPE_UNKNOWN": 0, - "TYPE_IPV4": 1, - "TYPE_IPV6": 2, - "TYPE_UNIX": 3, -} - -func (x Address_Type) String() string { - return proto.EnumName(Address_Type_name, int32(x)) -} -func (Address_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} -} - -// Log entry we store in binary logs -type GrpcLogEntry struct { - // The timestamp of the binary log message - Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Uniquely identifies a call. The value must not be 0 in order to disambiguate - // from an unset value. - // Each call may have several log entries, they will all have the same call_id. - // Nothing is guaranteed about their value other than they are unique across - // different RPCs in the same gRPC process. - CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` - // The entry sequence id for this call. The first GrpcLogEntry has a - // value of 1, to disambiguate from an unset value. The purpose of - // this field is to detect missing entries in environments where - // durability or ordering is not guaranteed. 
- SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` - Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` - Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` - // The logger uses one of the following fields to record the payload, - // according to the type of the log entry. - // - // Types that are valid to be assigned to Payload: - // *GrpcLogEntry_ClientHeader - // *GrpcLogEntry_ServerHeader - // *GrpcLogEntry_Message - // *GrpcLogEntry_Trailer - Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` - // true if payload does not represent the full message or metadata. - PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` - // Peer address information, will only be recorded on the first - // incoming event. On client side, peer is logged on - // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in - // the case of trailers-only. On server side, peer is always - // logged on EVENT_TYPE_CLIENT_HEADER. - Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } -func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } -func (*GrpcLogEntry) ProtoMessage() {} -func (*GrpcLogEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} -} -func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) -} -func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) -} -func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_GrpcLogEntry.Merge(dst, src) -} -func (m *GrpcLogEntry) XXX_Size() int { - return xxx_messageInfo_GrpcLogEntry.Size(m) -} -func (m *GrpcLogEntry) XXX_DiscardUnknown() { - xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo - -func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -func (m *GrpcLogEntry) GetCallId() uint64 { - if m != nil { - return m.CallId - } - return 0 -} - -func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { - if m != nil { - return m.SequenceIdWithinCall - } - return 0 -} - -func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType { - if m != nil { - return m.Type - } - return GrpcLogEntry_EVENT_TYPE_UNKNOWN -} - -func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { - if m != nil { - return m.Logger - } - return GrpcLogEntry_LOGGER_UNKNOWN -} - -type isGrpcLogEntry_Payload interface { - isGrpcLogEntry_Payload() -} - -type GrpcLogEntry_ClientHeader struct { - ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` -} - -type GrpcLogEntry_ServerHeader struct { - ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` -} - -type GrpcLogEntry_Message struct { - Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` -} - -type GrpcLogEntry_Trailer 
struct { - Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` -} - -func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} - -func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *GrpcLogEntry) GetClientHeader() *ClientHeader { - if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok { - return x.ClientHeader - } - return nil -} - -func (m *GrpcLogEntry) GetServerHeader() *ServerHeader { - if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok { - return x.ServerHeader - } - return nil -} - -func (m *GrpcLogEntry) GetMessage() *Message { - if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok { - return x.Message - } - return nil -} - -func (m *GrpcLogEntry) GetTrailer() *Trailer { - if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok { - return x.Trailer - } - return nil -} - -func (m *GrpcLogEntry) GetPayloadTruncated() bool { - if m != nil { - return m.PayloadTruncated - } - return false -} - -func (m *GrpcLogEntry) GetPeer() *Address { - if m != nil { - return m.Peer - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ - (*GrpcLogEntry_ClientHeader)(nil), - (*GrpcLogEntry_ServerHeader)(nil), - (*GrpcLogEntry_Message)(nil), - (*GrpcLogEntry_Trailer)(nil), - } -} - -func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - b.EncodeVarint(6<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ClientHeader); err != nil { - return err - } - case *GrpcLogEntry_ServerHeader: - b.EncodeVarint(7<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ServerHeader); err != nil { - return err - } - case *GrpcLogEntry_Message: - b.EncodeVarint(8<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Message); err != nil { - return err - } - case *GrpcLogEntry_Trailer: - b.EncodeVarint(9<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Trailer); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) - } - return nil -} - -func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*GrpcLogEntry) - switch tag { - case 6: // payload.client_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ClientHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ClientHeader{msg} - return true, err - case 7: // payload.server_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ServerHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ServerHeader{msg} - return true, err - case 8: // payload.message - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Message) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Message{msg} - return true, err - case 9: // 
payload.trailer - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Trailer) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Trailer{msg} - return true, err - default: - return false, nil - } -} - -func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - s := proto.Size(x.ClientHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_ServerHeader: - s := proto.Size(x.ServerHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Message: - s := proto.Size(x.Message) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Trailer: - s := proto.Size(x.Trailer) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type ClientHeader struct { - // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - // The name of the RPC method, which looks something like: - // // - // Note the leading "/" character. - MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` - // A single process may be used to run multiple virtual - // servers with different identities. - // The authority is the name of such a server identitiy. - // It is typically a portion of the URI in the form of - // or : . - Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` - // the RPC timeout - Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClientHeader) Reset() { *m = ClientHeader{} } -func (m *ClientHeader) String() string { return proto.CompactTextString(m) } -func (*ClientHeader) ProtoMessage() {} -func (*ClientHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{1} -} -func (m *ClientHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClientHeader.Unmarshal(m, b) -} -func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) -} -func (dst *ClientHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientHeader.Merge(dst, src) -} -func (m *ClientHeader) XXX_Size() int { - return xxx_messageInfo_ClientHeader.Size(m) -} -func (m *ClientHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ClientHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientHeader proto.InternalMessageInfo - -func (m *ClientHeader) GetMetadata() *Metadata { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *ClientHeader) GetMethodName() string { - if m != nil { - return m.MethodName - } - return "" -} - -func (m *ClientHeader) GetAuthority() string { - if m != nil { - return m.Authority - } - return "" -} - -func (m *ClientHeader) GetTimeout() *duration.Duration { - if m != nil { - return m.Timeout - } - return nil -} - -type ServerHeader struct { - // This contains only the metadata from the application. 
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServerHeader) Reset() { *m = ServerHeader{} } -func (m *ServerHeader) String() string { return proto.CompactTextString(m) } -func (*ServerHeader) ProtoMessage() {} -func (*ServerHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} -} -func (m *ServerHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServerHeader.Unmarshal(m, b) -} -func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) -} -func (dst *ServerHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerHeader.Merge(dst, src) -} -func (m *ServerHeader) XXX_Size() int { - return xxx_messageInfo_ServerHeader.Size(m) -} -func (m *ServerHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ServerHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_ServerHeader proto.InternalMessageInfo - -func (m *ServerHeader) GetMetadata() *Metadata { - if m != nil { - return m.Metadata - } - return nil -} - -type Trailer struct { - // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - // The gRPC status code. - StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` - // An original status message before any transport specific - // encoding. - StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` - // The value of the 'grpc-status-details-bin' metadata key. If - // present, this is always an encoded 'google.rpc.Status' message. - StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Trailer) Reset() { *m = Trailer{} } -func (m *Trailer) String() string { return proto.CompactTextString(m) } -func (*Trailer) ProtoMessage() {} -func (*Trailer) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} -} -func (m *Trailer) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Trailer.Unmarshal(m, b) -} -func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) -} -func (dst *Trailer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Trailer.Merge(dst, src) -} -func (m *Trailer) XXX_Size() int { - return xxx_messageInfo_Trailer.Size(m) -} -func (m *Trailer) XXX_DiscardUnknown() { - xxx_messageInfo_Trailer.DiscardUnknown(m) -} - -var xxx_messageInfo_Trailer proto.InternalMessageInfo - -func (m *Trailer) GetMetadata() *Metadata { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *Trailer) GetStatusCode() uint32 { - if m != nil { - return m.StatusCode - } - return 0 -} - -func (m *Trailer) GetStatusMessage() string { - if m != nil { - return m.StatusMessage - } - return "" -} - -func (m *Trailer) GetStatusDetails() []byte { - if m != nil { - return m.StatusDetails - } - return nil -} - -// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE -type Message struct { - // Length of the message. 
It may not be the same as the length of the - // data field, as the logging payload can be truncated or omitted. - Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` - // May be truncated or omitted. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} -} -func (m *Message) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Message.Unmarshal(m, b) -} -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) -} -func (dst *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(dst, src) -} -func (m *Message) XXX_Size() int { - return xxx_messageInfo_Message.Size(m) -} -func (m *Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) -} - -var xxx_messageInfo_Message proto.InternalMessageInfo - -func (m *Message) GetLength() uint32 { - if m != nil { - return m.Length - } - return 0 -} - -func (m *Message) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -// A list of metadata pairs, used in the payload of client header, -// server header, and server trailer. -// Implementations may omit some entries to honor the header limits -// of GRPC_BINARY_LOG_CONFIG. -// -// Header keys added by gRPC are omitted. To be more specific, -// implementations will not log the following entries, and this is -// not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials -// -// Implementations must always log grpc-trace-bin if it is present. -// Practically speaking it will only be visible on server side because -// grpc-trace-bin is managed by low level client side mechanisms -// inaccessible from the application level. On server side, the -// header is just a normal metadata key. -// The pair will not count towards the size limit. 
-type Metadata struct { - Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} -} -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metadata.Unmarshal(m, b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) -} -func (dst *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(dst, src) -} -func (m *Metadata) XXX_Size() int { - return xxx_messageInfo_Metadata.Size(m) -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) -} - -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func (m *Metadata) GetEntry() []*MetadataEntry { - if m != nil { - return m.Entry - } - return nil -} - -// A metadata key value pair -type MetadataEntry struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } -func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } -func (*MetadataEntry) ProtoMessage() {} -func (*MetadataEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} -} -func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) -} -func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) -} -func (dst *MetadataEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetadataEntry.Merge(dst, src) -} -func (m *MetadataEntry) XXX_Size() int { - return xxx_messageInfo_MetadataEntry.Size(m) -} -func (m *MetadataEntry) XXX_DiscardUnknown() { - xxx_messageInfo_MetadataEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo - -func (m *MetadataEntry) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *MetadataEntry) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -// Address information -type Address struct { - Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - // only for TYPE_IPV4 and TYPE_IPV6 - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Address) Reset() { *m = Address{} } -func (m *Address) String() string { return proto.CompactTextString(m) } -func (*Address) ProtoMessage() {} -func (*Address) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} -} -func (m *Address) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Address.Unmarshal(m, b) -} -func (m *Address) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Address.Marshal(b, m, deterministic) -} -func (dst *Address) XXX_Merge(src proto.Message) { - xxx_messageInfo_Address.Merge(dst, src) -} -func (m *Address) XXX_Size() int { - return xxx_messageInfo_Address.Size(m) -} -func (m *Address) XXX_DiscardUnknown() { - xxx_messageInfo_Address.DiscardUnknown(m) -} - -var xxx_messageInfo_Address proto.InternalMessageInfo - -func (m *Address) GetType() Address_Type { - if m != nil { - return m.Type - } - return Address_TYPE_UNKNOWN -} - -func (m *Address) GetAddress() string { - if m != nil { - return m.Address - } - return "" -} - -func (m *Address) GetIpPort() uint32 { - if m != nil { - return m.IpPort - } - return 0 -} - -func init() { - proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") - proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") - proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") - proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer") - proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message") - proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") - proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") - proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) - proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) -} - -func init() { - proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) -} - -var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ - // 900 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, - 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, - 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, - 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, - 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, - 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, - 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, - 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, - 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, - 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, - 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, - 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, - 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, - 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, - 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, - 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, - 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 
0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, - 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, - 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, - 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, - 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, - 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, - 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, - 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, - 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, - 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, - 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, - 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, - 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, - 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, - 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, - 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, - 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, - 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, - 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, - 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, - 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, - 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, - 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, - 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, - 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, - 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, - 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, - 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, - 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, - 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, - 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, - 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, - 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, - 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, - 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, - 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, - 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 
0x28, 0x75, 0x7f, - 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, - 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, - 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, - 0xd4, 0x07, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 9e20e4d385..f73b7d5528 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -19,7 +19,7 @@ package grpc import ( - "context" + "golang.org/x/net/context" ) // Invoke sends the RPC request on the wire and returns after response is @@ -40,7 +40,7 @@ func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply int func combine(o1 []CallOption, o2 []CallOption) []CallOption { // we don't use append because o1 could have extra capacity whose // elements would be overwritten, which could cause inadvertent - // sharing (and race conditions) between concurrent calls + // sharing (and race connditions) between concurrent calls if len(o1) == 0 { return o2 } else if len(o2) == 0 { @@ -63,12 +63,31 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { - cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) - if err != nil { - return err + // TODO: implement retries in clientStream and make this simply + // newClientStream, SendMsg, RecvMsg. + firstAttempt := true + for { + csInt, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + cs := csInt.(*clientStream) + if err := cs.SendMsg(req); err != nil { + if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt { + // TODO: Add a field to header for grpc-transparent-retry-attempts + firstAttempt = false + continue + } + return err + } + if err := cs.RecvMsg(reply); err != nil { + if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt { + // TODO: Add a field to header for grpc-transparent-retry-attempts + firstAttempt = false + continue + } + return err + } + return nil } - if err := cs.SendMsg(req); err != nil { - return err - } - return cs.RecvMsg(reply) } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index bd2d2b3177..84ba9e5ad7 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -19,7 +19,6 @@ package grpc import ( - "context" "errors" "fmt" "math" @@ -27,25 +26,26 @@ import ( "reflect" "strings" "sync" - "sync/atomic" "time" + "golang.org/x/net/context" + "golang.org/x/net/trace" "google.golang.org/grpc/balancer" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. 
_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" ) const ( @@ -66,11 +66,15 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") + // errConnUnavailable indicates that the connection is unavailable. + errConnUnavailable = errors.New("grpc: the connection is unavailable") // errBalancerClosed indicates that the balancer is closed. errBalancerClosed = errors.New("grpc: balancer is closed") - // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default - // service config. - invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // We use an accessor so that minConnectTimeout can be + // atomically read and updated while testing. + getMinConnectTimeout = func() time.Duration { + return minConnectTimeout + } ) // The following errors are returned from Dial and DialContext @@ -79,26 +83,358 @@ var ( // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") - // errTransportCredsAndBundle indicates that creds bundle is used together - // with other individual Transport Credentials. - errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., OAuth2 token) which requires secure connection on an insecure + // information (e.g., oauth2 token) which requires secure connection on an insecure // connection. errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") // errCredentialsConflict indicates that grpc.WithTransportCredentials() // and grpc.WithInsecure() are both called for a connection. errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") + // errNetworkIO indicates that the connection is down due to some network I/O error. + errNetworkIO = errors.New("grpc: failed with network I/O error") ) +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + cp Compressor + dc Decompressor + bs backoff.Strategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + copts transport.ConnectOptions + callOptions []CallOption + // This is used by v1 balancer dial option WithBalancer to support v1 + // balancer, and also by WithBalancerName dial option. + balancerBuilder balancer.Builder + // This is to support grpclb. + resolverBuilder resolver.Builder + waitForHandshake bool + channelzParentID int64 + disableServiceConfig bool +} + const ( defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 defaultClientMaxSendMessageSize = math.MaxInt32 - // http2IOBufSize specifies the buffer size for sending frames. - defaultWriteBufSize = 32 * 1024 - defaultReadBufSize = 32 * 1024 ) +// RegisterChannelz turns on channelz service. 
+// This is an EXPERIMENTAL API. +func RegisterChannelz() { + channelz.TurnOn() +} + +// DialOption configures how we set up the connection. +type DialOption func(*dialOptions) + +// WithWaitForHandshake blocks until the initial settings frame is received from the +// server before assigning RPCs to the connection. +// Experimental API. +func WithWaitForHandshake() DialOption { + return func(o *dialOptions) { + o.waitForHandshake = true + } +} + +// WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WithWriteBufferSize(s int) DialOption { + return func(o *dialOptions) { + o.copts.WriteBufferSize = s + } +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for each read syscall. +func WithReadBufferSize(s int) DialOption { + return func(o *dialOptions) { + o.copts.ReadBufferSize = s + } +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialWindowSize = s + } +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + } +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + } +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by +// the UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. +func WithCompressor(cp Compressor) DialOption { + return func(o *dialOptions) { + o.cp = cp + } +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. +func WithDecompressor(dc Decompressor) DialOption { + return func(o *dialOptions) { + o.dc = dc + } +} + +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. 
+// Name resolver will be ignored if this DialOption is specified. +// +// Deprecated: use the new balancer APIs in balancer package and WithBalancerName. +func WithBalancer(b Balancer) DialOption { + return func(o *dialOptions) { + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + } +} + +// WithBalancerName sets the balancer that the ClientConn will be initialized +// with. Balancer registered with balancerName will be used. This function +// panics if no balancer was registered by balancerName. +// +// The balancer cannot be overridden by balancer option specified by service +// config. +// +// This is an EXPERIMENTAL API. +func WithBalancerName(balancerName string) DialOption { + builder := balancer.Get(balancerName) + if builder == nil { + panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) + } + return func(o *dialOptions) { + o.balancerBuilder = builder + } +} + +// withResolverBuilder is only for grpclb. +func withResolverBuilder(b resolver.Builder) DialOption { + return func(o *dialOptions) { + o.resolverBuilder = b + } +} + +// WithServiceConfig returns a DialOption which has a channel to read the service configuration. +// +// Deprecated: service config should be received through name resolver, as specified here. +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return func(o *dialOptions) { + o.scChan = c + } +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up +// for use. +func WithBackoffConfig(b BackoffConfig) DialOption { + + return withBackoff(backoff.Exponential{ + MaxDelay: b.MaxDelay, + }) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a +// failed connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs backoff.Strategy) DialOption { + return func(o *dialOptions) { + o.bs = bs + } +} + +// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying +// connection is up. Without this, Dial returns immediately and connecting the server +// happens in background. +func WithBlock() DialOption { + return func(o *dialOptions) { + o.block = true + } +} + +// WithInsecure returns a DialOption which disables transport security for this ClientConn. +// Note that transport security is required unless WithInsecure is set. +func WithInsecure() DialOption { + return func(o *dialOptions) { + o.insecure = true + } +} + +// WithTransportCredentials returns a DialOption which configures a +// connection level security credentials (e.g., TLS/SSL). +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return func(o *dialOptions) { + o.copts.TransportCredentials = creds + } +} + +// WithPerRPCCredentials returns a DialOption which sets +// credentials and places auth state on each outbound RPC. 
+func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + } +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn +// initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext and context.WithTimeout instead. +func WithTimeout(d time.Duration) DialOption { + return func(o *dialOptions) { + o.timeout = d + } +} + +func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return func(o *dialOptions) { + o.copts.Dialer = f + } +} + +func init() { + internal.WithContextDialer = withContextDialer + internal.WithResolverBuilder = withResolverBuilder +} + +// WithDialer returns a DialOption that specifies a function to use for dialing network addresses. +// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's +// Temporary() method to decide if it should try to reconnect to the network address. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return withContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, deadline.Sub(time.Now())) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler +// for all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return func(o *dialOptions) { + o.copts.StatsHandler = h + } +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors. +// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network +// address and won't try to reconnect. +// The default value of FailOnNonTempDialError is false. +// This is an EXPERIMENTAL API. +func FailOnNonTempDialError(f bool) DialOption { + return func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + } +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs. +func WithUserAgent(s string) DialOption { + return func(o *dialOptions) { + o.copts.UserAgent = s + } +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + return func(o *dialOptions) { + o.copts.KeepaliveParams = kp + } +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return func(o *dialOptions) { + o.unaryInt = f + } +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return func(o *dialOptions) { + o.streamInt = f + } +} + +// WithAuthority returns a DialOption that specifies the value to be used as +// the :authority pseudo-header. This value only works with WithInsecure and +// has no effect if TransportCredentials are present. +func WithAuthority(a string) DialOption { + return func(o *dialOptions) { + o.copts.Authority = a + } +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of current ClientConn's +// parent. This function is used in nested channel creation (e.g. grpclb dial). 
+func WithChannelzParentID(id int64) DialOption { + return func(o *dialOptions) { + o.channelzParentID = id + } +} + +// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +func WithDisableServiceConfig() DialOption { + return func(o *dialOptions) { + o.disableServiceConfig = true + } +} + // Dial creates a client connection to the given target. func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) @@ -122,57 +458,32 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + + blockingpicker: newPickerWrapper(), } - cc.retryThrottler.Store((*retryThrottler)(nil)) cc.ctx, cc.cancel = context.WithCancel(context.Background()) for _, opt := range opts { - opt.apply(&cc.dopts) + opt(&cc.dopts) } - defer func() { - if err != nil { - cc.Close() - } - }() - if channelz.IsOn() { if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtINFO, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtINFO, - }, - }) + cc.channelzID = channelz.RegisterChannel(cc, cc.dopts.channelzParentID, target) } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtINFO, - }) + cc.channelzID = channelz.RegisterChannel(cc, 0, target) } - cc.csMgr.channelzID = cc.channelzID } if !cc.dopts.insecure { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + if cc.dopts.copts.TransportCredentials == nil { return nil, errNoTransportSecurity } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } } else { - if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { + if cc.dopts.copts.TransportCredentials != nil { return nil, errCredentialsConflict } for _, cd := range cc.dopts.copts.PerRPCCredentials { @@ -182,20 +493,13 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } } - if cc.dopts.defaultServiceConfigRawJSON != nil { - sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) - if err != nil { - return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err) - } - cc.dopts.defaultServiceConfig = sc - } cc.mkp = cc.dopts.copts.KeepaliveParams if cc.dopts.copts.Dialer == nil { cc.dopts.copts.Dialer = newProxyDialer( func(ctx context.Context, addr string) (net.Conn, error) { network, addr := parseDialTarget(addr) - return (&net.Dialer{}).DialContext(ctx, network, addr) + return 
dialContext(ctx, network, addr) }, ) } @@ -211,12 +515,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) defer cancel() } + defer func() { select { case <-ctx.Done(): conn, err = nil, ctx.Err() default: } + + if err != nil { + cc.Close() + } }() scSet := false @@ -225,7 +534,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * select { case sc, ok := <-cc.dopts.scChan: if ok { - cc.sc = &sc + cc.sc = sc scSet = true } default: @@ -242,9 +551,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) if cc.dopts.resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is + // If resolver builder is still nil, the parse target's scheme is // not registered. Fallback to default resolver and set Endpoint to - // the original target. + // the original unparsed target. grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) cc.parsedTarget = resolver.Target{ Scheme: resolver.GetDefaultScheme(), @@ -258,8 +567,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * creds := cc.dopts.copts.TransportCredentials if creds != nil && creds.Info().ServerName != "" { cc.authority = creds.Info().ServerName - } else if cc.dopts.insecure && cc.dopts.authority != "" { - cc.authority = cc.dopts.authority + } else if cc.dopts.insecure && cc.dopts.copts.Authority != "" { + cc.authority = cc.dopts.copts.Authority } else { // Use endpoint from "scheme://authority/endpoint" as the default // authority for ClientConn. @@ -271,7 +580,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * select { case sc, ok := <-cc.dopts.scChan: if ok { - cc.sc = &sc + cc.sc = sc } case <-ctx.Done(): return nil, ctx.Err() @@ -287,35 +596,30 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.balancerBuildOpts = balancer.BuildOptions{ DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, ChannelzParentID: cc.channelzID, } // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc) + cc.resolverWrapper, err = newCCResolverWrapper(cc) if err != nil { return nil, fmt.Errorf("failed to build resolver: %v", err) } + // Start the resolver wrapper goroutine after resolverWrapper is created. + // + // If the goroutine is started before resolverWrapper is ready, the + // following may happen: The goroutine sends updates to cc. cc forwards + // those to balancer. Balancer creates new addrConn. addrConn fails to + // connect, and calls resolveNow(). resolveNow() tries to use the non-ready + // resolverWrapper. + cc.resolverWrapper.start() - cc.mu.Lock() - cc.resolverWrapper = rWrapper - cc.mu.Unlock() // A blocking dial blocks until the clientConn is ready. if cc.dopts.block { for { s := cc.GetState() if s == connectivity.Ready { break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.blockingpicker.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } } if !cc.WaitForStateChange(ctx, s) { // ctx got timeout or canceled. 
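A caller-side note on the DialContext hunk above: the restored 1.13.0 code still honors WithBlock by looping on GetState/WaitForStateChange until the connection reaches Ready or the context expires, but it no longer has the 1.20.1 fast-fail branch for FailOnNonTempDialError in TransientFailure. The following is a minimal sketch of that blocking path, not part of the patch itself; the target address is a placeholder.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// With WithBlock, DialContext returns only once the ClientConn is
	// connectivity.Ready or the context expires (the loop in the hunk above).
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	cc, err := grpc.DialContext(ctx, "example.invalid:50051", // placeholder target
		grpc.WithInsecure(),
		grpc.WithBlock(),
	)
	if err != nil {
		// With an unreachable placeholder target this is typically ctx.Err().
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
}

Without WithBlock the same call returns almost immediately and connection establishment continues in the background.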
@@ -333,7 +637,6 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 } // updateState updates the connectivity.State of ClientConn. @@ -349,12 +652,6 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state - if channelz.IsOn() { - channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel Connectivity change to %v", state), - Severity: channelz.CtINFO, - }) - } if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) @@ -389,22 +686,26 @@ type ClientConn struct { csMgr *connectivityStateManager balancerBuildOpts balancer.BuildOptions + resolverWrapper *ccResolverWrapper blockingpicker *pickerWrapper - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} + mu sync.RWMutex + sc ServiceConfig + scRaw string + conns map[*addrConn]struct{} // Keepalive parameter can be updated if a GoAway is received. mkp keepalive.ClientParameters curBalancerName string + preBalancerName string // previous balancer name. + curAddresses []resolver.Address balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - firstResolveEvent *grpcsync.Event - - channelzID int64 // channelz unique identification number - czData *channelzData + channelzID int64 // channelz unique identification number + czmu sync.RWMutex + callsStarted int64 + callsSucceeded int64 + callsFailed int64 + lastCallStartedTime time.Time } // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or @@ -438,8 +739,9 @@ func (cc *ClientConn) scWatcher() { } cc.mu.Lock() // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. - cc.sc = &sc + // We may revist this decision in the future. + cc.sc = sc + cc.scRaw = "" cc.mu.Unlock() case <-cc.ctx.Done(): return @@ -447,91 +749,49 @@ func (cc *ClientConn) scWatcher() { } } -// waitForResolvedAddrs blocks until the resolver has provided addresses or the -// context expires. Returns nil unless the context expires first; otherwise -// returns a status error based on the context. -func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { - // This is on the RPC path, so we use a fast path to avoid the - // more-expensive "select" below after the resolver has returned once. - if cc.firstResolveEvent.HasFired() { - return nil - } - select { - case <-cc.firstResolveEvent.Done(): - return nil - case <-ctx.Done(): - return status.FromContextError(ctx.Err()).Err() - case <-cc.ctx.Done(): - return ErrClientConnClosing - } -} - -// gRPC should resort to default service config when: -// * resolver service config is disabled -// * or, resolver does not return a service config or returns an invalid one. -func (cc *ClientConn) fallbackToDefaultServiceConfig(sc string) bool { - if cc.dopts.disableServiceConfig { - return true - } - // The logic below is temporary, will be removed once we change the resolver.State ServiceConfig field type. - // Right now, we assume that empty service config string means resolver does not return a config. - if sc == "" { - return true - } - // TODO: the logic below is temporary. Once we finish the logic to validate service config - // in resolver, we will replace the logic below. 
- _, err := parseServiceConfig(sc) - return err != nil -} - -func (cc *ClientConn) updateResolverState(s resolver.State) error { +func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) { cc.mu.Lock() defer cc.mu.Unlock() - // Check if the ClientConn is already closed. Some fields (e.g. - // balancerWrapper) are set to nil when closing the ClientConn, and could - // cause nil pointer panic if we don't have this check. if cc.conns == nil { - return nil + // cc was closed. + return } - if cc.fallbackToDefaultServiceConfig(s.ServiceConfig) { - if cc.dopts.defaultServiceConfig != nil && cc.sc == nil { - cc.applyServiceConfig(cc.dopts.defaultServiceConfig) - } - } else { - // TODO: the parsing logic below will be moved inside resolver. - sc, err := parseServiceConfig(s.ServiceConfig) - if err != nil { - return err - } - if cc.sc == nil || cc.sc.rawJSONString != s.ServiceConfig { - cc.applyServiceConfig(sc) - } + if reflect.DeepEqual(cc.curAddresses, addrs) { + return } - // update the service config that will be sent to balancer. - if cc.sc != nil { - s.ServiceConfig = cc.sc.rawJSONString - } + cc.curAddresses = addrs if cc.dopts.balancerBuilder == nil { // Only look at balancer types and switch balancer if balancer dial // option is not set. var isGRPCLB bool - for _, a := range s.Addresses { + for _, a := range addrs { if a.Type == resolver.GRPCLB { isGRPCLB = true break } } var newBalancerName string - // TODO: use new loadBalancerConfig field with appropriate priority. if isGRPCLB { newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB } else { - newBalancerName = PickFirstBalancerName + // Address list doesn't contain grpclb address. Try to pick a + // non-grpclb balancer. + newBalancerName = cc.curBalancerName + // If current balancer is grpclb, switch to the previous one. + if newBalancerName == grpclbName { + newBalancerName = cc.preBalancerName + } + // The following could be true in two cases: + // - the first time handling resolved addresses + // (curBalancerName="") + // - the first time handling non-grpclb addresses + // (curBalancerName="grpclb", preBalancerName="") + if newBalancerName == "" { + newBalancerName = PickFirstBalancerName + } } cc.switchBalancer(newBalancerName) } else if cc.balancerWrapper == nil { @@ -540,9 +800,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State) error { cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) } - cc.balancerWrapper.updateResolverState(s) - cc.firstResolveEvent.Fire() - return nil + cc.balancerWrapper.handleResolvedAddrs(addrs, nil) } // switchBalancer starts the switching from current balancer to the balancer @@ -554,6 +812,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State) error { // // Caller must hold cc.mu. func (cc *ClientConn) switchBalancer(name string) { + if cc.conns == nil { + return + } + if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) { return } @@ -563,29 +825,20 @@ func (cc *ClientConn) switchBalancer(name string) { grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") return } + // TODO(bar switching) change this to two steps: drain and close. + // Keep track of sc in wrapper. if cc.balancerWrapper != nil { cc.balancerWrapper.close() } + // Clear all stickiness state. 
+ cc.blockingpicker.clearStickinessState() builder := balancer.Get(name) - if channelz.IsOn() { - if builder == nil { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), - Severity: channelz.CtWarning, - }) - } else { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), - Severity: channelz.CtINFO, - }) - } - } if builder == nil { grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) builder = newPickfirstBuilder() } - + cc.preBalancerName = cc.curBalancerName cc.curBalancerName = builder.Name() cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) } @@ -605,14 +858,11 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi // newAddrConn creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { +func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { ac := &addrConn{ - cc: cc, - addrs: addrs, - scopts: opts, - dopts: cc.dopts, - czData: new(channelzData), - resetBackoff: make(chan struct{}), + cc: cc, + addrs: addrs, + dopts: cc.dopts, } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. @@ -623,14 +873,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub } if channelz.IsOn() { ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtINFO, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtINFO, - }, - }) } cc.conns[ac] = struct{}{} cc.mu.Unlock() @@ -650,39 +892,47 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { ac.tearDown(err) } -func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { +// ChannelzMetric returns ChannelInternalMetric of current ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) ChannelzMetric() *channelz.ChannelInternalMetric { + state := cc.GetState() + cc.czmu.RLock() + defer cc.czmu.RUnlock() return &channelz.ChannelInternalMetric{ - State: cc.GetState(), + State: state, Target: cc.target, - CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), + CallsStarted: cc.callsStarted, + CallsSucceeded: cc.callsSucceeded, + CallsFailed: cc.callsFailed, + LastCallStartedTimestamp: cc.lastCallStartedTime, } } -// Target returns the target string of the ClientConn. -// This is an EXPERIMENTAL API. -func (cc *ClientConn) Target() string { - return cc.target -} - func (cc *ClientConn) incrCallsStarted() { - atomic.AddInt64(&cc.czData.callsStarted, 1) - atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) + cc.czmu.Lock() + cc.callsStarted++ + // TODO(yuxuanli): will make this a time.Time pointer improve performance? 
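[editor's note] The 1.13.0 code above tracks channelz call counters under a dedicated czmu RWMutex, where 1.20.1 used atomics on a czData struct. A small sketch of the mutex-guarded variant, with hypothetical names:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // callStats mirrors the czmu-protected counters restored above.
    type callStats struct {
        mu                  sync.RWMutex
        callsStarted        int64
        callsSucceeded      int64
        callsFailed         int64
        lastCallStartedTime time.Time
    }

    func (s *callStats) incrStarted() {
        s.mu.Lock()
        s.callsStarted++
        s.lastCallStartedTime = time.Now()
        s.mu.Unlock()
    }

    func (s *callStats) incrSucceeded() {
        s.mu.Lock()
        s.callsSucceeded++
        s.mu.Unlock()
    }

    func (s *callStats) snapshot() (started, succeeded int64) {
        s.mu.RLock()
        defer s.mu.RUnlock()
        return s.callsStarted, s.callsSucceeded
    }

    func main() {
        var s callStats
        s.incrStarted()
        s.incrSucceeded()
        started, succeeded := s.snapshot()
        fmt.Println(started, succeeded)
    }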
+ cc.lastCallStartedTime = time.Now() + cc.czmu.Unlock() } func (cc *ClientConn) incrCallsSucceeded() { - atomic.AddInt64(&cc.czData.callsSucceeded, 1) + cc.czmu.Lock() + cc.callsSucceeded++ + cc.czmu.Unlock() } func (cc *ClientConn) incrCallsFailed() { - atomic.AddInt64(&cc.czData.callsFailed, 1) + cc.czmu.Lock() + cc.callsFailed++ + cc.czmu.Unlock() } -// connect starts creating a transport. +// connect starts to creating transport and also starts the transport monitor +// goroutine for this ac. // It does nothing if the ac is not IDLE. // TODO(bar) Move this to the addrConn section. +// This was part of resetAddrConn, keep it here to make the diff look clean. func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { @@ -693,11 +943,22 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - ac.updateConnectivityState(connectivity.Connecting) + ac.state = connectivity.Connecting + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) ac.mu.Unlock() // Start a goroutine connecting to the server asynchronously. - go ac.resetTransport() + go func() { + if err := ac.resetTransport(); err != nil { + grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) + } + return + } + ac.transportMonitor() + }() return nil } @@ -716,12 +977,6 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return true } - // Unless we're busy reconnecting already, let's reconnect from the top of - // the list. - if ac.state != connectivity.Ready { - return false - } - var curAddrFound bool for _, a := range addrs { if reflect.DeepEqual(ac.curAddr, a) { @@ -732,6 +987,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) if curAddrFound { ac.addrs = addrs + ac.reconnectIdx = 0 // Start reconnecting from beginning in the new list. } return curAddrFound @@ -748,9 +1004,6 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { // TODO: Avoid the locking here. cc.mu.RLock() defer cc.mu.RUnlock() - if cc.sc == nil { - return MethodConfig{} - } m, ok := cc.sc.Methods[method] if !ok { i := strings.LastIndex(method, "/") @@ -759,44 +1012,53 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { return m } -func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { - cc.mu.RLock() - defer cc.mu.RUnlock() - if cc.sc == nil { - return nil - } - return cc.sc.healthCheckConfig -} - -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{ - FullMethodName: method, - }) +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) { + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{}) if err != nil { return nil, nil, toRPCErr(err) } return t, done, nil } -func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error { - if sc == nil { - // should never reach here. - return fmt.Errorf("got nil pointer for service config") +// handleServiceConfig parses the service config string in JSON format to Go native +// struct ServiceConfig, and store both the struct and the JSON string in ClientConn. 
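[editor's note] GetMethodConfig above looks up a per-method config by full method name and falls back to a service-level default keyed by "/service/". A standalone sketch of that fallback; the map keys and the config struct are simplified placeholders:

    package main

    import (
        "fmt"
        "strings"
    )

    type methodConfig struct {
        WaitForReady bool
    }

    // getMethodConfig mirrors the lookup above: try the exact
    // "/service/method" key first, then the "/service/" default.
    func getMethodConfig(methods map[string]methodConfig, method string) methodConfig {
        if m, ok := methods[method]; ok {
            return m
        }
        i := strings.LastIndex(method, "/")
        if m, ok := methods[method[:i+1]]; ok {
            return m
        }
        return methodConfig{}
    }

    func main() {
        methods := map[string]methodConfig{
            "/helloworld.Greeter/":         {WaitForReady: true},  // service default
            "/helloworld.Greeter/SayHello": {WaitForReady: false}, // per-method override
        }
        fmt.Println(getMethodConfig(methods, "/helloworld.Greeter/SayHello"))
        fmt.Println(getMethodConfig(methods, "/helloworld.Greeter/SayHelloAgain"))
    }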
+func (cc *ClientConn) handleServiceConfig(js string) error { + if cc.dopts.disableServiceConfig { + return nil } + sc, err := parseServiceConfig(js) + if err != nil { + return err + } + cc.mu.Lock() + cc.scRaw = js cc.sc = sc - - if cc.sc.retryThrottling != nil { - newThrottler := &retryThrottler{ - tokens: cc.sc.retryThrottling.MaxTokens, - max: cc.sc.retryThrottling.MaxTokens, - thresh: cc.sc.retryThrottling.MaxTokens / 2, - ratio: cc.sc.retryThrottling.TokenRatio, + if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config. + if cc.curBalancerName == grpclbName { + // If current balancer is grpclb, there's at least one grpclb + // balancer address in the resolved list. Don't switch the balancer, + // but change the previous balancer name, so if a new resolved + // address list doesn't contain grpclb address, balancer will be + // switched to *sc.LB. + cc.preBalancerName = *sc.LB + } else { + cc.switchBalancer(*sc.LB) + cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil) } - cc.retryThrottler.Store(newThrottler) - } else { - cc.retryThrottler.Store((*retryThrottler)(nil)) } + if envConfigStickinessOn { + var newStickinessMDKey string + if sc.stickinessMetadataKey != nil && *sc.stickinessMetadataKey != "" { + newStickinessMDKey = *sc.stickinessMetadataKey + } + // newStickinessMDKey is "" if one of the following happens: + // - stickinessMetadataKey is set to "" + // - stickinessMetadataKey field doesn't exist in service config + cc.blockingpicker.updateStickinessMDKey(strings.ToLower(newStickinessMDKey)) + } + + cc.mu.Unlock() return nil } @@ -810,24 +1072,6 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { go r.resolveNow(o) } -// ResetConnectBackoff wakes up all subchannels in transient failure and causes -// them to attempt another connection immediately. It also resets the backoff -// times used for subsequent attempts regardless of the current state. -// -// In general, this function should not be used. Typical service or network -// outages result in a reasonable client reconnection strategy by default. -// However, if a previously unavailable network becomes available, this may be -// used to trigger an immediate reconnect. -// -// This API is EXPERIMENTAL. -func (cc *ClientConn) ResetConnectBackoff() { - cc.mu.Lock() - defer cc.mu.Unlock() - for ac := range cc.conns { - ac.resetConnectBackoff() - } -} - // Close tears down the ClientConn and all underlying connections. func (cc *ClientConn) Close() error { defer cc.cancel() @@ -860,19 +1104,6 @@ func (cc *ClientConn) Close() error { ac.tearDown(ErrClientConnClosing) } if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", - Severity: channelz.CtINFO, - } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtINFO, - } - } - channelz.AddTraceEvent(cc.channelzID, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. 
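[editor's note] handleServiceConfig above parses the resolver-provided service config JSON before storing it on the ClientConn. A hedged sketch of what such parsing can look like, using encoding/json and a deliberately simplified struct; the field set here is illustrative and not the full gRPC service config schema:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // simpleServiceConfig captures only two fields for illustration; the real
    // service config schema is richer (retry policy, timeouts, message sizes, ...).
    type simpleServiceConfig struct {
        LoadBalancingPolicy string `json:"loadBalancingPolicy"`
        MethodConfig        []struct {
            Name []struct {
                Service string `json:"service"`
                Method  string `json:"method"`
            } `json:"name"`
            WaitForReady bool `json:"waitForReady"`
        } `json:"methodConfig"`
    }

    func parseSimpleServiceConfig(js string) (*simpleServiceConfig, error) {
        var sc simpleServiceConfig
        if err := json.Unmarshal([]byte(js), &sc); err != nil {
            return nil, fmt.Errorf("invalid service config %q: %v", js, err)
        }
        return &sc, nil
    }

    func main() {
        js := `{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"service":"helloworld.Greeter"}],"waitForReady":true}]}`
        sc, err := parseSimpleServiceConfig(js)
        if err != nil {
            panic(err)
        }
        fmt.Println(sc.LoadBalancingPolicy, sc.MethodConfig[0].WaitForReady)
    }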
channelz.RemoveEntry(cc.channelzID) } return nil @@ -884,45 +1115,37 @@ type addrConn struct { cancel context.CancelFunc cc *ClientConn + addrs []resolver.Address dopts dialOptions + events trace.EventLog acbw balancer.SubConn - scopts balancer.NewSubConnOptions - // transport is set when there's a viable transport (note: ac state may not be READY as LB channel - // health checking may require server to report healthy to set ac to READY), and is reset - // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway - // is received, transport is closed, ac has been torn down). - transport transport.ClientTransport // The current transport. + mu sync.Mutex + curAddr resolver.Address + reconnectIdx int // The index in addrs list to start reconnecting from. + state connectivity.State + // ready is closed and becomes nil when a new transport is up or failed + // due to timeout. + ready chan struct{} + transport transport.ClientTransport - mu sync.Mutex - curAddr resolver.Address // The current address. - addrs []resolver.Address // All addresses that the resolver resolved to. + // The reason this addrConn is torn down. + tearDownErr error - // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State + connectRetryNum int + // backoffDeadline is the time until which resetTransport needs to + // wait before increasing connectRetryNum count. + backoffDeadline time.Time + // connectDeadline is the time by which all connection + // negotiations must complete. + connectDeadline time.Time - backoffIdx int // Needs to be stateful for resetConnectBackoff. - resetBackoff chan struct{} - - channelzID int64 // channelz unique identification number. - czData *channelzData -} - -// Note: this requires a lock on ac.mu. -func (ac *addrConn) updateConnectivityState(s connectivity.State) { - if ac.state == s { - return - } - - updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) - ac.state = s - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: updateMsg, - Severity: channelz.CtINFO, - }) - } - ac.cc.handleSubConnStateChange(ac.acbw, s) + channelzID int64 // channelz unique identification number + czmu sync.RWMutex + callsStarted int64 + callsSucceeded int64 + callsFailed int64 + lastCallStartedTime time.Time } // adjustParams updates parameters used to create transports upon @@ -939,269 +1162,329 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -func (ac *addrConn) resetTransport() { - for i := 0; ; i++ { - if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOption{}) - } +// printf records an event in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) printf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Printf(format, a...) + } +} +// errorf records an error in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) errorf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Errorf(format, a...) + } +} + +// resetTransport recreates a transport to the address for ac. The old +// transport will close itself on error or when the clientconn is closed. +// The created transport must receive initial settings frame from the server. +// In case that doesn't happen, transportMonitor will kill the newly created +// transport after connectDeadline has expired. 
+// In case there was an error on the transport before the settings frame was +// received, resetTransport resumes connecting to backends after the one that +// was previously connected to. In case end of the list is reached, resetTransport +// backs off until the original deadline. +// If the DialOption WithWaitForHandshake was set, resetTrasport returns +// successfully only after server settings are received. +// +// TODO(bar) make sure all state transitions are valid. +func (ac *addrConn) resetTransport() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + ac.transport = nil + ridx := ac.reconnectIdx + ac.mu.Unlock() + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + var backoffDeadline, connectDeadline time.Time + for connectRetryNum := 0; ; connectRetryNum++ { ac.mu.Lock() + if ac.backoffDeadline.IsZero() { + // This means either a successful HTTP2 connection was established + // or this is the first time this addrConn is trying to establish a + // connection. + backoffFor := ac.dopts.bs.Backoff(connectRetryNum) // time.Duration. + // This will be the duration that dial gets to finish. + dialDuration := getMinConnectTimeout() + if backoffFor > dialDuration { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + start := time.Now() + backoffDeadline = start.Add(backoffFor) + connectDeadline = start.Add(dialDuration) + ridx = 0 // Start connecting from the beginning. + } else { + // Continue trying to connect with the same deadlines. + connectRetryNum = ac.connectRetryNum + backoffDeadline = ac.backoffDeadline + connectDeadline = ac.connectDeadline + ac.backoffDeadline = time.Time{} + ac.connectDeadline = time.Time{} + ac.connectRetryNum = 0 + } if ac.state == connectivity.Shutdown { ac.mu.Unlock() - return + return errConnClosing } - - addrs := ac.addrs - backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - // This will be the duration that dial gets to finish. - dialDuration := minConnectTimeout - if ac.dopts.minConnectTimeout != nil { - dialDuration = ac.dopts.minConnectTimeout() + ac.printf("connecting") + if ac.state != connectivity.Connecting { + ac.state = connectivity.Connecting + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) } - - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor - } - // We can potentially spend all the time trying the first address, and - // if the server accepts the connection and then hangs, the following - // addresses will never be tried. - // - // The spec doesn't mention what should be done for multiple addresses. - // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm - connectDeadline := time.Now().Add(dialDuration) + // copy ac.addrs in case of race + addrsIter := make([]resolver.Address, len(ac.addrs)) + copy(addrsIter, ac.addrs) + copts := ac.dopts.copts ac.mu.Unlock() - - newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) + connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts) if err != nil { - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + return err + } + if connected { + return nil + } + } +} + +// createTransport creates a connection to one of the backends in addrs. +// It returns true if a connection was established. 
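[editor's note] resetTransport above derives two deadlines per attempt: a backoff deadline from the retry count and a connect deadline that is at least the minimum connect timeout. A standalone sketch of that computation, with an illustrative exponential backoff standing in for the vendored strategy and an assumed 20s minimum connect timeout:

    package main

    import (
        "fmt"
        "time"
    )

    const minConnectTimeout = 20 * time.Second

    // backoffFor is a stand-in for dopts.bs.Backoff(retries): exponential
    // growth capped at two minutes.
    func backoffFor(retries int) time.Duration {
        d := time.Second << uint(retries)
        if d > 2*time.Minute {
            d = 2 * time.Minute
        }
        return d
    }

    // deadlines mirrors the logic above: dial gets at least minConnectTimeout,
    // and more when the backoff interval is longer than that.
    func deadlines(retries int, now time.Time) (backoffDeadline, connectDeadline time.Time) {
        b := backoffFor(retries)
        dial := minConnectTimeout
        if b > dial {
            dial = b
        }
        return now.Add(b), now.Add(dial)
    }

    func main() {
        now := time.Now()
        for _, r := range []int{0, 3, 6} {
            bd, cd := deadlines(r, now)
            fmt.Printf("retry %d: backoff ends in %v, connect deadline in %v\n",
                r, bd.Sub(now), cd.Sub(now))
        }
    }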
+func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions) (bool, error) { + for i := ridx; i < len(addrs); i++ { + addr := addrs[i] + target := transport.TargetInfo{ + Addr: addr.Addr, + Metadata: addr.Metadata, + Authority: ac.cc.authority, + } + done := make(chan struct{}) + onPrefaceReceipt := func() { + ac.mu.Lock() + close(done) + if !ac.backoffDeadline.IsZero() { + // If we haven't already started reconnecting to + // other backends. + // Note, this can happen when writer notices an error + // and triggers resetTransport while at the same time + // reader receives the preface and invokes this closure. + ac.backoffDeadline = time.Time{} + ac.connectDeadline = time.Time{} + ac.connectRetryNum = 0 + } + ac.mu.Unlock() + } + // Do not cancel in the success path because of + // this issue in Go1.6: https://github.com/golang/go/issues/15078. + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + if channelz.IsOn() { + copts.ChannelzParentID = ac.channelzID + } + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt) + if err != nil { + cancel() + ac.cc.blockingpicker.updateConnectionError(err) ac.mu.Lock() if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. ac.mu.Unlock() - return + return false, errConnClosing } - ac.updateConnectivityState(connectivity.TransientFailure) - - // Backoff. - b := ac.resetBackoff ac.mu.Unlock() - - timer := time.NewTimer(backoffFor) - select { - case <-timer.C: - ac.mu.Lock() - ac.backoffIdx++ - ac.mu.Unlock() - case <-b: - timer.Stop() - case <-ac.ctx.Done(): - timer.Stop() - return - } + grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) continue } - + if ac.dopts.waitForHandshake { + select { + case <-done: + case <-connectCtx.Done(): + // Didn't receive server preface, must kill this new transport now. + grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.") + newTr.Close() + break + case <-ac.ctx.Done(): + } + } ac.mu.Lock() if ac.state == connectivity.Shutdown { - newTr.Close() ac.mu.Unlock() - return + // ac.tearDonn(...) has been invoked. + newTr.Close() + return false, errConnClosing } - ac.curAddr = addr + ac.printf("ready") + ac.state = connectivity.Ready + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) ac.transport = newTr - ac.backoffIdx = 0 - - healthCheckConfig := ac.cc.healthCheckConfig() - // LB channel health checking is only enabled when all the four requirements below are met: - // 1. it is not disabled by the user with the WithDisableHealthCheck DialOption, - // 2. the internal.HealthCheckFunc is set by importing the grpc/healthcheck package, - // 3. a service config with non-empty healthCheckConfig field is provided, - // 4. the current load balancer allows it. - hctx, hcancel := context.WithCancel(ac.ctx) - healthcheckManagingState := false - if !ac.cc.dopts.disableHealthCheck && healthCheckConfig != nil && ac.scopts.HealthCheckEnabled { - if ac.cc.dopts.healthCheckFunc == nil { - // TODO: add a link to the health check doc in the error message. 
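[editor's note] createTransport above walks the resolved address list starting at the reconnect index and stops at the first backend it can reach before the connect deadline. A hedged, much-simplified sketch of that "first address that answers wins" loop, using plain TCP in place of the HTTP/2 client transport; the addresses in main are placeholders:

    package main

    import (
        "context"
        "fmt"
        "net"
        "time"
    )

    // dialFirst tries each address in order and returns the first connection
    // established before deadline, mirroring the iteration in createTransport.
    func dialFirst(ctx context.Context, addrs []string, deadline time.Time) (net.Conn, string, error) {
        ctx, cancel := context.WithDeadline(ctx, deadline)
        defer cancel()
        var d net.Dialer
        for _, addr := range addrs {
            conn, err := d.DialContext(ctx, "tcp", addr)
            if err != nil {
                // Like the vendored code: log and move on to the next backend.
                fmt.Printf("dial %s failed: %v; trying next address\n", addr, err)
                continue
            }
            return conn, addr, nil
        }
        return nil, "", fmt.Errorf("could not connect to any of %v", addrs)
    }

    func main() {
        conn, addr, err := dialFirst(context.Background(),
            []string{"127.0.0.1:1", "dns.google:53"}, time.Now().Add(5*time.Second))
        if err != nil {
            fmt.Println(err)
            return
        }
        defer conn.Close()
        fmt.Println("connected to", addr)
    }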
- grpclog.Error("the client side LB channel health check function has not been set.") - } else { - // TODO(deklerk) refactor to just return transport - go ac.startHealthCheck(hctx, newTr, addr, healthCheckConfig.ServiceName) - healthcheckManagingState = true - } + ac.curAddr = addr + if ac.ready != nil { + close(ac.ready) + ac.ready = nil } - if !healthcheckManagingState { - ac.updateConnectivityState(connectivity.Ready) - } - ac.mu.Unlock() - - // Block until the created transport is down. And when this happens, - // we restart from the top of the addr list. - <-reconnect.Done() - hcancel() - - // Need to reconnect after a READY, the addrConn enters - // TRANSIENT_FAILURE. - // - // This will set addrConn to TRANSIENT_FAILURE for a very short period - // of time, and turns CONNECTING. It seems reasonable to skip this, but - // READY-CONNECTING is not a valid transition. - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.TransientFailure) - ac.mu.Unlock() - } -} - -// tryAllAddrs tries to creates a connection to the addresses, and stop when at the -// first successful one. It returns the transport, the address and a Event in -// the successful case. The Event fires when the returned transport disconnects. -func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { - for _, addr := range addrs { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return nil, resolver.Address{}, nil, errConnClosing - } - ac.updateConnectivityState(connectivity.Connecting) - ac.transport = nil - - ac.cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = ac.cc.mkp - ac.cc.mu.RUnlock() - - copts := ac.dopts.copts - if ac.scopts.CredsBundle != nil { - copts.CredsBundle = ac.scopts.CredsBundle - } - ac.mu.Unlock() - - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), - Severity: channelz.CtINFO, - }) - } - - newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) - if err == nil { - return newTr, addr, reconnect, nil - } - ac.cc.blockingpicker.updateConnectionError(err) - } - - // Couldn't connect to any address. - return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address") -} - -// createTransport creates a connection to addr. It returns the transport and a -// Event in the successful case. The Event fires when the returned transport -// disconnects. 
-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { - prefaceReceived := make(chan struct{}) - onCloseCalled := make(chan struct{}) - reconnect := grpcsync.NewEvent() - - target := transport.TargetInfo{ - Addr: addr.Addr, - Metadata: addr.Metadata, - Authority: ac.cc.authority, - } - - onGoAway := func(r transport.GoAwayReason) { - ac.mu.Lock() - ac.adjustParams(r) - ac.mu.Unlock() - reconnect.Fire() - } - - onClose := func() { - close(onCloseCalled) - reconnect.Fire() - } - - onPrefaceReceipt := func() { - close(prefaceReceived) - } - - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) - defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } - - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) - if err != nil { - // newTr is either nil, or closed. - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) - return nil, nil, err - } - - if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn { select { - case <-time.After(connectDeadline.Sub(time.Now())): - // We didn't get the preface in time. - newTr.Close() - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) - return nil, nil, errors.New("timed out waiting for server handshake") - case <-prefaceReceived: - // We got the preface - huzzah! things are good. - case <-onCloseCalled: - // The transport has already closed - noop. - return nil, nil, errors.New("connection closed") - // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + case <-done: + // If the server has responded back with preface already, + // don't set the reconnect parameters. + default: + ac.connectRetryNum = connectRetryNum + ac.backoffDeadline = backoffDeadline + ac.connectDeadline = connectDeadline + ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list. } + ac.mu.Unlock() + return true, nil } - return newTr, reconnect, nil + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return false, errConnClosing + } + ac.state = connectivity.TransientFailure + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + ac.cc.resolveNow(resolver.ResolveNowOption{}) + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + ac.mu.Unlock() + timer := time.NewTimer(backoffDeadline.Sub(time.Now())) + select { + case <-timer.C: + case <-ac.ctx.Done(): + timer.Stop() + return false, ac.ctx.Err() + } + return false, nil } -func (ac *addrConn) startHealthCheck(ctx context.Context, newTr transport.ClientTransport, addr resolver.Address, serviceName string) { - // Set up the health check helper functions - newStream := func() (interface{}, error) { - return ac.newClientStream(ctx, &StreamDesc{ServerStreams: true}, "/grpc.health.v1.Health/Watch", newTr) - } - firstReady := true - reportHealth := func(ok bool) { +// Run in a goroutine to track the error in transport and create the +// new transport if an error happens. It returns when the channel is closing. 
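[editor's note] Both versions above gate "transport is usable" on receiving the server preface before a deadline, selecting over a preface-received channel, a close notification, and a timer. A standalone sketch of that wait; the channel names are illustrative:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // waitForHandshake mirrors the select above: success when the preface
    // arrives, an error when the connection closes or the deadline passes.
    func waitForHandshake(prefaceReceived, closed <-chan struct{}, deadline time.Time) error {
        timer := time.NewTimer(time.Until(deadline))
        defer timer.Stop()
        select {
        case <-prefaceReceived:
            return nil
        case <-closed:
            return errors.New("connection closed before server preface")
        case <-timer.C:
            return errors.New("timed out waiting for server handshake")
        }
    }

    func main() {
        preface := make(chan struct{})
        closed := make(chan struct{})
        go func() {
            time.Sleep(20 * time.Millisecond)
            close(preface) // simulate the HTTP/2 settings frame arriving
        }()
        err := waitForHandshake(preface, closed, time.Now().Add(time.Second))
        fmt.Println("handshake error:", err)
    }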
+func (ac *addrConn) transportMonitor() { + for { + var timer *time.Timer + var cdeadline <-chan time.Time ac.mu.Lock() - defer ac.mu.Unlock() - if ac.transport != newTr { + t := ac.transport + if !ac.connectDeadline.IsZero() { + timer = time.NewTimer(ac.connectDeadline.Sub(time.Now())) + cdeadline = timer.C + } + ac.mu.Unlock() + // Block until we receive a goaway or an error occurs. + select { + case <-t.GoAway(): + done := t.Error() + cleanup := t.Close + // Since this transport will be orphaned (won't have a transportMonitor) + // we need to launch a goroutine to keep track of clientConn.Close() + // happening since it might not be noticed by any other goroutine for a while. + go func() { + <-done + cleanup() + }() + case <-t.Error(): + // In case this is triggered because clientConn.Close() + // was called, we want to immeditately close the transport + // since no other goroutine might notice it for a while. + t.Close() + case <-cdeadline: + ac.mu.Lock() + // This implies that client received server preface. + if ac.backoffDeadline.IsZero() { + ac.mu.Unlock() + continue + } + ac.mu.Unlock() + timer = nil + // No server preface received until deadline. + // Kill the connection. + grpclog.Warningf("grpc: addrConn.transportMonitor didn't get server preface after waiting. Closing the new transport now.") + t.Close() + } + if timer != nil { + timer.Stop() + } + // If a GoAway happened, regardless of error, adjust our keepalive + // parameters as appropriate. + select { + case <-t.GoAway(): + ac.adjustParams(t.GetGoAwayReason()) + default: + } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() return } - if ok { - if firstReady { - firstReady = false - ac.curAddr = addr + // Set connectivity state to TransientFailure before calling + // resetTransport. Transition READY->CONNECTING is not valid. + ac.state = connectivity.TransientFailure + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + ac.cc.resolveNow(resolver.ResolveNowOption{}) + ac.curAddr = resolver.Address{} + ac.mu.Unlock() + if err := ac.resetTransport(); err != nil { + ac.mu.Lock() + ac.printf("transport exiting: %v", err) + ac.mu.Unlock() + grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) } - ac.updateConnectivityState(connectivity.Ready) - } else { - ac.updateConnectivityState(connectivity.TransientFailure) - } - } - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, reportHealth, serviceName) - if err != nil { - if status.Code(err) == codes.Unimplemented { - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", - Severity: channelz.CtError, - }) - } - grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") - } else { - grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) + return } } } -func (ac *addrConn) resetConnectBackoff() { - ac.mu.Lock() - close(ac.resetBackoff) - ac.backoffIdx = 0 - ac.resetBackoff = make(chan struct{}) - ac.mu.Unlock() +// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or +// iv) transport is in connectivity.TransientFailure and there is a balancer/failfast is true. 
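[editor's note] transportMonitor above is a long-lived goroutine that blocks on the transport's GoAway and Error channels and, on GoAway, defers the actual Close until the transport fully errors out. A hedged sketch of that supervision pattern with a toy transport type, not the vendored interface:

    package main

    import (
        "fmt"
        "time"
    )

    // toyTransport models just the two signals the monitor cares about.
    type toyTransport struct {
        goAway chan struct{}
        err    chan struct{}
    }

    func (t *toyTransport) Close() { fmt.Println("transport closed") }

    // monitor mirrors transportMonitor: block until GoAway or Error, and on
    // GoAway keep the transport open until its Error channel also fires.
    func monitor(t *toyTransport, reconnect func()) {
        select {
        case <-t.goAway:
            fmt.Println("GoAway received; draining")
            go func() {
                <-t.err // wait for the transport to finish erroring out
                t.Close()
            }()
        case <-t.err:
            t.Close()
        }
        reconnect()
    }

    func main() {
        t := &toyTransport{goAway: make(chan struct{}), err: make(chan struct{})}
        done := make(chan struct{})
        go monitor(t, func() { fmt.Println("reconnecting"); close(done) })
        close(t.goAway)
        time.Sleep(10 * time.Millisecond)
        close(t.err)
        <-done
        time.Sleep(10 * time.Millisecond) // let the drain goroutine print
    }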
+func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { + for { + ac.mu.Lock() + switch { + case ac.state == connectivity.Shutdown: + if failfast || !hasBalancer { + // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. + err := ac.tearDownErr + ac.mu.Unlock() + return nil, err + } + ac.mu.Unlock() + return nil, errConnClosing + case ac.state == connectivity.Ready: + ct := ac.transport + ac.mu.Unlock() + return ct, nil + case ac.state == connectivity.TransientFailure: + if failfast || hasBalancer { + ac.mu.Unlock() + return nil, errConnUnavailable + } + } + ready := ac.ready + if ready == nil { + ready = make(chan struct{}) + ac.ready = ready + } + ac.mu.Unlock() + select { + case <-ctx.Done(): + return nil, toRPCErr(ctx.Err()) + // Wait until the new transport is ready or failed. + case <-ready: + } + } } // getReadyTransport returns the transport if ac's state is READY. @@ -1209,7 +1492,7 @@ func (ac *addrConn) resetConnectBackoff() { // If ac's state is IDLE, it will trigger ac to connect. func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { ac.mu.Lock() - if ac.state == connectivity.Ready && ac.transport != nil { + if ac.state == connectivity.Ready { t := ac.transport ac.mu.Unlock() return t, true @@ -1232,42 +1515,34 @@ func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { // tight loop. // tearDown doesn't remove ac from ac.cc.conns. func (ac *addrConn) tearDown(err error) { + ac.cancel() ac.mu.Lock() + defer ac.mu.Unlock() if ac.state == connectivity.Shutdown { - ac.mu.Unlock() return } - curTr := ac.transport - ac.transport = nil - // We have to set the state to Shutdown before anything else to prevent races - // between setting the state and logic that waits on context cancelation / etc. - ac.updateConnectivityState(connectivity.Shutdown) - ac.cancel() ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { + if err == errConnDrain && ac.transport != nil { // GracefulClose(...) may be executed multiple times when // i) receiving multiple GoAway frames from the server; or // ii) there are concurrent name resolver/Balancer triggered // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. - ac.mu.Unlock() - curTr.GracefulClose() - ac.mu.Lock() + ac.transport.GracefulClose() + } + ac.state = connectivity.Shutdown + ac.tearDownErr = err + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + if ac.events != nil { + ac.events.Finish() + ac.events = nil + } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil } if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", - Severity: channelz.CtINFO, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtINFO, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity beng deleted, and thus prevent it from being deleted right away. 
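[editor's note] addrConn.wait above loops until the connection is ready, shut down, or the caller's context expires, re-checking state each time the ready channel is closed. A simplified standalone sketch of that loop; the states and fields are illustrative:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "sync"
        "time"
    )

    type connState int

    const (
        connecting connState = iota
        ready
        shutdown
    )

    type conn struct {
        mu      sync.Mutex
        state   connState
        readyCh chan struct{}
    }

    // wait mirrors the loop above: return when ready, fail on shutdown,
    // otherwise block on a fresh ready channel or the caller's context.
    func (c *conn) wait(ctx context.Context) error {
        for {
            c.mu.Lock()
            switch c.state {
            case ready:
                c.mu.Unlock()
                return nil
            case shutdown:
                c.mu.Unlock()
                return errors.New("conn is closing")
            }
            if c.readyCh == nil {
                c.readyCh = make(chan struct{})
            }
            ch := c.readyCh
            c.mu.Unlock()

            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-ch: // state changed; loop and re-check
            }
        }
    }

    func (c *conn) setState(s connState) {
        c.mu.Lock()
        c.state = s
        if c.readyCh != nil {
            close(c.readyCh)
            c.readyCh = nil
        }
        c.mu.Unlock()
    }

    func main() {
        c := &conn{state: connecting}
        go func() { time.Sleep(10 * time.Millisecond); c.setState(ready) }()
        fmt.Println("wait returned:", c.wait(context.Background()))
    }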
channelz.RemoveEntry(ac.channelzID) } - ac.mu.Unlock() } func (ac *addrConn) getState() connectivity.State { @@ -1276,76 +1551,47 @@ func (ac *addrConn) getState() connectivity.State { return ac.state } +func (ac *addrConn) getCurAddr() (ret resolver.Address) { + ac.mu.Lock() + ret = ac.curAddr + ac.mu.Unlock() + return +} + func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { ac.mu.Lock() addr := ac.curAddr.Addr ac.mu.Unlock() + state := ac.getState() + ac.czmu.RLock() + defer ac.czmu.RUnlock() return &channelz.ChannelInternalMetric{ - State: ac.getState(), + State: state, Target: addr, - CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), + CallsStarted: ac.callsStarted, + CallsSucceeded: ac.callsSucceeded, + CallsFailed: ac.callsFailed, + LastCallStartedTimestamp: ac.lastCallStartedTime, } } func (ac *addrConn) incrCallsStarted() { - atomic.AddInt64(&ac.czData.callsStarted, 1) - atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) + ac.czmu.Lock() + ac.callsStarted++ + ac.lastCallStartedTime = time.Now() + ac.czmu.Unlock() } func (ac *addrConn) incrCallsSucceeded() { - atomic.AddInt64(&ac.czData.callsSucceeded, 1) + ac.czmu.Lock() + ac.callsSucceeded++ + ac.czmu.Unlock() } func (ac *addrConn) incrCallsFailed() { - atomic.AddInt64(&ac.czData.callsFailed, 1) -} - -type retryThrottler struct { - max float64 - thresh float64 - ratio float64 - - mu sync.Mutex - tokens float64 // TODO(dfawley): replace with atomic and remove lock. -} - -// throttle subtracts a retry token from the pool and returns whether a retry -// should be throttled (disallowed) based upon the retry throttling policy in -// the service config. -func (rt *retryThrottler) throttle() bool { - if rt == nil { - return false - } - rt.mu.Lock() - defer rt.mu.Unlock() - rt.tokens-- - if rt.tokens < 0 { - rt.tokens = 0 - } - return rt.tokens <= rt.thresh -} - -func (rt *retryThrottler) successfulRPC() { - if rt == nil { - return - } - rt.mu.Lock() - defer rt.mu.Unlock() - rt.tokens += rt.ratio - if rt.tokens > rt.max { - rt.tokens = rt.max - } -} - -type channelzChannel struct { - cc *ClientConn -} - -func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { - return c.cc.channelzMetric() + ac.czmu.Lock() + ac.callsFailed++ + ac.czmu.Unlock() } // ErrClientConnTimeout indicates that the ClientConn cannot establish the diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go index 34ec36fbf6..568ef5dc68 100644 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -22,8 +22,7 @@ package connectivity import ( - "context" - + "golang.org/x/net/context" "google.golang.org/grpc/grpclog" ) @@ -52,7 +51,7 @@ func (s State) String() string { const ( // Idle indicates the ClientConn is idle. Idle State = iota - // Connecting indicates the ClientConn is connecting. + // Connecting indicates the ClienConn is connecting. Connecting // Ready indicates the ClientConn is ready for work. 
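[editor's note] The removed retryThrottler above implements the service-config retry throttling policy: each attempt spends a token, each success refunds token_ratio, and retries are disallowed while the pool is at or below half of max_tokens. A hedged standalone sketch of that bookkeeping:

    package main

    import (
        "fmt"
        "sync"
    )

    // throttler follows the removed retryThrottler: tokens start at max,
    // each attempt spends one, successes refund ratio, and retries are
    // throttled once tokens <= max/2.
    type throttler struct {
        max    float64
        thresh float64
        ratio  float64

        mu     sync.Mutex
        tokens float64
    }

    func newThrottler(maxTokens, tokenRatio float64) *throttler {
        return &throttler{max: maxTokens, thresh: maxTokens / 2, ratio: tokenRatio, tokens: maxTokens}
    }

    func (t *throttler) throttle() bool {
        t.mu.Lock()
        defer t.mu.Unlock()
        t.tokens--
        if t.tokens < 0 {
            t.tokens = 0
        }
        return t.tokens <= t.thresh
    }

    func (t *throttler) successfulRPC() {
        t.mu.Lock()
        defer t.mu.Unlock()
        t.tokens += t.ratio
        if t.tokens > t.max {
            t.tokens = t.max
        }
    }

    func main() {
        t := newThrottler(10, 0.5)
        for i := 0; i < 6; i++ {
            fmt.Printf("attempt %d throttled: %v\n", i+1, t.throttle())
        }
        t.successfulRPC()
        fmt.Println("after a success, throttled:", t.throttle())
    }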
Ready diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 88aff94596..3351bf0ee5 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -23,7 +23,6 @@ package credentials // import "google.golang.org/grpc/credentials" import ( - "context" "crypto/tls" "crypto/x509" "errors" @@ -32,10 +31,12 @@ import ( "net" "strings" - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/credentials/internal" + "golang.org/x/net/context" ) +// alpnProtoStr are the specified application level protocols for gRPC. +var alpnProtoStr = []string{"h2"} + // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { @@ -106,25 +107,6 @@ type TransportCredentials interface { OverrideServerName(string) error } -// Bundle is a combination of TransportCredentials and PerRPCCredentials. -// -// It also contains a mode switching method, so it can be used as a combination -// of different credential policies. -// -// Bundle cannot be used together with individual TransportCredentials. -// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. -// -// This API is experimental. -type Bundle interface { - TransportCredentials() TransportCredentials - PerRPCCredentials() PerRPCCredentials - // NewWithMode should make a copy of Bundle, and switch mode. Modifying the - // existing Bundle may cause races. - // - // NewWithMode returns nil if the requested mode is not supported. - NewWithMode(mode string) (Bundle, error) -} - // TLSInfo contains the auth information for a TLS authenticated connection. // It implements the AuthInfo interface. type TLSInfo struct { @@ -136,18 +118,6 @@ func (t TLSInfo) AuthType() string { return "tls" } -// GetSecurityValue returns security info requested by channelz. -func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { - v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], - } - // Currently there's no way to get LocalCertificate info from tls package. - if len(t.State.PeerCertificates) > 0 { - v.RemoteCertificate = t.State.PeerCertificates[0].Raw - } - return v -} - // tlsCreds is the credentials required for authenticating a connection using TLS. type tlsCreds struct { // TLS configuration @@ -185,7 +155,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon case <-ctx.Done(): return nil, nil, ctx.Err() } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil + return conn, TLSInfo{conn.ConnectionState()}, nil } func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { @@ -193,7 +163,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) if err := conn.Handshake(); err != nil { return nil, nil, err } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil + return conn, TLSInfo{conn.ConnectionState()}, nil } func (c *tlsCreds) Clone() TransportCredentials { @@ -205,23 +175,10 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } -const alpnProtoStrH2 = "h2" - -func appendH2ToNextProtos(ps []string) []string { - for _, p := range ps { - if p == alpnProtoStrH2 { - return ps - } - } - ret := make([]string, 0, len(ps)+1) - ret = append(ret, ps...) 
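[editor's note] The credentials.go hunk above is the TLS TransportCredentials implementation that backs grpc.WithTransportCredentials. A typical client-side usage sketch; the target address and CA file path are placeholders, and a server must actually exist for an RPC to succeed:

    package main

    import (
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
    )

    func main() {
        // NewClientTLSFromFile builds TransportCredentials from a CA cert;
        // an empty serverNameOverride means the dialed hostname is verified.
        creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
        if err != nil {
            log.Fatalf("failed to load CA cert: %v", err)
        }
        conn, err := grpc.Dial("greeter.example.com:443", grpc.WithTransportCredentials(creds))
        if err != nil {
            log.Fatalf("dial failed: %v", err)
        }
        defer conn.Close()
        log.Println("connection state:", conn.GetState())
    }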
- return append(ret, alpnProtoStrH2) -} - // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{cloneTLSConfig(c)} - tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) + tc.config.NextProtos = alpnProtoStr return tc } @@ -261,78 +218,3 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error } return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil } - -// ChannelzSecurityInfo defines the interface that security protocols should implement -// in order to provide security info to channelz. -type ChannelzSecurityInfo interface { - GetSecurityValue() ChannelzSecurityValue -} - -// ChannelzSecurityValue defines the interface that GetSecurityValue() return value -// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue -// and *OtherChannelzSecurityValue. -type ChannelzSecurityValue interface { - isChannelzSecurityValue() -} - -// TLSChannelzSecurityValue defines the struct that TLS protocol should return -// from GetSecurityValue(), containing security info like cipher and certificate used. -type TLSChannelzSecurityValue struct { - StandardName string - LocalCertificate []byte - RemoteCertificate []byte -} - -func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {} - -// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return -// from GetSecurityValue(), which contains protocol specific security info. Note -// the Value field will be sent to users of channelz requesting channel info, and -// thus sensitive info should better be avoided. -type OtherChannelzSecurityValue struct { - Name string - Value proto.Message -} - -func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {} - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - 
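[editor's note] The removed appendH2ToNextProtos above (replaced in 1.13.0 by overwriting NextProtos with a fixed list) adds the "h2" ALPN protocol only when it is not already advertised. A small standalone sketch of that idempotent append:

    package main

    import "fmt"

    const alpnProtoH2 = "h2"

    // appendH2 returns ps unchanged if "h2" is already present, otherwise a
    // copy with "h2" appended, mirroring the removed helper above.
    func appendH2(ps []string) []string {
        for _, p := range ps {
            if p == alpnProtoH2 {
                return ps
            }
        }
        ret := make([]string, 0, len(ps)+1)
        ret = append(ret, ps...)
        return append(ret, alpnProtoH2)
    }

    func main() {
        fmt.Println(appendH2(nil))                        // [h2]
        fmt.Println(appendH2([]string{"http/1.1"}))       // [http/1.1 h2]
        fmt.Println(appendH2([]string{"h2", "http/1.1"})) // [h2 http/1.1]
    }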
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", -} - -// cloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO: inline this function if possible. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - - return cfg.Clone() -} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go new file mode 100644 index 0000000000..60409aac0f --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go @@ -0,0 +1,60 @@ +// +build go1.7 +// +build !go1.8 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, + Renegotiation: cfg.Renegotiation, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/tls13.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go similarity index 59% rename from vendor/google.golang.org/grpc/credentials/tls13.go rename to vendor/google.golang.org/grpc/credentials/credentials_util_go18.go index ccbf35b331..93f0e1d8de 100644 --- a/vendor/google.golang.org/grpc/credentials/tls13.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go @@ -1,8 +1,8 @@ -// +build go1.12 +// +build go1.8 /* * - * Copyright 2019 gRPC authors. + * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,11 +20,19 @@ package credentials -import "crypto/tls" +import ( + "crypto/tls" +) -// This init function adds cipher suite constants only defined in Go 1.12. 
-func init() { - cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" - cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" - cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() } diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go new file mode 100644 index 0000000000..d6bbcc9fdd --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go @@ -0,0 +1,57 @@ +// +build !go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go deleted file mode 100644 index 2f4472becc..0000000000 --- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build !appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package internal contains credentials-internal code. -package internal - -import ( - "net" - "syscall" -) - -type sysConn = syscall.Conn - -// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. -// SyscallConn() (the method in interface syscall.Conn) is explicitly -// implemented on this type, -// -// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. -// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns -// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn -// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't -// help here). -type syscallConn struct { - net.Conn - // sysConn is a type alias of syscall.Conn. It's necessary because the name - // `Conn` collides with `net.Conn`. - sysConn -} - -// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that -// implements syscall.Conn. rawConn will be used to support syscall, and newConn -// will be used for read/write. -// -// This function returns newConn if rawConn doesn't implement syscall.Conn. -func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - sysConn, ok := rawConn.(syscall.Conn) - if !ok { - return newConn - } - return &syscallConn{ - Conn: newConn, - sysConn: sysConn, - } -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go deleted file mode 100644 index e114fecbb7..0000000000 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ /dev/null @@ -1,532 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "fmt" - "net" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/stats" -) - -// dialOptions configure a Dial call. dialOptions are set by the DialOption -// values passed to Dial. -type dialOptions struct { - unaryInt UnaryClientInterceptor - streamInt StreamClientInterceptor - cp Compressor - dc Decompressor - bs backoff.Strategy - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by v1 balancer dial option WithBalancer to support v1 - // balancer, and also by WithBalancerName dial option. - balancerBuilder balancer.Builder - // This is to support grpclb. 
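[editor's note] The deleted syscallconn.go above wraps a TLS (or other) conn so that channelz can still reach the raw connection's syscall.Conn. A hedged sketch of the same "preserve an optional interface while wrapping" trick; the demo in main only exercises the fallback path, since net.Pipe conns do not implement syscall.Conn:

    package main

    import (
        "fmt"
        "net"
        "syscall"
    )

    // sysConn renames syscall.Conn so it can be embedded next to net.Conn
    // (both interfaces are called Conn), the same trick the deleted file used.
    type sysConn = syscall.Conn

    // wrappedConn reads and writes through the embedded net.Conn but keeps the
    // raw connection's SyscallConn reachable for channelz-style socket queries.
    type wrappedConn struct {
        net.Conn
        sysConn
    }

    // wrapSyscallConn returns newConn unchanged when rawConn does not
    // implement syscall.Conn; otherwise it bundles the two together.
    func wrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
        sc, ok := rawConn.(syscall.Conn)
        if !ok {
            return newConn
        }
        return &wrappedConn{Conn: newConn, sysConn: sc}
    }

    func main() {
        raw, other := net.Pipe()
        defer raw.Close()
        defer other.Close()
        wrapped := wrapSyscallConn(raw, other)
        _, isSyscall := wrapped.(syscall.Conn)
        fmt.Println("syscall.Conn preserved:", isSyscall) // false for net.Pipe conns
    }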
- resolverBuilder resolver.Builder - reqHandshake envconfig.RequireHandshakeSetting - channelzParentID int64 - disableServiceConfig bool - disableRetry bool - disableHealthCheck bool - healthCheckFunc internal.HealthChecker - minConnectTimeout func() time.Duration - defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. - defaultServiceConfigRawJSON *string -} - -// DialOption configures how we set up the connection. -type DialOption interface { - apply(*dialOptions) -} - -// EmptyDialOption does not alter the dial configuration. It can be embedded in -// another structure to build custom dial options. -// -// This API is EXPERIMENTAL. -type EmptyDialOption struct{} - -func (EmptyDialOption) apply(*dialOptions) {} - -// funcDialOption wraps a function that modifies dialOptions into an -// implementation of the DialOption interface. -type funcDialOption struct { - f func(*dialOptions) -} - -func (fdo *funcDialOption) apply(do *dialOptions) { - fdo.f(do) -} - -func newFuncDialOption(f func(*dialOptions)) *funcDialOption { - return &funcDialOption{ - f: f, - } -} - -// WithWaitForHandshake blocks until the initial settings frame is received from -// the server before assigning RPCs to the connection. -// -// Deprecated: this is the default behavior, and this option will be removed -// after the 1.18 release. -func WithWaitForHandshake() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.reqHandshake = envconfig.RequireHandshakeOn - }) -} - -// WithWriteBufferSize determines how much data can be batched before doing a -// write on the wire. The corresponding memory allocation for this buffer will -// be twice the size to keep syscalls low. The default value for this buffer is -// 32KB. -// -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. -func WithWriteBufferSize(s int) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.WriteBufferSize = s - }) -} - -// WithReadBufferSize lets you set the size of read buffer, this determines how -// much data can be read at most for each read syscall. -// -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. -func WithReadBufferSize(s int) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.ReadBufferSize = s - }) -} - -// WithInitialWindowSize returns a DialOption which sets the value for initial -// window size on a stream. The lower bound for window size is 64K and any value -// smaller than that will be ignored. -func WithInitialWindowSize(s int32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.InitialWindowSize = s - }) -} - -// WithInitialConnWindowSize returns a DialOption which sets the value for -// initial window size on a connection. The lower bound for window size is 64K -// and any value smaller than that will be ignored. -func WithInitialConnWindowSize(s int32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.InitialConnWindowSize = s - }) -} - -// WithMaxMsgSize returns a DialOption which sets the maximum message size the -// client can receive. -// -// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. 
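[editor's note] The deleted dialoptions.go above is a textbook functional-options API: DialOption is an interface with an unexported apply method, and every With* constructor wraps a closure. A stripped-down sketch of the pattern outside grpc; the option names and fields here are illustrative:

    package main

    import (
        "fmt"
        "time"
    )

    type options struct {
        timeout  time.Duration
        insecure bool
    }

    // Option mirrors DialOption: a single apply hook over the option struct.
    type Option interface {
        apply(*options)
    }

    // funcOption mirrors funcDialOption: it adapts a closure to Option.
    type funcOption func(*options)

    func (f funcOption) apply(o *options) { f(o) }

    func WithTimeout(d time.Duration) Option {
        return funcOption(func(o *options) { o.timeout = d })
    }

    func WithInsecure() Option {
        return funcOption(func(o *options) { o.insecure = true })
    }

    func newClient(target string, opts ...Option) options {
        o := options{timeout: 20 * time.Second} // defaults
        for _, opt := range opts {
            opt.apply(&o)
        }
        fmt.Printf("dialing %s with %+v\n", target, o)
        return o
    }

    func main() {
        newClient("localhost:50051", WithTimeout(5*time.Second), WithInsecure())
    }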
-func WithMaxMsgSize(s int) DialOption { - return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) -} - -// WithDefaultCallOptions returns a DialOption which sets the default -// CallOptions for calls over the connection. -func WithDefaultCallOptions(cos ...CallOption) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.callOptions = append(o.callOptions, cos...) - }) -} - -// WithCodec returns a DialOption which sets a codec for message marshaling and -// unmarshaling. -// -// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. -func WithCodec(c Codec) DialOption { - return WithDefaultCallOptions(CallCustomCodec(c)) -} - -// WithCompressor returns a DialOption which sets a Compressor to use for -// message compression. It has lower priority than the compressor set by the -// UseCompressor CallOption. -// -// Deprecated: use UseCompressor instead. -func WithCompressor(cp Compressor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.cp = cp - }) -} - -// WithDecompressor returns a DialOption which sets a Decompressor to use for -// incoming message decompression. If incoming response messages are encoded -// using the decompressor's Type(), it will be used. Otherwise, the message -// encoding will be used to look up the compressor registered via -// encoding.RegisterCompressor, which will then be used to decompress the -// message. If no compressor is registered for the encoding, an Unimplemented -// status error will be returned. -// -// Deprecated: use encoding.RegisterCompressor instead. -func WithDecompressor(dc Decompressor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.dc = dc - }) -} - -// WithBalancer returns a DialOption which sets a load balancer with the v1 API. -// Name resolver will be ignored if this DialOption is specified. -// -// Deprecated: use the new balancer APIs in balancer package and -// WithBalancerName. -func WithBalancer(b Balancer) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = &balancerWrapperBuilder{ - b: b, - } - }) -} - -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// This is an EXPERIMENTAL API. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - -// withResolverBuilder is only for grpclb. -func withResolverBuilder(b resolver.Builder) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolverBuilder = b - }) -} - -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver, as -// specified here. -// https://github.com/grpc/grpc/blob/master/doc/service_config.md -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - -// WithBackoffMaxDelay configures the dialer to use the provided maximum delay -// when backing off after failed connection attempts. 
-func WithBackoffMaxDelay(md time.Duration) DialOption { - return WithBackoffConfig(BackoffConfig{MaxDelay: md}) -} - -// WithBackoffConfig configures the dialer to use the provided backoff -// parameters after connection failures. -// -// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up -// for use. -func WithBackoffConfig(b BackoffConfig) DialOption { - return withBackoff(backoff.Exponential{ - MaxDelay: b.MaxDelay, - }) -} - -// withBackoff sets the backoff strategy used for connectRetryNum after a failed -// connection attempt. -// -// This can be exported if arbitrary backoff strategies are allowed by gRPC. -func withBackoff(bs backoff.Strategy) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.bs = bs - }) -} - -// WithBlock returns a DialOption which makes caller of Dial blocks until the -// underlying connection is up. Without this, Dial returns immediately and -// connecting the server happens in background. -func WithBlock() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.block = true - }) -} - -// WithInsecure returns a DialOption which disables transport security for this -// ClientConn. Note that transport security is required unless WithInsecure is -// set. -func WithInsecure() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.insecure = true - }) -} - -// WithTransportCredentials returns a DialOption which configures a connection -// level security credentials (e.g., TLS/SSL). This should not be used together -// with WithCredentialsBundle. -func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.TransportCredentials = creds - }) -} - -// WithPerRPCCredentials returns a DialOption which sets credentials and places -// auth state on each outbound RPC. -func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) - }) -} - -// WithCredentialsBundle returns a DialOption to set a credentials bundle for -// the ClientConn.WithCreds. This should not be used together with -// WithTransportCredentials. -// -// This API is experimental. -func WithCredentialsBundle(b credentials.Bundle) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.CredsBundle = b - }) -} - -// WithTimeout returns a DialOption that configures a timeout for dialing a -// ClientConn initially. This is valid if and only if WithBlock() is present. -// -// Deprecated: use DialContext and context.WithTimeout instead. -func WithTimeout(d time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.timeout = d - }) -} - -// WithContextDialer returns a DialOption that sets a dialer to create -// connections. If FailOnNonTempDialError() is set to true, and an error is -// returned by f, gRPC checks the error's Temporary() method to decide if it -// should try to reconnect to the network address. -func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.Dialer = f - }) -} - -func init() { - internal.WithResolverBuilder = withResolverBuilder - internal.WithHealthCheckFunc = withHealthCheckFunc -} - -// WithDialer returns a DialOption that specifies a function to use for dialing -// network addresses. 
If FailOnNonTempDialError() is set to true, and an error -// is returned by f, gRPC checks the error's Temporary() method to decide if it -// should try to reconnect to the network address. -// -// Deprecated: use WithContextDialer instead -func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { - return WithContextDialer( - func(ctx context.Context, addr string) (net.Conn, error) { - if deadline, ok := ctx.Deadline(); ok { - return f(addr, time.Until(deadline)) - } - return f(addr, 0) - }) -} - -// WithStatsHandler returns a DialOption that specifies the stats handler for -// all the RPCs and underlying network connections in this ClientConn. -func WithStatsHandler(h stats.Handler) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h - }) -} - -// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on -// non-temporary dial errors. If f is true, and dialer returns a non-temporary -// error, gRPC will fail the connection to the network address and won't try to -// reconnect. The default value of FailOnNonTempDialError is false. -// -// FailOnNonTempDialError only affects the initial dial, and does not do -// anything useful unless you are also using WithBlock(). -// -// This is an EXPERIMENTAL API. -func FailOnNonTempDialError(f bool) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.FailOnNonTempDialError = f - }) -} - -// WithUserAgent returns a DialOption that specifies a user agent string for all -// the RPCs. -func WithUserAgent(s string) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s - }) -} - -// WithKeepaliveParams returns a DialOption that specifies keepalive parameters -// for the client transport. -func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { - if kp.Time < internal.KeepaliveMinPingTime { - grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) - kp.Time = internal.KeepaliveMinPingTime - } - return newFuncDialOption(func(o *dialOptions) { - o.copts.KeepaliveParams = kp - }) -} - -// WithUnaryInterceptor returns a DialOption that specifies the interceptor for -// unary RPCs. -func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.unaryInt = f - }) -} - -// WithStreamInterceptor returns a DialOption that specifies the interceptor for -// streaming RPCs. -func WithStreamInterceptor(f StreamClientInterceptor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.streamInt = f - }) -} - -// WithAuthority returns a DialOption that specifies the value to be used as the -// :authority pseudo-header. This value only works with WithInsecure and has no -// effect if TransportCredentials are present. -func WithAuthority(a string) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.authority = a - }) -} - -// WithChannelzParentID returns a DialOption that specifies the channelz ID of -// current ClientConn's parent. This function is used in nested channel creation -// (e.g. grpclb dial). -func WithChannelzParentID(id int64) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.channelzParentID = id - }) -} - -// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any -// service config provided by the resolver and provides a hint to the resolver -// to not fetch service configs. 
-// -// Note that, this dial option only disables service config from resolver. If -// default service config is provided, grpc will use the default service config. -func WithDisableServiceConfig() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.disableServiceConfig = true - }) -} - -// WithDefaultServiceConfig returns a DialOption that configures the default -// service config, which will be used in cases where: -// 1. WithDisableServiceConfig is called. -// 2. Resolver does not return service config or if the resolver gets and invalid config. -// -// This API is EXPERIMENTAL. -func WithDefaultServiceConfig(s string) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.defaultServiceConfigRawJSON = &s - }) -} - -// WithDisableRetry returns a DialOption that disables retries, even if the -// service config enables them. This does not impact transparent retries, which -// will happen automatically if no data is written to the wire or if the RPC is -// unprocessed by the remote server. -// -// Retry support is currently disabled by default, but will be enabled by -// default in the future. Until then, it may be enabled by setting the -// environment variable "GRPC_GO_RETRY" to "on". -// -// This API is EXPERIMENTAL. -func WithDisableRetry() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.disableRetry = true - }) -} - -// WithMaxHeaderListSize returns a DialOption that specifies the maximum -// (uncompressed) size of header list that the client is prepared to accept. -func WithMaxHeaderListSize(s uint32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.MaxHeaderListSize = &s - }) -} - -// WithDisableHealthCheck disables the LB channel health checking for all -// SubConns of this ClientConn. -// -// This API is EXPERIMENTAL. -func WithDisableHealthCheck() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.disableHealthCheck = true - }) -} - -// withHealthCheckFunc replaces the default health check function with the -// provided one. It makes tests easier to change the health check function. -// -// For testing purpose only. -func withHealthCheckFunc(f internal.HealthChecker) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.healthCheckFunc = f - }) -} - -func defaultDialOptions() dialOptions { - return dialOptions{ - disableRetry: !envconfig.Retry, - reqHandshake: envconfig.RequireHandshake, - healthCheckFunc: internal.HealthCheckFunc, - copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, - ReadBufferSize: defaultReadBufSize, - }, - } -} - -// withGetMinConnectDeadline specifies the function that clientconn uses to -// get minConnectDeadline. This can be used to make connection attempts happen -// faster/slower. -// -// For testing purpose only. 
-func withMinConnectDeadline(f func() time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.minConnectTimeout = f - }) -} diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 30a75da99d..ade8b7cec7 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -102,10 +102,10 @@ func RegisterCodec(codec Codec) { if codec == nil { panic("cannot register a nil Codec") } - if codec.Name() == "" { - panic("cannot register Codec with empty string result for Name()") - } contentSubtype := strings.ToLower(codec.Name()) + if contentSubtype == "" { + panic("cannot register Codec with empty string result for String()") + } registeredCodecs[contentSubtype] = codec } diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/envconfig.go similarity index 62% rename from vendor/google.golang.org/grpc/internal/channelz/util_linux.go rename to vendor/google.golang.org/grpc/envconfig.go index fdf409d55d..d50178e517 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/envconfig.go @@ -1,5 +1,3 @@ -// +build linux,!appengine - /* * * Copyright 2018 gRPC authors. @@ -18,22 +16,22 @@ * */ -package channelz +package grpc import ( - "syscall" + "os" + "strings" ) -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { - c, ok := socket.(syscall.Conn) - if !ok { - return nil - } - data := &SocketOptionData{} - if rawConn, err := c.SyscallConn(); err == nil { - rawConn.Control(data.Getsockopt) - return data - } - return nil +const ( + envConfigPrefix = "GRPC_GO_" + envConfigStickinessStr = envConfigPrefix + "STICKINESS" +) + +var ( + envConfigStickinessOn bool +) + +func init() { + envConfigStickinessOn = strings.EqualFold(os.Getenv(envConfigStickinessStr), "on") } diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod deleted file mode 100644 index 9f3ef3a539..0000000000 --- a/vendor/google.golang.org/grpc/go.mod +++ /dev/null @@ -1,19 +0,0 @@ -module google.golang.org/grpc - -require ( - cloud.google.com/go v0.26.0 // indirect - github.com/BurntSushi/toml v0.3.1 // indirect - github.com/client9/misspell v0.3.4 - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/mock v1.1.1 - github.com/golang/protobuf v1.2.0 - golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 - golang.org/x/net v0.0.0-20190311183353-d8887717615a - golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be - golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect - golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a - golang.org/x/tools v0.0.0-20190311212946-11955173bddd - google.golang.org/appengine v1.1.0 // indirect - google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 - honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099 -) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum deleted file mode 100644 index b8638ce769..0000000000 --- a/vendor/google.golang.org/grpc/go.sum +++ /dev/null @@ -1,33 +0,0 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099 h1:XJP7lxbSxWLOMNdBE4B/STaqVy6L73o0knwj2vIlxnw= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go new file mode 100644 index 0000000000..535ee9356f --- /dev/null +++ b/vendor/google.golang.org/grpc/go16.go @@ -0,0 +1,70 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "io" + "net" + "net/http" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req.Cancel = ctx.Done() + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go new file mode 100644 index 0000000000..ec676a93c3 --- /dev/null +++ b/vendor/google.golang.org/grpc/go17.go @@ -0,0 +1,71 @@ +// +build go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "io" + "net" + "net/http" + + netctx "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} + +// toRPCErr converts an error into an error from the status package. 
+func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled, netctx.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 51bb9457cd..1fabb11e1b 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,7 +18,7 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. +// All logs in transport package only go to verbose level 2. // All logs in other packages in grpc are logged in spite of the verbosity level. // // In the default logger, diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go deleted file mode 100644 index e15f04c229..0000000000 --- a/vendor/google.golang.org/grpc/health/client.go +++ /dev/null @@ -1,107 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package health - -import ( - "context" - "fmt" - "io" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/status" -) - -const maxDelay = 120 * time.Second - -var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay} -var backoffFunc = func(ctx context.Context, retries int) bool { - d := backoffStrategy.Backoff(retries) - timer := time.NewTimer(d) - select { - case <-timer.C: - return true - case <-ctx.Done(): - timer.Stop() - return false - } -} - -func init() { - internal.HealthCheckFunc = clientHealthCheck -} - -func clientHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), service string) error { - tryCnt := 0 - -retryConnection: - for { - // Backs off if the connection has failed in some way without receiving a message in the previous retry. - if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) { - return nil - } - tryCnt++ - - if ctx.Err() != nil { - return nil - } - rawS, err := newStream() - if err != nil { - continue retryConnection - } - - s, ok := rawS.(grpc.ClientStream) - // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes. 
- if !ok { - reportHealth(true) - return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) - } - - if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF { - // Stream should have been closed, so we can safely continue to create a new stream. - continue retryConnection - } - s.CloseSend() - - resp := new(healthpb.HealthCheckResponse) - for { - err = s.RecvMsg(resp) - - // Reports healthy for the LBing purposes if health check is not implemented in the server. - if status.Code(err) == codes.Unimplemented { - reportHealth(true) - return err - } - - // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. - if err != nil { - reportHealth(false) - continue retryConnection - } - - // As a message has been received, removes the need for backoff for the next retry by reseting the try count. - tryCnt = 0 - reportHealth(resp.Status == healthpb.HealthCheckResponse_SERVING) - } - } -} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index c2f2c7729d..a1fda2801b 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -26,30 +26,27 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type HealthCheckResponse_ServingStatus int32 const ( - HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 - HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 - HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 - HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 ) var HealthCheckResponse_ServingStatus_name = map[int32]string{ 0: "UNKNOWN", 1: "SERVING", 2: "NOT_SERVING", - 3: "SERVICE_UNKNOWN", } var HealthCheckResponse_ServingStatus_value = map[string]int32{ - "UNKNOWN": 0, - "SERVING": 1, - "NOT_SERVING": 2, - "SERVICE_UNKNOWN": 3, + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, } func (x HealthCheckResponse_ServingStatus) String() string { return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) } func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0} + return fileDescriptor_health_85731b6c49265086, []int{1, 0} } type HealthCheckRequest struct { @@ -63,7 +60,7 @@ func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } func (*HealthCheckRequest) ProtoMessage() {} func (*HealthCheckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_health_6b1a06aa67f91efd, []int{0} + return fileDescriptor_health_85731b6c49265086, []int{0} } func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b) @@ -101,7 +98,7 @@ func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } func (*HealthCheckResponse) ProtoMessage() {} func (*HealthCheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_health_6b1a06aa67f91efd, []int{1} + return fileDescriptor_health_85731b6c49265086, []int{1} } 
func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b) @@ -146,25 +143,7 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type HealthClient interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) - // Performs a watch for the serving status of the requested service. - // The server will immediately send back a message indicating the current - // serving status. It will then subsequently send a new message whenever - // the service's serving status changes. - // - // If the requested service is unknown when the call is received, the - // server will send a message setting the serving status to - // SERVICE_UNKNOWN but will *not* terminate the call. If at some - // future point, the serving status of the service becomes known, the - // server will send a new message with the service's serving status. - // - // If the call terminates with status UNIMPLEMENTED, then clients - // should assume this method is not supported and should not retry the - // call. If the call terminates with any other status (including OK), - // clients should retry the call with appropriate exponential backoff. - Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) } type healthClient struct { @@ -184,59 +163,9 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) - if err != nil { - return nil, err - } - x := &healthWatchClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - // HealthServer is the server API for Health service. type HealthServer interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) - // Performs a watch for the serving status of the requested service. - // The server will immediately send back a message indicating the current - // serving status. It will then subsequently send a new message whenever - // the service's serving status changes. - // - // If the requested service is unknown when the call is received, the - // server will send a message setting the serving status to - // SERVICE_UNKNOWN but will *not* terminate the call. If at some - // future point, the serving status of the service becomes known, the - // server will send a new message with the service's serving status. 
- // - // If the call terminates with status UNIMPLEMENTED, then clients - // should assume this method is not supported and should not retry the - // call. If the call terminates with any other status (including OK), - // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error } func RegisterHealthServer(s *grpc.Server, srv HealthServer) { @@ -261,27 +190,6 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } -func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(HealthCheckRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream -} - -type healthWatchServer struct { - grpc.ServerStream -} - -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} - var _Health_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.health.v1.Health", HandlerType: (*HealthServer)(nil), @@ -291,37 +199,29 @@ var _Health_serviceDesc = grpc.ServiceDesc{ Handler: _Health_Check_Handler, }, }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Watch", - Handler: _Health_Watch_Handler, - ServerStreams: true, - }, - }, + Streams: []grpc.StreamDesc{}, Metadata: "grpc/health/v1/health.proto", } -func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) } +func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_85731b6c49265086) } -var fileDescriptor_health_6b1a06aa67f91efd = []byte{ - // 297 bytes of a gzipped FileDescriptorProto +var fileDescriptor_health_85731b6c49265086 = []byte{ + // 271 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, - 0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8, + 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d, - 0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f, + 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8, - 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3, - 0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac, - 0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10, - 0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc, - 0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4, - 0xfc, 
0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, - 0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20, - 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff, - 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00, + 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b, 0x84, + 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, 0xb8, + 0xd6, 0x29, 0x91, 0x4b, 0x30, 0x33, 0x1f, 0x4d, 0xa1, 0x13, 0x37, 0x44, 0x65, 0x00, 0x28, 0x70, + 0x03, 0x18, 0xa3, 0x74, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xd2, 0xf3, 0x73, 0x12, 0xf3, + 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0x91, 0x63, 0x03, 0xc4, 0x8e, 0x87, 0xb0, 0xe3, 0xcb, 0x0c, + 0x57, 0x31, 0xf1, 0xb9, 0x83, 0x4c, 0x83, 0x18, 0xa1, 0x17, 0x66, 0x98, 0xc4, 0x06, 0x8e, 0x24, + 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xec, 0x66, 0x81, 0xcb, 0xc3, 0x01, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go new file mode 100644 index 0000000000..c2588867e5 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/health.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +// Package health provides some utility functions to health-check a server. The implementation +// is based on protobuf. Users need to write their own implementations if other IDLs are used. +package health + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +// Server implements `service Health`. +type Server struct { + mu sync.Mutex + // statusMap stores the serving status of the services this Server monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus +} + +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ + statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus), + } +} + +// Check implements `service Health`. +func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if in.Service == "" { + // check the server overall health status. + return &healthpb.HealthCheckResponse{ + Status: healthpb.HealthCheckResponse_SERVING, + }, nil + } + if status, ok := s.statusMap[in.Service]; ok { + return &healthpb.HealthCheckResponse{ + Status: status, + }, nil + } + return nil, status.Error(codes.NotFound, "unknown service") +} + +// SetServingStatus is called when need to reset the serving status of a service +// or insert a new service entry into the statusMap. 
+func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) { + s.mu.Lock() + s.statusMap[service] = status + s.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go deleted file mode 100644 index c79f9d2ab8..0000000000 --- a/vendor/google.golang.org/grpc/health/server.go +++ /dev/null @@ -1,165 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -//go:generate ./regenerate.sh - -// Package health provides a service that exposes server's health and it must be -// imported to enable support for client-side health checks. -package health - -import ( - "context" - "sync" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - healthgrpc "google.golang.org/grpc/health/grpc_health_v1" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/status" -) - -// Server implements `service Health`. -type Server struct { - mu sync.Mutex - // If shutdown is true, it's expected all serving status is NOT_SERVING, and - // will stay in NOT_SERVING. - shutdown bool - // statusMap stores the serving status of the services this Server monitors. - statusMap map[string]healthpb.HealthCheckResponse_ServingStatus - updates map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus -} - -// NewServer returns a new Server. -func NewServer() *Server { - return &Server{ - statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING}, - updates: make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus), - } -} - -// Check implements `service Health`. -func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { - s.mu.Lock() - defer s.mu.Unlock() - if servingStatus, ok := s.statusMap[in.Service]; ok { - return &healthpb.HealthCheckResponse{ - Status: servingStatus, - }, nil - } - return nil, status.Error(codes.NotFound, "unknown service") -} - -// Watch implements `service Health`. -func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { - service := in.Service - // update channel is used for getting service status updates. - update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1) - s.mu.Lock() - // Puts the initial status to the channel. - if servingStatus, ok := s.statusMap[service]; ok { - update <- servingStatus - } else { - update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN - } - - // Registers the update channel to the correct place in the updates map. 
- if _, ok := s.updates[service]; !ok { - s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus) - } - s.updates[service][stream] = update - defer func() { - s.mu.Lock() - delete(s.updates[service], stream) - s.mu.Unlock() - }() - s.mu.Unlock() - - var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1 - for { - select { - // Status updated. Sends the up-to-date status to the client. - case servingStatus := <-update: - if lastSentStatus == servingStatus { - continue - } - lastSentStatus = servingStatus - err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus}) - if err != nil { - return status.Error(codes.Canceled, "Stream has ended.") - } - // Context done. Removes the update channel from the updates map. - case <-stream.Context().Done(): - return status.Error(codes.Canceled, "Stream has ended.") - } - } -} - -// SetServingStatus is called when need to reset the serving status of a service -// or insert a new service entry into the statusMap. -func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { - s.mu.Lock() - defer s.mu.Unlock() - if s.shutdown { - grpclog.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus) - return - } - - s.setServingStatusLocked(service, servingStatus) -} - -func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { - s.statusMap[service] = servingStatus - for _, update := range s.updates[service] { - // Clears previous updates, that are not sent to the client, from the channel. - // This can happen if the client is not reading and the server gets flow control limited. - select { - case <-update: - default: - } - // Puts the most recent update to the channel. - update <- servingStatus - } -} - -// Shutdown sets all serving status to NOT_SERVING, and configures the server to -// ignore all future status changes. -// -// This changes serving status for all services. To set status for a perticular -// services, call SetServingStatus(). -func (s *Server) Shutdown() { - s.mu.Lock() - defer s.mu.Unlock() - s.shutdown = true - for service := range s.statusMap { - s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING) - } -} - -// Resume sets all serving status to SERVING, and configures the server to -// accept all future status changes. -// -// This changes serving status for all services. To set status for a perticular -// services, call SetServingStatus(). 
-func (s *Server) Resume() { - s.mu.Lock() - defer s.mu.Unlock() - s.shutdown = false - for service := range s.statusMap { - s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING) - } -} diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh deleted file mode 100644 index 7c7bcada50..0000000000 --- a/vendor/google.golang.org/grpc/install_gae.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ -&& unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 8b7350022a..1f6ef67803 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -19,7 +19,7 @@ package grpc import ( - "context" + "golang.org/x/net/context" ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go deleted file mode 100644 index 3a905d9665..0000000000 --- a/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package balancerload defines APIs to parse server loads in trailers. The -// parsed loads are sent to balancers in DoneInfo. -package balancerload - -import ( - "google.golang.org/grpc/metadata" -) - -// Parser converts loads from metadata into a concrete type. -type Parser interface { - // Parse parses loads from metadata. - Parse(md metadata.MD) interface{} -} - -var parser Parser - -// SetParser sets the load parser. -// -// Not mutex-protected, should be called before any gRPC functions. -func SetParser(lr Parser) { - parser = lr -} - -// Parse calls parser.Read(). -func Parse(md metadata.MD) interface{} { - if parser == nil { - return nil - } - return parser.Parse(md) -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go deleted file mode 100644 index fee6aecd08..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ /dev/null @@ -1,167 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package binarylog implementation binary logging as defined in -// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. -package binarylog - -import ( - "fmt" - "os" - - "google.golang.org/grpc/grpclog" -) - -// Logger is the global binary logger. It can be used to get binary logger for -// each method. -type Logger interface { - getMethodLogger(methodName string) *MethodLogger -} - -// binLogger is the global binary logger for the binary. One of this should be -// built at init time from the configuration (environment varialbe or flags). -// -// It is used to get a methodLogger for each individual method. -var binLogger Logger - -// SetLogger sets the binarg logger. -// -// Only call this at init time. -func SetLogger(l Logger) { - binLogger = l -} - -// GetMethodLogger returns the methodLogger for the given methodName. -// -// methodName should be in the format of "/service/method". -// -// Each methodLogger returned by this method is a new instance. This is to -// generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { - if binLogger == nil { - return nil - } - return binLogger.getMethodLogger(methodName) -} - -func init() { - const envStr = "GRPC_BINARY_LOG_FILTER" - configStr := os.Getenv(envStr) - binLogger = NewLoggerFromConfigString(configStr) -} - -type methodLoggerConfig struct { - // Max length of header and message. - hdr, msg uint64 -} - -type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig - - blacklist map[string]struct{} -} - -// newEmptyLogger creates an empty logger. The map fields need to be filled in -// using the set* functions. -func newEmptyLogger() *logger { - return &logger{} -} - -// Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { - return fmt.Errorf("conflicting global rules found") - } - l.all = ml - return nil -} - -// Set method logger for "service/*". -// -// New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { - return fmt.Errorf("conflicting rules for service %v found", service) - } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) - } - l.services[service] = ml - return nil -} - -// Set method logger for "service/method". -// -// New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) - } - if _, ok := l.methods[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) - } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) - } - l.methods[method] = ml - return nil -} - -// Set blacklist method for "-service/method". 
-func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) - } - if _, ok := l.methods[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) - } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) - } - l.blacklist[method] = struct{}{} - return nil -} - -// getMethodLogger returns the methodLogger for the given methodName. -// -// methodName should be in the format of "/service/method". -// -// Each methodLogger returned by this method is a new instance. This is to -// generate sequence id within the call. -func (l *logger) getMethodLogger(methodName string) *MethodLogger { - s, m, err := parseMethodName(methodName) - if err != nil { - grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) - return nil - } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) - } - if _, ok := l.blacklist[s+"/"+m]; ok { - return nil - } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) - } - if l.all == nil { - return nil - } - return newMethodLogger(l.all.hdr, l.all.msg) -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go deleted file mode 100644 index 1ee00a39ac..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// This file contains exported variables/functions that are exported for testing -// only. -// -// An ideal way for this would be to put those in a *_test.go but in binarylog -// package. But this doesn't work with staticcheck with go module. Error was: -// "MdToMetadataProto not declared by package binarylog". This could be caused -// by the way staticcheck looks for files for a certain package, which doesn't -// support *_test.go files. -// -// Move those to binary_test.go when staticcheck is fixed. - -package binarylog - -var ( - // AllLogger is a logger that logs all headers/messages for all RPCs. It's - // for testing only. - AllLogger = NewLoggerFromConfigString("*") - // MdToMetadataProto converts metadata to a binary logging proto message. - // It's for testing only. - MdToMetadataProto = mdToMetadataProto - // AddrToProto converts an address to a binary logging proto message. It's - // for testing only. - AddrToProto = addrToProto -) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go deleted file mode 100644 index 4cc2525df7..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ /dev/null @@ -1,210 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "errors" - "fmt" - "regexp" - "strconv" - "strings" - - "google.golang.org/grpc/grpclog" -) - -// NewLoggerFromConfigString reads the string and build a logger. It can be used -// to build a new logger and assign it to binarylog.Logger. -// -// Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. -// -// If two configs exist for one certain method or service, the one specified -// later overrides the privous config. -func NewLoggerFromConfigString(s string) Logger { - if s == "" { - return nil - } - l := newEmptyLogger() - methods := strings.Split(s, ",") - for _, method := range methods { - if err := l.fillMethodLoggerWithConfigString(method); err != nil { - grpclog.Warningf("failed to parse binary log config: %v", err) - return nil - } - } - return l -} - -// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds -// it to the right map in the logger. -func (l *logger) fillMethodLoggerWithConfigString(config string) error { - // "" is invalid. - if config == "" { - return errors.New("empty string is not a valid method binary logging config") - } - - // "-service/method", blacklist, no * or {} allowed. 
- if config[0] == '-' { - s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) - if err != nil { - return fmt.Errorf("invalid config: %q, %v", config, err) - } - if m == "*" { - return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config") - } - if suffix != "" { - return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") - } - if err := l.setBlacklist(s + "/" + m); err != nil { - return fmt.Errorf("invalid config: %v", err) - } - return nil - } - - // "*{h:256;m:256}" - if config[0] == '*' { - hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) - if err != nil { - return fmt.Errorf("invalid config: %q, %v", config, err) - } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { - return fmt.Errorf("invalid config: %v", err) - } - return nil - } - - s, m, suffix, err := parseMethodConfigAndSuffix(config) - if err != nil { - return fmt.Errorf("invalid config: %q, %v", config, err) - } - hdr, msg, err := parseHeaderMessageLengthConfig(suffix) - if err != nil { - return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) - } - if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { - return fmt.Errorf("invalid config: %v", err) - } - } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { - return fmt.Errorf("invalid config: %v", err) - } - } - return nil -} - -const ( - // TODO: this const is only used by env_config now. But could be useful for - // other config. Move to binarylog.go if necessary. - maxUInt = ^uint64(0) - - // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for - // expected output. - longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` - - // For suffix from above, "{h:123,m:123}". See test for expected output. - optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". - headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` - messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` - headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` -) - -var ( - longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) - headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) - messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) - headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) -) - -// Turn "service/method{h;m}" into "service", "method", "{h;m}". -func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { - // Regexp result: - // - // in: "p.s/m{h:123,m:123}", - // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, - match := longMethodConfigRegexp.FindStringSubmatch(c) - if match == nil { - return "", "", "", fmt.Errorf("%q contains invalid substring", c) - } - service = match[1] - method = match[2] - suffix = match[3] - return -} - -// Turn "{h:123;m:345}" into 123, 345. -// -// Return maxUInt if length is unspecified. -func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { - if c == "" { - return maxUInt, maxUInt, nil - } - // Header config only. 
- if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { - if s := match[1]; s != "" { - hdrLenStr, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("failed to convert %q to uint", s) - } - return hdrLenStr, 0, nil - } - return maxUInt, 0, nil - } - - // Message config only. - if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { - if s := match[1]; s != "" { - msgLenStr, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("failed to convert %q to uint", s) - } - return 0, msgLenStr, nil - } - return 0, maxUInt, nil - } - - // Header and message config both. - if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { - // Both hdr and msg are specified, but one or two of them might be empty. - hdrLenStr = maxUInt - msgLenStr = maxUInt - if s := match[1]; s != "" { - hdrLenStr, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("failed to convert %q to uint", s) - } - } - if s := match[2]; s != "" { - msgLenStr, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("failed to convert %q to uint", s) - } - } - return hdrLenStr, msgLenStr, nil - } - return 0, 0, fmt.Errorf("%q contains invalid substring", c) -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go deleted file mode 100644 index 160f6e8616..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ /dev/null @@ -1,423 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "net" - "strings" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -type callIDGenerator struct { - id uint64 -} - -func (g *callIDGenerator) next() uint64 { - id := atomic.AddUint64(&g.id, 1) - return id -} - -// reset is for testing only, and doesn't need to be thread safe. -func (g *callIDGenerator) reset() { - g.id = 0 -} - -var idGen callIDGenerator - -// MethodLogger is the sub-logger for each method. -type MethodLogger struct { - headerMaxLen, messageMaxLen uint64 - - callID uint64 - idWithinCallGen *callIDGenerator - - sink Sink // TODO(blog): make this plugable. -} - -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ - headerMaxLen: h, - messageMaxLen: m, - - callID: idGen.next(), - idWithinCallGen: &callIDGenerator{}, - - sink: defaultSink, // TODO(blog): make it plugable. - } -} - -// Log creates a proto binary log entry, and logs it to the sink. 
-func (ml *MethodLogger) Log(c LogEntryConfig) { - m := c.toProto() - timestamp, _ := ptypes.TimestampProto(time.Now()) - m.Timestamp = timestamp - m.CallId = ml.callID - m.SequenceIdWithinCall = ml.idWithinCallGen.next() - - switch pay := m.Payload.(type) { - case *pb.GrpcLogEntry_ClientHeader: - m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *pb.GrpcLogEntry_ServerHeader: - m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *pb.GrpcLogEntry_Message: - m.PayloadTruncated = ml.truncateMessage(pay.Message) - } - - ml.sink.Write(m) -} - -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { - if ml.headerMaxLen == maxUInt { - return false - } - var ( - bytesLimit = ml.headerMaxLen - index int - ) - // At the end of the loop, index will be the first entry where the total - // size is greater than the limit: - // - // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. - for ; index < len(mdPb.Entry); index++ { - entry := mdPb.Entry[index] - if entry.Key == "grpc-trace-bin" { - // "grpc-trace-bin" is a special key. It's kept in the log entry, - // but not counted towards the size limit. - continue - } - currentEntryLen := uint64(len(entry.Value)) - if currentEntryLen > bytesLimit { - break - } - bytesLimit -= currentEntryLen - } - truncated = index < len(mdPb.Entry) - mdPb.Entry = mdPb.Entry[:index] - return truncated -} - -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { - if ml.messageMaxLen == maxUInt { - return false - } - if ml.messageMaxLen >= uint64(len(msgPb.Data)) { - return false - } - msgPb.Data = msgPb.Data[:ml.messageMaxLen] - return true -} - -// LogEntryConfig represents the configuration for binary log entry. -type LogEntryConfig interface { - toProto() *pb.GrpcLogEntry -} - -// ClientHeader configs the binary log entry to be a ClientHeader entry. -type ClientHeader struct { - OnClientSide bool - Header metadata.MD - MethodName string - Authority string - Timeout time.Duration - // PeerAddr is required only when it's on server side. - PeerAddr net.Addr -} - -func (c *ClientHeader) toProto() *pb.GrpcLogEntry { - // This function doesn't need to set all the fields (e.g. seq ID). The Log - // function will set the fields when necessary. - clientHeader := &pb.ClientHeader{ - Metadata: mdToMetadataProto(c.Header), - MethodName: c.MethodName, - Authority: c.Authority, - } - if c.Timeout > 0 { - clientHeader.Timeout = ptypes.DurationProto(c.Timeout) - } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &pb.GrpcLogEntry_ClientHeader{ - ClientHeader: clientHeader, - }, - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - if c.PeerAddr != nil { - ret.Peer = addrToProto(c.PeerAddr) - } - return ret -} - -// ServerHeader configs the binary log entry to be a ServerHeader entry. -type ServerHeader struct { - OnClientSide bool - Header metadata.MD - // PeerAddr is required only when it's on client side. 
- PeerAddr net.Addr -} - -func (c *ServerHeader) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ - Metadata: mdToMetadataProto(c.Header), - }, - }, - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - if c.PeerAddr != nil { - ret.Peer = addrToProto(c.PeerAddr) - } - return ret -} - -// ClientMessage configs the binary log entry to be a ClientMessage entry. -type ClientMessage struct { - OnClientSide bool - // Message can be a proto.Message or []byte. Other messages formats are not - // supported. - Message interface{} -} - -func (c *ClientMessage) toProto() *pb.GrpcLogEntry { - var ( - data []byte - err error - ) - if m, ok := c.Message.(proto.Message); ok { - data, err = proto.Marshal(m) - if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) - } - } else if b, ok := c.Message.([]byte); ok { - data = b - } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") - } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ - Length: uint32(len(data)), - Data: data, - }, - }, - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - return ret -} - -// ServerMessage configs the binary log entry to be a ServerMessage entry. -type ServerMessage struct { - OnClientSide bool - // Message can be a proto.Message or []byte. Other messages formats are not - // supported. - Message interface{} -} - -func (c *ServerMessage) toProto() *pb.GrpcLogEntry { - var ( - data []byte - err error - ) - if m, ok := c.Message.(proto.Message); ok { - data, err = proto.Marshal(m) - if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) - } - } else if b, ok := c.Message.([]byte); ok { - data = b - } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") - } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ - Length: uint32(len(data)), - Data: data, - }, - }, - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - return ret -} - -// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. -type ClientHalfClose struct { - OnClientSide bool -} - -func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, - Payload: nil, // No payload here. - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - return ret -} - -// ServerTrailer configs the binary log entry to be a ServerTrailer entry. -type ServerTrailer struct { - OnClientSide bool - Trailer metadata.MD - // Err is the status error. - Err error - // PeerAddr is required only when it's on client side and the RPC is trailer - // only. 
- PeerAddr net.Addr -} - -func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { - st, ok := status.FromError(c.Err) - if !ok { - grpclog.Info("binarylogging: error in trailer is not a status error") - } - var ( - detailsBytes []byte - err error - ) - stProto := st.Proto() - if stProto != nil && len(stProto.Details) != 0 { - detailsBytes, err = proto.Marshal(stProto) - if err != nil { - grpclog.Infof("binarylogging: failed to marshal status proto: %v", err) - } - } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ - Metadata: mdToMetadataProto(c.Trailer), - StatusCode: uint32(st.Code()), - StatusMessage: st.Message(), - StatusDetails: detailsBytes, - }, - }, - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - if c.PeerAddr != nil { - ret.Peer = addrToProto(c.PeerAddr) - } - return ret -} - -// Cancel configs the binary log entry to be a Cancel entry. -type Cancel struct { - OnClientSide bool -} - -func (c *Cancel) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, - Payload: nil, - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - return ret -} - -// metadataKeyOmit returns whether the metadata entry with this key should be -// omitted. -func metadataKeyOmit(key string) bool { - switch key { - case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": - return true - case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. - return false - } - return strings.HasPrefix(key, "grpc-") -} - -func mdToMetadataProto(md metadata.MD) *pb.Metadata { - ret := &pb.Metadata{} - for k, vv := range md { - if metadataKeyOmit(k) { - continue - } - for _, v := range vv { - ret.Entry = append(ret.Entry, - &pb.MetadataEntry{ - Key: k, - Value: []byte(v), - }, - ) - } - } - return ret -} - -func addrToProto(addr net.Addr) *pb.Address { - ret := &pb.Address{} - switch a := addr.(type) { - case *net.TCPAddr: - if a.IP.To4() != nil { - ret.Type = pb.Address_TYPE_IPV4 - } else if a.IP.To16() != nil { - ret.Type = pb.Address_TYPE_IPV6 - } else { - ret.Type = pb.Address_TYPE_UNKNOWN - // Do not set address and port fields. - break - } - ret.Address = a.IP.String() - ret.IpPort = uint32(a.Port) - case *net.UnixAddr: - ret.Type = pb.Address_TYPE_UNIX - ret.Address = a.String() - default: - ret.Type = pb.Address_TYPE_UNKNOWN - } - return ret -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh deleted file mode 100644 index 113d40cbe1..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Copyright 2018 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -eux -o pipefail - -TMP=$(mktemp -d) - -function finish { - rm -rf "$TMP" -} -trap finish EXIT - -pushd "$TMP" -mkdir -p grpc/binarylog/grpc_binarylog_v1 -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto - -protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto -popd -rm -f ./grpc_binarylog_v1/*.pb.go -cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/ - diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go deleted file mode 100644 index 20d044f0fd..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ /dev/null @@ -1,162 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "bufio" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "sync" - "time" - - "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" -) - -var ( - defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). -) - -// SetDefaultSink sets the sink where binary logs will be written to. -// -// Not thread safe. Only set during initialization. -func SetDefaultSink(s Sink) { - if defaultSink != nil { - defaultSink.Close() - } - defaultSink = s -} - -// Sink writes log entry into the binary log sink. -type Sink interface { - // Write will be called to write the log entry into the sink. - // - // It should be thread-safe so it can be called in parallel. - Write(*pb.GrpcLogEntry) error - // Close will be called when the Sink is replaced by a new Sink. - Close() error -} - -type noopSink struct{} - -func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } - -// newWriterSink creates a binary log sink with the given writer. -// -// Write() marshalls the proto message and writes it to the given writer. Each -// message is prefixed with a 4 byte big endian unsigned integer as the length. -// -// No buffer is done, Close() doesn't try to close the writer. -func newWriterSink(w io.Writer) *writerSink { - return &writerSink{out: w} -} - -type writerSink struct { - out io.Writer -} - -func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { - b, err := proto.Marshal(e) - if err != nil { - grpclog.Infof("binary logging: failed to marshal proto message: %v", err) - } - hdr := make([]byte, 4) - binary.BigEndian.PutUint32(hdr, uint32(len(b))) - if _, err := ws.out.Write(hdr); err != nil { - return err - } - if _, err := ws.out.Write(b); err != nil { - return err - } - return nil -} - -func (ws *writerSink) Close() error { return nil } - -type bufWriteCloserSink struct { - mu sync.Mutex - closer io.Closer - out *writerSink // out is built on buf. - buf *bufio.Writer // buf is kept for flush. 
- - writeStartOnce sync.Once - writeTicker *time.Ticker -} - -func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error { - // Start the write loop when Write is called. - fs.writeStartOnce.Do(fs.startFlushGoroutine) - fs.mu.Lock() - if err := fs.out.Write(e); err != nil { - fs.mu.Unlock() - return err - } - fs.mu.Unlock() - return nil -} - -const ( - bufFlushDuration = 60 * time.Second -) - -func (fs *bufWriteCloserSink) startFlushGoroutine() { - fs.writeTicker = time.NewTicker(bufFlushDuration) - go func() { - for range fs.writeTicker.C { - fs.mu.Lock() - fs.buf.Flush() - fs.mu.Unlock() - } - }() -} - -func (fs *bufWriteCloserSink) Close() error { - if fs.writeTicker != nil { - fs.writeTicker.Stop() - } - fs.mu.Lock() - fs.buf.Flush() - fs.closer.Close() - fs.out.Close() - fs.mu.Unlock() - return nil -} - -func newBufWriteCloserSink(o io.WriteCloser) Sink { - bufW := bufio.NewWriter(o) - return &bufWriteCloserSink{ - closer: o, - out: newWriterSink(bufW), - buf: bufW, - } -} - -// NewTempFileSink creates a temp file and returns a Sink that writes to this -// file. -func NewTempFileSink() (Sink, error) { - tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt") - if err != nil { - return nil, fmt.Errorf("failed to create temp file: %v", err) - } - return newBufWriteCloserSink(tempFile), nil -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 041520d351..586a0336b4 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -27,22 +27,16 @@ import ( "sort" "sync" "sync/atomic" - "time" "google.golang.org/grpc/grpclog" ) -const ( - defaultMaxTraceEntry int32 = 30 -) - var ( db dbWrapper idGen idGenerator // EntryPerPage defines the number of channelz entries to be shown on a web page. - EntryPerPage = int64(50) - curState int32 - maxTraceEntry = defaultMaxTraceEntry + EntryPerPage = 50 + curState int32 ) // TurnOn turns on channelz data collection. @@ -58,22 +52,6 @@ func IsOn() bool { return atomic.CompareAndSwapInt32(&curState, 1, 1) } -// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). -// Setting it to 0 will disable channel tracing. -func SetMaxTraceEntry(i int32) { - atomic.StoreInt32(&maxTraceEntry, i) -} - -// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. -func ResetMaxTraceEntryToDefault() { - atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) -} - -func getMaxTraceEntry() int { - i := atomic.LoadInt32(&maxTraceEntry) - return int(i) -} - // dbWarpper wraps around a reference to internal channelz data storage, and // provide synchronized functionality to set and get the reference. type dbWrapper struct { @@ -113,20 +91,20 @@ func NewChannelzStorage() { // boolean indicating whether there's more top channels to be queried for. // // The arg id specifies that only top channel with id at or above it will be included -// in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - return db.get().GetTopChannels(id, maxResults) +// in the result. The returned slice is up to a length of EntryPerPage, and is +// sorted in ascending id order. 
+func GetTopChannels(id int64) ([]*ChannelMetric, bool) { + return db.get().GetTopChannels(id) } // GetServers returns a slice of server's ServerMetric, along with a // boolean indicating whether there's more servers to be queried for. // // The arg id specifies that only server with id at or above it will be included -// in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { - return db.get().GetServers(id, maxResults) +// in the result. The returned slice is up to a length of EntryPerPage, and is +// sorted in ascending id order. +func GetServers(id int64) ([]*ServerMetric, bool) { + return db.get().GetServers(id) } // GetServerSockets returns a slice of server's (identified by id) normal socket's @@ -134,10 +112,10 @@ func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { // be queried for. // // The arg startID specifies that only sockets with id at or above it will be -// included in the result. The returned slice is up to a length of the arg maxResults -// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - return db.get().GetServerSockets(id, startID, maxResults) +// included in the result. The returned slice is up to a length of EntryPerPage, +// and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) { + return db.get().GetServerSockets(id, startID) } // GetChannel returns the ChannelMetric for the channel (identified by id). @@ -155,11 +133,6 @@ func GetSocket(id int64) *SocketMetric { return db.get().GetSocket(id) } -// GetServer returns the ServerMetric for the server (identified by id). -func GetServer(id int64) *ServerMetric { - return db.get().GetServer(id) -} - // RegisterChannel registers the given channel c in channelz database with ref // as its reference name, and add it to the child list of its parent (identified // by pid). pid = 0 means no parent. It returns the unique channelz tracking id @@ -173,7 +146,6 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 { nestedChans: make(map[int64]string), id: id, pid: pid, - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } if pid == 0 { db.get().addChannel(id, cn, true, pid, ref) @@ -198,7 +170,6 @@ func RegisterSubChannel(c Channel, pid int64, ref string) int64 { sockets: make(map[int64]string), id: id, pid: pid, - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } db.get().addSubChannel(id, sc, pid, ref) return id @@ -255,24 +226,6 @@ func RemoveEntry(id int64) { db.get().removeEntry(id) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. -type TraceEventDesc struct { - Desc string - Severity Severity - Parent *TraceEventDesc -} - -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(id int64, desc *TraceEventDesc) { - if getMaxTraceEntry() == 0 { - return - } - db.get().traceEvent(id, desc) -} - // channelMap is the storage data structure for channelz. 
// Methods of channelMap can be divided in two two categories with respect to locking. // 1. Methods acquire the global lock. @@ -298,7 +251,6 @@ func (c *channelMap) addServer(id int64, s *server) { func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { c.mu.Lock() cn.cm = c - cn.trace.cm = c c.channels[id] = cn if isTopChannel { c.topLevelChannels[id] = struct{}{} @@ -311,7 +263,6 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { c.mu.Lock() sc.cm = c - sc.trace.cm = c c.subChannels[id] = sc c.findEntry(pid).addChild(id, sc) c.mu.Unlock() @@ -333,25 +284,16 @@ func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref c.mu.Unlock() } -// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to -// wait on the deletion of its children and until no other entity's channel trace references it. -// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully -// shutting down server will lead to the server being also deleted. +// removeEntry triggers the removal of an entry, which may not indeed delete the +// entry, if it has to wait on the deletion of its children, or may lead to a chain +// of entry deletion. For example, deleting the last socket of a gracefully shutting +// down server will lead to the server being also deleted. func (c *channelMap) removeEntry(id int64) { c.mu.Lock() c.findEntry(id).triggerDelete() c.mu.Unlock() } -// c.mu must be held by the caller -func (c *channelMap) decrTraceRefCount(id int64) { - e := c.findEntry(id) - if v, ok := e.(tracedChannel); ok { - v.decrTraceRefCount() - e.deleteSelfIfReady() - } -} - // c.mu must be held by the caller. 
func (c *channelMap) findEntry(id int64) entry { var v entry @@ -405,39 +347,6 @@ func (c *channelMap) deleteEntry(id int64) { } } -func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { - c.mu.Lock() - child := c.findEntry(id) - childTC, ok := child.(tracedChannel) - if !ok { - c.mu.Unlock() - return - } - childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) - if desc.Parent != nil { - parent := c.findEntry(child.getParentID()) - var chanType RefChannelType - switch child.(type) { - case *channel: - chanType = RefChannel - case *subChannel: - chanType = RefSubChannel - } - if parentTC, ok := parent.(tracedChannel); ok { - parentTC.getChannelTrace().append(&TraceEvent{ - Desc: desc.Parent.Desc, - Severity: desc.Parent.Severity, - Timestamp: time.Now(), - RefID: id, - RefName: childTC.getRefName(), - RefType: chanType, - }) - childTC.incrTraceRefCount() - } - } - c.mu.Unlock() -} - type int64Slice []int64 func (s int64Slice) Len() int { return len(s) } @@ -452,32 +361,29 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int64) int64 { +func min(a, b int) int { if a < b { return a } return b } -func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } +func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) { c.mu.RLock() - l := int64(len(c.topLevelChannels)) + l := len(c.topLevelChannels) ids := make([]int64, 0, l) - cns := make([]*channel, 0, min(l, maxResults)) + cns := make([]*channel, 0, min(l, EntryPerPage)) for k := range c.topLevelChannels { ids = append(ids, k) } sort.Sort(int64Slice(ids)) idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) + count := 0 var end bool var t []*ChannelMetric for i, v := range ids[idx:] { - if count == maxResults { + if count == EntryPerPage { break } if cn, ok := c.channels[v]; ok { @@ -502,29 +408,25 @@ func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetri t[i].ChannelData = cn.c.ChannelzMetric() t[i].ID = cn.id t[i].RefName = cn.refName - t[i].Trace = cn.trace.dumpData() } return t, end } -func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } +func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) { c.mu.RLock() - l := int64(len(c.servers)) + l := len(c.servers) ids := make([]int64, 0, l) - ss := make([]*server, 0, min(l, maxResults)) + ss := make([]*server, 0, min(l, EntryPerPage)) for k := range c.servers { ids = append(ids, k) } sort.Sort(int64Slice(ids)) idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) + count := 0 var end bool var s []*ServerMetric for i, v := range ids[idx:] { - if count == maxResults { + if count == EntryPerPage { break } if svr, ok := c.servers[v]; ok { @@ -552,10 +454,7 @@ func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { return s, end } -func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } +func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) { var svr *server var ok bool c.mu.RLock() @@ -565,18 +464,18 @@ func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) return nil, true } svrskts := svr.sockets - l := int64(len(svrskts)) + l 
:= len(svrskts) ids := make([]int64, 0, l) - sks := make([]*normalSocket, 0, min(l, maxResults)) + sks := make([]*normalSocket, 0, min(l, EntryPerPage)) for k := range svrskts { ids = append(ids, k) } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) - count := int64(0) + sort.Sort((int64Slice(ids))) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := 0 var end bool for i, v := range ids[idx:] { - if count == maxResults { + if count == EntryPerPage { break } if ns, ok := c.normalSockets[v]; ok { @@ -615,14 +514,10 @@ func (c *channelMap) GetChannel(id int64) *ChannelMetric { } cm.NestedChans = copyMap(cn.nestedChans) cm.SubChans = copyMap(cn.subChans) - // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when - // holding the lock to prevent potential data race. - chanCopy := cn.c c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() + cm.ChannelData = cn.c.ChannelzMetric() cm.ID = cn.id cm.RefName = cn.refName - cm.Trace = cn.trace.dumpData() return cm } @@ -637,14 +532,10 @@ func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { return nil } cm.Sockets = copyMap(sc.sockets) - // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when - // holding the lock to prevent potential data race. - chanCopy := sc.c c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() + cm.ChannelData = sc.c.ChannelzMetric() cm.ID = sc.id cm.RefName = sc.refName - cm.Trace = sc.trace.dumpData() return cm } @@ -669,23 +560,6 @@ func (c *channelMap) GetSocket(id int64) *SocketMetric { return nil } -func (c *channelMap) GetServer(id int64) *ServerMetric { - sm := &ServerMetric{} - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - c.mu.RUnlock() - return nil - } - sm.ListenSockets = copyMap(svr.listenSockets) - c.mu.RUnlock() - sm.ID = svr.id - sm.RefName = svr.refName - sm.ServerData = svr.s.ChannelzMetric() - return sm -} - type idGenerator struct { id int64 } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 17c2274cb3..153d75340e 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -20,12 +20,9 @@ package channelz import ( "net" - "sync" - "sync/atomic" "time" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" ) @@ -42,8 +39,6 @@ type entry interface { // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child // list is now empty. If both conditions are met, then delete self from database. deleteSelfIfReady() - // getParentID returns parent ID of the entry. 0 value parent ID means no parent. - getParentID() int64 } // dummyEntry is a fake entry to handle entry not found case. @@ -77,10 +72,6 @@ func (*dummyEntry) deleteSelfIfReady() { // code should not reach here. deleteSelfIfReady is always called on an existing entry. } -func (*dummyEntry) getParentID() int64 { - return 0 -} - // ChannelMetric defines the info channelz provides for a specific Channel, which // includes ChannelInternalMetric and channelz-specific data, such as channelz id, // child list, etc. @@ -103,8 +94,6 @@ type ChannelMetric struct { // Note current grpc implementation doesn't allow channel having sockets directly, // therefore, this is field is unused. 
Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace } // SubChannelMetric defines the info channelz provides for a specific SubChannel, @@ -131,8 +120,6 @@ type SubChannelMetric struct { // Sockets tracks the socket type children of this subchannel in the format of a map // from socket channelz id to corresponding reference string. Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace } // ChannelInternalMetric defines the struct that the implementor of Channel interface @@ -150,35 +137,7 @@ type ChannelInternalMetric struct { CallsFailed int64 // The last time a call was started on the channel. LastCallStartedTimestamp time.Time -} - -// ChannelTrace stores traced events on a channel/subchannel and related info. -type ChannelTrace struct { - // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) - EventNum int64 - // CreationTime is the creation time of the trace. - CreationTime time.Time - // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the - // oldest one) - Events []*TraceEvent -} - -// TraceEvent represent a single trace event -type TraceEvent struct { - // Desc is a simple description of the trace event. - Desc string - // Severity states the severity of this trace event. - Severity Severity - // Timestamp is the event time. - Timestamp time.Time - // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is - // involved in this event. - // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) - RefID int64 - // RefName is the reference name for the entity that gets referenced in the event. - RefName string - // RefType indicates the referenced entity type, i.e Channel or SubChannel. - RefType RefChannelType + //TODO: trace } // Channel is the interface that should be satisfied in order to be tracked by @@ -187,12 +146,6 @@ type Channel interface { ChannelzMetric() *ChannelInternalMetric } -type dummyChannel struct{} - -func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { - return &ChannelInternalMetric{} -} - type channel struct { refName string c Channel @@ -202,10 +155,6 @@ type channel struct { id int64 pid int64 cm *channelMap - trace *channelTrace - // traceRefCount is the number of trace events that reference this channel. - // Non-zero traceRefCount means the trace of this channel cannot be deleted. - traceRefCount int32 } func (c *channel) addChild(id int64, e entry) { @@ -230,96 +179,25 @@ func (c *channel) triggerDelete() { c.deleteSelfIfReady() } -func (c *channel) getParentID() int64 { - return c.pid -} - -// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means -// deleting the channel reference from its parent's child list. -// -// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the -// corresponding grpc object has been invoked, and the channel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
-func (c *channel) deleteSelfFromTree() (deleted bool) { +func (c *channel) deleteSelfIfReady() { if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { - return false + return } + c.cm.deleteEntry(c.id) // not top channel if c.pid != 0 { c.cm.findEntry(c.pid).deleteChild(c.id) } - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means -// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the -// channel, and its memory will be garbage collected. -// -// The trace reference count of the channel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (c *channel) deleteSelfFromMap() (delete bool) { - if c.getTraceRefCount() != 0 { - c.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the channel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. -func (c *channel) deleteSelfIfReady() { - if !c.deleteSelfFromTree() { - return - } - if !c.deleteSelfFromMap() { - return - } - c.cm.deleteEntry(c.id) - c.trace.clear() -} - -func (c *channel) getChannelTrace() *channelTrace { - return c.trace -} - -func (c *channel) incrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, 1) -} - -func (c *channel) decrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, -1) -} - -func (c *channel) getTraceRefCount() int { - i := atomic.LoadInt32(&c.traceRefCount) - return int(i) -} - -func (c *channel) getRefName() string { - return c.refName } type subChannel struct { - refName string - c Channel - closeCalled bool - sockets map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - traceRefCount int32 + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap } func (sc *subChannel) addChild(id int64, e entry) { @@ -340,82 +218,12 @@ func (sc *subChannel) triggerDelete() { sc.deleteSelfIfReady() } -func (sc *subChannel) getParentID() int64 { - return sc.pid -} - -// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which -// means deleting the subchannel reference from its parent's child list. -// -// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of -// the corresponding grpc object has been invoked, and the subchannel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
-func (sc *subChannel) deleteSelfFromTree() (deleted bool) { - if !sc.closeCalled || len(sc.sockets) != 0 { - return false - } - sc.cm.findEntry(sc.pid).deleteChild(sc.id) - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means -// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query -// the subchannel, and its memory will be garbage collected. -// -// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (sc *subChannel) deleteSelfFromMap() (delete bool) { - if sc.getTraceRefCount() != 0 { - // free the grpc struct (i.e. addrConn) - sc.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { - if !sc.deleteSelfFromTree() { - return - } - if !sc.deleteSelfFromMap() { + if !sc.closeCalled || len(sc.sockets) != 0 { return } sc.cm.deleteEntry(sc.id) - sc.trace.clear() -} - -func (sc *subChannel) getChannelTrace() *channelTrace { - return sc.trace -} - -func (sc *subChannel) incrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, 1) -} - -func (sc *subChannel) decrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, -1) -} - -func (sc *subChannel) getTraceRefCount() int { - i := atomic.LoadInt32(&sc.traceRefCount) - return int(i) -} - -func (sc *subChannel) getRefName() string { - return sc.refName + sc.cm.findEntry(sc.pid).deleteChild(sc.id) } // SocketMetric defines the info channelz provides for a specific Socket, which @@ -473,9 +281,9 @@ type SocketInternalMetric struct { RemoteAddr net.Addr // Optional, represents the name of the remote endpoint, if different than // the original target name. - RemoteName string - SocketOptions *SocketOptionData - Security credentials.ChannelzSecurityValue + RemoteName string + //TODO: socket options + //TODO: Security } // Socket is the interface that should be satisfied in order to be tracked by @@ -509,10 +317,6 @@ func (ls *listenSocket) deleteSelfIfReady() { grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") } -func (ls *listenSocket) getParentID() int64 { - return ls.pid -} - type normalSocket struct { refName string s Socket @@ -538,10 +342,6 @@ func (ns *normalSocket) deleteSelfIfReady() { grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") } -func (ns *normalSocket) getParentID() int64 { - return ns.pid -} - // ServerMetric defines the info channelz provides for a specific Server, which // includes ServerInternalMetric and channelz-specific data, such as channelz id, // child list, etc. 
@@ -569,6 +369,7 @@ type ServerInternalMetric struct { CallsFailed int64 // The last time a call was started on the server. LastCallStartedTimestamp time.Time + //TODO: trace } // Server is the interface to be satisfied in order to be tracked by channelz as @@ -615,88 +416,3 @@ func (s *server) deleteSelfIfReady() { } s.cm.deleteEntry(s.id) } - -func (s *server) getParentID() int64 { - return 0 -} - -type tracedChannel interface { - getChannelTrace() *channelTrace - incrTraceRefCount() - decrTraceRefCount() - getRefName() string -} - -type channelTrace struct { - cm *channelMap - createdTime time.Time - eventCount int64 - mu sync.Mutex - events []*TraceEvent -} - -func (c *channelTrace) append(e *TraceEvent) { - c.mu.Lock() - if len(c.events) == getMaxTraceEntry() { - del := c.events[0] - c.events = c.events[1:] - if del.RefID != 0 { - // start recursive cleanup in a goroutine to not block the call originated from grpc. - go func() { - // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. - c.cm.mu.Lock() - c.cm.decrTraceRefCount(del.RefID) - c.cm.mu.Unlock() - }() - } - } - e.Timestamp = time.Now() - c.events = append(c.events, e) - c.eventCount++ - c.mu.Unlock() -} - -func (c *channelTrace) clear() { - c.mu.Lock() - for _, e := range c.events { - if e.RefID != 0 { - // caller should have already held the c.cm.mu lock. - c.cm.decrTraceRefCount(e.RefID) - } - } - c.mu.Unlock() -} - -// Severity is the severity level of a trace event. -// The canonical enumeration of all valid values is here: -// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. -type Severity int - -const ( - // CtUNKNOWN indicates unknown severity of a trace event. - CtUNKNOWN Severity = iota - // CtINFO indicates info level severity of a trace event. - CtINFO - // CtWarning indicates warning level severity of a trace event. - CtWarning - // CtError indicates error level severity of a trace event. - CtError -) - -// RefChannelType is the type of the entity being referenced in a trace event. -type RefChannelType int - -const ( - // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota - // RefSubChannel indicates the referenced entity is a SubChannel. - RefSubChannel -) - -func (c *channelTrace) dumpData() *ChannelTrace { - c.mu.Lock() - ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} - ct.Events = c.events[:len(c.events)] - c.mu.Unlock() - return ct -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go deleted file mode 100644 index 692dd61817..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build !appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package channelz - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -// SocketOptionData defines the struct to hold socket option data, and related -// getter function to obtain info from fd. -type SocketOptionData struct { - Linger *unix.Linger - RecvTimeout *unix.Timeval - SendTimeout *unix.Timeval - TCPInfo *unix.TCPInfo -} - -// Getsockopt defines the function to get socket options requested by channelz. -// It is to be passed to syscall.RawConn.Control(). -func (s *SocketOptionData) Getsockopt(fd uintptr) { - if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { - s.Linger = v - } - if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { - s.RecvTimeout = v - } - if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { - s.SendTimeout = v - } - if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { - s.TCPInfo = v - } -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go deleted file mode 100644 index 79edbefc43..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build !linux appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "sync" - - "google.golang.org/grpc/grpclog" -) - -var once sync.Once - -// SocketOptionData defines the struct to hold socket option data, and related -// getter function to obtain info from fd. -// Windows OS doesn't support Socket Option -type SocketOptionData struct { -} - -// Getsockopt defines the function to get socket options requested by channelz. -// It is to be passed to syscall.RawConn.Control(). -// Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { - once.Do(func() { - grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") - }) -} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go deleted file mode 100644 index 11be7cd08c..0000000000 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -// Package envconfig contains grpc settings configured by environment variables. -package envconfig - -import ( - "os" - "strings" -) - -const ( - prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" - requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE" -) - -// RequireHandshakeSetting describes the settings for handshaking. -type RequireHandshakeSetting int - -const ( - // RequireHandshakeOn indicates to wait for handshake before considering a - // connection ready/successful. - RequireHandshakeOn RequireHandshakeSetting = iota - // RequireHandshakeOff indicates to not wait for handshake before - // considering a connection ready/successful. - RequireHandshakeOff -) - -var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". - Retry = strings.EqualFold(os.Getenv(retryStr), "on") - // RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE - // environment variable. - // - // Will be removed after the 1.18 release. - RequireHandshake = RequireHandshakeOn -) - -func init() { - switch strings.ToLower(os.Getenv(requireHandshakeStr)) { - case "on": - fallthrough - default: - RequireHandshake = RequireHandshakeOn - case "off": - RequireHandshake = RequireHandshakeOff - } -} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go deleted file mode 100644 index fbe697c376..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcsync/event.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcsync implements additional synchronization primitives built upon -// the sync package. -package grpcsync - -import ( - "sync" - "sync/atomic" -) - -// Event represents a one-time event that may occur in the future. -type Event struct { - fired int32 - c chan struct{} - o sync.Once -} - -// Fire causes e to complete. It is safe to call multiple times, and -// concurrently. It returns true iff this call to Fire caused the signaling -// channel returned by Done to close. -func (e *Event) Fire() bool { - ret := false - e.o.Do(func() { - atomic.StoreInt32(&e.fired, 1) - close(e.c) - ret = true - }) - return ret -} - -// Done returns a channel that will be closed when Fire is called. -func (e *Event) Done() <-chan struct{} { - return e.c -} - -// HasFired returns true if Fire has been called. -func (e *Event) HasFired() bool { - return atomic.LoadInt32(&e.fired) == 1 -} - -// NewEvent returns a new, ready-to-use Event. -func NewEvent() *Event { - return &Event{c: make(chan struct{})} -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index c1d2c690ca..cd34267f7f 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -20,35 +20,17 @@ // symbols to avoid circular dependencies. 
package internal -import ( - "context" - "time" -) - var ( - // WithResolverBuilder is exported by dialoptions.go + + // TestingUseHandlerImpl enables the http.Handler-based server implementation. + // It must be called before Serve and requires TLS credentials. + // + // The provided grpcServer must be of type *grpc.Server. It is untyped + // for circular dependency reasons. + TestingUseHandlerImpl func(grpcServer interface{}) + + // WithContextDialer is exported by clientconn.go + WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption + // WithResolverBuilder is exported by clientconn.go WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption - // WithHealthCheckFunc is not exported by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption - // HealthCheckFunc is used to provide client-side LB channel health checking - HealthCheckFunc HealthChecker - // BalancerUnregister is exported by package balancer to unregister a balancer. - BalancerUnregister func(name string) - // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by - // default, but tests may wish to set it lower for convenience. - KeepaliveMinPingTime = 10 * time.Second -) - -// HealthChecker defines the signature of the client-side LB channel health checking function. -type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error - -const ( - // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. - CredsBundleModeFallback = "fallback" - // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer - // mode. - CredsBundleModeBalancer = "balancer" - // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode - // that supports backend returned by grpclb balancer. - CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go deleted file mode 100644 index 43281a3e07..0000000000 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ /dev/null @@ -1,114 +0,0 @@ -// +build !appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package syscall provides functionalities that grpc uses to get low-level operating system -// stats/info. -package syscall - -import ( - "fmt" - "net" - "syscall" - "time" - - "golang.org/x/sys/unix" - "google.golang.org/grpc/grpclog" -) - -// GetCPUTime returns the how much CPU time has passed since the start of this process. -func GetCPUTime() int64 { - var ts unix.Timespec - if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { - grpclog.Fatal(err) - } - return ts.Nano() -} - -// Rusage is an alias for syscall.Rusage under linux non-appengine environment. 
-type Rusage syscall.Rusage - -// GetRusage returns the resource usage of current process. -func GetRusage() (rusage *Rusage) { - rusage = new(Rusage) - syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage)) - return -} - -// CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { - f := (*syscall.Rusage)(first) - l := (*syscall.Rusage)(latest) - var ( - utimeDiffs = l.Utime.Sec - f.Utime.Sec - utimeDiffus = l.Utime.Usec - f.Utime.Usec - stimeDiffs = l.Stime.Sec - f.Stime.Sec - stimeDiffus = l.Stime.Usec - f.Stime.Usec - ) - - uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 - sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 - - return uTimeElapsed, sTimeElapsed -} - -// SetTCPUserTimeout sets the TCP user timeout on a connection's socket -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { - tcpconn, ok := conn.(*net.TCPConn) - if !ok { - // not a TCP connection. exit early - return nil - } - rawConn, err := tcpconn.SyscallConn() - if err != nil { - return fmt.Errorf("error getting raw connection: %v", err) - } - err = rawConn.Control(func(fd uintptr) { - err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) - }) - if err != nil { - return fmt.Errorf("error setting option on socket: %v", err) - } - - return nil -} - -// GetTCPUserTimeout gets the TCP user timeout on a connection's socket -func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { - tcpconn, ok := conn.(*net.TCPConn) - if !ok { - err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn) - return - } - rawConn, err := tcpconn.SyscallConn() - if err != nil { - err = fmt.Errorf("error getting raw connection: %v", err) - return - } - err = rawConn.Control(func(fd uintptr) { - opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) - }) - if err != nil { - err = fmt.Errorf("error getting option on socket: %v", err) - return - } - - return -} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go deleted file mode 100644 index d3fd9dab33..0000000000 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ /dev/null @@ -1,73 +0,0 @@ -// +build !linux appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package syscall - -import ( - "net" - "sync" - "time" - - "google.golang.org/grpc/grpclog" -) - -var once sync.Once - -func log() { - once.Do(func() { - grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") - }) -} - -// GetCPUTime returns the how much CPU time has passed since the start of this process. -// It always returns 0 under non-linux or appengine environment. 
-func GetCPUTime() int64 { - log() - return 0 -} - -// Rusage is an empty struct under non-linux or appengine environment. -type Rusage struct{} - -// GetRusage is a no-op function under non-linux or appengine environment. -func GetRusage() (rusage *Rusage) { - log() - return nil -} - -// CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. It a no-op function for non-linux or appengine environment. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { - log() - return 0, 0 -} - -// SetTCPUserTimeout is a no-op function under non-linux or appengine environments -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { - log() - return nil -} - -// GetTCPUserTimeout is a no-op function under non-linux or appengine environments -// a negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { - log() - return -1, nil -} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go deleted file mode 100644 index 9fa306b2e0..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "math" - "time" -) - -const ( - // The default value of flow control window size in HTTP2 spec. - defaultWindowSize = 65535 - // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC - infinity = time.Duration(math.MaxInt64) - defaultClientKeepaliveTime = infinity - defaultClientKeepaliveTimeout = 20 * time.Second - defaultMaxStreamsClient = 100 - defaultMaxConnectionIdle = infinity - defaultMaxConnectionAge = infinity - defaultMaxConnectionAgeGrace = infinity - defaultServerKeepaliveTime = 2 * time.Hour - defaultServerKeepaliveTimeout = 20 * time.Second - defaultKeepalivePolicyMinTime = 5 * time.Minute - // max window limit set by HTTP2 Specs. - maxWindowSize = math.MaxInt32 - // defaultWriteQuota is the default value for number of data - // bytes that each stream can schedule before some of it being - // flushed out. - defaultWriteQuota = 64 * 1024 - defaultClientMaxHeaderListSize = uint32(16 << 20) - defaultServerMaxHeaderListSize = uint32(16 << 20) -) diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7d..f8adc7e6d4 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -16,8 +16,7 @@ * */ -// Package keepalive defines configurable parameters for point-to-point -// healthcheck. +// Package keepalive defines configurable parameters for point-to-point healthcheck. package keepalive import ( @@ -25,61 +24,42 @@ import ( ) // ClientParameters is used to set keepalive parameters on the client-side. 
-// These configure how the client will actively probe to notice when a -// connection is broken and send pings so intermediaries will be aware of the -// liveness of the connection. Make sure these parameters are set in -// coordination with the keepalive policy on the server, as incompatible -// settings can result in closing of connection. +// These configure how the client will actively probe to notice when a connection is broken +// and send pings so intermediaries will be aware of the liveness of the connection. +// Make sure these parameters are set in coordination with the keepalive policy on the server, +// as incompatible settings can result in closing of connection. type ClientParameters struct { - // After a duration of this time if the client doesn't see any activity it - // pings the server to see if the transport is still alive. - // If set below 10s, a minimum value of 10s will be used instead. + // After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive. Time time.Duration // The current default value is infinity. - // After having pinged for keepalive check, the client waits for a duration - // of Timeout and if no activity is seen even after that the connection is - // closed. + // After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that + // the connection is closed. Timeout time.Duration // The current default value is 20 seconds. - // If true, client sends keepalive pings even with no active RPCs. If false, - // when there are no active RPCs, Time and Timeout will be ignored and no - // keepalive pings will be sent. + // If true, client runs keepalive checks even with no active RPCs. PermitWithoutStream bool // false by default. } -// ServerParameters is used to set keepalive and max-age parameters on the -// server-side. +// ServerParameters is used to set keepalive and max-age parameters on the server-side. type ServerParameters struct { - // MaxConnectionIdle is a duration for the amount of time after which an - // idle connection would be closed by sending a GoAway. Idleness duration is - // defined since the most recent time the number of outstanding RPCs became - // zero or the connection establishment. + // MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway. + // Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment. MaxConnectionIdle time.Duration // The current default value is infinity. - // MaxConnectionAge is a duration for the maximum amount of time a - // connection may exist before it will be closed by sending a GoAway. A - // random jitter of +/-10% will be added to MaxConnectionAge to spread out - // connection storms. + // MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. + // A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. MaxConnectionAge time.Duration // The current default value is infinity. - // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after - // which the connection will be forcibly closed. + // MaxConnectinoAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed. MaxConnectionAgeGrace time.Duration // The current default value is infinity. 
- // After a duration of this time if the server doesn't see any activity it - // pings the client to see if the transport is still alive. - // If set below 1s, a minimum value of 1s will be used instead. + // After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive. Time time.Duration // The current default value is 2 hours. - // After having pinged for keepalive check, the server waits for a duration - // of Timeout and if no activity is seen even after that the connection is - // closed. + // After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that + // the connection is closed. Timeout time.Duration // The current default value is 20 seconds. } -// EnforcementPolicy is used to set keepalive enforcement policy on the -// server-side. Server will close connection with a client that violates this -// policy. +// EnforcementPolicy is used to set keepalive enforcement policy on the server-side. +// Server will close connection with a client that violates this policy. type EnforcementPolicy struct { - // MinTime is the minimum amount of time a client should wait before sending - // a keepalive ping. + // MinTime is the minimum amount of time a client should wait before sending a keepalive ping. MinTime time.Duration // The current default value is 5 minutes. - // If true, server allows keepalive pings even when there are no active - // streams(RPCs). If false, and client sends ping when there are no active - // streams, server will send GOAWAY and close the connection. + // If true, server expects keepalive pings even when there are no active streams(RPCs). PermitWithoutStream bool // false by default. } diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index cf6d1b9478..bd2eaf4083 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -22,9 +22,10 @@ package metadata // import "google.golang.org/grpc/metadata" import ( - "context" "fmt" "strings" + + "golang.org/x/net/context" ) // DecodeKeyValue returns k, v, nil. diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go index c9f79dc533..0f8a908ea9 100644 --- a/vendor/google.golang.org/grpc/naming/dns_resolver.go +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -19,13 +19,13 @@ package naming import ( - "context" "errors" "fmt" "net" "strconv" "time" + "golang.org/x/net/context" "google.golang.org/grpc/grpclog" ) @@ -37,9 +37,6 @@ const ( var ( errMissingAddr = errors.New("missing address") errWatcherClose = errors.New("watcher has been closed") - - lookupHost = net.DefaultResolver.LookupHost - lookupSRV = net.DefaultResolver.LookupSRV ) // NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and @@ -76,8 +73,8 @@ func formatIP(addr string) (addrIP string, ok bool) { // parseTarget takes the user input target string, returns formatted host and port info. // If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets +// are strippd when setting the host. 
// examples: // target: "www.google.com" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" returns host: "ipv4-host", port: "80" @@ -221,7 +218,7 @@ func (w *dnsWatcher) lookupSRV() map[string]*Update { for _, s := range srvs { lbAddrs, err := lookupHost(w.ctx, s.Target) if err != nil { - grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) + grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err) continue } for _, a := range lbAddrs { diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/naming/go17.go new file mode 100644 index 0000000000..57b65d7b88 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go17.go @@ -0,0 +1,34 @@ +// +build go1.6,!go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } +) diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/naming/go18.go similarity index 73% rename from vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go rename to vendor/google.golang.org/grpc/naming/go18.go index 8864a08111..b5a0f84274 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/naming/go18.go @@ -1,8 +1,8 @@ -// +build !linux appengine +// +build go1.8 /* * - * Copyright 2018 gRPC authors. + * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,9 +18,11 @@ * */ -package channelz +package naming -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { - return nil -} +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV +) diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go index c99fdbef43..8cc39e9375 100644 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -17,7 +17,7 @@ */ // Package naming defines the naming API and related data structures for gRPC. -// The interface is EXPERIMENTAL and may be subject to change. +// The interface is EXPERIMENTAL and may be suject to change. // // Deprecated: please use package resolver. 
package naming diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index e01d219ffb..317b8b9d09 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -21,9 +21,9 @@ package peer import ( - "context" "net" + "golang.org/x/net/context" "google.golang.org/grpc/credentials" ) diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index f9625496c4..019e658004 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -19,16 +19,19 @@ package grpc import ( - "context" "io" "sync" + "sync/atomic" + "golang.org/x/net/context" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" ) // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick @@ -42,10 +45,16 @@ type pickerWrapper struct { // The latest connection happened. connErrMu sync.Mutex connErr error + + stickinessMDKey atomic.Value + stickiness *stickyStore } func newPickerWrapper() *pickerWrapper { - bp := &pickerWrapper{blockingCh: make(chan struct{})} + bp := &pickerWrapper{ + blockingCh: make(chan struct{}), + stickiness: newStickyStore(), + } return bp } @@ -62,6 +71,27 @@ func (bp *pickerWrapper) connectionError() error { return err } +func (bp *pickerWrapper) updateStickinessMDKey(newKey string) { + // No need to check ok because mdKey == "" if ok == false. + if oldKey, _ := bp.stickinessMDKey.Load().(string); oldKey != newKey { + bp.stickinessMDKey.Store(newKey) + bp.stickiness.reset(newKey) + } +} + +func (bp *pickerWrapper) getStickinessMDKey() string { + // No need to check ok because mdKey == "" if ok == false. + mdKey, _ := bp.stickinessMDKey.Load().(string) + return mdKey +} + +func (bp *pickerWrapper) clearStickinessState() { + if oldKey := bp.getStickinessMDKey(); oldKey != "" { + // There's no need to reset store if mdKey was "". + bp.stickiness.reset(oldKey) + } +} + // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (bp *pickerWrapper) updatePicker(p balancer.Picker) { bp.mu.Lock() @@ -101,7 +131,31 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { - var ch chan struct{} + + mdKey := bp.getStickinessMDKey() + stickyKey, isSticky := stickyKeyFromContext(ctx, mdKey) + + // Potential race here: if stickinessMDKey is updated after the above two + // lines, and this pick is a sticky pick, the following put could add an + // entry to sticky store with an outdated sticky key. + // + // The solution: keep the current md key in sticky store, and at the + // beginning of each get/put, check the mdkey against store.curMDKey. + // - Cons: one more string comparing for each get/put. + // - Pros: the string matching happens inside get/put, so the overhead for + // non-sticky RPCs will be minimal. 
+ + if isSticky { + if t, ok := bp.stickiness.get(mdKey, stickyKey); ok { + // Done function returned is always nil. + return t, nil, nil + } + } + + var ( + p balancer.Picker + ch chan struct{} + ) for { bp.mu.Lock() @@ -127,7 +181,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. } ch = bp.blockingCh - p := bp.picker + p = bp.picker bp.mu.Unlock() subConn, done, err := p.Pick(ctx, opts) @@ -141,35 +195,26 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. continue } return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError()) - case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return nil, nil, status.Error(codes.Canceled, err.Error()) default: - if _, ok := status.FromError(err); ok { - return nil, nil, err - } // err is some other error. - return nil, nil, status.Error(codes.Unknown, err.Error()) + return nil, nil, toRPCErr(err) } } acw, ok := subConn.(*acBalancerWrapper) if !ok { - grpclog.Error("subconn returned from pick is not *acBalancerWrapper") + grpclog.Infof("subconn returned from pick is not *acBalancerWrapper") continue } if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if isSticky { + bp.stickiness.put(mdKey, stickyKey, acw) + } if channelz.IsOn() { return t, doneChannelzWrapper(acw, done), nil } return t, done, nil } - if done != nil { - // Calling done with nil error, no bytes sent and no bytes received. - // DoneInfo with default value works. - done(balancer.DoneInfo{}) - } grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") // If ok == false, ac.state is not READY. // A valid picker always returns READY subConn. This means the state of ac @@ -187,3 +232,105 @@ func (bp *pickerWrapper) close() { bp.done = true close(bp.blockingCh) } + +const stickinessKeyCountLimit = 1000 + +type stickyStoreEntry struct { + acw *acBalancerWrapper + addr resolver.Address +} + +type stickyStore struct { + mu sync.Mutex + // curMDKey is check before every get/put to avoid races. The operation will + // abort immediately when the given mdKey is different from the curMDKey. + curMDKey string + store *linkedMap +} + +func newStickyStore() *stickyStore { + return &stickyStore{ + store: newLinkedMap(), + } +} + +// reset clears the map in stickyStore, and set the currentMDKey to newMDKey. +func (ss *stickyStore) reset(newMDKey string) { + ss.mu.Lock() + ss.curMDKey = newMDKey + ss.store.clear() + ss.mu.Unlock() +} + +// stickyKey is the key to look up in store. mdKey will be checked against +// curMDKey to avoid races. +func (ss *stickyStore) put(mdKey, stickyKey string, acw *acBalancerWrapper) { + ss.mu.Lock() + defer ss.mu.Unlock() + if mdKey != ss.curMDKey { + return + } + // TODO(stickiness): limit the total number of entries. + ss.store.put(stickyKey, &stickyStoreEntry{ + acw: acw, + addr: acw.getAddrConn().getCurAddr(), + }) + if ss.store.len() > stickinessKeyCountLimit { + ss.store.removeOldest() + } +} + +// stickyKey is the key to look up in store. mdKey will be checked against +// curMDKey to avoid races. 
+func (ss *stickyStore) get(mdKey, stickyKey string) (transport.ClientTransport, bool) { + ss.mu.Lock() + defer ss.mu.Unlock() + if mdKey != ss.curMDKey { + return nil, false + } + entry, ok := ss.store.get(stickyKey) + if !ok { + return nil, false + } + ac := entry.acw.getAddrConn() + if ac.getCurAddr() != entry.addr { + ss.store.remove(stickyKey) + return nil, false + } + t, ok := ac.getReadyTransport() + if !ok { + ss.store.remove(stickyKey) + return nil, false + } + return t, true +} + +// Get one value from metadata in ctx with key stickinessMDKey. +// +// It returns "", false if stickinessMDKey is an empty string. +func stickyKeyFromContext(ctx context.Context, stickinessMDKey string) (string, bool) { + if stickinessMDKey == "" { + return "", false + } + + md, added, ok := metadata.FromOutgoingContextRaw(ctx) + if !ok { + return "", false + } + + if vv, ok := md[stickinessMDKey]; ok { + if len(vv) > 0 { + return vv[0], true + } + } + + for _, ss := range added { + for i := 0; i < len(ss)-1; i += 2 { + if ss[i] == stickinessMDKey { + return ss[i+1], true + } + } + } + + return "", false +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index d1e38aad77..bf659d49d2 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -19,8 +19,7 @@ package grpc import ( - "context" - + "golang.org/x/net/context" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" @@ -57,7 +56,6 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er if b.sc == nil { b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) if err != nil { - //TODO(yuxuanli): why not change the cc state to Idle? grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) return } diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go index f8f69bfb70..2d40236e21 100644 --- a/vendor/google.golang.org/grpc/proxy.go +++ b/vendor/google.golang.org/grpc/proxy.go @@ -20,8 +20,6 @@ package grpc import ( "bufio" - "context" - "encoding/base64" "errors" "fmt" "io" @@ -29,9 +27,9 @@ import ( "net/http" "net/http/httputil" "net/url" -) -const proxyAuthHeaderKey = "Proxy-Authorization" + "golang.org/x/net/context" +) var ( // errDisabled indicates that proxy is disabled for the address. @@ -40,7 +38,7 @@ var ( httpProxyFromEnvironment = http.ProxyFromEnvironment ) -func mapAddress(ctx context.Context, address string) (*url.URL, error) { +func mapAddress(ctx context.Context, address string) (string, error) { req := &http.Request{ URL: &url.URL{ Scheme: "https", @@ -49,12 +47,12 @@ func mapAddress(ctx context.Context, address string) (*url.URL, error) { } url, err := httpProxyFromEnvironment(req) if err != nil { - return nil, err + return "", err } if url == nil { - return nil, errDisabled + return "", errDisabled } - return url, nil + return url.Host, nil } // To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. 
@@ -71,28 +69,18 @@ func (c *bufConn) Read(b []byte) (int, error) { return c.r.Read(b) } -func basicAuth(username, password string) string { - auth := username + ":" + password - return base64.StdEncoding.EncodeToString([]byte(auth)) -} - -func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) { +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) { defer func() { if err != nil { conn.Close() } }() - req := &http.Request{ + req := (&http.Request{ Method: http.MethodConnect, - URL: &url.URL{Host: backendAddr}, + URL: &url.URL{Host: addr}, Header: map[string][]string{"User-Agent": {grpcUA}}, - } - if t := proxyURL.User; t != nil { - u := t.Username() - p, _ := t.Password() - req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) - } + }) if err := sendHTTPRequest(ctx, req, conn); err != nil { return nil, fmt.Errorf("failed to write the HTTP request: %v", err) @@ -120,33 +108,23 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // provided dialer, does HTTP CONNECT handshake and returns the connection. func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { return func(ctx context.Context, addr string) (conn net.Conn, err error) { - var newAddr string - proxyURL, err := mapAddress(ctx, addr) + var skipHandshake bool + newAddr, err := mapAddress(ctx, addr) if err != nil { if err != errDisabled { return nil, err } + skipHandshake = true newAddr = addr - } else { - newAddr = proxyURL.Host } conn, err = dialer(ctx, newAddr) if err != nil { return } - if proxyURL != nil { - // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL) + if !skipHandshake { + conn, err = doHTTPConnectHandshake(ctx, conn, addr) } return } } - -func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { - req = req.WithContext(ctx) - if err := req.Write(conn); err != nil { - return fmt.Errorf("failed to write the HTTP request: %v", err) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index 5835599077..048fde67d0 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -1,6 +1,6 @@ /* * - * Copyright 2018 gRPC authors. + * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,7 +21,6 @@ package dns import ( - "context" "encoding/json" "errors" "fmt" @@ -32,8 +31,8 @@ import ( "sync" "time" + "golang.org/x/net/context" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" ) @@ -43,65 +42,34 @@ func init() { } const ( - defaultPort = "443" - defaultFreq = time.Minute * 30 - defaultDNSSvrPort = "53" - golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. - txtPrefix = "_grpc_config." + defaultPort = "443" + defaultFreq = time.Minute * 30 + golang = "GO" // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. 
txtAttribute = "grpc_config=" ) var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. "::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") + errMissingAddr = errors.New("missing address") ) -var ( - defaultResolver netResolver = net.DefaultResolver -) - -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { - var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) - } -} - -var customAuthorityResolver = func(authority string) (netResolver, error) { - host, port, err := parseTarget(authority, defaultDNSSvrPort) - if err != nil { - return nil, err - } - - authorityWithPort := net.JoinHostPort(host, port) - - return &net.Resolver{ - PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), - }, nil -} - // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. func NewBuilder() resolver.Builder { - return &dnsBuilder{minFreq: defaultFreq} + return &dnsBuilder{freq: defaultFreq} } type dnsBuilder struct { - // minimum frequency of polling the DNS server. - minFreq time.Duration + // frequency of polling the DNS server. + freq time.Duration } // Build creates and starts a DNS resolver that watches the name resolution of the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint, defaultPort) + if target.Authority != "" { + return nil, fmt.Errorf("Default DNS resolver does not support custom DNS server") + } + host, port, err := parseTarget(target.Endpoint) if err != nil { return nil, err } @@ -124,8 +92,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts // DNS address (non-IP). ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ - freq: b.minFreq, - backoff: backoff.Exponential{MaxDelay: b.minFreq}, + freq: b.freq, host: host, port: port, ctx: ctx, @@ -136,15 +103,6 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = customAuthorityResolver(target.Authority) - if err != nil { - return nil, err - } - } - d.wg.Add(1) go d.watcher() return d, nil @@ -155,12 +113,6 @@ func (b *dnsBuilder) Scheme() string { return "dns" } -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - // ipResolver watches for the name resolution update for an IP address. type ipResolver struct { cc resolver.ClientConn @@ -196,15 +148,12 @@ func (i *ipResolver) watcher() { // dnsResolver watches for the name resolution update for a non-IP target. 
type dnsResolver struct { - freq time.Duration - backoff backoff.Exponential - retryCount int - host string - port string - resolver netResolver - ctx context.Context - cancel context.CancelFunc - cc resolver.ClientConn + freq time.Duration + host string + port string + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn // rn channel is used by ResolveNow() to force an immediate resolution of the target. rn chan struct{} t *time.Timer @@ -243,15 +192,8 @@ func (d *dnsResolver) watcher() { case <-d.rn: } result, sc := d.lookup() - // Next lookup should happen within an interval defined by d.freq. It may be - // more often due to exponential retry on empty address list. - if len(result) == 0 { - d.retryCount++ - d.t.Reset(d.backoff.Backoff(d.retryCount)) - } else { - d.retryCount = 0 - d.t.Reset(d.freq) - } + // Next lookup should happen after an interval defined by d.freq. + d.t.Reset(d.freq) d.cc.NewServiceConfig(sc) d.cc.NewAddress(result) } @@ -259,13 +201,13 @@ func (d *dnsResolver) watcher() { func (d *dnsResolver) lookupSRV() []resolver.Address { var newAddrs []resolver.Address - _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + _, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host) if err != nil { grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) return nil } for _, s := range srvs { - lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + lbAddrs, err := lookupHost(d.ctx, s.Target) if err != nil { grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err) continue @@ -284,7 +226,7 @@ func (d *dnsResolver) lookupSRV() []resolver.Address { } func (d *dnsResolver) lookupTXT() string { - ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) + ss, err := lookupTXT(d.ctx, d.host) if err != nil { grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err) return "" @@ -304,7 +246,7 @@ func (d *dnsResolver) lookupTXT() string { func (d *dnsResolver) lookupHost() []resolver.Address { var newAddrs []resolver.Address - addrs, err := d.resolver.LookupHost(d.ctx, d.host) + addrs, err := lookupHost(d.ctx, d.host) if err != nil { grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) return nil @@ -346,16 +288,17 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns formatted host and port info. +// parseTarget takes the user input target string, returns formatted host and port info. // If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets +// are strippd when setting the host. 
// examples: -// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" -// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" -// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" -// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" -func parseTarget(target, defaultPort string) (host, port string, err error) { +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { if target == "" { return "", "", errMissingAddr } @@ -364,15 +307,15 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { return target, defaultPort, nil } if host, port, err = net.SplitHostPort(target); err == nil { - if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. - return "", "", errEndsWithColon - } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. host = "localhost" } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } return host, port, nil } if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17.go b/vendor/google.golang.org/grpc/resolver/dns/go17.go new file mode 100644 index 0000000000..b466bc8f6d --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go17.go @@ -0,0 +1,35 @@ +// +build go1.6, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package dns + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } + lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) } +) diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/resolver/dns/go18.go similarity index 73% rename from vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go rename to vendor/google.golang.org/grpc/resolver/dns/go18.go index d4346e9eab..fa34f14cad 100644 --- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go +++ b/vendor/google.golang.org/grpc/resolver/dns/go18.go @@ -1,8 +1,8 @@ -// +build appengine +// +build go1.8 /* * - * Copyright 2018 gRPC authors. + * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,13 +18,12 @@ * */ -package internal +package dns -import ( - "net" +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV + lookupTXT = net.DefaultResolver.LookupTXT ) - -// WrapSyscallConn returns newConn on appengine. -func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - return newConn -} diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go index 893d5d12cb..b76010d74d 100644 --- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go @@ -45,7 +45,7 @@ type passthroughResolver struct { } func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) + r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}}) } func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 52ec603daa..506afac88a 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -49,12 +49,8 @@ func Get(scheme string) Builder { return nil } -// SetDefaultScheme sets the default scheme that will be used. The default -// default scheme is "passthrough". -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. The scheme set last overrides -// previously set values. +// SetDefaultScheme sets the default scheme that will be used. +// The default default scheme is "passthrough". func SetDefaultScheme(scheme string) { defaultScheme = scheme } @@ -98,15 +94,6 @@ type BuildOption struct { DisableServiceConfig bool } -// State contains the current Resolver state relevant to the ClientConn. -type State struct { - Addresses []Address // Resolved addresses for the target - ServiceConfig string // JSON representation of the service config - - // TODO: add Err error - // TODO: add ParsedServiceConfig interface{} -} - // ClientConn contains the callbacks for resolver to notify any updates // to the gRPC ClientConn. 
// @@ -115,18 +102,12 @@ type State struct { // testing, the new implementation should embed this interface. This allows // gRPC to add new methods to this interface. type ClientConn interface { - // UpdateState updates the state of the ClientConn appropriately. - UpdateState(State) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. // The address list should be the complete list of resolved addresses. - // - // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) // NewServiceConfig is called by resolver to notify ClientConn a new // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. NewServiceConfig(serviceConfig string) } diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index e9cef3a92b..494d6931eb 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -21,10 +21,8 @@ package grpc import ( "fmt" "strings" - "sync/atomic" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/resolver" ) @@ -35,12 +33,11 @@ type ccResolverWrapper struct { resolver resolver.Resolver addrCh chan []resolver.Address scCh chan string - done uint32 // accessed atomically; set to 1 when closed. - curState resolver.State + done chan struct{} } // split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. +// If sep is not found, it returns ("", s, false) instead. func split2(s, sep string) (string, string, bool) { spl := strings.SplitN(s, sep, 2) if len(spl) < 2 { @@ -83,6 +80,7 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { cc: cc, addrCh: make(chan []resolver.Address, 1), scCh: make(chan string, 1), + done: make(chan struct{}), } var err error @@ -93,73 +91,68 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { return ccr, nil } +func (ccr *ccResolverWrapper) start() { + go ccr.watcher() +} + +// watcher processes address updates and service config updates sequentially. +// Otherwise, we need to resolve possible races between address and service +// config (e.g. they specify different balancer types). 
+func (ccr *ccResolverWrapper) watcher() { + for { + select { + case <-ccr.done: + return + default: + } + + select { + case addrs := <-ccr.addrCh: + select { + case <-ccr.done: + return + default: + } + grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + ccr.cc.handleResolvedAddrs(addrs, nil) + case sc := <-ccr.scCh: + select { + case <-ccr.done: + return + default: + } + grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + ccr.cc.handleServiceConfig(sc) + case <-ccr.done: + return + } + } +} + func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) { ccr.resolver.ResolveNow(o) } func (ccr *ccResolverWrapper) close() { ccr.resolver.Close() - atomic.StoreUint32(&ccr.done, 1) + close(ccr.done) } -func (ccr *ccResolverWrapper) isDone() bool { - return atomic.LoadUint32(&ccr.done) == 1 -} - -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { - if ccr.isDone() { - return - } - grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } - ccr.cc.updateResolverState(s) - ccr.curState = s -} - -// NewAddress is called by the resolver implementation to send addresses to gRPC. +// NewAddress is called by the resolver implemenetion to send addresses to gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - if ccr.isDone() { - return + select { + case <-ccr.addrCh: + default: } - grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState) + ccr.addrCh <- addrs } -// NewServiceConfig is called by the resolver implementation to send service +// NewServiceConfig is called by the resolver implemenetion to send service // configs to gRPC. 
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - if ccr.isDone() { - return + select { + case <-ccr.scCh: + default: } - grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: sc}) - } - ccr.curState.ServiceConfig = sc - ccr.cc.updateResolverState(ccr.curState) -} - -func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - if s.ServiceConfig == ccr.curState.ServiceConfig && (len(ccr.curState.Addresses) == 0) == (len(s.Addresses) == 0) { - return - } - var updates []string - if s.ServiceConfig != ccr.curState.ServiceConfig { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtINFO, - }) + ccr.scCh <- sc } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2a595622d3..033801f34f 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -21,7 +21,6 @@ package grpc import ( "bytes" "compress/gzip" - "context" "encoding/binary" "fmt" "io" @@ -32,15 +31,16 @@ import ( "sync" "time" + "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" - "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" ) // Compressor defines the interface gRPC uses to compress a message. @@ -155,20 +155,17 @@ func (d *gzipDecompressor) Type() string { type callInfo struct { compressorType string failFast bool - stream ClientStream + stream *clientStream + traceInfo traceInfo // in trace.go maxReceiveMessageSize *int maxSendMessageSize *int creds credentials.PerRPCCredentials contentSubtype string codec baseCodec - maxRetryRPCBufferSize int } func defaultCallInfo() *callInfo { - return &callInfo{ - failFast: true, - maxRetryRPCBufferSize: 256 * 1024, // 256KB - } + return &callInfo{failFast: true} } // CallOption configures a Call before it starts or extracts information from @@ -253,8 +250,8 @@ func (o PeerCallOption) after(c *callInfo) { } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false, the RPC will fail +// FailFast configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If failFast is true, the RPC will fail // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will // retry the call if it fails due to a transient error. gRPC will not retry if @@ -262,14 +259,7 @@ func (o PeerCallOption) after(c *callInfo) { // the data. Please refer to // https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. // -// By default, RPCs don't "wait for ready". 
-func WaitForReady(waitForReady bool) CallOption { - return FailFastCallOption{FailFast: !waitForReady} -} - -// FailFast is the opposite of WaitForReady. -// -// Deprecated: use WaitForReady. +// By default, RPCs are "Fail Fast". func FailFast(failFast bool) CallOption { return FailFastCallOption{FailFast: failFast} } @@ -370,13 +360,13 @@ func (o CompressorCallOption) after(c *callInfo) {} // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for // more details. // -// If ForceCodec is not also used, the content-subtype will be used to look up -// the Codec to use in the registry controlled by RegisterCodec. See the -// documentation on RegisterCodec for details on registration. The lookup of -// content-subtype is case-insensitive. If no such Codec is found, the call +// If CallCustomCodec is not also used, the content-subtype will be used to +// look up the Codec to use in the registry controlled by RegisterCodec. See +// the documentation on RegisterCodec for details on registration. The lookup +// of content-subtype is case-insensitive. If no such Codec is found, the call // will result in an error with code codes.Internal. // -// If ForceCodec is also used, that Codec will be used for all request and +// If CallCustomCodec is also used, that Codec will be used for all request and // response messages, with the content-subtype set to the given contentSubtype // here for requests. func CallContentSubtype(contentSubtype string) CallOption { @@ -396,7 +386,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { } func (o ContentSubtypeCallOption) after(c *callInfo) {} -// ForceCodec returns a CallOption that will set the given Codec to be +// CallCustomCodec returns a CallOption that will set the given Codec to be // used for all request and response messages for a call. The result of calling // String() will be used as the content-subtype in a case-insensitive manner. // @@ -408,37 +398,12 @@ func (o ContentSubtypeCallOption) after(c *callInfo) {} // // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. -// -// This is an EXPERIMENTAL API. -func ForceCodec(codec encoding.Codec) CallOption { - return ForceCodecCallOption{Codec: codec} -} - -// ForceCodecCallOption is a CallOption that indicates the codec used for -// marshaling messages. -// -// This is an EXPERIMENTAL API. -type ForceCodecCallOption struct { - Codec encoding.Codec -} - -func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec - return nil -} -func (o ForceCodecCallOption) after(c *callInfo) {} - -// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of -// an encoding.Codec. -// -// Deprecated: use ForceCodec instead. func CallCustomCodec(codec Codec) CallOption { return CustomCodecCallOption{Codec: codec} } // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. -// // This is an EXPERIMENTAL API. type CustomCodecCallOption struct { Codec Codec @@ -450,27 +415,6 @@ func (o CustomCodecCallOption) before(c *callInfo) error { } func (o CustomCodecCallOption) after(c *callInfo) {} -// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory -// used for buffering this RPC's requests for retry purposes. -// -// This API is EXPERIMENTAL. 
-func MaxRetryRPCBufferSize(bytes int) CallOption { - return MaxRetryRPCBufferSizeCallOption{bytes} -} - -// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of -// memory to be used for caching this RPC for retry purposes. -// This is an EXPERIMENTAL API. -type MaxRetryRPCBufferSizeCallOption struct { - MaxRetryRPCBufferSize int -} - -func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { - c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize - return nil -} -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} - // The format of the payload: compressed or not? type payloadFormat uint8 @@ -500,7 +444,7 @@ type parser struct { // * io.EOF, when no messages remain // * io.ErrUnexpectedEOF // * of type transport.ConnectionError -// * an error from the status package +// * of type transport.StreamError // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. @@ -563,10 +507,7 @@ func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, } cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) - if err != nil { - return nil, wrapErr(err) - } + z, _ := compressor.Compress(cbuf) if _, err := z.Write(in); err != nil { return nil, wrapErr(err) } @@ -630,22 +571,20 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return nil } -type payloadInfo struct { - wireLength int // The compressed length got from wire. - uncompressedBytes []byte -} - -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error { pf, d, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, err + return err } - if payInfo != nil { - payInfo.wireLength = len(d) + if inPayload != nil { + inPayload.WireLength = len(d) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, st.Err() + return st.Err() } if pf == compressionMade { @@ -654,42 +593,33 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei if dc != nil { d, err = dc.Do(bytes.NewReader(d)) if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } else { dcReader, err := compressor.Decompress(bytes.NewReader(d)) if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. 
- d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + d, err = ioutil.ReadAll(dcReader) if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } } if len(d) > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) - } - return d, nil -} - -// For the two compressor parameters, both should not be set, but if they are, -// dc takes precedence over compressor. -// TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) - if err != nil { - return err + return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) } if err := c.Unmarshal(d, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) } - if payInfo != nil { - payInfo.uncompressedBytes = d + if inPayload != nil { + inPayload.RecvTime = time.Now() + inPayload.Payload = m + // TODO truncate large payload. + inPayload.Data = d + inPayload.Length = len(d) } return nil } @@ -712,17 +642,23 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { // Code returns the error code for err if it was produced by the rpc system. // Otherwise, it returns codes.Unknown. // -// Deprecated: use status.Code instead. +// Deprecated: use status.FromError and Code method instead. func Code(err error) codes.Code { - return status.Code(err) + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown } // ErrorDesc returns the error description of err if it was produced by the rpc system. // Otherwise, it returns err.Error() or empty string when err is nil. // -// Deprecated: use status.Convert and Message method instead. +// Deprecated: use status.FromError and Message method instead. func ErrorDesc(err error) string { - return status.Convert(err).Message() + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() } // Errorf returns an error containing an error code and a description; @@ -733,31 +669,6 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { return status.Errorf(c, format, a...) } -// toRPCErr converts an error into an error from the status package. -func toRPCErr(err error) error { - if err == nil || err == io.EOF { - return err - } - if err == io.ErrUnexpectedEOF { - return status.Error(codes.Internal, err.Error()) - } - if _, ok := status.FromError(err); ok { - return err - } - switch e := err.(type) { - case transport.ConnectionError: - return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } - } - return status.Error(codes.Unknown, err.Error()) -} - // setCallInfoCodec should only be called after CallOptions have been applied. 
func setCallInfoCodec(c *callInfo) error { if c.codec != nil { @@ -813,19 +724,6 @@ func parseDialTarget(target string) (net string, addr string) { return net, target } -// channelzData is used to store channelz related data for ClientConn, addrConn and Server. -// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - callsStarted int64 - callsFailed int64 - callsSucceeded int64 - // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of - // time.Time since it's more costly to atomically update time.Time variable than int64 variable. - lastCallStartedTime int64 -} - // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest // support package version is 5. diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8115828fdf..014c72b3f2 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -19,7 +19,7 @@ package grpc import ( - "context" + "bytes" "errors" "fmt" "io" @@ -30,9 +30,12 @@ import ( "runtime" "strings" "sync" - "sync/atomic" "time" + "io/ioutil" + + "golang.org/x/net/context" + "golang.org/x/net/http2" "golang.org/x/net/trace" "google.golang.org/grpc/codes" @@ -40,15 +43,14 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" + "google.golang.org/grpc/transport" ) const ( @@ -104,8 +106,12 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number - czData *channelzData + channelzID int64 // channelz unique identification number + czmu sync.RWMutex + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + lastCallStartedTime time.Time } type options struct { @@ -120,6 +126,7 @@ type options struct { maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int + useHandlerImpl bool // use http.Handler-based server unknownStreamDesc *StreamDesc keepaliveParams keepalive.ServerParameters keepalivePolicy keepalive.EnforcementPolicy @@ -128,25 +135,19 @@ type options struct { writeBufferSize int readBufferSize int connectionTimeout time.Duration - maxHeaderListSize *uint32 } var defaultServerOptions = options{ maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, - writeBufferSize: defaultWriteBufSize, - readBufferSize: defaultReadBufSize, } // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption func(*options) -// WriteBufferSize determines how much data can be batched before doing a write on the wire. 
-// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. -// Note: A Send call may not directly translate to a write. +// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. func WriteBufferSize(s int) ServerOption { return func(o *options) { o.writeBufferSize = s @@ -155,9 +156,6 @@ func WriteBufferSize(s int) ServerOption { // ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most // for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. func ReadBufferSize(s int) ServerOption { return func(o *options) { o.readBufferSize = s @@ -182,11 +180,6 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { - grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second - } - return func(o *options) { o.keepaliveParams = kp } @@ -249,7 +242,7 @@ func MaxRecvMsgSize(m int) ServerOption { } // MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. -// If this is not set, gRPC uses the default `math.MaxInt32`. +// If this is not set, gRPC uses the default 4MB. func MaxSendMsgSize(m int) ServerOption { return func(o *options) { o.maxSendMessageSize = m @@ -342,14 +335,6 @@ func ConnectionTimeout(d time.Duration) ServerOption { } } -// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size -// of header list that the server is prepared to accept. -func MaxHeaderListSize(s uint32) ServerOption { - return func(o *options) { - o.maxHeaderListSize = &s - } -} - // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. 
func NewServer(opt ...ServerOption) *Server { @@ -358,13 +343,12 @@ func NewServer(opt ...ServerOption) *Server { o(&opts) } s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[io.Closer]bool), - m: make(map[string]*service), - quit: make(chan struct{}), - done: make(chan struct{}), - czData: new(channelzData), + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[io.Closer]bool), + m: make(map[string]*service), + quit: make(chan struct{}), + done: make(chan struct{}), } s.cv = sync.NewCond(&s.mu) if EnableTracing { @@ -373,7 +357,7 @@ func NewServer(opt ...ServerOption) *Server { } if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + s.channelzID = channelz.RegisterServer(s, "") } return s } @@ -497,8 +481,7 @@ type listenSocket struct { func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { return &channelz.SocketInternalMetric{ - SocketOptions: channelz.GetSocketOption(l.Listener), - LocalAddr: l.Listener.Addr(), + LocalAddr: l.Listener.Addr(), } } @@ -542,7 +525,7 @@ func (s *Server) Serve(lis net.Listener) error { s.lis[ls] = true if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, "") } s.mu.Unlock() @@ -614,13 +597,12 @@ func (s *Server) handleRawConn(rawConn net.Conn) { rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) conn, authInfo, err := s.useTransportAuthenticator(rawConn) if err != nil { - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // If serverHandshake returns ErrConnDispatched, keep rawConn open. if err != credentials.ErrConnDispatched { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) rawConn.Close() } rawConn.SetDeadline(time.Time{}) @@ -635,19 +617,27 @@ func (s *Server) handleRawConn(rawConn net.Conn) { } s.mu.Unlock() - // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(conn, authInfo) - if st == nil { - return + var serve func() + c := conn.(io.Closer) + if s.opts.useHandlerImpl { + serve = func() { s.serveUsingHandler(conn) } + } else { + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + c = st + serve = func() { s.serveStreams(st) } } rawConn.SetDeadline(time.Time{}) - if !s.addConn(st) { + if !s.addConn(c) { return } go func() { - s.serveStreams(st) - s.removeConn(st) + serve() + s.removeConn(c) }() } @@ -666,7 +656,6 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, ChannelzParentID: s.channelzID, - MaxHeaderListSize: s.opts.maxHeaderListSize, } st, err := transport.NewServerTransport("http2", c, config) if err != nil { @@ -702,6 +691,27 @@ func (s *Server) serveStreams(st transport.ServerTransport) { var _ http.Handler = (*Server)(nil) +// serveUsingHandler is called from handleRawConn when s is configured +// to handle requests via the http.Handler interface. 
It sets up a +// net/http.Server to handle the just-accepted conn. The http.Server +// is configured to route all incoming requests (all HTTP/2 streams) +// to ServeHTTP, which creates a new ServerTransport for each stream. +// serveUsingHandler blocks until conn closes. +// +// This codepath is only used when Server.TestingUseHandlerImpl has +// been configured. This lets the end2end tests exercise the ServeHTTP +// method as one of the environment types. +// +// conn is the *tls.Conn that's already been authenticated. +func (s *Server) serveUsingHandler(conn net.Conn) { + h2s := &http2.Server{ + MaxConcurrentStreams: s.opts.maxConcurrentStreams, + } + h2s.ServeConn(conn, &http2.ServeConnOpts{ + Handler: s, + }) +} + // ServeHTTP implements the Go standard library's http.Handler // interface by responding to the gRPC request r, by looking up // the requested gRPC method in the gRPC server s. @@ -749,13 +759,12 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea trInfo = &traceInfo{ tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, } + trInfo.firstLine.client = false + trInfo.firstLine.remoteAddr = st.RemoteAddr() + if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) + trInfo.firstLine.deadline = dl.Sub(time.Now()) } return trInfo } @@ -785,26 +794,36 @@ func (s *Server) removeConn(c io.Closer) { } } -func (s *Server) channelzMetric() *channelz.ServerInternalMetric { +// ChannelzMetric returns ServerInternalMetric of current server. +// This is an EXPERIMENTAL API. +func (s *Server) ChannelzMetric() *channelz.ServerInternalMetric { + s.czmu.RLock() + defer s.czmu.RUnlock() return &channelz.ServerInternalMetric{ - CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + CallsStarted: s.callsStarted, + CallsSucceeded: s.callsSucceeded, + CallsFailed: s.callsFailed, + LastCallStartedTimestamp: s.lastCallStartedTime, } } func (s *Server) incrCallsStarted() { - atomic.AddInt64(&s.czData.callsStarted, 1) - atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) + s.czmu.Lock() + s.callsStarted++ + s.lastCallStartedTime = time.Now() + s.czmu.Unlock() } func (s *Server) incrCallsSucceeded() { - atomic.AddInt64(&s.czData.callsSucceeded, 1) + s.czmu.Lock() + s.callsSucceeded++ + s.czmu.Unlock() } func (s *Server) incrCallsFailed() { - atomic.AddInt64(&s.czData.callsFailed, 1) + s.czmu.Lock() + s.callsFailed++ + s.czmu.Unlock() } func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { @@ -861,6 +880,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } if trInfo != nil { defer trInfo.tr.Finish() + trInfo.firstLine.client = false trInfo.tr.LazyLog(&trInfo.firstLine, false) defer func() { if err != nil && err != io.EOF { @@ -870,30 +890,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
}() } - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { - ctx := stream.Context() - md, _ := metadata.FromIncomingContext(ctx) - logEntry := &binarylog.ClientHeader{ - Header: md, - MethodName: stream.Method(), - PeerAddr: nil, - } - if deadline, ok := ctx.Deadline(); ok { - logEntry.Timeout = time.Until(deadline) - if logEntry.Timeout < 0 { - logEntry.Timeout = 0 - } - } - if a := md[":authority"]; len(a) > 0 { - logEntry.Authority = a[0] - } - if peer, ok := peer.FromContext(ctx); ok { - logEntry.PeerAddr = peer.Addr - } - binlog.Log(logEntry) - } - // comp and cp are used for compression. decomp and dc are used for // decompression. If comp and decomp are both set, they are the same; // however they are kept separate to ensure that at most one of the @@ -930,38 +926,81 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } - var payInfo *payloadInfo - if sh != nil || binlog != nil { - payInfo = &payloadInfo{} + p := &parser{r: stream} + pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if st, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, st); e != nil { grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) + } } return err } if channelz.IsOn() { t.IncrMsgRecv() } + if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + return st.Err() + } + var inPayload *stats.InPayload + if sh != nil { + inPayload = &stats.InPayload{ + RecvTime: time.Now(), + } + } df := func(v interface{}) error { - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + if inPayload != nil { + inPayload.WireLength = len(req) + } + if pf == compressionMade { + var err error + if dc != nil { + req, err = dc.Do(bytes.NewReader(req)) + if err != nil { + return status.Errorf(codes.Internal, err.Error()) + } + } else { + tmp, _ := decomp.Decompress(bytes.NewReader(req)) + req, err = ioutil.ReadAll(tmp) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } + } + if len(req) > s.opts.maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with + // java implementation. + return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", len(req), s.opts.maxReceiveMessageSize) + } + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(req, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { - sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - Data: d, - Length: len(d), - }) - } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ - Message: d, - }) + if inPayload != nil { + inPayload.Payload = v + inPayload.Data = req + inPayload.Length = len(req) + sh.HandleRPC(stream.Context(), inPayload) } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) @@ -984,25 +1023,15 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if e := t.WriteStatus(stream, appStatus); e != nil { grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) } - if binlog != nil { - if h, _ := stream.Header(); h.Len() > 0 { - // Only log serverHeader if there was header. Otherwise it can - // be trailer only. - binlog.Log(&binarylog.ServerHeader{ - Header: h, - }) - } - binlog.Log(&binarylog.ServerTrailer{ - Trailer: stream.Trailer(), - Err: appErr, - }) - } return appErr } if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } - opts := &transport.Options{Last: true} + opts := &transport.Options{ + Last: true, + Delay: false, + } if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { @@ -1017,31 +1046,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. switch st := err.(type) { case transport.ConnectionError: // Nothing to do here. + case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } default: panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - if binlog != nil { - h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ - Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ - Trailer: stream.Trailer(), - Err: appErr, - }) - } return err } - if binlog != nil { - h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ - Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ - Message: reply, - }) - } if channelz.IsOn() { t.IncrMsgSent() } @@ -1051,14 +1065,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? 
- err = t.WriteStatus(stream, status.New(codes.OK, "")) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ - Trailer: stream.Trailer(), - Err: appErr, - }) - } - return err + return t.WriteStatus(stream, status.New(codes.OK, "")) } func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { @@ -1092,40 +1099,17 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ - ctx: ctx, - t: t, - s: stream, - p: &parser{r: stream}, - codec: s.getCodec(stream.ContentSubtype()), + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, statsHandler: sh, } - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { - md, _ := metadata.FromIncomingContext(ctx) - logEntry := &binarylog.ClientHeader{ - Header: md, - MethodName: stream.Method(), - PeerAddr: nil, - } - if deadline, ok := ctx.Deadline(); ok { - logEntry.Timeout = time.Until(deadline) - if logEntry.Timeout < 0 { - logEntry.Timeout = 0 - } - } - if a := md[":authority"]; len(a) > 0 { - logEntry.Authority = a[0] - } - if peer, ok := peer.FromContext(ss.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - ss.binlog.Log(logEntry) - } - // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { @@ -1185,7 +1169,12 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + switch err := appErr.(type) { + case transport.StreamError: + appStatus = status.New(err.Code, err.Desc) + default: + appStatus = status.New(codes.Unknown, appErr.Error()) + } appErr = appStatus.Err() } if trInfo != nil { @@ -1195,12 +1184,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ - Trailer: ss.s.Trailer(), - Err: appErr, - }) - } // TODO: Should we log an error from WriteStatus here and below? 
return appErr } @@ -1209,14 +1192,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } - err = t.WriteStatus(ss.s, status.New(codes.OK, "")) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ - Trailer: ss.s.Trailer(), - Err: appErr, - }) - } - return err + return t.WriteStatus(ss.s, status.New(codes.OK, "")) } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { @@ -1245,33 +1221,47 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } service := sm[:pos] method := sm[pos+1:] - - srv, knownService := s.m[service] - if knownService { - if md, ok := srv.md[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + srv, ok := s.m[service] + if !ok { + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) return } - if sd, ok := srv.sd[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) - return + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) + trInfo.tr.SetError() } + errDesc := fmt.Sprintf("unknown service %v", service) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + // Unary RPC or Streaming RPC? + if md, ok := srv.md[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.sd[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) + trInfo.tr.SetError() } - // Unknown service, or known server unknown method. 
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) return } - var errDesc string - if !knownService { - errDesc = fmt.Sprintf("unknown service %v", service) - } else { - errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) - } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() - } + errDesc := fmt.Sprintf("unknown method %v", method) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) @@ -1420,6 +1410,12 @@ func (s *Server) GracefulStop() { s.mu.Unlock() } +func init() { + internal.TestingUseHandlerImpl = func(arg interface{}) { + arg.(*Server).opts.useHandlerImpl = true + } +} + // contentSubtype must be lowercase // cannot return nil func (s *Server) getCodec(contentSubtype string) baseCodec { @@ -1488,11 +1484,3 @@ func Method(ctx context.Context) (string, bool) { } return s.Method(), true } - -type channelzServer struct { - s *Server -} - -func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { - return c.s.channelzMetric() -} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 1c5227426f..015631d8d3 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -25,7 +25,6 @@ import ( "strings" "time" - "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" ) @@ -57,8 +56,6 @@ type MethodConfig struct { // MaxRespSize is the maximum allowed payload size for an individual response in a // stream (server->client) in bytes. MaxRespSize *int - // RetryPolicy configures retry options for the method. - retryPolicy *retryPolicy } // ServiceConfig is provided by the service provider and contains parameters for how @@ -71,96 +68,13 @@ type ServiceConfig struct { // LB is the load balancer the service providers recommends. The balancer specified // via grpc.WithBalancer will override this. LB *string - - // Methods contains a map for the methods in this service. If there is an - // exact match for a method (i.e. /service/method) in the map, use the - // corresponding MethodConfig. If there's no exact match, look for the - // default config for the service (/service/) and use the corresponding - // MethodConfig if it exists. Otherwise, the method has no MethodConfig to - // use. + // Methods contains a map for the methods in this service. + // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. + // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. + // Otherwise, the method has no MethodConfig to use. Methods map[string]MethodConfig - // If a retryThrottlingPolicy is provided, gRPC will automatically throttle - // retry attempts and hedged RPCs when the client’s ratio of failures to - // successes exceeds a threshold. - // - // For each server name, the gRPC client will maintain a token_count which is - // initially set to maxTokens, and can take values between 0 and maxTokens. - // - // Every outgoing RPC (regardless of service or method invoked) will change - // token_count as follows: - // - // - Every failed RPC will decrement the token_count by 1. - // - Every successful RPC will increment the token_count by tokenRatio. 
- // - // If token_count is less than or equal to maxTokens / 2, then RPCs will not - // be retried and hedged RPCs will not be sent. - retryThrottling *retryThrottlingPolicy - // healthCheckConfig must be set as one of the requirement to enable LB channel - // health check. - healthCheckConfig *healthCheckConfig - // rawJSONString stores service config json string that get parsed into - // this service config struct. - rawJSONString string -} - -// healthCheckConfig defines the go-native version of the LB channel health check config. -type healthCheckConfig struct { - // serviceName is the service name to use in the health-checking request. - ServiceName string -} - -// retryPolicy defines the go-native version of the retry policy defined by the -// service config here: -// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config -type retryPolicy struct { - // MaxAttempts is the maximum number of attempts, including the original RPC. - // - // This field is required and must be two or greater. - maxAttempts int - - // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initialBackoffMS). In general, the nth attempt will occur at - // random(0, - // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)). - // - // These fields are required and must be greater than zero. - initialBackoff time.Duration - maxBackoff time.Duration - backoffMultiplier float64 - - // The set of status codes which may be retried. - // - // Status codes are specified as strings, e.g., "UNAVAILABLE". - // - // This field is required and must be non-empty. - // Note: a set is used to store this for easy lookup. - retryableStatusCodes map[codes.Code]bool -} - -type jsonRetryPolicy struct { - MaxAttempts int - InitialBackoff string - MaxBackoff string - BackoffMultiplier float64 - RetryableStatusCodes []codes.Code -} - -// retryThrottlingPolicy defines the go-native version of the retry throttling -// policy defined by the service config here: -// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config -type retryThrottlingPolicy struct { - // The number of tokens starts at maxTokens. The token_count will always be - // between 0 and maxTokens. - // - // This field is required and must be greater than zero. - MaxTokens float64 - // The amount of tokens to add on each successful RPC. Typically this will - // be some number between 0 and 1, e.g., 0.1. - // - // This field is required and must be greater than zero. Up to 3 decimal - // places are supported. - TokenRatio float64 + stickinessMetadataKey *string } func parseDuration(s *string) (*time.Duration, error) { @@ -230,33 +144,30 @@ type jsonMC struct { Timeout *string MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 - RetryPolicy *jsonRetryPolicy } // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
type jsonSC struct { - LoadBalancingPolicy *string - MethodConfig *[]jsonMC - RetryThrottling *retryThrottlingPolicy - HealthCheckConfig *healthCheckConfig + LoadBalancingPolicy *string + StickinessMetadataKey *string + MethodConfig *[]jsonMC } -func parseServiceConfig(js string) (*ServiceConfig, error) { +func parseServiceConfig(js string) (ServiceConfig, error) { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + return ServiceConfig{}, err } sc := ServiceConfig{ - LB: rsc.LoadBalancingPolicy, - Methods: make(map[string]MethodConfig), - retryThrottling: rsc.RetryThrottling, - healthCheckConfig: rsc.HealthCheckConfig, - rawJSONString: js, + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + + stickinessMetadataKey: rsc.StickinessMetadataKey, } if rsc.MethodConfig == nil { - return &sc, nil + return sc, nil } for _, m := range *rsc.MethodConfig { @@ -266,17 +177,13 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { d, err := parseDuration(m.Timeout) if err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + return ServiceConfig{}, err } mc := MethodConfig{ WaitForReady: m.WaitForReady, Timeout: d, } - if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err - } if m.MaxRequestMessageBytes != nil { if *m.MaxRequestMessageBytes > int64(maxInt) { mc.MaxReqSize = newInt(maxInt) @@ -298,54 +205,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { } } - if sc.retryThrottling != nil { - if sc.retryThrottling.MaxTokens <= 0 || - sc.retryThrottling.MaxTokens > 1000 || - sc.retryThrottling.TokenRatio <= 0 { - // Illegal throttling config; disable throttling. - sc.retryThrottling = nil - } - } - return &sc, nil -} - -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { - if jrp == nil { - return nil, nil - } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } - - if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || - jrp.BackoffMultiplier <= 0 || - len(jrp.RetryableStatusCodes) == 0 { - grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) - return nil, nil - } - - rp := &retryPolicy{ - maxAttempts: jrp.MaxAttempts, - initialBackoff: *ib, - maxBackoff: *mb, - backoffMultiplier: jrp.BackoffMultiplier, - retryableStatusCodes: make(map[codes.Code]bool), - } - if rp.maxAttempts > 5 { - // TODO(retry): Make the max maxAttempts configurable. - rp.maxAttempts = 5 - } - for _, code := range jrp.RetryableStatusCodes { - rp.retryableStatusCodes[code] = true - } - return rp, nil + return sc, nil } func min(a, b *int) *int { diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go index dc03731e45..05b384c693 100644 --- a/vendor/google.golang.org/grpc/stats/handlers.go +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -19,8 +19,9 @@ package stats import ( - "context" "net" + + "golang.org/x/net/context" ) // ConnTagInfo defines the relevant information needed by connection context tagger. 
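The service_config.go hunk above narrows what the downgraded parser understands: only loadBalancingPolicy, stickinessMetadataKey, and methodConfig survive, while the newer retryPolicy, retryThrottling, and healthCheckConfig sections are removed. The standalone Go sketch below mirrors that jsonSC/jsonMC shape to show the kind of service-config JSON the downgraded code is expected to accept; the jsonName type, file name, and example values are illustrative assumptions, not identifiers taken from the vendored package.

// servicecfg_sketch.go — illustrative sketch only; it mirrors the downgraded
// jsonSC/jsonMC shape from the hunk above and is not part of this patch's
// vendored code.
package main

import (
	"encoding/json"
	"fmt"
)

// jsonName is an assumed helper for the methodConfig "name" entries; its exact
// field layout is not shown in the hunk above.
type jsonName struct {
	Service *string
	Method  *string
}

// jsonMC carries the per-method settings that remain after the downgrade.
type jsonMC struct {
	Name                    *[]jsonName
	WaitForReady            *bool
	Timeout                 *string
	MaxRequestMessageBytes  *int64
	MaxResponseMessageBytes *int64
}

// jsonSC is the top-level shape: no RetryThrottling or HealthCheckConfig,
// but a StickinessMetadataKey, as in the downgraded service_config.go.
type jsonSC struct {
	LoadBalancingPolicy   *string
	StickinessMetadataKey *string
	MethodConfig          *[]jsonMC
}

func main() {
	// Retry or health-check sections from newer configs would simply be
	// ignored by encoding/json here, since the struct no longer has fields
	// for them.
	js := `{
	  "loadBalancingPolicy": "round_robin",
	  "stickinessMetadataKey": "session-id",
	  "methodConfig": [{
	    "name": [{"service": "echo.Echo", "method": "UnaryEcho"}],
	    "waitForReady": true,
	    "timeout": "1.5s",
	    "maxRequestMessageBytes": 1048576
	  }]
	}`
	var sc jsonSC
	if err := json.Unmarshal([]byte(js), &sc); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Printf("lb=%s stickinessKey=%s methods=%d\n",
		*sc.LoadBalancingPolicy, *sc.StickinessMetadataKey, len(*sc.MethodConfig))
}
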
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index f3f593c844..3f13190a0a 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -24,11 +24,10 @@ package stats // import "google.golang.org/grpc/stats" import ( - "context" "net" "time" - "google.golang.org/grpc/metadata" + "golang.org/x/net/context" ) // RPCStats contains stats information about RPCs. @@ -174,9 +173,6 @@ type End struct { BeginTime time.Time // EndTime is the time when the RPC ends. EndTime time.Time - // Trailer contains the trailer metadata received from the server. This - // field is only valid if this End is from the client side. - Trailer metadata.MD // Error is the error the RPC ended with. It is an error generated from // status.Status and can be converted back to status.Status using // status.FromError if non-nil. diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/status/go16.go similarity index 51% rename from vendor/google.golang.org/grpc/internal/binarylog/util.go rename to vendor/google.golang.org/grpc/status/go16.go index 15dc7803d8..e59b53e82b 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/util.go +++ b/vendor/google.golang.org/grpc/status/go16.go @@ -1,3 +1,5 @@ +// +build go1.6,!go1.7 + /* * * Copyright 2018 gRPC authors. @@ -16,26 +18,25 @@ * */ -package binarylog +package status import ( - "errors" - "strings" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" ) -// parseMethodName splits service and method from the input. It expects format -// "/service/method". -// -// TODO: move to internal/grpcutil. -func parseMethodName(methodName string) (service, method string, _ error) { - if !strings.HasPrefix(methodName, "/") { - return "", "", errors.New("invalid method name: should start with /") +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. +func FromContextError(err error) *Status { + switch err { + case nil: + return New(codes.OK, "") + case context.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) } - methodName = methodName[1:] - - pos := strings.LastIndex(methodName, "/") - if pos < 0 { - return "", "", errors.New("invalid method name: suffix /method is missing") - } - return methodName[:pos], methodName[pos+1:], nil } diff --git a/vendor/google.golang.org/grpc/status/go17.go b/vendor/google.golang.org/grpc/status/go17.go new file mode 100644 index 0000000000..090215149c --- /dev/null +++ b/vendor/google.golang.org/grpc/status/go17.go @@ -0,0 +1,44 @@ +// +build go1.7 + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package status + +import ( + "context" + + netctx "golang.org/x/net/context" + "google.golang.org/grpc/codes" +) + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. +func FromContextError(err error) *Status { + switch err { + case nil: + return New(codes.OK, "") + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled, netctx.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index ed36681bb5..9c61b09450 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -28,7 +28,6 @@ package status import ( - "context" "errors" "fmt" @@ -127,9 +126,7 @@ func FromError(err error) (s *Status, ok bool) { if err == nil { return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { + if se, ok := err.(interface{ GRPCStatus() *Status }); ok { return se.GRPCStatus(), true } return New(codes.Unknown, err.Error()), false @@ -185,26 +182,8 @@ func Code(err error) codes.Code { if err == nil { return codes.OK } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { + if se, ok := err.(interface{ GRPCStatus() *Status }); ok { return se.GRPCStatus().Code() } return codes.Unknown } - -// FromContextError converts a context error into a Status. It returns a -// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is -// non-nil and not a context error. -func FromContextError(err error) *Status { - switch err { - case nil: - return New(codes.OK, "") - case context.DeadlineExceeded: - return New(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return New(codes.Canceled, err.Error()) - default: - return New(codes.Unknown, err.Error()) - } -} diff --git a/vendor/google.golang.org/grpc/stickiness_linkedmap.go b/vendor/google.golang.org/grpc/stickiness_linkedmap.go new file mode 100644 index 0000000000..1c726af164 --- /dev/null +++ b/vendor/google.golang.org/grpc/stickiness_linkedmap.go @@ -0,0 +1,97 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "container/list" +) + +type linkedMapKVPair struct { + key string + value *stickyStoreEntry +} + +// linkedMap is an implementation of a map that supports removing the oldest +// entry. +// +// linkedMap is NOT thread safe. +// +// It's for use of stickiness only! +type linkedMap struct { + m map[string]*list.Element + l *list.List // Head of the list is the oldest element. +} + +// newLinkedMap returns a new LinkedMap. 
+func newLinkedMap() *linkedMap { + return &linkedMap{ + m: make(map[string]*list.Element), + l: list.New(), + } +} + +// put adds entry (key, value) to the map. Existing key will be overridden. +func (m *linkedMap) put(key string, value *stickyStoreEntry) { + if oldE, ok := m.m[key]; ok { + // Remove existing entry. + m.l.Remove(oldE) + } + e := m.l.PushBack(&linkedMapKVPair{key: key, value: value}) + m.m[key] = e +} + +// get returns the value of the given key. +func (m *linkedMap) get(key string) (*stickyStoreEntry, bool) { + e, ok := m.m[key] + if !ok { + return nil, false + } + m.l.MoveToBack(e) + return e.Value.(*linkedMapKVPair).value, true +} + +// remove removes key from the map, and returns the value. The map is not +// modified if key is not in the map. +func (m *linkedMap) remove(key string) (*stickyStoreEntry, bool) { + e, ok := m.m[key] + if !ok { + return nil, false + } + delete(m.m, key) + m.l.Remove(e) + return e.Value.(*linkedMapKVPair).value, true +} + +// len returns the len of the map. +func (m *linkedMap) len() int { + return len(m.m) +} + +// clear removes all elements from the map. +func (m *linkedMap) clear() { + m.m = make(map[string]*list.Element) + m.l = list.New() +} + +// removeOldest removes the oldest key from the map. +func (m *linkedMap) removeOldest() { + e := m.l.Front() + m.l.Remove(e) + delete(m.m, e.Value.(*linkedMapKVPair).key) +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 6e2bf51e0a..152d9eccd6 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -19,29 +19,21 @@ package grpc import ( - "context" "errors" "io" - "math" - "strconv" "sync" "time" + "golang.org/x/net/context" "golang.org/x/net/trace" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" "google.golang.org/grpc/encoding" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/balancerload" - "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" ) // StreamHandler defines the handler called by gRPC server to complete the @@ -63,20 +55,31 @@ type StreamDesc struct { // Stream defines the common interface a client or server stream has to satisfy. // -// Deprecated: See ClientStream and ServerStream documentation instead. +// All errors returned from Stream are compatible with the status package. type Stream interface { - // Deprecated: See ClientStream and ServerStream documentation instead. + // Context returns the context for this stream. Context() context.Context - // Deprecated: See ClientStream and ServerStream documentation instead. + // SendMsg blocks until it sends m, the stream is done or the stream + // breaks. + // On error, it aborts the stream and returns an RPC status on client + // side. On server side, it simply returns the error to the caller. + // SendMsg is called by generated code. Also Users can call SendMsg + // directly when it is really needed in their use cases. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call SendMsg on the same stream in different goroutines. 
SendMsg(m interface{}) error - // Deprecated: See ClientStream and ServerStream documentation instead. + // RecvMsg blocks until it receives a message or the stream is + // done. On client side, it returns io.EOF when the stream is done. On + // any other error, it aborts the stream and returns an RPC status. On + // server side, it simply returns the error to the caller. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call RecvMsg on the same stream in different goroutines. RecvMsg(m interface{}) error } -// ClientStream defines the client-side behavior of a streaming RPC. -// -// All errors returned from ClientStream methods are compatible with the -// status package. +// ClientStream defines the interface a client stream has to satisfy. type ClientStream interface { // Header returns the header metadata received from the server if there // is any. It blocks if the metadata is not ready to read. @@ -86,42 +89,15 @@ type ClientStream interface { // stream.Recv has returned a non-nil error (including io.EOF). Trailer() metadata.MD // CloseSend closes the send direction of the stream. It closes the stream - // when non-nil error is met. It is also not safe to call CloseSend - // concurrently with SendMsg. + // when non-nil error is met. CloseSend() error - // Context returns the context for this stream. + // Stream.SendMsg() may return a non-nil error when something wrong happens sending + // the request. The returned error indicates the status of this sending, not the final + // status of the RPC. // - // It should not be called until after Header or RecvMsg has returned. Once - // called, subsequent client-side retries are disabled. - Context() context.Context - // SendMsg is generally called by generated code. On error, SendMsg aborts - // the stream. If the error was generated by the client, the status is - // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. - // - // SendMsg blocks until: - // - There is sufficient flow control to schedule m with the transport, or - // - The stream is done, or - // - The stream breaks. - // - // SendMsg does not wait until the message is received by the server. An - // untimely stream closure may result in lost messages. To ensure delivery, - // users should ensure the RPC completed successfully using RecvMsg. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not safe - // to call SendMsg on the same stream in different goroutines. It is also - // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error - // RecvMsg blocks until it receives a message into m or the stream is - // done. It returns io.EOF when the stream completes successfully. On - // any other error, the stream is aborted and the error contains the RPC - // status. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not - // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + // Always call Stream.RecvMsg() to drain the stream and get the final + // status, otherwise there could be leaked resources. + Stream } // NewStream creates a new Stream for the client side. 
This is typically @@ -152,6 +128,8 @@ func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method st } // NewClientStream is a wrapper for ClientConn.NewStream. +// +// DEPRECATED: Use ClientConn.NewStream instead. func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { return cc.NewStream(ctx, desc, method, opts...) } @@ -166,11 +144,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth }() } c := defaultCallInfo() - // Provide an opportunity for the first RPC to see the first service config - // provided by the resolver. - if err := cc.waitForResolvedAddrs(ctx); err != nil { - return nil, err - } mc := cc.GetMethodConfig(method) if mc.WaitForReady != nil { c.failFast = !*mc.WaitForReady @@ -205,8 +178,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, + Host: cc.authority, + Method: method, + // If it's not client streaming, we should already have the request to be sent, + // so we don't flush the header. + // If it's client streaming, the user may never send a request or send it any + // time soon, so we ask the transport to flush the header. + Flush: desc.ClientStreams, ContentSubtype: c.contentSubtype, } @@ -231,19 +209,24 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if c.creds != nil { callHdr.Creds = c.creds } - var trInfo *traceInfo + var trInfo traceInfo if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), - firstLine: firstLine{ - client: true, - }, - } + trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + trInfo.firstLine.client = true if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = time.Until(deadline) + trInfo.firstLine.deadline = deadline.Sub(time.Now()) } trInfo.tr.LazyLog(&trInfo.firstLine, false) ctx = trace.NewContext(ctx, trInfo.tr) + defer func() { + if err != nil { + // Need to call tr.finish() if error is returned. + // Because tr will not be returned to caller. + trInfo.tr.LazyPrintf("RPC: [%v]", err) + trInfo.tr.SetError() + trInfo.tr.Finish() + } + }() } ctx = newContextWithRPCInfo(ctx, c.failFast) sh := cc.dopts.copts.StatsHandler @@ -257,59 +240,80 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth FailFast: c.failFast, } sh.HandleRPC(ctx, begin) + defer func() { + if err != nil { + // Only handle end stats if err != nil. + end := &stats.End{ + Client: true, + Error: err, + BeginTime: beginTime, + EndTime: time.Now(), + } + sh.HandleRPC(ctx, end) + } + }() + } + + var ( + t transport.ClientTransport + s *transport.Stream + done func(balancer.DoneInfo) + ) + for { + // Check to make sure the context has expired. This will prevent us from + // looping forever if an error occurs for wait-for-ready RPCs where no data + // is sent on the wire. + select { + case <-ctx.Done(): + return nil, toRPCErr(ctx.Err()) + default: + } + + t, done, err = cc.getTransport(ctx, c.failFast) + if err != nil { + return nil, err + } + + s, err = t.NewStream(ctx, callHdr) + if err != nil { + if done != nil { + done(balancer.DoneInfo{Err: err}) + done = nil + } + // In the event of any error from NewStream, we never attempted to write + // anything to the wire, so we can retry indefinitely for non-fail-fast + // RPCs. 
+ if !c.failFast { + continue + } + return nil, toRPCErr(err) + } + break } cs := &clientStream{ - callHdr: callHdr, - ctx: ctx, - methodConfig: &mc, - opts: opts, - callInfo: c, - cc: cc, - desc: desc, - codec: c.codec, - cp: cp, - comp: comp, - cancel: cancel, - beginTime: beginTime, - firstAttempt: true, + opts: opts, + c: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + attempt: &csAttempt{ + t: t, + s: s, + p: &parser{r: s}, + done: done, + dc: cc.dopts.dc, + ctx: ctx, + trInfo: trInfo, + statsHandler: sh, + beginTime: beginTime, + }, } - if !cc.dopts.disableRetry { - cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) - } - cs.binlog = binarylog.GetMethodLogger(method) - - cs.callInfo.stream = cs - // Only this initial attempt has stats/tracing. - // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. - if err := cs.newAttemptLocked(sh, trInfo); err != nil { - cs.finish(err) - return nil, err - } - - op := func(a *csAttempt) error { return a.newStream() } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) - return nil, err - } - - if cs.binlog != nil { - md, _ := metadata.FromOutgoingContext(ctx) - logEntry := &binarylog.ClientHeader{ - OnClientSide: true, - Header: md, - MethodName: method, - Authority: cs.cc.authority, - } - if deadline, ok := ctx.Deadline(); ok { - logEntry.Timeout = time.Until(deadline) - if logEntry.Timeout < 0 { - logEntry.Timeout = 0 - } - } - cs.binlog.Log(logEntry) - } - + cs.c.stream = cs + cs.attempt.cs = cs if desc != unaryStreamDesc { // Listen on cc and stream contexts to cleanup when the user closes the // ClientConn or cancels the stream context. In all other cases, an error @@ -328,48 +332,12 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return cs, nil } -func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) error { - cs.attempt = &csAttempt{ - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, - } - - if err := cs.ctx.Err(); err != nil { - return toRPCErr(err) - } - t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) - if err != nil { - return err - } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) - } - cs.attempt.t = t - cs.attempt.done = done - return nil -} - -func (a *csAttempt) newStream() error { - cs := a.cs - cs.callHdr.PreviousAttempts = cs.numRetries - s, err := a.t.NewStream(cs.ctx, cs.callHdr) - if err != nil { - return toRPCErr(err) - } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} - return nil -} - // clientStream implements a client side Stream. type clientStream struct { - callHdr *transport.CallHdr - opts []CallOption - callInfo *callInfo - cc *ClientConn - desc *StreamDesc + opts []CallOption + c *callInfo + cc *ClientConn + desc *StreamDesc codec baseCodec cp Compressor @@ -377,34 +345,13 @@ type clientStream struct { cancel context.CancelFunc // cancels all attempts - sentLast bool // sent an end stream - beginTime time.Time + sentLast bool // sent an end stream - methodConfig *MethodConfig + mu sync.Mutex // guards finished + finished bool // TODO: replace with atomic cmpxchg or sync.Once? - ctx context.Context // the application's context, wrapped by stats/tracing - - retryThrottler *retryThrottler // The throttler active when the RPC began. - - binlog *binarylog.MethodLogger // Binary logger, can be nil. 
- // serverHeaderBinlogged is a boolean for whether server header has been - // logged. Server header will be logged when the first time one of those - // happens: stream.Header(), stream.Recv(). - // - // It's only read and used by Recv() and Header(), so it doesn't need to be - // synchronized. - serverHeaderBinlogged bool - - mu sync.Mutex - firstAttempt bool // if true, transparent retry is valid - numRetries int // exclusive of transparent retry attempt(s) - numRetriesSincePushback int // retries since pushback; to reset backoff - finished bool // TODO: replace with atomic cmpxchg or sync.Once? - attempt *csAttempt // the active client stream attempt + attempt *csAttempt // the active client stream attempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer } // csAttempt implements a single transport stream attempt within a @@ -416,360 +363,53 @@ type csAttempt struct { p *parser done func(balancer.DoneInfo) - finished bool dc Decompressor decomp encoding.Compressor decompSet bool + ctx context.Context // the application's context, wrapped by stats/tracing + mu sync.Mutex // guards trInfo.tr - // trInfo may be nil (if EnableTracing is false). // trInfo.tr is set when created (if EnableTracing is true), // and cleared when the finish method is called. - trInfo *traceInfo + trInfo traceInfo statsHandler stats.Handler -} - -func (cs *clientStream) commitAttemptLocked() { - cs.committed = true - cs.buffer = nil -} - -func (cs *clientStream) commitAttempt() { - cs.mu.Lock() - cs.commitAttemptLocked() - cs.mu.Unlock() -} - -// shouldRetry returns nil if the RPC should be retried; otherwise it returns -// the error that should be returned by the operation. -func (cs *clientStream) shouldRetry(err error) error { - if cs.attempt.s == nil && !cs.callInfo.failFast { - // In the event of any error from NewStream (attempt.s == nil), we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil - } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. - return err - } - // Wait for the trailers. - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - } - if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { - // First attempt, wait-for-ready, stream unprocessed: transparently retry. - cs.firstAttempt = false - return nil - } - cs.firstAttempt = false - if cs.cc.dopts.disableRetry { - return err - } - - pushback := 0 - hasPushback := false - if cs.attempt.s != nil { - if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to { - return err - } - - // TODO(retry): Move down if the spec changes to not check server pushback - // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] - if len(sps) == 1 { - var e error - if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) - cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err - } - hasPushback = true - } else if len(sps) > 1 { - grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) - cs.retryThrottler.throttle() // This counts as a failure for throttling. 
- return err - } - } - - var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() - } else { - code = status.Convert(err).Code() - } - - rp := cs.methodConfig.retryPolicy - if rp == nil || !rp.retryableStatusCodes[code] { - return err - } - - // Note: the ordering here is important; we count this as a failure - // only if the code matched a retryable code. - if cs.retryThrottler.throttle() { - return err - } - if cs.numRetries+1 >= rp.maxAttempts { - return err - } - - var dur time.Duration - if hasPushback { - dur = time.Millisecond * time.Duration(pushback) - cs.numRetriesSincePushback = 0 - } else { - fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.initialBackoff) * fact - if max := float64(rp.maxBackoff); cur > max { - cur = max - } - dur = time.Duration(grpcrand.Int63n(int64(cur))) - cs.numRetriesSincePushback++ - } - - // TODO(dfawley): we could eagerly fail here if dur puts us past the - // deadline, but unsure if it is worth doing. - t := time.NewTimer(dur) - select { - case <-t.C: - cs.numRetries++ - return nil - case <-cs.ctx.Done(): - t.Stop() - return status.FromContextError(cs.ctx.Err()).Err() - } -} - -// Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(lastErr error) error { - for { - cs.attempt.finish(lastErr) - if err := cs.shouldRetry(lastErr); err != nil { - cs.commitAttemptLocked() - return err - } - if err := cs.newAttemptLocked(nil, nil); err != nil { - return err - } - if lastErr = cs.replayBufferLocked(); lastErr == nil { - return nil - } - } + beginTime time.Time } func (cs *clientStream) Context() context.Context { - cs.commitAttempt() - // No need to lock before using attempt, since we know it is committed and - // cannot change. - return cs.attempt.s.Context() -} - -func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { - cs.mu.Lock() - for { - if cs.committed { - cs.mu.Unlock() - return op(cs.attempt) - } - a := cs.attempt - cs.mu.Unlock() - err := op(a) - cs.mu.Lock() - if a != cs.attempt { - // We started another attempt already. - continue - } - if err == io.EOF { - <-a.s.Done() - } - if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { - onSuccess() - cs.mu.Unlock() - return err - } - if err := cs.retryLocked(err); err != nil { - cs.mu.Unlock() - return err - } - } + // TODO(retry): commit the current attempt (the context has peer-aware data). + return cs.attempt.context() } func (cs *clientStream) Header() (metadata.MD, error) { - var m metadata.MD - err := cs.withRetry(func(a *csAttempt) error { - var err error - m, err = a.s.Header() - return toRPCErr(err) - }, cs.commitAttemptLocked) + m, err := cs.attempt.header() if err != nil { + // TODO(retry): maybe retry on error or commit attempt on success. + err = toRPCErr(err) cs.finish(err) - return nil, err - } - if cs.binlog != nil && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. - logEntry := &binarylog.ServerHeader{ - OnClientSide: true, - Header: m, - PeerAddr: nil, - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - cs.binlog.Log(logEntry) - cs.serverHeaderBinlogged = true } return m, err } func (cs *clientStream) Trailer() metadata.MD { - // On RPC failure, we never need to retry, because usage requires that - // RecvMsg() returned a non-nil error before calling this function is valid. 
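[editor's note] The retry backoff removed in this hunk picks a delay of rand(0, min(initialBackoff * backoffMultiplier^n, maxBackoff)) unless the server supplied a pushback via the grpc-retry-pushback-ms trailer. A standalone sketch of that arithmetic; the parameter values in main are illustrative only:

package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// retryDelay mirrors the removed backoff logic: exponential growth per
// retry since the last server pushback, capped at maxBackoff, then
// jittered uniformly over [0, cur).
func retryDelay(initial, max time.Duration, multiplier float64, retriesSincePushback int) time.Duration {
	cur := float64(initial) * math.Pow(multiplier, float64(retriesSincePushback))
	if cur > float64(max) {
		cur = float64(max)
	}
	return time.Duration(rand.Int63n(int64(cur)))
}

func main() {
	for n := 0; n < 5; n++ {
		fmt.Println(retryDelay(100*time.Millisecond, 10*time.Second, 1.6, n))
	}
}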
- // We would have retried earlier if necessary. - // - // Commit the attempt anyway, just in case users are not following those - // directions -- it will prevent races and should not meaningfully impact - // performance. - cs.commitAttempt() - if cs.attempt.s == nil { - return nil - } - return cs.attempt.s.Trailer() -} - -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt - for _, f := range cs.buffer { - if err := f(a); err != nil { - return err - } - } - return nil -} - -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { - // Note: we still will buffer if retry is disabled (for transparent retries). - if cs.committed { - return - } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { - cs.commitAttemptLocked() - return - } - cs.buffer = append(cs.buffer, op) + // TODO(retry): on error, maybe retry (trailers-only). + return cs.attempt.trailer() } func (cs *clientStream) SendMsg(m interface{}) (err error) { - defer func() { - if err != nil && err != io.EOF { - // Call finish on the client stream for errors generated by this SendMsg - // call, as these indicate problems created by this client. (Transport - // errors are converted to an io.EOF error in csAttempt.sendMsg; the real - // error will be returned from RecvMsg eventually in that case, or be - // retried.) - cs.finish(err) - } - }() - if cs.sentLast { - return status.Errorf(codes.Internal, "SendMsg called after CloseSend") - } - if !cs.desc.ClientStreams { - cs.sentLast = true - } - data, err := encode(cs.codec, m) - if err != nil { - return err - } - compData, err := compress(data, cs.cp, cs.comp) - if err != nil { - return err - } - hdr, payload := msgHeader(data, compData) - // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) - } - msgBytes := data // Store the pointer before setting to nil. For binary logging. - op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. - m, data = nil, nil - return err - } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ - OnClientSide: true, - Message: msgBytes, - }) - } - return + // TODO(retry): buffer message for replaying if not committed. + return cs.attempt.sendMsg(m) } -func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { - // Call Header() to binary log header if it's not already logged. - cs.Header() - } - var recvInfo *payloadInfo - if cs.binlog != nil { - recvInfo = &payloadInfo{} - } - err := cs.withRetry(func(a *csAttempt) error { - return a.recvMsg(m, recvInfo) - }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ - OnClientSide: true, - Message: recvInfo.uncompressedBytes, - }) - } - if err != nil || !cs.desc.ServerStreams { - // err != nil or non-server-streaming indicates end of stream. - cs.finish(err) - - if cs.binlog != nil { - // finish will not log Trailer. Log Trailer here. 
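[editor's note] SendMsg above serializes, optionally compresses, and then prefixes the payload via msgHeader. On the wire, every gRPC message carries a five-byte prefix: one compressed-flag byte followed by a 4-byte big-endian length. A minimal sketch of that framing, independent of the grpc package:

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends the gRPC length-prefixed message header: 1 byte
// compressed flag followed by a 4-byte big-endian payload length.
func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5)
	if compressed {
		hdr[0] = 1
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}

func main() {
	msg := frame([]byte("hello"), false)
	fmt.Printf("% x\n", msg) // 00 00 00 00 05 68 65 6c 6c 6f
}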
- logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - cs.binlog.Log(logEntry) - } - } - return err +func (cs *clientStream) RecvMsg(m interface{}) (err error) { + // TODO(retry): maybe retry on error or commit attempt on success. + return cs.attempt.recvMsg(m) } func (cs *clientStream) CloseSend() error { - if cs.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? - return nil - } - cs.sentLast = true - op := func(a *csAttempt) error { - a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) - // Always return nil; io.EOF is the only error that might make sense - // instead, but there is no need to signal the client to call RecvMsg - // as the only use left for the stream after CloseSend is to call - // RecvMsg. This also matches historical behavior. - return nil - } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ - OnClientSide: true, - }) - } - // We never returned an error here for reasons. + cs.attempt.closeSend() return nil } @@ -784,21 +424,7 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true - cs.commitAttemptLocked() cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. - if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ - OnClientSide: true, - }) - } - if err == nil { - cs.retryThrottler.successfulRPC() - } if channelz.IsOn() { if err != nil { cs.cc.incrCallsFailed() @@ -806,51 +432,97 @@ func (cs *clientStream) finish(err error) { cs.cc.incrCallsSucceeded() } } - if cs.attempt != nil { - cs.attempt.finish(err) - } - // after functions all rely upon having a stream. - if cs.attempt.s != nil { - for _, o := range cs.opts { - o.after(cs.callInfo) - } + // TODO(retry): commit current attempt if necessary. + cs.attempt.finish(err) + for _, o := range cs.opts { + o.after(cs.c) } cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) context() context.Context { + return a.s.Context() +} + +func (a *csAttempt) header() (metadata.MD, error) { + return a.s.Header() +} + +func (a *csAttempt) trailer() metadata.MD { + return a.s.Trailer() +} + +func (a *csAttempt) sendMsg(m interface{}) (err error) { + // TODO Investigate how to signal the stats handling party. + // generate error stats if err != nil && err != io.EOF? cs := a.cs - if a.trInfo != nil { + defer func() { + // For non-client-streaming RPCs, we return nil instead of EOF on success + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + if err == io.EOF && !cs.desc.ClientStreams { + err = nil + } + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error below; the real error will be + // returned from RecvMsg eventually in that case, or be retried.) 
+ cs.finish(err) + } + }() + // TODO: Check cs.sentLast and error if we already ended the stream. + if EnableTracing { a.mu.Lock() if a.trInfo.tr != nil { a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } a.mu.Unlock() } - if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { - if !cs.desc.ClientStreams { - // For non-client-streaming RPCs, we return nil instead of EOF on error - // because the generated code requires it. finish is not called; RecvMsg() - // will call it with the stream's status independently. - return nil + data, err := encode(cs.codec, m) + if err != nil { + return err + } + compData, err := compress(data, cs.cp, cs.comp) + if err != nil { + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.c.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.c.maxSendMessageSize) + } + + if !cs.desc.ClientStreams { + cs.sentLast = true + } + err = a.t.Write(a.s, hdr, payload, &transport.Options{Last: !cs.desc.ClientStreams}) + if err == nil { + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payload, time.Now())) } - return io.EOF + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) - } - if channelz.IsOn() { - a.t.IncrMsgSent() - } - return nil + return io.EOF } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m interface{}) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { - payInfo = &payloadInfo{} + defer func() { + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + } + }() + var inPayload *stats.InPayload + if a.statsHandler != nil { + inPayload = &stats.InPayload{ + Client: true, + } } - if !a.decompSet { // Block until we receive headers containing received message encoding. if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { @@ -867,7 +539,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompSet = true } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, inPayload, a.decomp) if err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { @@ -877,23 +549,15 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } return toRPCErr(err) } - if a.trInfo != nil { + if EnableTracing { a.mu.Lock() if a.trInfo.tr != nil { a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength, - Length: len(payInfo.uncompressedBytes), - }) + if inPayload != nil { + a.statsHandler.HandleRPC(a.ctx, inPayload) } if channelz.IsOn() { a.t.IncrMsgRecv() @@ -902,9 +566,10 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { // Subsequent messages should be received by subsequent RecvMsg calls. 
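[editor's note] recvMsg above picks its decompressor by matching the peer's grpc-encoding header against registered encoding.Compressors. On the sending side the compressor is chosen per call; a hedged sketch, assuming the gzip compressor bundled with grpc-go (registered by importing google.golang.org/grpc/encoding/gzip) and a placeholder target address:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	cc, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// UseCompressor sets callHdr.SendCompress for this RPC; the server's
	// grpc-encoding response header then drives the decompressor choice in
	// recvMsg above.
	_, err = healthpb.NewHealthClient(cc).Check(ctx,
		&healthpb.HealthCheckRequest{}, grpc.UseCompressor("gzip"))
	if err != nil {
		log.Printf("rpc failed: %v", err)
	}
}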
return nil } + // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, nil, a.decomp) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } @@ -914,47 +579,39 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { return toRPCErr(err) } -func (a *csAttempt) finish(err error) { - a.mu.Lock() - if a.finished { - a.mu.Unlock() +func (a *csAttempt) closeSend() { + cs := a.cs + if cs.sentLast { return } - a.finished = true - if err == io.EOF { - // Ending a stream with EOF indicates a success. - err = nil - } - var tr metadata.MD - if a.s != nil { - a.t.CloseStream(a.s, err) - tr = a.s.Trailer() - } + cs.sentLast = true + cs.attempt.t.Write(cs.attempt.s, nil, nil, &transport.Options{Last: true}) + // We ignore errors from Write. Any error it would return would also be + // returned by a subsequent RecvMsg call, and the user is supposed to always + // finish the stream by calling RecvMsg until it returns err != nil. +} + +func (a *csAttempt) finish(err error) { + a.mu.Lock() + a.t.CloseStream(a.s, err) if a.done != nil { - br := false - if a.s != nil { - br = a.s.BytesReceived() - } a.done(balancer.DoneInfo{ Err: err, - Trailer: tr, - BytesSent: a.s != nil, - BytesReceived: br, - ServerLoad: balancerload.Parse(tr), + BytesSent: true, + BytesReceived: a.s.BytesReceived(), }) } if a.statsHandler != nil { end := &stats.End{ Client: true, - BeginTime: a.cs.beginTime, + BeginTime: a.beginTime, EndTime: time.Now(), - Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.cs.ctx, end) + a.statsHandler.HandleRPC(a.ctx, end) } - if a.trInfo != nil && a.trInfo.tr != nil { + if a.trInfo.tr != nil { if err == nil { a.trInfo.tr.LazyPrintf("RPC: [OK]") } else { @@ -967,302 +624,7 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, opts ...CallOption) (_ ClientStream, err error) { - ac.mu.Lock() - if ac.transport != t { - ac.mu.Unlock() - return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") - } - // transition to CONNECTING state when an attempt starts - if ac.state != connectivity.Connecting { - ac.updateConnectivityState(connectivity.Connecting) - ac.cc.handleSubConnStateChange(ac.acbw, ac.state) - } - ac.mu.Unlock() - - if t == nil { - // TODO: return RPC error here? - return nil, errors.New("transport provided is nil") - } - // defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct. - c := &callInfo{} - - for _, o := range opts { - if err := o.before(c); err != nil { - return nil, toRPCErr(err) - } - } - c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) - c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) - - // Possible context leak: - // The cancel function for the child context we create will only be called - // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if - // an error is generated by SendMsg. - // https://github.com/grpc/grpc-go/issues/1818. 
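[editor's note] csAttempt.finish above emits a stats.End (and a balancer.DoneInfo) for the attempt. Applications can observe these events through a stats.Handler; a minimal latency-logging sketch, assuming it is installed at dial time and the target address is a placeholder:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// latencyHandler logs the duration of each finished RPC using the
// BeginTime/EndTime carried by stats.End (emitted from csAttempt.finish).
type latencyHandler struct{}

func (latencyHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (latencyHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if end, ok := s.(*stats.End); ok {
		log.Printf("rpc done in %v (err=%v)", end.EndTime.Sub(end.BeginTime), end.Error)
	}
}
func (latencyHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (latencyHandler) HandleConn(context.Context, stats.ConnStats)                       {}

func main() {
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithStatsHandler(latencyHandler{}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
}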
- ctx, cancel := context.WithCancel(ctx) - defer func() { - if err != nil { - cancel() - } - }() - - if err := setCallInfoCodec(c); err != nil { - return nil, err - } - - callHdr := &transport.CallHdr{ - Host: ac.cc.authority, - Method: method, - ContentSubtype: c.contentSubtype, - } - - // Set our outgoing compression according to the UseCompressor CallOption, if - // set. In that case, also find the compressor from the encoding package. - // Otherwise, use the compressor configured by the WithCompressor DialOption, - // if set. - var cp Compressor - var comp encoding.Compressor - if ct := c.compressorType; ct != "" { - callHdr.SendCompress = ct - if ct != encoding.Identity { - comp = encoding.GetCompressor(ct) - if comp == nil { - return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) - } - } - } else if ac.cc.dopts.cp != nil { - callHdr.SendCompress = ac.cc.dopts.cp.Type() - cp = ac.cc.dopts.cp - } - if c.creds != nil { - callHdr.Creds = c.creds - } - - as := &addrConnStream{ - callHdr: callHdr, - ac: ac, - ctx: ctx, - cancel: cancel, - opts: opts, - callInfo: c, - desc: desc, - codec: c.codec, - cp: cp, - comp: comp, - t: t, - } - - as.callInfo.stream = as - s, err := as.t.NewStream(as.ctx, as.callHdr) - if err != nil { - err = toRPCErr(err) - return nil, err - } - as.s = s - as.p = &parser{r: s} - ac.incrCallsStarted() - if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. - go func() { - select { - case <-ac.ctx.Done(): - as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) - case <-ctx.Done(): - as.finish(toRPCErr(ctx.Err())) - } - }() - } - return as, nil -} - -type addrConnStream struct { - s *transport.Stream - ac *addrConn - callHdr *transport.CallHdr - cancel context.CancelFunc - opts []CallOption - callInfo *callInfo - t transport.ClientTransport - ctx context.Context - sentLast bool - desc *StreamDesc - codec baseCodec - cp Compressor - comp encoding.Compressor - decompSet bool - dc Decompressor - decomp encoding.Compressor - p *parser - mu sync.Mutex - finished bool -} - -func (as *addrConnStream) Header() (metadata.MD, error) { - m, err := as.s.Header() - if err != nil { - as.finish(toRPCErr(err)) - } - return m, err -} - -func (as *addrConnStream) Trailer() metadata.MD { - return as.s.Trailer() -} - -func (as *addrConnStream) CloseSend() error { - if as.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? - return nil - } - as.sentLast = true - - as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) - // Always return nil; io.EOF is the only error that might make sense - // instead, but there is no need to signal the client to call RecvMsg - // as the only use left for the stream after CloseSend is to call - // RecvMsg. This also matches historical behavior. - return nil -} - -func (as *addrConnStream) Context() context.Context { - return as.s.Context() -} - -func (as *addrConnStream) SendMsg(m interface{}) (err error) { - defer func() { - if err != nil && err != io.EOF { - // Call finish on the client stream for errors generated by this SendMsg - // call, as these indicate problems created by this client. 
(Transport - // errors are converted to an io.EOF error in csAttempt.sendMsg; the real - // error will be returned from RecvMsg eventually in that case, or be - // retried.) - as.finish(err) - } - }() - if as.sentLast { - return status.Errorf(codes.Internal, "SendMsg called after CloseSend") - } - if !as.desc.ClientStreams { - as.sentLast = true - } - data, err := encode(as.codec, m) - if err != nil { - return err - } - compData, err := compress(data, as.cp, as.comp) - if err != nil { - return err - } - hdr, payld := msgHeader(data, compData) - // TODO(dfawley): should we be checking len(data) instead? - if len(payld) > *as.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) - } - - if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { - if !as.desc.ClientStreams { - // For non-client-streaming RPCs, we return nil instead of EOF on error - // because the generated code requires it. finish is not called; RecvMsg() - // will call it with the stream's status independently. - return nil - } - return io.EOF - } - - if channelz.IsOn() { - as.t.IncrMsgSent() - } - return nil -} - -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { - defer func() { - if err != nil || !as.desc.ServerStreams { - // err != nil or non-server-streaming indicates end of stream. - as.finish(err) - } - }() - - if !as.decompSet { - // Block until we receive headers containing received message encoding. - if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { - if as.dc == nil || as.dc.Type() != ct { - // No configured decompressor, or it does not match the incoming - // message encoding; attempt to find a registered compressor that does. - as.dc = nil - as.decomp = encoding.GetCompressor(ct) - } - } else { - // No compression is used; disable our decompressor. - as.dc = nil - } - // Only initialize this state once per stream. - as.decompSet = true - } - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err != nil { - if err == io.EOF { - if statusErr := as.s.Status().Err(); statusErr != nil { - return statusErr - } - return io.EOF // indicates successful end of stream. - } - return toRPCErr(err) - } - - if channelz.IsOn() { - as.t.IncrMsgRecv() - } - if as.desc.ServerStreams { - // Subsequent messages should be received by subsequent RecvMsg calls. - return nil - } - - // Special handling for non-server-stream rpcs. - // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { - return as.s.Status().Err() // non-server streaming Recv returns nil on success - } - return toRPCErr(err) -} - -func (as *addrConnStream) finish(err error) { - as.mu.Lock() - if as.finished { - as.mu.Unlock() - return - } - as.finished = true - if err == io.EOF { - // Ending a stream with EOF indicates a success. - err = nil - } - if as.s != nil { - as.t.CloseStream(as.s, err) - } - - if err != nil { - as.ac.incrCallsFailed() - } else { - as.ac.incrCallsSucceeded() - } - as.cancel() - as.mu.Unlock() -} - -// ServerStream defines the server-side behavior of a streaming RPC. -// -// All errors returned from ServerStream methods are compatible with the -// status package. 
+// ServerStream defines the interface a server stream has to satisfy. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. @@ -1278,32 +640,7 @@ type ServerStream interface { // SetTrailer sets the trailer metadata which will be sent with the RPC status. // When called more than once, all the provided metadata will be merged. SetTrailer(metadata.MD) - // Context returns the context for this stream. - Context() context.Context - // SendMsg sends a message. On error, SendMsg aborts the stream and the - // error is returned directly. - // - // SendMsg blocks until: - // - There is sufficient flow control to schedule m with the transport, or - // - The stream is done, or - // - The stream breaks. - // - // SendMsg does not wait until the message is received by the client. An - // untimely stream closure may result in lost messages. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not safe - // to call SendMsg on the same stream in different goroutines. - SendMsg(m interface{}) error - // RecvMsg blocks until it receives a message into m or the stream is - // done. It returns io.EOF when the client has performed a CloseSend. On - // any non-EOF error, the stream is aborted and the error contains the - // RPC status. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not - // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + Stream } // serverStream implements a server side Stream. @@ -1325,15 +662,6 @@ type serverStream struct { statsHandler stats.Handler - binlog *binarylog.MethodLogger - // serverHeaderBinlogged indicates whether server header has been logged. It - // will happen when one of the following two happens: stream.SendHeader(), - // stream.Send(). - // - // It's only checked in send and sendHeader, doesn't need to be - // synchronized. - serverHeaderBinlogged bool - mu sync.Mutex // protects trInfo.tr after the service handler runs. } @@ -1349,15 +677,7 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { - h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ - Header: h, - }) - ss.serverHeaderBinlogged = true - } - return err + return ss.t.WriteHeader(ss.s, md) } func (ss *serverStream) SetTrailer(md metadata.MD) { @@ -1384,12 +704,6 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) ss.t.WriteStatus(ss.s, st) - // Non-user specified status was sent out. This should be an error - // case (as a server side Cancel maybe). - // - // This is not handled specifically now. User will return a final - // status from the service handler, we will log that error instead. - // This behavior is similar to an interceptor. 
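[editor's note] The pared-down ServerStream interface in this hunk is what generated streaming stubs wrap on the server side. For reference, a hedged sketch of a handler that exercises the contract directly (RecvMsg until io.EOF on client half-close, SendMsg for each reply); wiring it into a grpc.ServiceDesc is omitted, and the message types are borrowed from the health proto purely for illustration:

package main

import (
	"io"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// echoStream has the shape of a grpc.StreamHandler: it reads requests until
// the client calls CloseSend (RecvMsg returns io.EOF) and echoes a reply for
// each one with SendMsg.
func echoStream(srv interface{}, stream grpc.ServerStream) error {
	for {
		in := new(healthpb.HealthCheckRequest)
		if err := stream.RecvMsg(in); err != nil {
			if err == io.EOF {
				return nil // client half-closed the stream
			}
			return err
		}
		if err := stream.SendMsg(&healthpb.HealthCheckResponse{}); err != nil {
			return err
		}
	}
}

func main() { _ = grpc.StreamHandler(echoStream) }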
} if channelz.IsOn() && err == nil { ss.t.IncrMsgSent() @@ -1411,18 +725,6 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if ss.binlog != nil { - if !ss.serverHeaderBinlogged { - h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ - Header: h, - }) - ss.serverHeaderBinlogged = true - } - ss.binlog.Log(&binarylog.ServerMessage{ - Message: data, - }) - } if ss.statsHandler != nil { ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) } @@ -1446,26 +748,17 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) ss.t.WriteStatus(ss.s, st) - // Non-user specified status was sent out. This should be an error - // case (as a server side Cancel maybe). - // - // This is not handled specifically now. User will return a final - // status from the service handler, we will log that error instead. - // This behavior is similar to an interceptor. } if channelz.IsOn() && err == nil { ss.t.IncrMsgRecv() } }() - var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { - payInfo = &payloadInfo{} + var inPayload *stats.InPayload + if ss.statsHandler != nil { + inPayload = &stats.InPayload{} } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil { if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) - } return err } if err == io.ErrUnexpectedEOF { @@ -1473,20 +766,8 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength, - Length: len(payInfo.uncompressedBytes), - }) - } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, - }) + if inPayload != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), inPayload) } return nil } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index 584360f681..22b8fb50de 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -21,7 +21,7 @@ package tap import ( - "context" + "golang.org/x/net/context" ) // Info defines the relevant information needed by the handles. diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 0a57b99948..c1c96dedcb 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -24,7 +24,6 @@ import ( "io" "net" "strings" - "sync" "time" "golang.org/x/net/trace" @@ -54,25 +53,13 @@ type traceInfo struct { } // firstLine is the first line of an RPC trace. -// It may be mutated after construction; remoteAddr specifically may change -// during client-side use. 
type firstLine struct { - mu sync.Mutex client bool // whether this is a client (outgoing) RPC remoteAddr net.Addr deadline time.Duration // may be zero } -func (f *firstLine) SetRemoteAddr(addr net.Addr) { - f.mu.Lock() - f.remoteAddr = addr - f.mu.Unlock() -} - func (f *firstLine) String() string { - f.mu.Lock() - defer f.mu.Unlock() - var line bytes.Buffer io.WriteString(&line, "RPC: ") if f.client { diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/transport/bdp_estimator.go similarity index 94% rename from vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go rename to vendor/google.golang.org/grpc/transport/bdp_estimator.go index 070680edba..63cd2627c8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go +++ b/vendor/google.golang.org/grpc/transport/bdp_estimator.go @@ -24,10 +24,9 @@ import ( ) const ( - // bdpLimit is the maximum value the flow control windows will be increased - // to. TCP typically limits this to 4MB, but some systems go up to 16MB. - // Since this is only a limit, it is safe to make it optimistic. - bdpLimit = (1 << 20) * 16 + // bdpLimit is the maximum value the flow control windows + // will be increased to. + bdpLimit = (1 << 20) * 4 // alpha is a constant factor used to keep a moving average // of RTTs. alpha = 0.9 diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/transport/controlbuf.go similarity index 81% rename from vendor/google.golang.org/grpc/internal/transport/controlbuf.go rename to vendor/google.golang.org/grpc/transport/controlbuf.go index 204ba1588b..5c5891a11b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/transport/controlbuf.go @@ -104,6 +104,7 @@ type headerFrame struct { type cleanupStream struct { streamID uint32 + idPtr *uint32 rst bool rstCode http2.ErrCode onWrite func() @@ -137,6 +138,9 @@ type outgoingSettings struct { ss []http2.Setting } +type settingsAck struct { +} + type incomingGoAway struct { } @@ -225,12 +229,6 @@ func (l *outStreamList) dequeue() *outStream { return b } -// controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { ch chan struct{} done <-chan struct{} @@ -281,21 +279,6 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{ return true, nil } -// Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - func (c *controlBuffer) get(block bool) (interface{}, error) { for { c.mu.Lock() @@ -352,29 +335,13 @@ const ( serverSide ) -// Loopy receives frames from the control buffer. -// Each frame is handled individually; most of the work done by loopy goes -// into handling data frames. 
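[editor's note] The bdpLimit reduction above (16MB back to 4MB on the 1.13 side) only matters while dynamic window estimation is active; supplying explicit window sizes at dial time pins the windows and disables BDP growth, as newHTTP2Client checks later in this patch. A client-side sketch with illustrative sizes and a placeholder address:

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Window sizes >= the HTTP/2 default of 65535 bytes turn off the BDP
	// estimator and fix the per-stream and per-connection windows.
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithInitialWindowSize(1<<20),     // 1MB per stream
		grpc.WithInitialConnWindowSize(1<<20), // 1MB per connection
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
}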
Loopy maintains a queue of active streams, and each -// stream maintains a queue of data frames; as loopy receives data frames -// it gets added to the queue of the relevant stream. -// Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While -// processing a stream, loopy writes out data bytes from this stream capped by the min -// of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { - side side - cbuf *controlBuffer - sendQuota uint32 - oiws uint32 // outbound initial window size. - // estdStreams is map of all established streams that are not cleaned-up yet. - // On client-side, this is all streams whose headers were sent out. - // On server-side, this is all streams whose headers were received. - estdStreams map[uint32]*outStream // Established streams. - // activeStreams is a linked-list of all streams that have data to send and some - // stream-level flow control quota. - // Each of these streams internally have a list of data items(and perhaps trailers - // on the server-side) to be sent out. - activeStreams *outStreamList + side side + cbuf *controlBuffer + sendQuota uint32 + oiws uint32 // outbound initial window size. + estdStreams map[uint32]*outStream // Established streams. + activeStreams *outStreamList // Streams that are sending data. framer *framer hBuf *bytes.Buffer // The buffer for HPACK encoding. hEnc *hpack.Encoder // HPACK encoder. @@ -405,21 +372,6 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato const minBatchSize = 1000 // run should be run in a separate goroutine. -// It reads control frames from controlBuf and processes them by: -// 1. Updating loopy's internal state, or/and -// 2. Writing out HTTP2 frames on the wire. -// -// Loopy keeps all active streams with data to send in a linked-list. -// All streams in the activeStreams linked-list must have both: -// 1. Data to send, and -// 2. Stream level flow control quota available. -// -// In each iteration of run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the activeStreams linked-list. -// This results in writing of HTTP2 frames into an underlying write buffer. -// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. -// As an optimization, to increase the batch size for each flush, loopy yields the processor, once -// if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { defer func() { if err == ErrConnClosing { @@ -744,30 +696,21 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error { return nil } -// processData removes the first stream from active streams, writes out at most 16KB -// of its data and then puts it at the end of activeStreams if there's still more data -// to be sent and stream has some stream-level flow control. func (l *loopyWriter) processData() (bool, error) { if l.sendQuota == 0 { return true, nil } - str := l.activeStreams.dequeue() // Remove the first stream. + str := l.activeStreams.dequeue() if str == nil { return true, nil } - dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. - // A data item is represented by a dataFrame, since it later translates into - // multiple HTTP2 data frames. 
- // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possilbe HTTP2 frame size. - - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + dataItem := str.itl.peek().(*dataFrame) + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } - str.itl.dequeue() // remove the empty data item from stream + str.itl.dequeue() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -796,20 +739,21 @@ func (l *loopyWriter) processData() (bool, error) { if len(buf) < size { size = len(buf) } - if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { str.state = waitingOnStreamQuota return false, nil } else if strQuota < size { size = strQuota } - if l.sendQuota < uint32(size) { // connection-level flow control. + if l.sendQuota < uint32(size) { size = int(l.sendQuota) } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool - // If this is the last data message on this stream and all of it can be written in this iteration. + // This last data message on this stream and all + // of it can be written in this go. if dataItem.endStream && size == len(buf) { // buf contains either data or it contains header but data is empty. if idx == 1 || len(dataItem.d) == 0 { diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/transport/flowcontrol.go similarity index 86% rename from vendor/google.golang.org/grpc/internal/transport/flowcontrol.go rename to vendor/google.golang.org/grpc/transport/flowcontrol.go index 5ea997a7e4..bbf98b6f5e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/transport/flowcontrol.go @@ -23,6 +23,30 @@ import ( "math" "sync" "sync/atomic" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. 
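[editor's note] processData above caps each write by three independent limits: the HTTP/2 maximum frame length, the stream-level quota (oiws minus bytes outstanding), and the connection-level sendQuota. A small sketch of that min-of-three clamping, with made-up numbers:

package main

import "fmt"

// frameSize mirrors the clamping in processData: a data frame may carry at
// most http2MaxFrameLen bytes, and no more than the stream-level or
// connection-level flow-control quota allows.
func frameSize(pending, http2MaxFrameLen, streamQuota, connQuota int) int {
	size := http2MaxFrameLen
	if pending < size {
		size = pending
	}
	if streamQuota < size {
		size = streamQuota
	}
	if connQuota < size {
		size = connQuota
	}
	return size
}

func main() {
	// 64KB pending, 16KB max frame, 40KB stream quota, 8KB connection quota.
	fmt.Println(frameSize(64<<10, 16<<10, 40<<10, 8<<10)) // 8192
}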
+ defaultWriteQuota = 64 * 1024 ) // writeQuota is a soft limit on the amount of data a stream can diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go new file mode 100644 index 0000000000..5babcf9b87 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/go16.go @@ -0,0 +1,51 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "net" + "net/http" + + "google.golang.org/grpc/codes" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} + +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} + +// contextFromRequest returns a background context. +func contextFromRequest(r *http.Request) context.Context { + return context.Background() +} diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go new file mode 100644 index 0000000000..b7fa6bdb9c --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/go17.go @@ -0,0 +1,52 @@ +// +build go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "net" + "net/http" + + "google.golang.org/grpc/codes" + + netctx "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} + +// ContextErr converts the error from context package into a StreamError. 
+func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled, netctx.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} + +// contextFromRequest returns a context from the HTTP Request. +func contextFromRequest(r *http.Request) context.Context { + return r.Context() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go similarity index 90% rename from vendor/google.golang.org/grpc/internal/transport/handler_server.go rename to vendor/google.golang.org/grpc/transport/handler_server.go index f2de84d43a..f71b748217 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "context" "errors" "fmt" "io" @@ -35,6 +34,7 @@ import ( "time" "github.com/golang/protobuf/proto" + "golang.org/x/net/context" "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -63,6 +63,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta if _, ok := w.(http.Flusher); !ok { return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") } + if _, ok := w.(http.CloseNotifier); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") + } st := &serverHandlerTransport{ rw: w, @@ -77,7 +80,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err) } st.timeoutSet = true st.timeout = to @@ -95,7 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + return nil, streamErrorf(codes.Internal, "malformed binary metadata: %v", err) } metakv = append(metakv, k, v) } @@ -173,11 +176,17 @@ func (a strAddr) String() string { return string(a) } // do runs fn in the ServeHTTP goroutine. func (ht *serverHandlerTransport) do(fn func()) error { + // Avoid a panic writing to closed channel. Imperfect but maybe good enough. 
select { case <-ht.closedCh: return ErrConnClosing - case ht.writes <- fn: - return nil + default: + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } } } @@ -228,8 +237,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro if ht.stats != nil { ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) } + ht.Close() + close(ht.writes) } - ht.Close() return err } @@ -264,7 +274,9 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts ht.writeCommonHeaders(s) ht.rw.Write(hdr) ht.rw.Write(data) - ht.rw.(http.Flusher).Flush() + if !opts.Delay { + ht.rw.(http.Flusher).Flush() + } }) } @@ -297,7 +309,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { // With this transport type there will be exactly 1 stream: this HTTP request. - ctx := ht.req.Context() + ctx := contextFromRequest(ht.req) var cancel context.CancelFunc if ht.timeoutSet { ctx, cancel = context.WithTimeout(ctx, ht.timeout) @@ -305,16 +317,22 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace ctx, cancel = context.WithCancel(ctx) } - // requestOver is closed when the status has been written via WriteStatus. + // requestOver is closed when either the request's context is done + // or the status has been written via WriteStatus. requestOver := make(chan struct{}) + + // clientGone receives a single value if peer is gone, either + // because the underlying connection is dead or because the + // peer sends an http2 RST_STREAM. + clientGone := ht.rw.(http.CloseNotifier).CloseNotify() go func() { select { case <-requestOver: + return case <-ht.closedCh: - case <-ht.req.Context().Done(): + case <-clientGone: } cancel() - ht.Close() }() req := ht.req @@ -391,7 +409,10 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace func (ht *serverHandlerTransport) runStream() { for { select { - case fn := <-ht.writes: + case fn, ok := <-ht.writes: + if !ok { + return + } fn() case <-ht.closedCh: return @@ -413,18 +434,18 @@ func (ht *serverHandlerTransport) Drain() { // * io.EOF // * io.ErrUnexpectedEOF // * of type transport.ConnectionError -// * an error from the status package +// * of type transport.StreamError func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err } if se, ok := err.(http2.StreamError); ok { if code, ok := http2ErrConvTab[se.Code]; ok { - return status.Error(code, se.Error()) + return StreamError{ + Code: code, + Desc: se.Error(), + } } } - if strings.Contains(err.Error(), "body closed by handler") { - return status.Error(codes.Canceled, err.Error()) - } return connectionErrorf(true, err, err.Error()) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go similarity index 79% rename from vendor/google.golang.org/grpc/internal/transport/http2_client.go rename to vendor/google.golang.org/grpc/transport/http2_client.go index 9dee6db61d..eaf007eb0a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -19,24 +19,21 @@ package transport import ( - "context" - "fmt" "io" "math" "net" - "strconv" "strings" "sync" "sync/atomic" "time" + "golang.org/x/net/context" "golang.org/x/net/http2" 
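[editor's note] The serverHandlerTransport above backs grpc.Server's ServeHTTP integration, where a stock net/http server with HTTP/2 enabled feeds requests into gRPC. A hedged sketch of that wiring; the listen address and TLS file names are placeholders:

package main

import (
	"log"
	"net/http"
	"strings"

	"google.golang.org/grpc"
)

func main() {
	grpcServer := grpc.NewServer()
	// Register services on grpcServer here.

	mux := http.NewServeMux()
	// Route gRPC traffic (HTTP/2 with content-type application/grpc) into the
	// handler-based transport; everything else goes to the normal mux.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r) // serverHandlerTransport path
			return
		}
		mux.ServeHTTP(w, r)
	})

	// ListenAndServeTLS negotiates HTTP/2, which gRPC requires.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", handler))
}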
"golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -75,7 +72,7 @@ type http2Client struct { isSecure bool - perRPCCreds []credentials.PerRPCCredentials + creds []credentials.PerRPCCredentials // Boolean to keep track of reading activity on transport. // 1 is true and 0 is false. @@ -87,14 +84,11 @@ type http2Client struct { initialWindowSize int32 - // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE - maxSendHeaderListSize *uint32 - bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon + // onSuccess is a callback that client transport calls upon // receiving server preface to signal that a succefull HTTP2 // connection was established. - onPrefaceReceipt func() + onSuccess func() maxConcurrentStreams uint32 streamQuota int64 @@ -113,17 +107,26 @@ type http2Client struct { // Fields below are for channelz metric collection. channelzID int64 // channelz unique identification number - czData *channelzData - - onGoAway func(GoAwayReason) - onClose func() + czmu sync.RWMutex + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // The number of streams that have ended successfully by receiving EoS bit set + // frame from server. + streamsSucceeded int64 + streamsFailed int64 + lastStreamCreated time.Time + msgSent int64 + msgRecv int64 + lastMsgSent time.Time + lastMsgRecv time.Time } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { if fn != nil { return fn(ctx, addr) } - return (&net.Dialer{}).DialContext(ctx, "tcp", addr) + return dialContext(ctx, "tcp", addr) } func isTemporary(err error) bool { @@ -145,7 +148,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ ClientTransport, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -167,6 +170,18 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne conn.Close() } }(conn) + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + if creds := opts.TransportCredentials; creds != nil { + scheme = "https" + conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + isSecure = true + } kp := opts.KeepaliveParams // Validate keepalive parameters. 
if kp.Time == 0 { @@ -175,47 +190,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne if kp.Timeout == 0 { kp.Timeout = defaultClientKeepaliveTimeout } - keepaliveEnabled := false - if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { - return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) - } - keepaliveEnabled = true - } - var ( - isSecure bool - authInfo credentials.AuthInfo - ) - transportCreds := opts.TransportCredentials - perRPCCreds := opts.PerRPCCredentials - - if b := opts.CredsBundle; b != nil { - if t := b.TransportCredentials(); t != nil { - transportCreds = t - } - if t := b.PerRPCCredentials(); t != nil { - perRPCCreds = append(perRPCCreds, t) - } - } - if transportCreds != nil { - scheme = "https" - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) - if err != nil { - return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) - } - isSecure = true - } dynamicWindow := true icwz := int32(initialWindowSize) if opts.InitialConnWindowSize >= defaultWindowSize { icwz = opts.InitialConnWindowSize dynamicWindow = false } - writeBufSize := opts.WriteBufferSize - readBufSize := opts.ReadBufferSize - maxHeaderListSize := defaultClientMaxHeaderListSize - if opts.MaxHeaderListSize != nil { - maxHeaderListSize = *opts.MaxHeaderListSize + writeBufSize := defaultWriteBufSize + if opts.WriteBufferSize > 0 { + writeBufSize = opts.WriteBufferSize + } + readBufSize := defaultReadBufSize + if opts.ReadBufferSize > 0 { + readBufSize = opts.ReadBufferSize } t := &http2Client{ ctx: ctx, @@ -231,24 +218,20 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne writerDone: make(chan struct{}), goAway: make(chan struct{}), awakenKeepalive: make(chan struct{}, 1), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), isSecure: isSecure, - perRPCCreds: perRPCCreds, + creds: opts.PerRPCCredentials, kp: kp, statsHandler: opts.StatsHandler, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, + onSuccess: onSuccess, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), - czData: new(channelzData), - onGoAway: onGoAway, - onClose: onClose, - keepaliveEnabled: keepaliveEnabled, } t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { @@ -275,16 +258,16 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne t.statsHandler.HandleConn(t.ctx, connBegin) } if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, "") } - if t.keepaliveEnabled { + if t.kp.Time != infinity { + t.keepaliveEnabled = true go t.keepalive() } // Start the reader goroutine for incoming message. Each transport has // a dedicated goroutine which reads HTTP2 frame from network. Then it // dispatches the frame to the corresponding stream entity. go t.reader() - // Send connection preface to server. 
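[editor's note] The keepalive defaults filled in above (infinite Time, 20s Timeout) can be overridden per connection; a minimal client-side sketch using the keepalive package, with illustrative values and a placeholder address:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second, // ping after 30s of inactivity
			Timeout:             10 * time.Second, // wait 10s for the ping ack
			PermitWithoutStream: true,             // ping even with no active RPCs
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
}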
n, err := t.conn.Write(clientPreface) if err != nil { @@ -295,21 +278,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne t.Close() return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) } - var ss []http2.Setting - if t.initialWindowSize != defaultWindowSize { - ss = append(ss, http2.Setting{ + err = t.framer.fr.WriteSettings(http2.Setting{ ID: http2.SettingInitialWindowSize, Val: uint32(t.initialWindowSize), }) + } else { + err = t.framer.fr.WriteSettings() } - if opts.MaxHeaderListSize != nil { - ss = append(ss, http2.Setting{ - ID: http2.SettingMaxHeaderListSize, - Val: *opts.MaxHeaderListSize, - }) - } - err = t.framer.fr.WriteSettings(ss...) if err != nil { t.Close() return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) @@ -321,10 +297,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) } } - - if err := t.framer.writer.Flush(); err != nil { - return nil, err - } + t.framer.writer.Flush() go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() @@ -364,9 +337,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, - closeStream: func(err error) { - t.CloseStream(s, err) - }, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -409,9 +379,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) - if callHdr.PreviousAttempts > 0 { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) - } if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) @@ -419,7 +386,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. - timeout := time.Until(dl) + timeout := dl.Sub(time.Now()) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) } for k, v := range authData { @@ -475,7 +442,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) func (t *http2Client) createAudience(callHdr *CallHdr) string { // Create an audience string only if needed. - if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { + if len(t.creds) == 0 && callHdr.Creds == nil { return "" } // Construct URI required to get auth request metadata. 
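The createHeaderFields hunk above replaces time.Until(dl) with dl.Sub(time.Now()) when encoding grpc-timeout; the two expressions are equivalent, time.Until is simply a Go 1.8+ convenience that the older vendored code avoids. A tiny illustration, not part of the patch:

package main

import (
    "fmt"
    "time"
)

func main() {
    dl := time.Now().Add(250 * time.Millisecond)
    // time.Until(dl) is shorthand for dl.Sub(time.Now()).
    a := time.Until(dl)
    b := dl.Sub(time.Now())
    // The difference is only the nanoseconds elapsed between the two calls.
    fmt.Println(a-b < time.Millisecond) // true
}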
@@ -490,14 +457,14 @@ func (t *http2Client) createAudience(callHdr *CallHdr) string { func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { authData := map[string]string{} - for _, c := range t.perRPCCreds { + for _, c := range t.creds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { if _, ok := status.FromError(err); ok { return nil, err } - return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + return nil, streamErrorf(codes.Unauthenticated, "transport: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2. @@ -515,11 +482,11 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // options, then both sets of credentials will be applied. if callCreds := callHdr.Creds; callCreds != nil { if !t.isSecure && callCreds.RequireTransportSecurity() { - return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + return nil, streamErrorf(codes.Internal, "transport: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2 @@ -571,8 +538,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } t.activeStreams[id] = s if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + t.czmu.Lock() + t.streamsStarted++ + t.lastStreamCreated = time.Now() + t.czmu.Unlock() } var sendPing bool // If the number of active streams change from 0 to 1, then check if keepalive @@ -621,40 +590,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } return true } - var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { - if t.maxSendHeaderListSize == nil { - return true - } - hdrFrame := it.(*headerFrame) - var sz int64 - for _, f := range hdrFrame.hf { - if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) - return false - } - } - return true - } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { - if !checkForStreamQuota(it) { - return false - } - if !checkForHeaderListSize(it) { - return false - } - return true - }, hdr) + success, err := t.controlBuf.executeAndPut(checkForStreamQuota, hdr) if err != nil { return nil, err } if success { break } - if hdrListSizeErr != nil { - return nil, hdrListSizeErr - } firstTry = false select { case <-ch: @@ -690,15 +633,13 @@ func (t *http2Client) CloseStream(s *Stream, err error) { rst = true rstCode = http2.ErrCodeCancel } - t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) + t.closeStream(s, err, rst, rstCode, nil, nil, false) } func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { - // If it was already done, return. If multiple closeStream calls - // happen simultaneously, wait for the first to finish. 
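getTrAuthData above walks every configured credentials.PerRPCCredentials, calls GetRequestMetadata, and folds the result into the outgoing headers (lower-cased, since capital header names are illegal in HTTP/2), while getCallAuthData additionally enforces RequireTransportSecurity. A sketch of a PerRPCCredentials implementation that such a loop would consume; the type name and token value are made up:

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc/credentials"
)

// staticToken is an illustrative credentials.PerRPCCredentials that attaches
// a fixed bearer token to every RPC.
type staticToken struct{ token string }

// GetRequestMetadata returns the per-RPC headers to merge into the request.
func (s staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
    return map[string]string{"authorization": "Bearer " + s.token}, nil
}

// RequireTransportSecurity is what getCallAuthData checks before allowing the
// credentials onto an insecure connection.
func (s staticToken) RequireTransportSecurity() bool { return true }

var _ credentials.PerRPCCredentials = staticToken{}

func main() {
    md, _ := staticToken{token: "example"}.GetRequestMetadata(context.Background())
    fmt.Println(md)
}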
- <-s.done + // If it was already done, return. return } // status and trailers can be updated here without any synchronization because the stream goroutine will @@ -712,9 +653,10 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // This will unblock reads eventually. s.write(recvMsg{err: err}) } + // This will unblock write. + close(s.done) // If headerChan isn't closed, then close it. if atomic.SwapUint32(&s.headerDone, 1) == 0 { - s.noHeaders = true close(s.headerChan) } cleanup := &cleanupStream{ @@ -726,11 +668,13 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. } t.mu.Unlock() if channelz.IsOn() { + t.czmu.Lock() if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) + t.streamsSucceeded++ } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) + t.streamsFailed++ } + t.czmu.Unlock() } }, rst: rst, @@ -747,17 +691,11 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. return true } t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) - // This will unblock write. - close(s.done) } // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. func (t *http2Client) Close() error { t.mu.Lock() // Make sure we only Close once. @@ -777,7 +715,7 @@ func (t *http2Client) Close() error { } // Notify all active streams. for _, s := range streams { - t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, nil, nil, false) } if t.statsHandler != nil { connEnd := &stats.ConnEnd{ @@ -785,7 +723,6 @@ func (t *http2Client) Close() error { } t.statsHandler.HandleConn(t.ctx, connEnd) } - t.onClose() return err } @@ -972,13 +909,6 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) statusCode = codes.Unknown } - if statusCode == codes.Canceled { - // Our deadline was already exceeded, and that was likely the cause of - // this cancelation. Alter the status code accordingly. 
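Both the old and new closeStream guard close(s.headerChan) with atomic.SwapUint32(&s.headerDone, 1), so racing callers close the channel at most once. A self-contained sketch of that close-once idiom, stdlib only:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

type stream struct {
    headerDone uint32
    headerChan chan struct{}
}

// closeHeaderChan closes headerChan exactly once, no matter how many
// goroutines race to call it: only the first SwapUint32 observes 0.
func (s *stream) closeHeaderChan() {
    if atomic.SwapUint32(&s.headerDone, 1) == 0 {
        close(s.headerChan)
    }
}

func main() {
    s := &stream{headerChan: make(chan struct{})}
    var wg sync.WaitGroup
    for i := 0; i < 8; i++ {
        wg.Add(1)
        go func() { defer wg.Done(); s.closeHeaderChan() }()
    }
    wg.Wait()
    <-s.headerChan
    fmt.Println("closed once")
}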
- if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) { - statusCode = codes.DeadlineExceeded - } - } t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) } @@ -988,20 +918,13 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } var maxStreams *uint32 var ss []http2.Setting - var updateFuncs []func() f.ForeachSetting(func(s http2.Setting) error { - switch s.ID { - case http2.SettingMaxConcurrentStreams: + if s.ID == http2.SettingMaxConcurrentStreams { maxStreams = new(uint32) *maxStreams = s.Val - case http2.SettingMaxHeaderListSize: - updateFuncs = append(updateFuncs, func() { - t.maxSendHeaderListSize = new(uint32) - *t.maxSendHeaderListSize = s.Val - }) - default: - ss = append(ss, s) + return nil } + ss = append(ss, s) return nil }) if isFirst && maxStreams == nil { @@ -1011,24 +934,21 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { sf := &incomingSettings{ ss: ss, } - if maxStreams != nil { - updateStreamQuota := func() { - delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) - t.maxConcurrentStreams = *maxStreams - t.streamQuota += delta - if delta > 0 && t.waitingStreams > 0 { - close(t.streamsQuotaAvailable) // wake all of them up. - t.streamsQuotaAvailable = make(chan struct{}, 1) - } - } - updateFuncs = append(updateFuncs, updateStreamQuota) + if maxStreams == nil { + t.controlBuf.put(sf) + return } - t.controlBuf.executeAndPut(func(interface{}) bool { - for _, f := range updateFuncs { - f() + updateStreamQuota := func(interface{}) bool { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) } return true - }, sf) + } + t.controlBuf.executeAndPut(updateStreamQuota, sf) } func (t *http2Client) handlePing(f *http2.PingFrame) { @@ -1082,9 +1002,6 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { close(t.goAway) t.state = draining t.controlBuf.put(&incomingGoAway{}) - - // This has to be a new goroutine because we're still using the current goroutine to read in the transport. - t.onGoAway(t.goAwayReason) } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. @@ -1140,27 +1057,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if !ok { return } - endStream := frame.StreamEnded() atomic.StoreUint32(&s.bytesReceived, 1) - initialHeader := atomic.SwapUint32(&s.headerDone, 1) == 0 - - if !initialHeader && !endStream { - // As specified by RFC 7540, a HEADERS frame (and associated CONTINUATION frames) can only appear - // at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. - st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") - t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) - return - } - - state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC ResponseHeader has been received - // which indicates peer speaking gRPC, we are in gRPC mode. 
- state.data.isGRPC = !initialHeader - if err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) + var state decodeState + if err := state.decodeResponseHeader(frame); err != nil { + t.closeStream(s, err, true, http2.ErrCodeProtocol, nil, nil, false) + // Something wrong. Stops reading even when there is remaining. return } + endStream := frame.StreamEnded() var isHeader bool defer func() { if t.statsHandler != nil { @@ -1179,30 +1084,25 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } }() - // If headers haven't been received yet. - if initialHeader { + if atomic.SwapUint32(&s.headerDone, 1) == 0 { if !endStream { - // Headers frame is ResponseHeader. + // Headers frame is not actually a trailers-only frame. isHeader = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. - s.recvCompress = state.data.encoding - if len(state.data.mdata) > 0 { - s.header = state.data.mdata + s.recvCompress = state.encoding + if len(state.mdata) > 0 { + s.header = state.mdata } - close(s.headerChan) - return } - // Headers frame is Trailers-only. - s.noHeaders = true close(s.headerChan) } - - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) + if !endStream { + return + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, state.status(), state.mdata, true) } // reader runs as a separate goroutine in charge of reading data from network @@ -1216,19 +1116,18 @@ func (t *http2Client) reader() { // Check the validity of server preface. frame, err := t.framer.fr.ReadFrame() if err != nil { - t.Close() // this kicks off resetTransport, so must be last before return + t.Close() return } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) if t.keepaliveEnabled { atomic.CompareAndSwapUint32(&t.activity, 0, 1) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.Close() // this kicks off resetTransport, so must be last before return + t.Close() return } - t.onPrefaceReceipt() + t.onSuccess() t.handleSettings(sf, true) // loop to keep reading incoming messages on this transport. @@ -1247,9 +1146,7 @@ func (t *http2Client) reader() { t.mu.Unlock() if s != nil { // use error detail to provide better err message - code := http2ErrConvTab[se.Code] - msg := t.framer.fr.ErrorDetail().Error() - t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + t.closeStream(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail()), true, http2.ErrCodeProtocol, nil, nil, false) } continue } else { @@ -1306,7 +1203,9 @@ func (t *http2Client) keepalive() { } else { t.mu.Unlock() if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + t.czmu.Lock() + t.kpCount++ + t.czmu.Unlock() } // Send ping. 
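The channelz hunks in this file (and in http2_server.go below) trade the newer lock-free counters on a czData struct for 1.13.0's czmu mutex around plain fields. Both patterns record the same metrics; a standalone sketch of the two side by side, stdlib only:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
    "time"
)

// Newer style: lock-free counters plus nanosecond timestamps.
type czData struct {
    kpCount         int64
    lastMsgSentTime int64 // UnixNano; read back with time.Unix(0, v)
}

func (c *czData) incr() {
    atomic.AddInt64(&c.kpCount, 1)
    atomic.StoreInt64(&c.lastMsgSentTime, time.Now().UnixNano())
}

// 1.13.0 style: an RWMutex guarding ordinary fields.
type czMutexData struct {
    mu          sync.RWMutex
    kpCount     int64
    lastMsgSent time.Time
}

func (c *czMutexData) incr() {
    c.mu.Lock()
    c.kpCount++
    c.lastMsgSent = time.Now()
    c.mu.Unlock()
}

func main() {
    a, b := &czData{}, &czMutexData{}
    a.incr()
    b.incr()
    fmt.Println(atomic.LoadInt64(&a.kpCount), b.kpCount)
}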
t.controlBuf.put(p) @@ -1346,39 +1245,41 @@ func (t *http2Client) GoAway() <-chan struct{} { } func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + t.czmu.RLock() s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + StreamsStarted: t.streamsStarted, + StreamsSucceeded: t.streamsSucceeded, + StreamsFailed: t.streamsFailed, + MessagesSent: t.msgSent, + MessagesReceived: t.msgRecv, + KeepAlivesSent: t.kpCount, + LastLocalStreamCreatedTimestamp: t.lastStreamCreated, + LastMessageSentTimestamp: t.lastMsgSent, + LastMessageReceivedTimestamp: t.lastMsgRecv, LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, + //socket options + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // Security // RemoteName : } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } + t.czmu.RUnlock() s.RemoteFlowControlWindow = t.getOutFlowWindow() return &s } -func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } - func (t *http2Client) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) + t.czmu.Lock() + t.msgSent++ + t.lastMsgSent = time.Now() + t.czmu.Unlock() } func (t *http2Client) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) + t.czmu.Lock() + t.msgRecv++ + t.lastMsgRecv = time.Now() + t.czmu.Unlock() } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go similarity index 80% rename from vendor/google.golang.org/grpc/internal/transport/http2_server.go rename to vendor/google.golang.org/grpc/transport/http2_server.go index 435092e5c8..19acedb2b0 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -20,7 +20,6 @@ package transport import ( "bytes" - "context" "errors" "fmt" "io" @@ -32,6 +31,7 @@ import ( "time" "github.com/golang/protobuf/proto" + "golang.org/x/net/context" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" @@ -48,14 +48,9 @@ import ( "google.golang.org/grpc/tap" ) -var ( - // ErrIllegalHeaderWrite indicates that setting header is illegal because of - // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") - // ErrHeaderListSizeLimitViolation indicates that the header list size is larger - // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") -) +// ErrIllegalHeaderWrite indicates that setting header is illegal because of +// the stream's state. 
+var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { @@ -94,10 +89,9 @@ type http2Server struct { // Flag to signify that number of ping strikes should be reset to 0. // This is set whenever data or header frames are sent. // 1 means yes. - resetPingStrikes uint32 // Accessed atomically. - initialWindowSize int32 - bdpEst *bdpEstimator - maxSendHeaderListSize *uint32 + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator mu sync.Mutex // guard the following @@ -118,19 +112,33 @@ type http2Server struct { // Fields below are for channelz metric collection. channelzID int64 // channelz unique identification number - czData *channelzData + czmu sync.RWMutex + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // The number of streams that have ended successfully by sending frame with + // EoS bit set. + streamsSucceeded int64 + streamsFailed int64 + lastStreamCreated time.Time + msgSent int64 + msgRecv int64 + lastMsgSent time.Time + lastMsgRecv time.Time } // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // returned if something goes wrong. func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { - writeBufSize := config.WriteBufferSize - readBufSize := config.ReadBufferSize - maxHeaderListSize := defaultServerMaxHeaderListSize - if config.MaxHeaderListSize != nil { - maxHeaderListSize = *config.MaxHeaderListSize + writeBufSize := defaultWriteBufSize + if config.WriteBufferSize > 0 { + writeBufSize = config.WriteBufferSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + readBufSize := defaultReadBufSize + if config.ReadBufferSize > 0 { + readBufSize = config.ReadBufferSize + } + framer := newFramer(conn, writeBufSize, readBufSize) // Send initial settings as connection preface to client. var isettings []http2.Setting // TODO(zhaoq): Have a better way to signal "no limit" because 0 is @@ -160,12 +168,6 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err ID: http2.SettingInitialWindowSize, Val: uint32(iwz)}) } - if config.MaxHeaderListSize != nil { - isettings = append(isettings, http2.Setting{ - ID: http2.SettingMaxHeaderListSize, - Val: *config.MaxHeaderListSize, - }) - } if err := framer.fr.WriteSettings(isettings...); err != nil { return nil, connectionErrorf(false, err, "transport: %v", err) } @@ -219,7 +221,6 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err idle: time.Now(), kep: kep, initialWindowSize: iwz, - czData: new(channelzData), } t.controlBuf = newControlBuffer(t.ctxDone) if dynamicWindow { @@ -237,7 +238,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err t.stats.HandleConn(t.ctx, connBegin) } if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, "") } t.framer.writer.Flush() @@ -284,21 +285,21 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err } // operateHeader takes action on the decoded headers. 
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) { streamID := frame.Header().StreamID - state := &decodeState{ - serverSide: true, - } - if err := state.decodeHeader(frame); err != nil { - if se, ok := status.FromError(err); ok { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: statusCodeConvTab[se.Code()], - onWrite: func() {}, - }) + var state decodeState + for _, hf := range frame.Fields { + if err := state.processHeaderField(hf); err != nil { + if se, ok := err.(StreamError); ok { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: statusCodeConvTab[se.Code], + onWrite: func() {}, + }) + } + return } - return false } buf := newRecvBuffer() @@ -307,16 +308,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( st: t, buf: buf, fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.data.encoding, - method: state.data.method, - contentSubtype: state.data.contentSubtype, + recvCompress: state.encoding, + method: state.method, + contentSubtype: state.contentSubtype, } if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - if state.data.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + if state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } @@ -329,19 +330,19 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx = peer.NewContext(s.ctx, pr) // Attach the received metadata to the context. 
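In the server-side operateHeaders above, the per-stream context comes from context.WithTimeout when a grpc-timeout header was decoded and from context.WithCancel otherwise. A minimal sketch of that choice; timeoutSet and timeout are stand-ins for the decodeState fields:

package main

import (
    "context"
    "fmt"
    "time"
)

// newStreamContext mirrors the shape of the hunk above: honor a decoded
// timeout when one was set, otherwise fall back to a cancelable context.
func newStreamContext(parent context.Context, timeoutSet bool, timeout time.Duration) (context.Context, context.CancelFunc) {
    if timeoutSet {
        return context.WithTimeout(parent, timeout)
    }
    return context.WithCancel(parent)
}

func main() {
    ctx, cancel := newStreamContext(context.Background(), true, 50*time.Millisecond)
    defer cancel()
    <-ctx.Done()
    fmt.Println(ctx.Err()) // context deadline exceeded
}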
- if len(state.data.mdata) > 0 { - s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) + if len(state.mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) } - if state.data.statsTags != nil { - s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) + if state.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags) } - if state.data.statsTrace != nil { - s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + if state.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace) } if t.inTapHandle != nil { var err error info := &tap.Info{ - FullMethodName: state.data.method, + FullMethodName: state.method, } s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { @@ -352,13 +353,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeRefusedStream, onWrite: func() {}, }) - return false + return } } t.mu.Lock() if t.state != reachable { t.mu.Unlock() - return false + return } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -368,7 +369,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeRefusedStream, onWrite: func() {}, }) - return false + return } if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() @@ -383,8 +384,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } t.mu.Unlock() if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + t.czmu.Lock() + t.streamsStarted++ + t.lastStreamCreated = time.Now() + t.czmu.Unlock() } s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) @@ -419,7 +422,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return } // HandleStreams receives incoming streams using the given handler. This is @@ -437,7 +440,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. s := t.activeStreams[se.StreamID] t.mu.Unlock() if s != nil { - t.closeStream(s, true, se.Code, false) + t.closeStream(s, true, se.Code, nil, false) } else { t.controlBuf.put(&cleanupStream{ streamID: se.StreamID, @@ -579,7 +582,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { } if size > 0 { if err := s.fc.onData(size); err != nil { - t.closeStream(s, true, http2.ErrCodeFlowControl, false) + t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false) return } if f.Header().Flags.Has(http2.FlagDataPadded) { @@ -604,18 +607,11 @@ func (t *http2Server) handleData(f *http2.DataFrame) { } func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { - // If the stream is not deleted from the transport's active streams map, then do a regular close stream. - if s, ok := t.getStream(f); ok { - t.closeStream(s, false, 0, false) + s, ok := t.getStream(f) + if !ok { return } - // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. 
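The metadata hunk above publishes the decoded request headers to handlers via metadata.NewIncomingContext; handlers read them back with metadata.FromIncomingContext. A small usage sketch (the header key and value are invented):

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc/metadata"
)

func main() {
    // Attach decoded header metadata to the stream context, as operateHeaders does.
    md := metadata.MD{"x-example-key": {"example-value"}} // illustrative pair
    ctx := metadata.NewIncomingContext(context.Background(), md)

    // A server-side handler would recover it like this.
    got, ok := metadata.FromIncomingContext(ctx)
    fmt.Println(ok, got.Get("x-example-key"))
}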
- t.controlBuf.put(&cleanupStream{ - streamID: f.Header().StreamID, - rst: false, - rstCode: 0, - onWrite: func() {}, - }) + t.closeStream(s, false, 0, nil, false) } func (t *http2Server) handleSettings(f *http2.SettingsFrame) { @@ -623,25 +619,11 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { return } var ss []http2.Setting - var updateFuncs []func() f.ForeachSetting(func(s http2.Setting) error { - switch s.ID { - case http2.SettingMaxHeaderListSize: - updateFuncs = append(updateFuncs, func() { - t.maxSendHeaderListSize = new(uint32) - *t.maxSendHeaderListSize = s.Val - }) - default: - ss = append(ss, s) - } + ss = append(ss, s) return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { - for _, f := range updateFuncs { - f() - } - return true - }, &incomingSettings{ + t.controlBuf.put(&incomingSettings{ ss: ss, }) } @@ -721,21 +703,6 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { - if t.maxSendHeaderListSize == nil { - return true - } - hdrFrame := it.(*headerFrame) - var sz int64 - for _, f := range hdrFrame.hf { - if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) - return false - } - } - return true -} - // WriteHeader sends the header metedata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { if s.updateHeaderSent() || s.getState() == streamDone { @@ -749,15 +716,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { s.header = md } } - if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err - } + t.writeHeaderLocked(s) s.hdrMu.Unlock() return nil } -func (t *http2Server) writeHeaderLocked(s *Stream) error { +func (t *http2Server) writeHeaderLocked(s *Stream) { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -767,7 +731,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } headerFields = appendHeaderFieldsFromMD(headerFields, s.header) - success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + t.controlBuf.put(&headerFrame{ streamID: s.id, hf: headerFields, endStream: false, @@ -775,20 +739,12 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { atomic.StoreUint32(&t.resetPingStrikes, 1) }, }) - if !success { - if err != nil { - return err - } - t.closeStream(s, true, http2.ErrCodeInternal, false) - return ErrHeaderListSizeLimitViolation - } if t.stats != nil { // Note: WireLength is not set in outHeader. // TODO(mmukhi): Revisit this later, if needed. outHeader := &stats.OutHeader{} t.stats.HandleRPC(s.Context(), outHeader) } - return nil } // WriteStatus sends stream status to the client and terminates the stream. @@ -805,10 +761,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. 
- if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err - } + t.writeHeaderLocked(s) } else { // Send a trailer only response. headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) @@ -838,17 +791,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { }, } s.hdrMu.Unlock() - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) - if !success { - if err != nil { - return err - } - t.closeStream(s, true, http2.ErrCodeInternal, false) - return ErrHeaderListSizeLimitViolation - } - // Send a RST_STREAM after the trailers if the client has not already half-closed. - rst := s.getState() == streamActive - t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) + t.closeStream(s, false, 0, trailingHeader, true) if t.stats != nil { t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) } @@ -860,11 +803,8 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return streamErrorf(codes.Internal, "transport: %v", err) } } else { // Writing headers checks for this condition. @@ -978,7 +918,9 @@ func (t *http2Server) keepalive() { } pingSent = true if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + t.czmu.Lock() + t.kpCount++ + t.czmu.Unlock() } t.controlBuf.put(p) keepalive.Reset(t.kp.Timeout) @@ -1018,65 +960,47 @@ func (t *http2Server) Close() error { return err } -// deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState streamState) { - oldState = s.swapState(streamDone) - if oldState == streamDone { +// closeStream clears the footprint of a stream when the stream is not needed +// any more. +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + if s.swapState(streamDone) == streamDone { // If the stream was already done, return. - return oldState + return } - // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. s.cancel() - - t.mu.Lock() - if _, ok := t.activeStreams[s.id]; ok { - delete(t.activeStreams, s.id) - if len(t.activeStreams) == 0 { - t.idle = time.Now() - } - } - t.mu.Unlock() - - if channelz.IsOn() { - if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) - } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) - } - } - - return oldState -} - -// finishStream closes the stream and puts the trailing headerFrame into controlbuf. -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { - oldState := t.deleteStream(s, eosReceived) - // If the stream is already closed, then don't put trailing header to controlbuf. 
- if oldState == streamDone { - return - } - - hdr.cleanup = &cleanupStream{ + cleanup := &cleanupStream{ streamID: s.id, rst: rst, rstCode: rstCode, - onWrite: func() {}, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + if channelz.IsOn() { + t.czmu.Lock() + if eosReceived { + t.streamsSucceeded++ + } else { + t.streamsFailed++ + } + t.czmu.Unlock() + } + }, + } + if hdr != nil { + hdr.cleanup = cleanup + t.controlBuf.put(hdr) + } else { + t.controlBuf.put(cleanup) } - t.controlBuf.put(hdr) -} - -// closeStream clears the footprint of a stream when the stream is not needed any more. -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { - t.deleteStream(s, eosReceived) - t.controlBuf.put(&cleanupStream{ - streamID: s.id, - rst: rst, - rstCode: rstCode, - onWrite: func() {}, - }) } func (t *http2Server) RemoteAddr() net.Addr { @@ -1155,41 +1079,45 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { } func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + t.czmu.RLock() s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + StreamsStarted: t.streamsStarted, + StreamsSucceeded: t.streamsSucceeded, + StreamsFailed: t.streamsFailed, + MessagesSent: t.msgSent, + MessagesReceived: t.msgRecv, + KeepAlivesSent: t.kpCount, + LastRemoteStreamCreatedTimestamp: t.lastStreamCreated, + LastMessageSentTimestamp: t.lastMsgSent, + LastMessageReceivedTimestamp: t.lastMsgRecv, LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, + //socket options + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // Security // RemoteName : } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } + t.czmu.RUnlock() s.RemoteFlowControlWindow = t.getOutFlowWindow() return &s } func (t *http2Server) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) + t.czmu.Lock() + t.msgSent++ + t.lastMsgSent = time.Now() + t.czmu.Unlock() } func (t *http2Server) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) + t.czmu.Lock() + t.msgRecv++ + t.lastMsgRecv = time.Now() + t.czmu.Unlock() } func (t *http2Server) getOutFlowWindow() int64 { - resp := make(chan uint32, 1) + resp := make(chan uint32) timer := time.NewTimer(time.Second) defer timer.Stop() t.controlBuf.put(&outFlowControlSizeRequest{resp}) diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go similarity index 71% rename from vendor/google.golang.org/grpc/internal/transport/http_util.go rename 
to vendor/google.golang.org/grpc/transport/http_util.go index 9d212867ce..7d15c7d742 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -23,8 +23,6 @@ import ( "bytes" "encoding/base64" "fmt" - "io" - "math" "net" "net/http" "strconv" @@ -45,6 +43,9 @@ const ( http2MaxFrameLen = 16384 // 16KB frame // http://http2.github.io/http2-spec/#SettingValues http2InitHeaderTableSize = 4096 + // http2IOBufSize specifies the buffer size for sending frames. + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 // baseContentType is the base content-type for gRPC. This is a valid // content-type on it's own, but can also include a content-subtype such as // "proto" as a suffix after "+" or ";". See @@ -78,8 +79,7 @@ var ( codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, codes.PermissionDenied: http2.ErrCodeInadequateSecurity, } - // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. - HTTPStatusConvTab = map[int]codes.Code{ + httpStatusConvTab = map[int]codes.Code{ // 400 Bad Request - INTERNAL. http.StatusBadRequest: codes.Internal, // 401 Unauthorized - UNAUTHENTICATED. @@ -99,7 +99,9 @@ var ( } ) -type parsedHeaderData struct { +// Records the states during HPACK decoding. Must be reset once the +// decoding of the entire headers are finished. +type decodeState struct { encoding string // statusGen caches the stream status received from the trailer the server // sent. Client side only. Do not access directly. After all trailers are @@ -119,30 +121,6 @@ type parsedHeaderData struct { statsTags []byte statsTrace []byte contentSubtype string - - // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). - // - // We are in gRPC mode (peer speaking gRPC) if: - // * We are client side and have already received a HEADER frame that indicates gRPC peer. - // * The header contains valid a content-type, i.e. a string starts with "application/grpc" - // And we should handle error specific to gRPC. - // - // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we - // are in HTTP fallback mode, and should handle error specific to HTTP. - isGRPC bool - grpcErr error - httpErr error - contentTypeErr string -} - -// decodeState configures decoding criteria and records the decoded data. -type decodeState struct { - // whether decoding on server side or not - serverSide bool - - // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS - // frame once decodeHeader function has been invoked and returned. - data parsedHeaderData } // isReservedHeader checks whether hdr belongs to HTTP2 headers @@ -161,9 +139,6 @@ func isReservedHeader(hdr string) bool { "grpc-status", "grpc-timeout", "grpc-status-details-bin", - // Intentionally exclude grpc-previous-rpc-attempts and - // grpc-retry-pushback-ms, which are "reserved", but their API - // intentionally works via metadata. "te": return true default: @@ -171,8 +146,8 @@ func isReservedHeader(hdr string) bool { } } -// isWhitelistedHeader checks whether hdr should be propagated into metadata -// visible to users, even though it is classified as "reserved", above. +// isWhitelistedHeader checks whether hdr should be propagated +// into metadata visible to users. 
func isWhitelistedHeader(hdr string) bool { switch hdr { case ":authority", "user-agent": @@ -223,11 +198,11 @@ func contentType(contentSubtype string) string { } func (d *decodeState) status() *status.Status { - if d.data.statusGen == nil { + if d.statusGen == nil { // No status-details were provided; generate status using code/msg. - d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) + d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) } - return d.data.statusGen + return d.statusGen } const binHdrSuffix = "-bin" @@ -259,152 +234,111 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. - if frame.Truncated { - return status.Error(codes.Internal, "peer header list size exceeded limit") - } - +func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error { for _, hf := range frame.Fields { - d.processHeaderField(hf) + if err := d.processHeaderField(hf); err != nil { + return err + } } - if d.data.isGRPC { - if d.data.grpcErr != nil { - return d.data.grpcErr - } - if d.serverSide { - return nil - } - if d.data.rawStatusCode == nil && d.data.statusGen == nil { - // gRPC status doesn't exist. - // Set rawStatusCode to be unknown and return nil error. - // So that, if the stream has ended this Unknown status - // will be propagated to the user. - // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propagated. - code := int(codes.Unknown) - d.data.rawStatusCode = &code - } + // If grpc status exists, no need to check further. + if d.rawStatusCode != nil || d.statusGen != nil { return nil } - // HTTP fallback mode - if d.data.httpErr != nil { - return d.data.httpErr + // If grpc status doesn't exist and http status doesn't exist, + // then it's a malformed header. + if d.httpStatus == nil { + return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") } - var ( - code = codes.Internal // when header does not include HTTP status, return INTERNAL - ok bool - ) - - if d.data.httpStatus != nil { - code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] + if *(d.httpStatus) != http.StatusOK { + code, ok := httpStatusConvTab[*(d.httpStatus)] if !ok { code = codes.Unknown } + return streamErrorf(code, http.StatusText(*(d.httpStatus))) } - return status.Error(code, d.constructHTTPErrMsg()) -} + // gRPC status doesn't exist and http status is OK. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propagated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propagated. + code := int(codes.Unknown) + d.rawStatusCode = &code + return nil -// constructErrMsg constructs error message to be returned in HTTP fallback mode. -// Format: HTTP status code and its corresponding message + content-type error message. 
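The restored decodeResponseHeader collapses the newer isGRPC/HTTP-fallback bookkeeping into a simpler rule: a grpc-status (or grpc-status-details-bin) wins outright; otherwise a non-200 :status is mapped through httpStatusConvTab; a 200 with no grpc-status is left as Unknown so a later trailer can supply the real status. A condensed, illustrative sketch of that decision, with the conversion table passed in rather than copied:

package main

import (
    "errors"
    "fmt"
    "net/http"

    "google.golang.org/grpc/codes"
)

// responseClass is a stand-in for the decodeResponseHeader decision above;
// the parameter names mirror decodeState fields but are assumptions here.
func responseClass(rawGRPCStatus, httpStatus *int, conv map[int]codes.Code) (codes.Code, error) {
    if rawGRPCStatus != nil {
        return codes.Code(*rawGRPCStatus), nil // gRPC status wins.
    }
    if httpStatus == nil {
        return codes.Internal, errors.New("malformed header: doesn't contain status (gRPC or HTTP)")
    }
    if *httpStatus != http.StatusOK {
        code, ok := conv[*httpStatus]
        if !ok {
            code = codes.Unknown
        }
        return code, errors.New(http.StatusText(*httpStatus))
    }
    // HTTP 200 with no grpc-status: report Unknown and let a later trailer win.
    return codes.Unknown, nil
}

func main() {
    conv := map[int]codes.Code{http.StatusBadRequest: codes.Internal}
    st := http.StatusBadRequest
    code, err := responseClass(nil, &st, conv)
    fmt.Println(code, err) // Internal Bad Request
}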
-func (d *decodeState) constructHTTPErrMsg() string { - var errMsgs []string - - if d.data.httpStatus == nil { - errMsgs = append(errMsgs, "malformed header: missing HTTP status") - } else { - errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) - } - - if d.data.contentTypeErr == "" { - errMsgs = append(errMsgs, "transport: missing content-type field") - } else { - errMsgs = append(errMsgs, d.data.contentTypeErr) - } - - return strings.Join(errMsgs, "; ") } func (d *decodeState) addMetadata(k, v string) { - if d.data.mdata == nil { - d.data.mdata = make(map[string][]string) + if d.mdata == nil { + d.mdata = make(map[string][]string) } - d.data.mdata[k] = append(d.data.mdata[k], v) + d.mdata[k] = append(d.mdata[k], v) } -func (d *decodeState) processHeaderField(f hpack.HeaderField) { +func (d *decodeState) processHeaderField(f hpack.HeaderField) error { switch f.Name { case "content-type": contentSubtype, validContentType := contentSubtype(f.Value) if !validContentType { - d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) - return + return streamErrorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value) } - d.data.contentSubtype = contentSubtype + d.contentSubtype = contentSubtype // TODO: do we want to propagate the whole content-type in the metadata, // or come up with a way to just propagate the content-subtype if it was set? // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} // in the metadata? d.addMetadata(f.Name, f.Value) - d.data.isGRPC = true case "grpc-encoding": - d.data.encoding = f.Value + d.encoding = f.Value case "grpc-status": code, err := strconv.Atoi(f.Value) if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return + return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) } - d.data.rawStatusCode = &code + d.rawStatusCode = &code case "grpc-message": - d.data.rawStatusMsg = decodeGrpcMessage(f.Value) + d.rawStatusMsg = decodeGrpcMessage(f.Value) case "grpc-status-details-bin": v, err := decodeBinHeader(f.Value) if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return + return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) } s := &spb.Status{} if err := proto.Unmarshal(v, s); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return + return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) } - d.data.statusGen = status.FromProto(s) + d.statusGen = status.FromProto(s) case "grpc-timeout": - d.data.timeoutSet = true + d.timeoutSet = true var err error - if d.data.timeout, err = decodeTimeout(f.Value); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) + if d.timeout, err = decodeTimeout(f.Value); err != nil { + return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err) } case ":path": - d.data.method = f.Value + d.method = f.Value case ":status": code, err := strconv.Atoi(f.Value) if err != nil { - d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) - return + return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err) } - d.data.httpStatus = &code + d.httpStatus = 
&code case "grpc-tags-bin": v, err := decodeBinHeader(f.Value) if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - return + return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) } - d.data.statsTags = v + d.statsTags = v d.addMetadata(f.Name, string(v)) case "grpc-trace-bin": v, err := decodeBinHeader(f.Value) if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - return + return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) } - d.data.statsTrace = v + d.statsTrace = v d.addMetadata(f.Name, string(v)) default: if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { @@ -413,10 +347,11 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) { v, err := decodeMetadataHeader(f.Name, f.Value) if err != nil { errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) - return + return nil } d.addMetadata(f.Name, v) } + return nil } type timeoutUnit uint8 @@ -489,10 +424,6 @@ func decodeTimeout(s string) (time.Duration, error) { if size < 2 { return 0, fmt.Errorf("transport: timeout string is too short: %q", s) } - if size > 9 { - // Spec allows for 8 digits plus the unit. - return 0, fmt.Errorf("transport: timeout string is too long: %q", s) - } unit := timeoutUnit(s[size-1]) d, ok := timeoutUnitToDuration(unit) if !ok { @@ -502,11 +433,6 @@ func decodeTimeout(s string) (time.Duration, error) { if err != nil { return 0, err } - const maxHours = math.MaxInt64 / int64(time.Hour) - if d == time.Hour && t > maxHours { - // This timeout would overflow math.MaxInt64; clamp it. - return time.Duration(math.MaxInt64), nil - } return d * time.Duration(t), nil } @@ -619,9 +545,6 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { if w.err != nil { return 0, w.err } - if w.batchSize == 0 { // Buffer has been disabled. - return w.conn.Write(b) - } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) b = b[nn:] @@ -654,14 +577,8 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { - if writeBufferSize < 0 { - writeBufferSize = 0 - } - var r io.Reader = conn - if readBufferSize > 0 { - r = bufio.NewReaderSize(r, readBufferSize) - } +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { + r := bufio.NewReaderSize(conn, readBufferSize) w := newBufWriter(conn, writeBufferSize) f := &framer{ writer: w, @@ -670,7 +587,6 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList // Opt-in to Frame reuse API on framer to reduce garbage. // Frames aren't safe to read from after a subsequent call to ReadFrame. f.fr.SetReuseFrames() - f.fr.MaxHeaderListSize = maxHeaderListSize f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/transport/log.go similarity index 90% rename from vendor/google.golang.org/grpc/internal/transport/log.go rename to vendor/google.golang.org/grpc/transport/log.go index 879df80c4d..ac8e358c5c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/log.go +++ b/vendor/google.golang.org/grpc/transport/log.go @@ -42,3 +42,9 @@ func errorf(format string, args ...interface{}) { grpclog.Errorf(format, args...) 
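decodeTimeout above parses the grpc-timeout wire format: an ASCII integer followed by one unit letter (H, M, S, m, u, n); note the downgrade also drops the newer 8-digit-length and overflow guards. A self-contained re-implementation for illustration only:

package main

import (
    "fmt"
    "strconv"
    "time"
)

// unitToDuration mirrors timeoutUnitToDuration: one letter per unit.
func unitToDuration(u byte) (time.Duration, bool) {
    switch u {
    case 'H':
        return time.Hour, true
    case 'M':
        return time.Minute, true
    case 'S':
        return time.Second, true
    case 'm':
        return time.Millisecond, true
    case 'u':
        return time.Microsecond, true
    case 'n':
        return time.Nanosecond, true
    }
    return 0, false
}

// parseGRPCTimeout is an illustrative re-implementation of decodeTimeout.
func parseGRPCTimeout(s string) (time.Duration, error) {
    if len(s) < 2 {
        return 0, fmt.Errorf("timeout string is too short: %q", s)
    }
    d, ok := unitToDuration(s[len(s)-1])
    if !ok {
        return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
    }
    t, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
    if err != nil {
        return 0, err
    }
    return d * time.Duration(t), nil
}

func main() {
    v, err := parseGRPCTimeout("100m")
    fmt.Println(v, err) // 100ms <nil>
}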
} } + +func fatalf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Fatalf(format, args...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go similarity index 80% rename from vendor/google.golang.org/grpc/internal/transport/transport.go rename to vendor/google.golang.org/grpc/transport/transport.go index 7f82cbb080..f51f878884 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -19,10 +19,9 @@ // Package transport defines and implements message oriented communication // channel to complete various transactions (e.g., an RPC). It is meant for // grpc-internal usage and is not intended to be imported directly by users. -package transport +package transport // externally used as import "google.golang.org/grpc/transport" import ( - "context" "errors" "fmt" "io" @@ -30,6 +29,7 @@ import ( "sync" "sync/atomic" + "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" @@ -110,15 +110,15 @@ func (b *recvBuffer) get() <-chan recvMsg { return b.c } +// // recvBufferReader implements io.Reader interface to read the data from // recvBuffer. type recvBufferReader struct { - closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. - ctx context.Context - ctxDone <-chan struct{} // cache of ctx.Done() (for performance). - recv *recvBuffer - last []byte // Stores the remaining data in the previous calls. - err error + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last []byte // Stores the remaining data in the previous calls. + err error } // Read reads the next len(p) bytes from last. If last is drained, it tries to @@ -128,53 +128,31 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) { if r.err != nil { return 0, r.err } + n, r.err = r.read(p) + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { if r.last != nil && len(r.last) > 0 { // Read remaining data left in last call. copied := copy(p, r.last) r.last = r.last[copied:] return copied, nil } - if r.closeStream != nil { - n, r.err = r.readClient(p) - } else { - n, r.err = r.read(p) - } - return n, r.err -} - -func (r *recvBufferReader) read(p []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readAdditional(m, p) + r.recv.load() + if m.err != nil { + return 0, m.err + } + copied := copy(p, m.data) + r.last = m.data[copied:] + return copied, nil } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { - // If the context is canceled, then closes the stream with nil metadata. - // closeStream writes its error parameter to r.recv as a recvMsg. - // r.readAdditional acts on that message and returns the necessary error. 
- select { - case <-r.ctxDone: - r.closeStream(ContextErr(r.ctx.Err())) - m := <-r.recv.get() - return r.readAdditional(m, p) - case m := <-r.recv.get(): - return r.readAdditional(m, p) - } -} - -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { - r.recv.load() - if m.err != nil { - return 0, m.err - } - copied := copy(p, m.data) - r.last = m.data[copied:] - return copied, nil -} - type streamState uint32 const ( @@ -198,6 +176,7 @@ type Stream struct { buf *recvBuffer trReader io.Reader fc *inFlow + recvQuota uint32 wq *writeQuota // Callback to state application's intentions to read data. This @@ -208,16 +187,10 @@ type Stream struct { headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD + hdrMu sync.Mutex + header metadata.MD // the received header metadata. trailer metadata.MD // the key-value map of trailer metadata. - noHeaders bool // set if the client never received headers (set only after the stream is done). - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. headerSent uint32 @@ -286,25 +259,16 @@ func (s *Stream) SetSendCompress(str string) { s.sendCompress = str } -// Done returns a channel which is closed when it receives the final status +// Done returns a chanel which is closed when it receives the final status // from the server. func (s *Stream) Done() <-chan struct{} { return s.done } -// Header returns the header metadata of the stream. -// -// On client side, it acquires the key-value pairs of header metadata once it is -// available. It blocks until i) the metadata is ready or ii) there is no header -// metadata or iii) the stream is canceled/expired. -// -// On server side, it returns the out header after t.WriteHeader is called. +// Header acquires the key-value pairs of header metadata once it +// is available. It blocks until i) the metadata is ready or ii) there is no +// header metadata or iii) the stream is canceled/expired. func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil && s.header != nil { - // On server side, return the header in stream. It will be the out - // header after t.WriteHeader is called. - return s.header.Copy(), nil - } err := s.waitOnHeader() // Even if the stream is closed, header is returned if available. select { @@ -318,18 +282,6 @@ func (s *Stream) Header() (metadata.MD, error) { return nil, err } -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. If a context error happens -// first, returns it as a status error. Client-side only. -func (s *Stream) TrailersOnly() (bool, error) { - err := s.waitOnHeader() - if err != nil { - return false, err - } - return s.noHeaders, nil -} - // Trailer returns the cached trailer metedata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. @@ -340,6 +292,12 @@ func (s *Stream) Trailer() metadata.MD { return c } +// ServerTransport returns the underlying ServerTransport for the stream. +// The client side stream always returns nil. 
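recvBufferReader.Read above always drains the remainder of the previous receive (last) before pulling the next message, so callers can read with small buffers. A stripped-down sketch of that leftover-slice pattern over a plain channel, stdlib only:

package main

import (
    "fmt"
    "io"
)

// chanReader is an illustrative io.Reader over a channel of byte slices that
// keeps any unread remainder in last, the way recvBufferReader does.
type chanReader struct {
    c    chan []byte
    last []byte
}

func (r *chanReader) Read(p []byte) (int, error) {
    if len(r.last) > 0 {
        n := copy(p, r.last)
        r.last = r.last[n:]
        return n, nil
    }
    m, ok := <-r.c
    if !ok {
        return 0, io.EOF
    }
    n := copy(p, m)
    r.last = m[n:]
    return n, nil
}

func main() {
    r := &chanReader{c: make(chan []byte, 1)}
    r.c <- []byte("hello world")
    close(r.c)
    buf := make([]byte, 5)
    n, _ := r.Read(buf)
    fmt.Println(string(buf[:n])) // hello
    rest, _ := io.ReadAll(r)
    fmt.Println(string(rest)) // " world"
}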
+func (s *Stream) ServerTransport() ServerTransport { + return s.st +} + // ContentSubtype returns the content-subtype for a request. For example, a // content-subtype of "proto" will result in a content-type of // "application/grpc+proto". This will always be lowercase. See @@ -361,7 +319,7 @@ func (s *Stream) Method() string { // Status returns the status received from the server. // Status can be read safely only after the stream has ended, -// that is, after Done() is closed. +// that is, read or write has returned io.EOF. func (s *Stream) Status() *status.Status { return s.status } @@ -386,7 +344,8 @@ func (s *Stream) SetHeader(md metadata.MD) error { // combined with any metadata set by previous calls to SetHeader and // then written to the transport stream. func (s *Stream) SendHeader(md metadata.MD) error { - return s.st.WriteHeader(s, md) + t := s.ServerTransport() + return t.WriteHeader(s, md) } // SetTrailer sets the trailer metadata which will be sent with the RPC status @@ -480,7 +439,6 @@ type ServerConfig struct { WriteBufferSize int ReadBufferSize int ChannelzParentID int64 - MaxHeaderListSize *uint32 } // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -493,18 +451,17 @@ func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (S type ConnectOptions struct { // UserAgent is the application user agent. UserAgent string + // Authority is the :authority pseudo-header to use. This field has no effect if + // TransportCredentials is set. + Authority string // Dialer specifies how to dial a network address. Dialer func(context.Context, string) (net.Conn, error) // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. FailOnNonTempDialError bool // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. PerRPCCredentials []credentials.PerRPCCredentials - // TransportCredentials stores the Authenticator required to setup a client - // connection. Only one of TransportCredentials and CredsBundle is non-nil. + // TransportCredentials stores the Authenticator required to setup a client connection. TransportCredentials credentials.TransportCredentials - // CredsBundle is the credentials bundle to be used. Only one of - // TransportCredentials and CredsBundle is non-nil. - CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. @@ -519,8 +476,6 @@ type ConnectOptions struct { ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID int64 - // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. - MaxHeaderListSize *uint32 } // TargetInfo contains the information of the target such as network address and metadata. @@ -532,8 +487,8 @@ type TargetInfo struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. 
-func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess) } // Options provides additional hints and information for message @@ -542,6 +497,11 @@ type Options struct { // Last indicates whether this write is the last piece for // this stream. Last bool + + // Delay is a hint to the transport implementation for whether + // the data could be buffered for a batching write. The + // transport implementation may ignore the hint. + Delay bool } // CallHdr carries the information of a particular RPC. @@ -559,6 +519,14 @@ type CallHdr struct { // Creds specifies credentials.PerRPCCredentials for a call. Creds credentials.PerRPCCredentials + // Flush indicates whether a new stream command should be sent + // to the peer without waiting for the first data. This is + // only a hint. + // If it's true, the transport may modify the flush decision + // for performance purposes. + // If it's false, new stream will never be flushed. + Flush bool + // ContentSubtype specifies the content-subtype for a request. For example, a // content-subtype of "proto" will result in a content-type of // "application/grpc+proto". The value of ContentSubtype must be all @@ -566,8 +534,6 @@ type CallHdr struct { // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. ContentSubtype string - - PreviousAttempts int // value of grpc-previous-rpc-attempts header to set } // ClientTransport is the common interface for all gRPC client-side transport @@ -610,9 +576,6 @@ type ClientTransport interface { // GetGoAwayReason returns the reason why GoAway frame was received. GetGoAwayReason() GoAwayReason - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr - // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() @@ -659,6 +622,14 @@ type ServerTransport interface { IncrMsgRecv() } +// streamErrorf creates an StreamError with the specified error code and description. +func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError { + return StreamError{ + Code: c, + Desc: fmt.Sprintf(format, a...), + } +} + // connectionErrorf creates an ConnectionError with the specified error description. func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { return ConnectionError{ @@ -701,7 +672,7 @@ var ( // errStreamDrain indicates that the stream is rejected because the // connection is draining. This could be caused by goaway or balancer // removing the address. - errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") + errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining") // errStreamDone is returned from write at the client side to indiacte application // layer of an error. errStreamDone = errors.New("the stream is done") @@ -710,6 +681,18 @@ var ( statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") ) +// TODO: See if we can replace StreamError with status package errors. + +// StreamError is an error that only affects one stream within a connection. 
+type StreamError struct { + Code codes.Code + Desc string +} + +func (e StreamError) Error() string { + return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) +} + // GoAwayReason contains the reason for the GoAway frame received. type GoAwayReason uint8 @@ -723,38 +706,3 @@ const ( // "too_many_pings". GoAwayTooManyPings GoAwayReason = 2 ) - -// channelzData is used to store channelz related data for http2Client and http2Server. -// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - kpCount int64 - // The number of streams that have started, including already finished ones. - streamsStarted int64 - // Client side: The number of streams that have ended successfully by receiving - // EoS bit set frame from server. - // Server side: The number of streams that have ended successfully by sending - // frame with EoS bit set. - streamsSucceeded int64 - streamsFailed int64 - // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type - // instead of time.Time since it's more costly to atomically update time.Time variable than int64 - // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. - lastStreamCreatedTime int64 - msgSent int64 - msgRecv int64 - lastMsgSentTime int64 - lastMsgRecvTime int64 -} - -// ContextErr converts the error from context package into a status error. -func ContextErr(err error) error { - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } - return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) -} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 092e088258..7f124fbd53 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.20.1" +const Version = "1.13.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bd5c8de90c..079bc2896d 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -13,43 +13,24 @@ die() { exit 1 } -fail_on_output() { - tee /dev/stderr | (! read) -} +PATH="$GOPATH/bin:$GOROOT/bin:$PATH" -# Check to make sure it's safe to modify the user's git repo. -git status --porcelain | fail_on_output +# Check proto in manual runs or cron runs. +if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then + check_proto="true" +fi -# Undo any edits made by this script. -cleanup() { - git reset --hard HEAD -} -trap cleanup EXIT - -PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" - -if [[ "$1" = "-install" ]]; then - # Check for module support - if go help mod >& /dev/null; then - go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell \ - github.com/golang/protobuf/protoc-gen-go - else - # Ye olde `go get` incantation. - # Note: this gets the latest version of all tools (vs. the pinned versions - # with Go modules). 
- go get -u \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell \ - github.com/golang/protobuf/protoc-gen-go - fi - if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${TRAVIS}" = "true" ]]; then +if [ "$1" = "-install" ]; then + go get -d \ + google.golang.org/grpc/... + go get -u \ + github.com/golang/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell \ + github.com/golang/protobuf/protoc-gen-go + if [[ "$check_proto" = "true" ]]; then + if [[ "$TRAVIS" = "true" ]]; then PROTOBUF_VERSION=3.3.0 PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/travis @@ -66,64 +47,48 @@ elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi -# - Ensure all source files contain a copyright message. -git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | fail_on_output +# TODO: Remove this check and the mangling below once "context" is imported +# directly. +if git status --porcelain | read; then + die "Uncommitted or untracked files found; commit changes first" +fi -# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. -(! grep 'func Test[^(]' *_test.go) -(! grep 'func Test[^(]' test/*.go) +git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) +git ls-files "*.go" | xargs grep -l '"unsafe"' 2>&1 | (! grep -v '_test.go') | tee /dev/stderr | (! read) +git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') | tee /dev/stderr | (! read) +gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) +goimports -l . 2>&1 | tee /dev/stderr | (! read) +golint ./... 2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | (! read) -# - Do not import math/rand for real library code. Use internal/grpcrand for -# thread safety. -git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT -# - Ensure all ptypes proto packages are renamed when importing. -git ls-files "*.go" | (! xargs grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/") +# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484). +# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711). +git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":' +set +o pipefail +# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed. +go tool vet -all . 2>&1 | grep -vE '(clientconn|transport\/transport_test).go:.*cancel (function|var)' | grep -vF '.pb.go:' | tee /dev/stderr | (! read) +set -o pipefail +git reset --hard HEAD -# - Check imports that are illegal in appengine (until Go 1.11). -# TODO: Remove when we drop Go 1.10 support -go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go - -# - gofmt, goimports, golint (with exceptions for generated code), go vet. -gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") | fail_on_output -golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") -go tool vet -all . - -# - Check that generated proto files are up to date. 
-if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ +if [[ "$check_proto" = "true" ]]; then + PATH="/home/travis/bin:$PATH" make proto && \ + git status --porcelain 2>&1 | (! read) || \ (git status; git --no-pager diff; exit 1) fi -# - Check that our module is tidy. -if go help mod >& /dev/null; then - go mod tidy && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -# - Collection of static analysis checks # TODO(menghanl): fix errors in transport_test. -staticcheck -go 1.9 -checks 'inherit,-ST1015' -ignore ' -google.golang.org/grpc/balancer.go:SA1019 -google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019 -google.golang.org/grpc/balancer/xds/edsbalancer/balancergroup.go:SA1019 -google.golang.org/grpc/balancer/xds/xds.go:SA1019 -google.golang.org/grpc/balancer_conn_wrappers.go:SA1019 -google.golang.org/grpc/balancer_test.go:SA1019 +staticcheck -ignore ' +google.golang.org/grpc/transport/transport_test.go:SA2002 google.golang.org/grpc/benchmark/benchmain/main.go:SA1019 -google.golang.org/grpc/benchmark/worker/benchmark_client.go:SA1019 -google.golang.org/grpc/clientconn.go:S1024 -google.golang.org/grpc/clientconn_state_transition_test.go:SA1019 -google.golang.org/grpc/clientconn_test.go:SA1019 -google.golang.org/grpc/internal/transport/handler_server.go:SA1019 -google.golang.org/grpc/internal/transport/handler_server_test.go:SA1019 -google.golang.org/grpc/resolver/dns/dns_resolver.go:SA1019 google.golang.org/grpc/stats/stats_test.go:SA1019 -google.golang.org/grpc/test/channelz_test.go:SA1019 google.golang.org/grpc/test/end2end_test.go:SA1019 -google.golang.org/grpc/test/healthcheck_test.go:SA1019 +google.golang.org/grpc/balancer_test.go:SA1019 +google.golang.org/grpc/balancer.go:SA1019 +google.golang.org/grpc/clientconn_test.go:SA1019 ' ./... misspell -error . 
diff --git a/vendor/modules.txt b/vendor/modules.txt index 01b6c2ec4e..03815aa306 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -878,9 +878,9 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/app_identity google.golang.org/appengine/internal/modules -# google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 +# google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.20.1 +# google.golang.org/grpc v1.20.1 => google.golang.org/grpc v1.13.0 google.golang.org/grpc google.golang.org/grpc/codes google.golang.org/grpc/health/grpc_health_v1 @@ -895,13 +895,7 @@ google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff -google.golang.org/grpc/internal/balancerload -google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/channelz -google.golang.org/grpc/internal/envconfig -google.golang.org/grpc/internal/grpcrand -google.golang.org/grpc/internal/grpcsync -google.golang.org/grpc/internal/transport google.golang.org/grpc/keepalive google.golang.org/grpc/naming google.golang.org/grpc/peer @@ -910,11 +904,10 @@ google.golang.org/grpc/resolver/dns google.golang.org/grpc/resolver/passthrough google.golang.org/grpc/stats google.golang.org/grpc/tap +google.golang.org/grpc/transport google.golang.org/grpc/health google.golang.org/grpc/balancer/base -google.golang.org/grpc/credentials/internal -google.golang.org/grpc/binarylog/grpc_binarylog_v1 -google.golang.org/grpc/internal/syscall +google.golang.org/grpc/internal/grpcrand # gopkg.in/gcfg.v1 v1.2.0 gopkg.in/gcfg.v1 gopkg.in/gcfg.v1/scanner
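The "=> google.golang.org/grpc v1.13.0" annotation in vendor/modules.txt above is what go mod vendor emits when the top-level go.mod pins grpc back with a replace directive. A sketch of the relevant stanza, with the project's module path and other requirements omitted (this excerpt is assumed for illustration, not quoted from the change):

    // go.mod (excerpt, illustrative)
    require google.golang.org/grpc v1.20.1

    // Keep the declared requirement at 1.20.1 but vendor and build against 1.13.0.
    replace google.golang.org/grpc => google.golang.org/grpc v1.13.0

Running go mod vendor after adding such a directive rewrites vendor/modules.txt with the "v1.20.1 => ... v1.13.0" marker shown above and regenerates the vendored sources that make up the bulk of this patch.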