mirror of https://github.com/k3s-io/k3s
Merge pull request #51109 from kubernetes/revert-50531-gRPC-keep-alive-godeps
Automatic merge from submit-queue Revert "Updated gRPC vendoring to support Keep Alive" Reverts kubernetes/kubernetes#50531 Ref - https://github.com/kubernetes/kubernetes/issues/51099 /cc @wojtek-t @RenaudWasTakenpull/6/head
commit
b0ad3a1c5d
|
@ -2715,51 +2715,51 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/context",
|
"ImportPath": "golang.org/x/net/context",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/context/ctxhttp",
|
"ImportPath": "golang.org/x/net/context/ctxhttp",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/html",
|
"ImportPath": "golang.org/x/net/html",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/html/atom",
|
"ImportPath": "golang.org/x/net/html/atom",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
"ImportPath": "golang.org/x/net/http2",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/idna",
|
"ImportPath": "golang.org/x/net/idna",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/internal/timeseries",
|
"ImportPath": "golang.org/x/net/internal/timeseries",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/proxy",
|
"ImportPath": "golang.org/x/net/proxy",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/trace",
|
"ImportPath": "golang.org/x/net/trace",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/websocket",
|
"ImportPath": "golang.org/x/net/websocket",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/oauth2",
|
"ImportPath": "golang.org/x/oauth2",
|
||||||
|
@ -2791,67 +2791,63 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/cases",
|
"ImportPath": "golang.org/x/text/cases",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/encoding",
|
"ImportPath": "golang.org/x/text/encoding",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/encoding/internal",
|
"ImportPath": "golang.org/x/text/encoding/internal",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/encoding/internal/identifier",
|
"ImportPath": "golang.org/x/text/encoding/internal/identifier",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/encoding/unicode",
|
"ImportPath": "golang.org/x/text/encoding/unicode",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/text/internal",
|
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/internal/tag",
|
"ImportPath": "golang.org/x/text/internal/tag",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/internal/utf8internal",
|
"ImportPath": "golang.org/x/text/internal/utf8internal",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/language",
|
"ImportPath": "golang.org/x/text/language",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/runes",
|
"ImportPath": "golang.org/x/text/runes",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/bidirule",
|
"ImportPath": "golang.org/x/text/secure/bidirule",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/precis",
|
"ImportPath": "golang.org/x/text/secure/precis",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/transform",
|
"ImportPath": "golang.org/x/text/transform",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/bidi",
|
"ImportPath": "golang.org/x/text/unicode/bidi",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/width",
|
"ImportPath": "golang.org/x/text/width",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/time/rate",
|
"ImportPath": "golang.org/x/time/rate",
|
||||||
|
@ -2913,79 +2909,50 @@
|
||||||
"ImportPath": "google.golang.org/api/pubsub/v1",
|
"ImportPath": "google.golang.org/api/pubsub/v1",
|
||||||
"Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24"
|
"Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24"
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
|
|
||||||
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc",
|
"ImportPath": "google.golang.org/grpc",
|
||||||
"Comment": "v1.5.1",
|
"Comment": "v1.0.4",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/codes",
|
"ImportPath": "google.golang.org/grpc/codes",
|
||||||
"Comment": "v1.5.1",
|
"Comment": "v1.0.4",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/credentials",
|
"ImportPath": "google.golang.org/grpc/credentials",
|
||||||
"Comment": "v1.5.1",
|
"Comment": "v1.0.4",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
|
|
||||||
"Comment": "v1.5.1",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/grpclog",
|
"ImportPath": "google.golang.org/grpc/grpclog",
|
||||||
"Comment": "v1.5.1",
|
"Comment": "v1.0.4",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/internal",
|
"ImportPath": "google.golang.org/grpc/internal",
|
||||||
"Comment": "v1.5.1",
|
"Comment": "v1.0.4",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/keepalive",
|
|
||||||
"Comment": "v1.5.1",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/metadata",
|
"ImportPath": "google.golang.org/grpc/metadata",
|
||||||
"Comment": "v1.5.1",
|
"Comment": "v1.0.4",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/naming",
|
"ImportPath": "google.golang.org/grpc/naming",
|
||||||
"Comment": "v1.5.1",
|
"Comment": "v1.0.4",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/peer",
|
"ImportPath": "google.golang.org/grpc/peer",
|
||||||
"Comment": "v1.5.1",
|
"Comment": "v1.0.4",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/stats",
|
|
||||||
"Comment": "v1.5.1",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/status",
|
|
||||||
"Comment": "v1.5.1",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/tap",
|
|
||||||
"Comment": "v1.5.1",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/transport",
|
"ImportPath": "google.golang.org/grpc/transport",
|
||||||
"Comment": "v1.5.1",
|
"Comment": "v1.0.4",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "gopkg.in/gcfg.v1",
|
"ImportPath": "gopkg.in/gcfg.v1",
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -84,63 +84,59 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
"ImportPath": "golang.org/x/net/http2",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/idna",
|
"ImportPath": "golang.org/x/net/idna",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/cases",
|
"ImportPath": "golang.org/x/text/cases",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/text/internal",
|
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/internal/tag",
|
"ImportPath": "golang.org/x/text/internal/tag",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/language",
|
"ImportPath": "golang.org/x/text/language",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/runes",
|
"ImportPath": "golang.org/x/text/runes",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/bidirule",
|
"ImportPath": "golang.org/x/text/secure/bidirule",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/precis",
|
"ImportPath": "golang.org/x/text/secure/precis",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/transform",
|
"ImportPath": "golang.org/x/text/transform",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/bidi",
|
"ImportPath": "golang.org/x/text/unicode/bidi",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/width",
|
"ImportPath": "golang.org/x/text/width",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "gopkg.in/inf.v0",
|
"ImportPath": "gopkg.in/inf.v0",
|
||||||
|
|
|
@ -304,43 +304,43 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/context",
|
"ImportPath": "golang.org/x/net/context",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/html",
|
"ImportPath": "golang.org/x/net/html",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/html/atom",
|
"ImportPath": "golang.org/x/net/html/atom",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
"ImportPath": "golang.org/x/net/http2",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/idna",
|
"ImportPath": "golang.org/x/net/idna",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/internal/timeseries",
|
"ImportPath": "golang.org/x/net/internal/timeseries",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/trace",
|
"ImportPath": "golang.org/x/net/trace",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/websocket",
|
"ImportPath": "golang.org/x/net/websocket",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/sys/unix",
|
"ImportPath": "golang.org/x/sys/unix",
|
||||||
|
@ -348,107 +348,79 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/cases",
|
"ImportPath": "golang.org/x/text/cases",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/text/internal",
|
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/internal/tag",
|
"ImportPath": "golang.org/x/text/internal/tag",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/language",
|
"ImportPath": "golang.org/x/text/language",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/runes",
|
"ImportPath": "golang.org/x/text/runes",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/bidirule",
|
"ImportPath": "golang.org/x/text/secure/bidirule",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/precis",
|
"ImportPath": "golang.org/x/text/secure/precis",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/transform",
|
"ImportPath": "golang.org/x/text/transform",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/bidi",
|
"ImportPath": "golang.org/x/text/unicode/bidi",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/width",
|
"ImportPath": "golang.org/x/text/width",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
|
|
||||||
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc",
|
"ImportPath": "google.golang.org/grpc",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/codes",
|
"ImportPath": "google.golang.org/grpc/codes",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/credentials",
|
"ImportPath": "google.golang.org/grpc/credentials",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/grpclog",
|
"ImportPath": "google.golang.org/grpc/grpclog",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/internal",
|
"ImportPath": "google.golang.org/grpc/internal",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/keepalive",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/metadata",
|
"ImportPath": "google.golang.org/grpc/metadata",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/naming",
|
"ImportPath": "google.golang.org/grpc/naming",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/peer",
|
"ImportPath": "google.golang.org/grpc/peer",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/stats",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/status",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/tap",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/transport",
|
"ImportPath": "google.golang.org/grpc/transport",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "gopkg.in/inf.v0",
|
"ImportPath": "gopkg.in/inf.v0",
|
||||||
|
|
|
@ -136,75 +136,71 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/html",
|
"ImportPath": "golang.org/x/net/html",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/html/atom",
|
"ImportPath": "golang.org/x/net/html/atom",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
"ImportPath": "golang.org/x/net/http2",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/idna",
|
"ImportPath": "golang.org/x/net/idna",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/websocket",
|
"ImportPath": "golang.org/x/net/websocket",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/cases",
|
"ImportPath": "golang.org/x/text/cases",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/text/internal",
|
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/internal/tag",
|
"ImportPath": "golang.org/x/text/internal/tag",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/language",
|
"ImportPath": "golang.org/x/text/language",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/runes",
|
"ImportPath": "golang.org/x/text/runes",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/bidirule",
|
"ImportPath": "golang.org/x/text/secure/bidirule",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/precis",
|
"ImportPath": "golang.org/x/text/secure/precis",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/transform",
|
"ImportPath": "golang.org/x/text/transform",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/bidi",
|
"ImportPath": "golang.org/x/text/unicode/bidi",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/width",
|
"ImportPath": "golang.org/x/text/width",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "gopkg.in/inf.v0",
|
"ImportPath": "gopkg.in/inf.v0",
|
||||||
|
|
|
@ -580,43 +580,43 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/context",
|
"ImportPath": "golang.org/x/net/context",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/html",
|
"ImportPath": "golang.org/x/net/html",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/html/atom",
|
"ImportPath": "golang.org/x/net/html/atom",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
"ImportPath": "golang.org/x/net/http2",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/idna",
|
"ImportPath": "golang.org/x/net/idna",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/internal/timeseries",
|
"ImportPath": "golang.org/x/net/internal/timeseries",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/trace",
|
"ImportPath": "golang.org/x/net/trace",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/websocket",
|
"ImportPath": "golang.org/x/net/websocket",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/sys/unix",
|
"ImportPath": "golang.org/x/sys/unix",
|
||||||
|
@ -624,111 +624,83 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/cases",
|
"ImportPath": "golang.org/x/text/cases",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/text/internal",
|
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/internal/tag",
|
"ImportPath": "golang.org/x/text/internal/tag",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/language",
|
"ImportPath": "golang.org/x/text/language",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/runes",
|
"ImportPath": "golang.org/x/text/runes",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/bidirule",
|
"ImportPath": "golang.org/x/text/secure/bidirule",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/precis",
|
"ImportPath": "golang.org/x/text/secure/precis",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/transform",
|
"ImportPath": "golang.org/x/text/transform",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/bidi",
|
"ImportPath": "golang.org/x/text/unicode/bidi",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/width",
|
"ImportPath": "golang.org/x/text/width",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/time/rate",
|
"ImportPath": "golang.org/x/time/rate",
|
||||||
"Rev": "f51c12702a4d776e4c1fa9b0fabab841babae631"
|
"Rev": "f51c12702a4d776e4c1fa9b0fabab841babae631"
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
|
|
||||||
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc",
|
"ImportPath": "google.golang.org/grpc",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/codes",
|
"ImportPath": "google.golang.org/grpc/codes",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/credentials",
|
"ImportPath": "google.golang.org/grpc/credentials",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/grpclog",
|
"ImportPath": "google.golang.org/grpc/grpclog",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/internal",
|
"ImportPath": "google.golang.org/grpc/internal",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/keepalive",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/metadata",
|
"ImportPath": "google.golang.org/grpc/metadata",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/naming",
|
"ImportPath": "google.golang.org/grpc/naming",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/peer",
|
"ImportPath": "google.golang.org/grpc/peer",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/stats",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/status",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/tap",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/transport",
|
"ImportPath": "google.golang.org/grpc/transport",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "gopkg.in/inf.v0",
|
"ImportPath": "gopkg.in/inf.v0",
|
||||||
|
|
|
@ -256,27 +256,27 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/context",
|
"ImportPath": "golang.org/x/net/context",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/context/ctxhttp",
|
"ImportPath": "golang.org/x/net/context/ctxhttp",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
"ImportPath": "golang.org/x/net/http2",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/idna",
|
"ImportPath": "golang.org/x/net/idna",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/oauth2",
|
"ImportPath": "golang.org/x/oauth2",
|
||||||
|
@ -304,47 +304,43 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/cases",
|
"ImportPath": "golang.org/x/text/cases",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/text/internal",
|
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/internal/tag",
|
"ImportPath": "golang.org/x/text/internal/tag",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/language",
|
"ImportPath": "golang.org/x/text/language",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/runes",
|
"ImportPath": "golang.org/x/text/runes",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/bidirule",
|
"ImportPath": "golang.org/x/text/secure/bidirule",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/precis",
|
"ImportPath": "golang.org/x/text/secure/precis",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/transform",
|
"ImportPath": "golang.org/x/text/transform",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/bidi",
|
"ImportPath": "golang.org/x/text/unicode/bidi",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/width",
|
"ImportPath": "golang.org/x/text/width",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "gopkg.in/inf.v0",
|
"ImportPath": "gopkg.in/inf.v0",
|
||||||
|
|
|
@ -224,63 +224,59 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
"ImportPath": "golang.org/x/net/http2",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/idna",
|
"ImportPath": "golang.org/x/net/idna",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/cases",
|
"ImportPath": "golang.org/x/text/cases",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/text/internal",
|
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/internal/tag",
|
"ImportPath": "golang.org/x/text/internal/tag",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/language",
|
"ImportPath": "golang.org/x/text/language",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/runes",
|
"ImportPath": "golang.org/x/text/runes",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/bidirule",
|
"ImportPath": "golang.org/x/text/secure/bidirule",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/precis",
|
"ImportPath": "golang.org/x/text/secure/precis",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/transform",
|
"ImportPath": "golang.org/x/text/transform",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/bidi",
|
"ImportPath": "golang.org/x/text/unicode/bidi",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/width",
|
"ImportPath": "golang.org/x/text/width",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "gopkg.in/inf.v0",
|
"ImportPath": "gopkg.in/inf.v0",
|
||||||
|
|
|
@ -300,43 +300,43 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/context",
|
"ImportPath": "golang.org/x/net/context",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/html",
|
"ImportPath": "golang.org/x/net/html",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/html/atom",
|
"ImportPath": "golang.org/x/net/html/atom",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
"ImportPath": "golang.org/x/net/http2",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/idna",
|
"ImportPath": "golang.org/x/net/idna",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/internal/timeseries",
|
"ImportPath": "golang.org/x/net/internal/timeseries",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/trace",
|
"ImportPath": "golang.org/x/net/trace",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/websocket",
|
"ImportPath": "golang.org/x/net/websocket",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/sys/unix",
|
"ImportPath": "golang.org/x/sys/unix",
|
||||||
|
@ -344,107 +344,79 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/cases",
|
"ImportPath": "golang.org/x/text/cases",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/text/internal",
|
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/internal/tag",
|
"ImportPath": "golang.org/x/text/internal/tag",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/language",
|
"ImportPath": "golang.org/x/text/language",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/runes",
|
"ImportPath": "golang.org/x/text/runes",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/bidirule",
|
"ImportPath": "golang.org/x/text/secure/bidirule",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/precis",
|
"ImportPath": "golang.org/x/text/secure/precis",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/transform",
|
"ImportPath": "golang.org/x/text/transform",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/bidi",
|
"ImportPath": "golang.org/x/text/unicode/bidi",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/width",
|
"ImportPath": "golang.org/x/text/width",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
|
|
||||||
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc",
|
"ImportPath": "google.golang.org/grpc",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/codes",
|
"ImportPath": "google.golang.org/grpc/codes",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/credentials",
|
"ImportPath": "google.golang.org/grpc/credentials",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/grpclog",
|
"ImportPath": "google.golang.org/grpc/grpclog",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/internal",
|
"ImportPath": "google.golang.org/grpc/internal",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/keepalive",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/metadata",
|
"ImportPath": "google.golang.org/grpc/metadata",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/naming",
|
"ImportPath": "google.golang.org/grpc/naming",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/peer",
|
"ImportPath": "google.golang.org/grpc/peer",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/stats",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/status",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/tap",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/transport",
|
"ImportPath": "google.golang.org/grpc/transport",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "gopkg.in/inf.v0",
|
"ImportPath": "gopkg.in/inf.v0",
|
||||||
|
|
|
@ -124,63 +124,59 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
"ImportPath": "golang.org/x/net/http2",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/idna",
|
"ImportPath": "golang.org/x/net/idna",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/cases",
|
"ImportPath": "golang.org/x/text/cases",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/text/internal",
|
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/internal/tag",
|
"ImportPath": "golang.org/x/text/internal/tag",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/language",
|
"ImportPath": "golang.org/x/text/language",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/runes",
|
"ImportPath": "golang.org/x/text/runes",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/bidirule",
|
"ImportPath": "golang.org/x/text/secure/bidirule",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/precis",
|
"ImportPath": "golang.org/x/text/secure/precis",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/transform",
|
"ImportPath": "golang.org/x/text/transform",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/bidi",
|
"ImportPath": "golang.org/x/text/unicode/bidi",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/width",
|
"ImportPath": "golang.org/x/text/width",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "gopkg.in/inf.v0",
|
"ImportPath": "gopkg.in/inf.v0",
|
||||||
|
|
|
@ -292,43 +292,43 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/context",
|
"ImportPath": "golang.org/x/net/context",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/html",
|
"ImportPath": "golang.org/x/net/html",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/html/atom",
|
"ImportPath": "golang.org/x/net/html/atom",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
"ImportPath": "golang.org/x/net/http2",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/idna",
|
"ImportPath": "golang.org/x/net/idna",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/internal/timeseries",
|
"ImportPath": "golang.org/x/net/internal/timeseries",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/trace",
|
"ImportPath": "golang.org/x/net/trace",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/websocket",
|
"ImportPath": "golang.org/x/net/websocket",
|
||||||
"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/sys/unix",
|
"ImportPath": "golang.org/x/sys/unix",
|
||||||
|
@ -336,107 +336,79 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/cases",
|
"ImportPath": "golang.org/x/text/cases",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/text/internal",
|
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/internal/tag",
|
"ImportPath": "golang.org/x/text/internal/tag",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/language",
|
"ImportPath": "golang.org/x/text/language",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/runes",
|
"ImportPath": "golang.org/x/text/runes",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/bidirule",
|
"ImportPath": "golang.org/x/text/secure/bidirule",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/secure/precis",
|
"ImportPath": "golang.org/x/text/secure/precis",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/transform",
|
"ImportPath": "golang.org/x/text/transform",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/bidi",
|
"ImportPath": "golang.org/x/text/unicode/bidi",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/text/width",
|
"ImportPath": "golang.org/x/text/width",
|
||||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
|
|
||||||
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc",
|
"ImportPath": "google.golang.org/grpc",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/codes",
|
"ImportPath": "google.golang.org/grpc/codes",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/credentials",
|
"ImportPath": "google.golang.org/grpc/credentials",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/grpclog",
|
"ImportPath": "google.golang.org/grpc/grpclog",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/internal",
|
"ImportPath": "google.golang.org/grpc/internal",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/keepalive",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/metadata",
|
"ImportPath": "google.golang.org/grpc/metadata",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/naming",
|
"ImportPath": "google.golang.org/grpc/naming",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/peer",
|
"ImportPath": "google.golang.org/grpc/peer",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/stats",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/status",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/tap",
|
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "google.golang.org/grpc/transport",
|
"ImportPath": "google.golang.org/grpc/transport",
|
||||||
"Rev": "b8669c35455183da6d5c474ea6e72fbf55183274"
|
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "gopkg.in/inf.v0",
|
"ImportPath": "gopkg.in/inf.v0",
|
||||||
|
|
|
@ -338,7 +338,8 @@ filegroup(
|
||||||
"//vendor/golang.org/x/sys/windows:all-srcs",
|
"//vendor/golang.org/x/sys/windows:all-srcs",
|
||||||
"//vendor/golang.org/x/text/cases:all-srcs",
|
"//vendor/golang.org/x/text/cases:all-srcs",
|
||||||
"//vendor/golang.org/x/text/encoding:all-srcs",
|
"//vendor/golang.org/x/text/encoding:all-srcs",
|
||||||
"//vendor/golang.org/x/text/internal:all-srcs",
|
"//vendor/golang.org/x/text/internal/tag:all-srcs",
|
||||||
|
"//vendor/golang.org/x/text/internal/utf8internal:all-srcs",
|
||||||
"//vendor/golang.org/x/text/language:all-srcs",
|
"//vendor/golang.org/x/text/language:all-srcs",
|
||||||
"//vendor/golang.org/x/text/runes:all-srcs",
|
"//vendor/golang.org/x/text/runes:all-srcs",
|
||||||
"//vendor/golang.org/x/text/secure/bidirule:all-srcs",
|
"//vendor/golang.org/x/text/secure/bidirule:all-srcs",
|
||||||
|
@ -361,7 +362,6 @@ filegroup(
|
||||||
"//vendor/google.golang.org/api/logging/v2beta1:all-srcs",
|
"//vendor/google.golang.org/api/logging/v2beta1:all-srcs",
|
||||||
"//vendor/google.golang.org/api/monitoring/v3:all-srcs",
|
"//vendor/google.golang.org/api/monitoring/v3:all-srcs",
|
||||||
"//vendor/google.golang.org/api/pubsub/v1:all-srcs",
|
"//vendor/google.golang.org/api/pubsub/v1:all-srcs",
|
||||||
"//vendor/google.golang.org/genproto/googleapis/rpc/status:all-srcs",
|
|
||||||
"//vendor/google.golang.org/grpc:all-srcs",
|
"//vendor/google.golang.org/grpc:all-srcs",
|
||||||
"//vendor/gopkg.in/gcfg.v1:all-srcs",
|
"//vendor/gopkg.in/gcfg.v1:all-srcs",
|
||||||
"//vendor/gopkg.in/inf.v0:all-srcs",
|
"//vendor/gopkg.in/inf.v0:all-srcs",
|
||||||
|
|
|
@ -5,9 +5,7 @@ go_library(
|
||||||
srcs = [
|
srcs = [
|
||||||
"context.go",
|
"context.go",
|
||||||
"go17.go",
|
"go17.go",
|
||||||
"go19.go",
|
|
||||||
"pre_go17.go",
|
"pre_go17.go",
|
||||||
"pre_go19.go",
|
|
||||||
],
|
],
|
||||||
visibility = ["//visibility:public"],
|
visibility = ["//visibility:public"],
|
||||||
)
|
)
|
||||||
|
|
|
@ -36,6 +36,103 @@
|
||||||
// Contexts.
|
// Contexts.
|
||||||
package context
|
package context
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// A Context carries a deadline, a cancelation signal, and other values across
|
||||||
|
// API boundaries.
|
||||||
|
//
|
||||||
|
// Context's methods may be called by multiple goroutines simultaneously.
|
||||||
|
type Context interface {
|
||||||
|
// Deadline returns the time when work done on behalf of this context
|
||||||
|
// should be canceled. Deadline returns ok==false when no deadline is
|
||||||
|
// set. Successive calls to Deadline return the same results.
|
||||||
|
Deadline() (deadline time.Time, ok bool)
|
||||||
|
|
||||||
|
// Done returns a channel that's closed when work done on behalf of this
|
||||||
|
// context should be canceled. Done may return nil if this context can
|
||||||
|
// never be canceled. Successive calls to Done return the same value.
|
||||||
|
//
|
||||||
|
// WithCancel arranges for Done to be closed when cancel is called;
|
||||||
|
// WithDeadline arranges for Done to be closed when the deadline
|
||||||
|
// expires; WithTimeout arranges for Done to be closed when the timeout
|
||||||
|
// elapses.
|
||||||
|
//
|
||||||
|
// Done is provided for use in select statements:
|
||||||
|
//
|
||||||
|
// // Stream generates values with DoSomething and sends them to out
|
||||||
|
// // until DoSomething returns an error or ctx.Done is closed.
|
||||||
|
// func Stream(ctx context.Context, out chan<- Value) error {
|
||||||
|
// for {
|
||||||
|
// v, err := DoSomething(ctx)
|
||||||
|
// if err != nil {
|
||||||
|
// return err
|
||||||
|
// }
|
||||||
|
// select {
|
||||||
|
// case <-ctx.Done():
|
||||||
|
// return ctx.Err()
|
||||||
|
// case out <- v:
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// See http://blog.golang.org/pipelines for more examples of how to use
|
||||||
|
// a Done channel for cancelation.
|
||||||
|
Done() <-chan struct{}
|
||||||
|
|
||||||
|
// Err returns a non-nil error value after Done is closed. Err returns
|
||||||
|
// Canceled if the context was canceled or DeadlineExceeded if the
|
||||||
|
// context's deadline passed. No other values for Err are defined.
|
||||||
|
// After Done is closed, successive calls to Err return the same value.
|
||||||
|
Err() error
|
||||||
|
|
||||||
|
// Value returns the value associated with this context for key, or nil
|
||||||
|
// if no value is associated with key. Successive calls to Value with
|
||||||
|
// the same key returns the same result.
|
||||||
|
//
|
||||||
|
// Use context values only for request-scoped data that transits
|
||||||
|
// processes and API boundaries, not for passing optional parameters to
|
||||||
|
// functions.
|
||||||
|
//
|
||||||
|
// A key identifies a specific value in a Context. Functions that wish
|
||||||
|
// to store values in Context typically allocate a key in a global
|
||||||
|
// variable then use that key as the argument to context.WithValue and
|
||||||
|
// Context.Value. A key can be any type that supports equality;
|
||||||
|
// packages should define keys as an unexported type to avoid
|
||||||
|
// collisions.
|
||||||
|
//
|
||||||
|
// Packages that define a Context key should provide type-safe accessors
|
||||||
|
// for the values stores using that key:
|
||||||
|
//
|
||||||
|
// // Package user defines a User type that's stored in Contexts.
|
||||||
|
// package user
|
||||||
|
//
|
||||||
|
// import "golang.org/x/net/context"
|
||||||
|
//
|
||||||
|
// // User is the type of value stored in the Contexts.
|
||||||
|
// type User struct {...}
|
||||||
|
//
|
||||||
|
// // key is an unexported type for keys defined in this package.
|
||||||
|
// // This prevents collisions with keys defined in other packages.
|
||||||
|
// type key int
|
||||||
|
//
|
||||||
|
// // userKey is the key for user.User values in Contexts. It is
|
||||||
|
// // unexported; clients use user.NewContext and user.FromContext
|
||||||
|
// // instead of using this key directly.
|
||||||
|
// var userKey key = 0
|
||||||
|
//
|
||||||
|
// // NewContext returns a new Context that carries value u.
|
||||||
|
// func NewContext(ctx context.Context, u *User) context.Context {
|
||||||
|
// return context.WithValue(ctx, userKey, u)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// // FromContext returns the User value stored in ctx, if any.
|
||||||
|
// func FromContext(ctx context.Context) (*User, bool) {
|
||||||
|
// u, ok := ctx.Value(userKey).(*User)
|
||||||
|
// return u, ok
|
||||||
|
// }
|
||||||
|
Value(key interface{}) interface{}
|
||||||
|
}
|
||||||
|
|
||||||
// Background returns a non-nil, empty Context. It is never canceled, has no
|
// Background returns a non-nil, empty Context. It is never canceled, has no
|
||||||
// values, and has no deadline. It is typically used by the main function,
|
// values, and has no deadline. It is typically used by the main function,
|
||||||
// initialization, and tests, and as the top-level Context for incoming
|
// initialization, and tests, and as the top-level Context for incoming
|
||||||
|
@ -52,3 +149,8 @@ func Background() Context {
|
||||||
func TODO() Context {
|
func TODO() Context {
|
||||||
return todo
|
return todo
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// A CancelFunc tells an operation to abandon its work.
|
||||||
|
// A CancelFunc does not wait for the work to stop.
|
||||||
|
// After the first call, subsequent calls to a CancelFunc do nothing.
|
||||||
|
type CancelFunc func()
|
||||||
|
|
|
@ -1,20 +0,0 @@
|
||||||
// Copyright 2017 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build go1.9
|
|
||||||
|
|
||||||
package context
|
|
||||||
|
|
||||||
import "context" // standard library's context, as of Go 1.7
|
|
||||||
|
|
||||||
// A Context carries a deadline, a cancelation signal, and other values across
|
|
||||||
// API boundaries.
|
|
||||||
//
|
|
||||||
// Context's methods may be called by multiple goroutines simultaneously.
|
|
||||||
type Context = context.Context
|
|
||||||
|
|
||||||
// A CancelFunc tells an operation to abandon its work.
|
|
||||||
// A CancelFunc does not wait for the work to stop.
|
|
||||||
// After the first call, subsequent calls to a CancelFunc do nothing.
|
|
||||||
type CancelFunc = context.CancelFunc
|
|
|
@ -1,109 +0,0 @@
|
||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build !go1.9
|
|
||||||
|
|
||||||
package context
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// A Context carries a deadline, a cancelation signal, and other values across
|
|
||||||
// API boundaries.
|
|
||||||
//
|
|
||||||
// Context's methods may be called by multiple goroutines simultaneously.
|
|
||||||
type Context interface {
|
|
||||||
// Deadline returns the time when work done on behalf of this context
|
|
||||||
// should be canceled. Deadline returns ok==false when no deadline is
|
|
||||||
// set. Successive calls to Deadline return the same results.
|
|
||||||
Deadline() (deadline time.Time, ok bool)
|
|
||||||
|
|
||||||
// Done returns a channel that's closed when work done on behalf of this
|
|
||||||
// context should be canceled. Done may return nil if this context can
|
|
||||||
// never be canceled. Successive calls to Done return the same value.
|
|
||||||
//
|
|
||||||
// WithCancel arranges for Done to be closed when cancel is called;
|
|
||||||
// WithDeadline arranges for Done to be closed when the deadline
|
|
||||||
// expires; WithTimeout arranges for Done to be closed when the timeout
|
|
||||||
// elapses.
|
|
||||||
//
|
|
||||||
// Done is provided for use in select statements:
|
|
||||||
//
|
|
||||||
// // Stream generates values with DoSomething and sends them to out
|
|
||||||
// // until DoSomething returns an error or ctx.Done is closed.
|
|
||||||
// func Stream(ctx context.Context, out chan<- Value) error {
|
|
||||||
// for {
|
|
||||||
// v, err := DoSomething(ctx)
|
|
||||||
// if err != nil {
|
|
||||||
// return err
|
|
||||||
// }
|
|
||||||
// select {
|
|
||||||
// case <-ctx.Done():
|
|
||||||
// return ctx.Err()
|
|
||||||
// case out <- v:
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// See http://blog.golang.org/pipelines for more examples of how to use
|
|
||||||
// a Done channel for cancelation.
|
|
||||||
Done() <-chan struct{}
|
|
||||||
|
|
||||||
// Err returns a non-nil error value after Done is closed. Err returns
|
|
||||||
// Canceled if the context was canceled or DeadlineExceeded if the
|
|
||||||
// context's deadline passed. No other values for Err are defined.
|
|
||||||
// After Done is closed, successive calls to Err return the same value.
|
|
||||||
Err() error
|
|
||||||
|
|
||||||
// Value returns the value associated with this context for key, or nil
|
|
||||||
// if no value is associated with key. Successive calls to Value with
|
|
||||||
// the same key returns the same result.
|
|
||||||
//
|
|
||||||
// Use context values only for request-scoped data that transits
|
|
||||||
// processes and API boundaries, not for passing optional parameters to
|
|
||||||
// functions.
|
|
||||||
//
|
|
||||||
// A key identifies a specific value in a Context. Functions that wish
|
|
||||||
// to store values in Context typically allocate a key in a global
|
|
||||||
// variable then use that key as the argument to context.WithValue and
|
|
||||||
// Context.Value. A key can be any type that supports equality;
|
|
||||||
// packages should define keys as an unexported type to avoid
|
|
||||||
// collisions.
|
|
||||||
//
|
|
||||||
// Packages that define a Context key should provide type-safe accessors
|
|
||||||
// for the values stores using that key:
|
|
||||||
//
|
|
||||||
// // Package user defines a User type that's stored in Contexts.
|
|
||||||
// package user
|
|
||||||
//
|
|
||||||
// import "golang.org/x/net/context"
|
|
||||||
//
|
|
||||||
// // User is the type of value stored in the Contexts.
|
|
||||||
// type User struct {...}
|
|
||||||
//
|
|
||||||
// // key is an unexported type for keys defined in this package.
|
|
||||||
// // This prevents collisions with keys defined in other packages.
|
|
||||||
// type key int
|
|
||||||
//
|
|
||||||
// // userKey is the key for user.User values in Contexts. It is
|
|
||||||
// // unexported; clients use user.NewContext and user.FromContext
|
|
||||||
// // instead of using this key directly.
|
|
||||||
// var userKey key = 0
|
|
||||||
//
|
|
||||||
// // NewContext returns a new Context that carries value u.
|
|
||||||
// func NewContext(ctx context.Context, u *User) context.Context {
|
|
||||||
// return context.WithValue(ctx, userKey, u)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// // FromContext returns the User value stored in ctx, if any.
|
|
||||||
// func FromContext(ctx context.Context) (*User, bool) {
|
|
||||||
// u, ok := ctx.Value(userKey).(*User)
|
|
||||||
// return u, ok
|
|
||||||
// }
|
|
||||||
Value(key interface{}) interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A CancelFunc tells an operation to abandon its work.
|
|
||||||
// A CancelFunc does not wait for the work to stop.
|
|
||||||
// After the first call, subsequent calls to a CancelFunc do nothing.
|
|
||||||
type CancelFunc func()
|
|
|
@ -3,25 +3,22 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||||
go_library(
|
go_library(
|
||||||
name = "go_default_library",
|
name = "go_default_library",
|
||||||
srcs = [
|
srcs = [
|
||||||
"ciphers.go",
|
|
||||||
"client_conn_pool.go",
|
"client_conn_pool.go",
|
||||||
"configure_transport.go",
|
"configure_transport.go",
|
||||||
"databuffer.go",
|
|
||||||
"errors.go",
|
"errors.go",
|
||||||
|
"fixed_buffer.go",
|
||||||
"flow.go",
|
"flow.go",
|
||||||
"frame.go",
|
"frame.go",
|
||||||
"go16.go",
|
"go16.go",
|
||||||
"go17.go",
|
"go17.go",
|
||||||
"go17_not18.go",
|
"go17_not18.go",
|
||||||
"go18.go",
|
"go18.go",
|
||||||
"go19.go",
|
|
||||||
"gotrack.go",
|
"gotrack.go",
|
||||||
"headermap.go",
|
"headermap.go",
|
||||||
"http2.go",
|
"http2.go",
|
||||||
"not_go16.go",
|
"not_go16.go",
|
||||||
"not_go17.go",
|
"not_go17.go",
|
||||||
"not_go18.go",
|
"not_go18.go",
|
||||||
"not_go19.go",
|
|
||||||
"pipe.go",
|
"pipe.go",
|
||||||
"server.go",
|
"server.go",
|
||||||
"transport.go",
|
"transport.go",
|
||||||
|
|
|
@ -1,641 +0,0 @@
|
||||||
// Copyright 2017 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
// A list of the possible cipher suite ids. Taken from
|
|
||||||
// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt
|
|
||||||
|
|
||||||
const (
|
|
||||||
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
|
|
||||||
cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
|
|
||||||
cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
|
|
||||||
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
|
|
||||||
cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
|
|
||||||
cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
|
|
||||||
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
|
|
||||||
cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
|
|
||||||
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
|
|
||||||
cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
|
|
||||||
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
|
|
||||||
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
|
|
||||||
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
|
|
||||||
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
|
|
||||||
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
|
|
||||||
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
|
|
||||||
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
|
|
||||||
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
|
|
||||||
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
|
|
||||||
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
|
|
||||||
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
|
|
||||||
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
|
|
||||||
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
|
|
||||||
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
|
|
||||||
cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
|
|
||||||
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
|
|
||||||
cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
|
|
||||||
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
|
|
||||||
// Reserved uint16 = 0x001C-1D
|
|
||||||
cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
|
|
||||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
|
|
||||||
cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
|
|
||||||
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
|
|
||||||
cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
|
|
||||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
|
|
||||||
cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
|
|
||||||
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
|
|
||||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
|
|
||||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
|
|
||||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
|
|
||||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
|
|
||||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
|
|
||||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
|
|
||||||
cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
|
|
||||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
|
|
||||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
|
|
||||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
|
|
||||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
|
|
||||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
|
|
||||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
|
|
||||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
|
|
||||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
|
|
||||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
|
|
||||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
|
|
||||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
|
|
||||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
|
|
||||||
cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
|
|
||||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
|
|
||||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
|
|
||||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
|
|
||||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
|
|
||||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
|
|
||||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
|
|
||||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
|
|
||||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
|
|
||||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
|
|
||||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
|
|
||||||
// Reserved uint16 = 0x0047-4F
|
|
||||||
// Reserved uint16 = 0x0050-58
|
|
||||||
// Reserved uint16 = 0x0059-5C
|
|
||||||
// Unassigned uint16 = 0x005D-5F
|
|
||||||
// Reserved uint16 = 0x0060-66
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
|
|
||||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
|
|
||||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
|
|
||||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
|
|
||||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
|
|
||||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
|
|
||||||
// Unassigned uint16 = 0x006E-83
|
|
||||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
|
|
||||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
|
|
||||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
|
|
||||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
|
|
||||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
|
|
||||||
cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
|
|
||||||
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
|
|
||||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
|
|
||||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
|
|
||||||
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
|
|
||||||
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
|
|
||||||
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
|
|
||||||
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
|
|
||||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
|
|
||||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
|
|
||||||
cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
|
|
||||||
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
|
|
||||||
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
|
|
||||||
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
|
|
||||||
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
|
|
||||||
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
|
|
||||||
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
|
|
||||||
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
|
|
||||||
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
|
|
||||||
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
|
|
||||||
cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
|
|
||||||
cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
|
|
||||||
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
|
|
||||||
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
|
|
||||||
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
|
|
||||||
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
|
|
||||||
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
|
|
||||||
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
|
|
||||||
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
|
|
||||||
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
|
|
||||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
|
|
||||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
|
|
||||||
cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
|
|
||||||
cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
|
|
||||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
|
|
||||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
|
|
||||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
|
|
||||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
|
|
||||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
|
|
||||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
|
|
||||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
|
|
||||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
|
|
||||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
|
|
||||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
|
|
||||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
|
|
||||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
|
|
||||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
|
|
||||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
|
|
||||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
|
|
||||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
|
|
||||||
// Unassigned uint16 = 0x00C6-FE
|
|
||||||
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
|
|
||||||
// Unassigned uint16 = 0x01-55,*
|
|
||||||
cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
|
|
||||||
// Unassigned uint16 = 0x5601 - 0xC000
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
|
|
||||||
cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
|
|
||||||
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
|
|
||||||
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
|
|
||||||
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
|
|
||||||
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
|
|
||||||
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
|
|
||||||
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
|
|
||||||
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
|
|
||||||
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
|
|
||||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
|
|
||||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
|
|
||||||
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
|
|
||||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
|
|
||||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
|
|
||||||
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
|
|
||||||
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
|
|
||||||
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
|
|
||||||
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
|
|
||||||
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
|
|
||||||
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
|
|
||||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
|
|
||||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
|
|
||||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
|
|
||||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
|
|
||||||
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
|
|
||||||
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
|
|
||||||
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
|
|
||||||
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
|
|
||||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
|
|
||||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
|
|
||||||
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
|
|
||||||
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
|
|
||||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
|
|
||||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
|
|
||||||
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
|
|
||||||
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
|
|
||||||
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
|
|
||||||
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
|
|
||||||
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
|
|
||||||
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
|
|
||||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
|
|
||||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
|
|
||||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
|
|
||||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
|
|
||||||
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
|
|
||||||
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
|
|
||||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
|
|
||||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
|
|
||||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
|
|
||||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
|
|
||||||
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
|
|
||||||
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
|
|
||||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
|
|
||||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
|
|
||||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
|
|
||||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
|
|
||||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
|
|
||||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
|
|
||||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
|
|
||||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
|
|
||||||
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
|
|
||||||
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
|
|
||||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
|
|
||||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
|
|
||||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
|
|
||||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
|
|
||||||
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
|
|
||||||
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
|
|
||||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
|
|
||||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
|
|
||||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
|
|
||||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
|
|
||||||
cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
|
|
||||||
cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
|
|
||||||
cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
|
|
||||||
cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
|
|
||||||
cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
|
|
||||||
cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
|
|
||||||
cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
|
|
||||||
cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
|
|
||||||
cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
|
|
||||||
cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
|
|
||||||
// Unassigned uint16 = 0xC0B0-FF
|
|
||||||
// Unassigned uint16 = 0xC1-CB,*
|
|
||||||
// Unassigned uint16 = 0xCC00-A7
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
|
|
||||||
cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
|
|
||||||
cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
|
|
||||||
cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
|
|
||||||
)
|
|
||||||
|
|
||||||
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
//
// References:
// https://tools.ietf.org/html/rfc7540#appendix-A
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
func isBadCipher(cipher uint16) bool {
	// The case list below is a verbatim transcription of RFC 7540
	// Appendix A, in registry order; every suite named here either lacks
	// an ephemeral key exchange or uses a null/stream/CBC block cipher.
	switch cipher {
	case cipher_TLS_NULL_WITH_NULL_NULL,
		cipher_TLS_RSA_WITH_NULL_MD5,
		cipher_TLS_RSA_WITH_NULL_SHA,
		cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
		cipher_TLS_RSA_WITH_RC4_128_MD5,
		cipher_TLS_RSA_WITH_RC4_128_SHA,
		cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
		cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
		cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_RSA_WITH_DES_CBC_SHA,
		cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
		cipher_TLS_DH_anon_WITH_RC4_128_MD5,
		cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
		cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_KRB5_WITH_DES_CBC_SHA,
		cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_KRB5_WITH_RC4_128_SHA,
		cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
		cipher_TLS_KRB5_WITH_DES_CBC_MD5,
		cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
		cipher_TLS_KRB5_WITH_RC4_128_MD5,
		cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
		cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
		cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
		cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
		cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
		cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
		cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
		cipher_TLS_PSK_WITH_NULL_SHA,
		cipher_TLS_DHE_PSK_WITH_NULL_SHA,
		cipher_TLS_RSA_PSK_WITH_NULL_SHA,
		cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
		cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
		cipher_TLS_RSA_WITH_NULL_SHA256,
		cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_PSK_WITH_RC4_128_SHA,
		cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
		cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
		cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_RSA_WITH_SEED_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
		cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
		cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
		cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
		cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
		cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
		cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
		cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
		cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_PSK_WITH_NULL_SHA256,
		cipher_TLS_PSK_WITH_NULL_SHA384,
		cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
		cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
		cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
		cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
		cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
		cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
		cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
		cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDH_anon_WITH_NULL_SHA,
		cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
		cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
		cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
		cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
		cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
		cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
		cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
		cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_RSA_WITH_AES_128_CCM,
		cipher_TLS_RSA_WITH_AES_256_CCM,
		cipher_TLS_RSA_WITH_AES_128_CCM_8,
		cipher_TLS_RSA_WITH_AES_256_CCM_8,
		cipher_TLS_PSK_WITH_AES_128_CCM,
		cipher_TLS_PSK_WITH_AES_256_CCM,
		cipher_TLS_PSK_WITH_AES_128_CCM_8,
		cipher_TLS_PSK_WITH_AES_256_CCM_8:
		return true
	default:
		return false
	}
}
|
|
|
@ -56,7 +56,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// registerHTTPSProtocol calls Transport.RegisterProtocol but
|
// registerHTTPSProtocol calls Transport.RegisterProtocol but
|
||||||
// converting panics into errors.
|
// converting panics into errors.
|
||||||
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
|
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
|
||||||
defer func() {
|
defer func() {
|
||||||
if e := recover(); e != nil {
|
if e := recover(); e != nil {
|
||||||
|
|
|
@ -1,146 +0,0 @@
|
||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
	dataChunkSizeClasses = []int{
		1 << 10,
		2 << 10,
		4 << 10,
		8 << 10,
		16 << 10,
	}
	dataChunkPools = [...]sync.Pool{
		{New: func() interface{} { return make([]byte, 1<<10) }},
		{New: func() interface{} { return make([]byte, 2<<10) }},
		{New: func() interface{} { return make([]byte, 4<<10) }},
		{New: func() interface{} { return make([]byte, 8<<10) }},
		{New: func() interface{} { return make([]byte, 16<<10) }},
	}
)

// getDataBufferChunk returns a chunk from the smallest size class that
// can hold size bytes; requests larger than every class fall back to
// the largest class.
func getDataBufferChunk(size int64) []byte {
	class := len(dataChunkSizeClasses) - 1
	for idx, limit := range dataChunkSizeClasses[:class] {
		if size <= int64(limit) {
			class = idx
			break
		}
	}
	return dataChunkPools[class].Get().([]byte)
}

// putDataBufferChunk returns p to the pool matching its length.
// It panics if p was not allocated by getDataBufferChunk (its length
// matches no size class), since that indicates a programming error.
func putDataBufferChunk(p []byte) {
	for class, limit := range dataChunkSizeClasses {
		if limit == len(p) {
			dataChunkPools[class].Put(p)
			return
		}
	}
	panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
|
|
||||||
|
|
||||||
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
	chunks   [][]byte
	r        int   // next byte to read is chunks[0][r]
	w        int   // next byte to write is chunks[len(chunks)-1][w]
	size     int   // total buffered bytes
	expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}

// errReadEmpty is returned by Read when the buffer holds no unread bytes.
var errReadEmpty = errors.New("read from empty dataBuffer")

// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
// Fully consumed chunks are returned to the pool as reading proceeds.
func (b *dataBuffer) Read(p []byte) (int, error) {
	if b.size == 0 {
		return 0, errReadEmpty
	}
	var ntotal int
	for len(p) > 0 && b.size > 0 {
		readFrom := b.bytesFromFirstChunk()
		n := copy(p, readFrom)
		p = p[n:]
		ntotal += n
		b.r += n
		b.size -= n
		// If the first chunk has been consumed, advance to the next chunk.
		if b.r == len(b.chunks[0]) {
			putDataBufferChunk(b.chunks[0])
			end := len(b.chunks) - 1
			copy(b.chunks[:end], b.chunks[1:])
			// Nil out the vacated tail slot so the chunk can be GC'd
			// (it was just returned to the pool).
			b.chunks[end] = nil
			b.chunks = b.chunks[:end]
			b.r = 0
		}
	}
	return ntotal, nil
}

// bytesFromFirstChunk returns the unread bytes held in the first chunk.
// Only when a single chunk exists is it bounded by the write cursor w;
// earlier chunks in a multi-chunk buffer are full to their end.
func (b *dataBuffer) bytesFromFirstChunk() []byte {
	if len(b.chunks) == 1 {
		return b.chunks[0][b.r:b.w]
	}
	return b.chunks[0][b.r:]
}

// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
	return b.size
}

// Write appends p to the buffer, allocating pooled chunks as needed.
// It always writes all of p and never fails.
func (b *dataBuffer) Write(p []byte) (int, error) {
	ntotal := len(p)
	for len(p) > 0 {
		// If the last chunk is empty, allocate a new chunk. Try to allocate
		// enough to fully copy p plus any additional bytes we expect to
		// receive. However, this may allocate less than len(p).
		want := int64(len(p))
		if b.expected > want {
			want = b.expected
		}
		chunk := b.lastChunkOrAlloc(want)
		n := copy(chunk[b.w:], p)
		p = p[n:]
		b.w += n
		b.size += n
		b.expected -= int64(n)
	}
	return ntotal, nil
}

// lastChunkOrAlloc returns the last chunk if it still has room at the
// write cursor, otherwise it appends a fresh pooled chunk sized for at
// least want bytes (capped at the largest pool size class) and resets w.
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
	if len(b.chunks) != 0 {
		last := b.chunks[len(b.chunks)-1]
		if b.w < len(last) {
			return last
		}
	}
	chunk := getDataBufferChunk(want)
	b.chunks = append(b.chunks, chunk)
	b.w = 0
	return chunk
}
|
|
|
@ -87,16 +87,13 @@ type goAwayFlowError struct{}
|
||||||
|
|
||||||
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
|
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
|
||||||
|
|
||||||
// connError represents an HTTP/2 ConnectionError error code, along
|
// connErrorReason wraps a ConnectionError with an informative error about why it occurs.
|
||||||
// with a string (for debugging) explaining why.
|
|
||||||
//
|
|
||||||
// Errors of this type are only returned by the frame parser functions
|
// Errors of this type are only returned by the frame parser functions
|
||||||
// and converted into ConnectionError(Code), after stashing away
|
// and converted into ConnectionError(ErrCodeProtocol).
|
||||||
// the Reason into the Framer's errDetail field, accessible via
|
|
||||||
// the (*Framer).ErrorDetail method.
|
|
||||||
type connError struct {
|
type connError struct {
|
||||||
Code ErrCode // the ConnectionError error code
|
Code ErrCode
|
||||||
Reason string // additional reason
|
Reason string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e connError) Error() string {
|
func (e connError) Error() string {
|
||||||
|
|
|
@ -0,0 +1,60 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
// It never allocates, but moves old data as new data is written.
type fixedBuffer struct {
	buf  []byte
	r, w int
}

var (
	errReadEmpty = errors.New("read from empty fixedBuffer")
	errWriteFull = errors.New("write on full fixedBuffer")
)

// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *fixedBuffer) Read(p []byte) (n int, err error) {
	if b.Len() == 0 {
		return 0, errReadEmpty
	}
	n = copy(p, b.buf[b.r:b.w])
	b.r += n
	if b.Len() == 0 {
		// Everything was consumed: rewind both cursors so the next
		// Write can use the whole buffer without sliding data first.
		b.r, b.w = 0, 0
	}
	return n, nil
}

// Len returns the number of bytes of the unread portion of the buffer.
func (b *fixedBuffer) Len() int {
	return b.w - b.r
}

// Write copies bytes from p into the buffer.
// It is an error to write more data than the buffer can hold.
func (b *fixedBuffer) Write(p []byte) (n int, err error) {
	// Compact first: slide the unread region to the front of the buffer
	// when the free tail alone cannot hold the incoming bytes.
	if tail := len(b.buf) - b.w; b.r > 0 && len(p) > tail {
		b.w = copy(b.buf, b.buf[b.r:b.w])
		b.r = 0
	}

	// Copy as much of p as fits after the existing unread data.
	n = copy(b.buf[b.w:], p)
	b.w += n
	if n < len(p) {
		err = errWriteFull
	}
	return n, err
}
|
|
@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{
|
||||||
// a frameParser parses a frame given its FrameHeader and payload
|
// a frameParser parses a frame given its FrameHeader and payload
|
||||||
// bytes. The length of payload will always equal fh.Length (which
|
// bytes. The length of payload will always equal fh.Length (which
|
||||||
// might be 0).
|
// might be 0).
|
||||||
type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)
|
type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
|
||||||
|
|
||||||
var frameParsers = map[FrameType]frameParser{
|
var frameParsers = map[FrameType]frameParser{
|
||||||
FrameData: parseDataFrame,
|
FrameData: parseDataFrame,
|
||||||
|
@ -323,8 +323,6 @@ type Framer struct {
|
||||||
debugFramerBuf *bytes.Buffer
|
debugFramerBuf *bytes.Buffer
|
||||||
debugReadLoggerf func(string, ...interface{})
|
debugReadLoggerf func(string, ...interface{})
|
||||||
debugWriteLoggerf func(string, ...interface{})
|
debugWriteLoggerf func(string, ...interface{})
|
||||||
|
|
||||||
frameCache *frameCache // nil if frames aren't reused (default)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fr *Framer) maxHeaderListSize() uint32 {
|
func (fr *Framer) maxHeaderListSize() uint32 {
|
||||||
|
@ -400,27 +398,6 @@ const (
|
||||||
maxFrameSize = 1<<24 - 1
|
maxFrameSize = 1<<24 - 1
|
||||||
)
|
)
|
||||||
|
|
||||||
// SetReuseFrames allows the Framer to reuse Frames.
|
|
||||||
// If called on a Framer, Frames returned by calls to ReadFrame are only
|
|
||||||
// valid until the next call to ReadFrame.
|
|
||||||
func (fr *Framer) SetReuseFrames() {
|
|
||||||
if fr.frameCache != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fr.frameCache = &frameCache{}
|
|
||||||
}
|
|
||||||
|
|
||||||
type frameCache struct {
|
|
||||||
dataFrame DataFrame
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fc *frameCache) getDataFrame() *DataFrame {
|
|
||||||
if fc == nil {
|
|
||||||
return &DataFrame{}
|
|
||||||
}
|
|
||||||
return &fc.dataFrame
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFramer returns a Framer that writes frames to w and reads them from r.
|
// NewFramer returns a Framer that writes frames to w and reads them from r.
|
||||||
func NewFramer(w io.Writer, r io.Reader) *Framer {
|
func NewFramer(w io.Writer, r io.Reader) *Framer {
|
||||||
fr := &Framer{
|
fr := &Framer{
|
||||||
|
@ -500,7 +477,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
|
||||||
if _, err := io.ReadFull(fr.r, payload); err != nil {
|
if _, err := io.ReadFull(fr.r, payload); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
|
f, err := typeFrameParser(fh.Type)(fh, payload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if ce, ok := err.(connError); ok {
|
if ce, ok := err.(connError); ok {
|
||||||
return nil, fr.connError(ce.Code, ce.Reason)
|
return nil, fr.connError(ce.Code, ce.Reason)
|
||||||
|
@ -588,7 +565,7 @@ func (f *DataFrame) Data() []byte {
|
||||||
return f.data
|
return f.data
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
|
func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
|
||||||
if fh.StreamID == 0 {
|
if fh.StreamID == 0 {
|
||||||
// DATA frames MUST be associated with a stream. If a
|
// DATA frames MUST be associated with a stream. If a
|
||||||
// DATA frame is received whose stream identifier
|
// DATA frame is received whose stream identifier
|
||||||
|
@ -597,9 +574,9 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, erro
|
||||||
// PROTOCOL_ERROR.
|
// PROTOCOL_ERROR.
|
||||||
return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
|
return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
|
||||||
}
|
}
|
||||||
f := fc.getDataFrame()
|
f := &DataFrame{
|
||||||
f.FrameHeader = fh
|
FrameHeader: fh,
|
||||||
|
}
|
||||||
var padSize byte
|
var padSize byte
|
||||||
if fh.Flags.Has(FlagDataPadded) {
|
if fh.Flags.Has(FlagDataPadded) {
|
||||||
var err error
|
var err error
|
||||||
|
@ -623,7 +600,6 @@ var (
|
||||||
errStreamID = errors.New("invalid stream ID")
|
errStreamID = errors.New("invalid stream ID")
|
||||||
errDepStreamID = errors.New("invalid dependent stream ID")
|
errDepStreamID = errors.New("invalid dependent stream ID")
|
||||||
errPadLength = errors.New("pad length too large")
|
errPadLength = errors.New("pad length too large")
|
||||||
errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func validStreamIDOrZero(streamID uint32) bool {
|
func validStreamIDOrZero(streamID uint32) bool {
|
||||||
|
@ -647,7 +623,6 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
|
||||||
//
|
//
|
||||||
// If pad is nil, the padding bit is not sent.
|
// If pad is nil, the padding bit is not sent.
|
||||||
// The length of pad must not exceed 255 bytes.
|
// The length of pad must not exceed 255 bytes.
|
||||||
// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
|
|
||||||
//
|
//
|
||||||
// It will perform exactly one Write to the underlying Writer.
|
// It will perform exactly one Write to the underlying Writer.
|
||||||
// It is the caller's responsibility not to violate the maximum frame size
|
// It is the caller's responsibility not to violate the maximum frame size
|
||||||
|
@ -656,19 +631,9 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by
|
||||||
if !validStreamID(streamID) && !f.AllowIllegalWrites {
|
if !validStreamID(streamID) && !f.AllowIllegalWrites {
|
||||||
return errStreamID
|
return errStreamID
|
||||||
}
|
}
|
||||||
if len(pad) > 0 {
|
|
||||||
if len(pad) > 255 {
|
if len(pad) > 255 {
|
||||||
return errPadLength
|
return errPadLength
|
||||||
}
|
}
|
||||||
if !f.AllowIllegalWrites {
|
|
||||||
for _, b := range pad {
|
|
||||||
if b != 0 {
|
|
||||||
// "Padding octets MUST be set to zero when sending."
|
|
||||||
return errPadBytes
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var flags Flags
|
var flags Flags
|
||||||
if endStream {
|
if endStream {
|
||||||
flags |= FlagDataEndStream
|
flags |= FlagDataEndStream
|
||||||
|
@ -695,7 +660,7 @@ type SettingsFrame struct {
|
||||||
p []byte
|
p []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
|
func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
|
||||||
if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
|
if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
|
||||||
// When this (ACK 0x1) bit is set, the payload of the
|
// When this (ACK 0x1) bit is set, the payload of the
|
||||||
// SETTINGS frame MUST be empty. Receipt of a
|
// SETTINGS frame MUST be empty. Receipt of a
|
||||||
|
@ -797,7 +762,7 @@ type PingFrame struct {
|
||||||
|
|
||||||
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
|
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
|
||||||
|
|
||||||
func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
|
func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
|
||||||
if len(payload) != 8 {
|
if len(payload) != 8 {
|
||||||
return nil, ConnectionError(ErrCodeFrameSize)
|
return nil, ConnectionError(ErrCodeFrameSize)
|
||||||
}
|
}
|
||||||
|
@ -837,7 +802,7 @@ func (f *GoAwayFrame) DebugData() []byte {
|
||||||
return f.debugData
|
return f.debugData
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
|
func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
|
||||||
if fh.StreamID != 0 {
|
if fh.StreamID != 0 {
|
||||||
return nil, ConnectionError(ErrCodeProtocol)
|
return nil, ConnectionError(ErrCodeProtocol)
|
||||||
}
|
}
|
||||||
|
@ -877,7 +842,7 @@ func (f *UnknownFrame) Payload() []byte {
|
||||||
return f.p
|
return f.p
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
|
func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
|
||||||
return &UnknownFrame{fh, p}, nil
|
return &UnknownFrame{fh, p}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -888,7 +853,7 @@ type WindowUpdateFrame struct {
|
||||||
Increment uint32 // never read with high bit set
|
Increment uint32 // never read with high bit set
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
|
func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
|
||||||
if len(p) != 4 {
|
if len(p) != 4 {
|
||||||
return nil, ConnectionError(ErrCodeFrameSize)
|
return nil, ConnectionError(ErrCodeFrameSize)
|
||||||
}
|
}
|
||||||
|
@ -953,7 +918,7 @@ func (f *HeadersFrame) HasPriority() bool {
|
||||||
return f.FrameHeader.Flags.Has(FlagHeadersPriority)
|
return f.FrameHeader.Flags.Has(FlagHeadersPriority)
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
|
func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
|
||||||
hf := &HeadersFrame{
|
hf := &HeadersFrame{
|
||||||
FrameHeader: fh,
|
FrameHeader: fh,
|
||||||
}
|
}
|
||||||
|
@ -1090,7 +1055,7 @@ func (p PriorityParam) IsZero() bool {
|
||||||
return p == PriorityParam{}
|
return p == PriorityParam{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
|
func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
|
||||||
if fh.StreamID == 0 {
|
if fh.StreamID == 0 {
|
||||||
return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
|
return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
|
||||||
}
|
}
|
||||||
|
@ -1137,7 +1102,7 @@ type RSTStreamFrame struct {
|
||||||
ErrCode ErrCode
|
ErrCode ErrCode
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
|
func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
|
||||||
if len(p) != 4 {
|
if len(p) != 4 {
|
||||||
return nil, ConnectionError(ErrCodeFrameSize)
|
return nil, ConnectionError(ErrCodeFrameSize)
|
||||||
}
|
}
|
||||||
|
@ -1167,7 +1132,7 @@ type ContinuationFrame struct {
|
||||||
headerFragBuf []byte
|
headerFragBuf []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
|
func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
|
||||||
if fh.StreamID == 0 {
|
if fh.StreamID == 0 {
|
||||||
return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
|
return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
|
||||||
}
|
}
|
||||||
|
@ -1217,7 +1182,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool {
|
||||||
return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
|
return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
|
||||||
}
|
}
|
||||||
|
|
||||||
func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
|
func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
|
||||||
pp := &PushPromiseFrame{
|
pp := &PushPromiseFrame{
|
||||||
FrameHeader: fh,
|
FrameHeader: fh,
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,6 +7,7 @@
|
||||||
package http2
|
package http2
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"crypto/tls"
|
||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
@ -14,3 +15,29 @@ import (
|
||||||
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
|
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
|
||||||
return t1.ExpectContinueTimeout
|
return t1.ExpectContinueTimeout
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
|
||||||
|
func isBadCipher(cipher uint16) bool {
|
||||||
|
switch cipher {
|
||||||
|
case tls.TLS_RSA_WITH_RC4_128_SHA,
|
||||||
|
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||||
|
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
|
||||||
|
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
|
||||||
|
// Reject cipher suites from Appendix A.
|
||||||
|
// "This list includes those cipher suites that do not
|
||||||
|
// offer an ephemeral key exchange and those that are
|
||||||
|
// based on the TLS null, stream or block cipher type"
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -12,11 +12,7 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
)
|
)
|
||||||
|
|
||||||
func cloneTLSConfig(c *tls.Config) *tls.Config {
|
func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }
|
||||||
c2 := c.Clone()
|
|
||||||
c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
|
|
||||||
return c2
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ http.Pusher = (*responseWriter)(nil)
|
var _ http.Pusher = (*responseWriter)(nil)
|
||||||
|
|
||||||
|
@ -52,5 +48,3 @@ func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
|
||||||
func reqBodyIsNoBody(body io.ReadCloser) bool {
|
func reqBodyIsNoBody(body io.ReadCloser) bool {
|
||||||
return body == http.NoBody
|
return body == http.NoBody
|
||||||
}
|
}
|
||||||
|
|
||||||
func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only
|
|
||||||
|
|
|
@ -1,16 +0,0 @@
|
||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build go1.9
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
func configureServer19(s *http.Server, conf *Server) error {
|
|
||||||
s.RegisterOnShutdown(conf.state.startGracefulShutdown)
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -39,7 +39,6 @@ func NewEncoder(w io.Writer) *Encoder {
|
||||||
tableSizeUpdate: false,
|
tableSizeUpdate: false,
|
||||||
w: w,
|
w: w,
|
||||||
}
|
}
|
||||||
e.dynTab.table.init()
|
|
||||||
e.dynTab.setMaxSize(initialHeaderTableSize)
|
e.dynTab.setMaxSize(initialHeaderTableSize)
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
|
@ -89,17 +88,29 @@ func (e *Encoder) WriteField(f HeaderField) error {
|
||||||
// only name matches, i points to that index and nameValueMatch
|
// only name matches, i points to that index and nameValueMatch
|
||||||
// becomes false.
|
// becomes false.
|
||||||
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
|
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||||
i, nameValueMatch = staticTable.search(f)
|
for idx, hf := range staticTable {
|
||||||
if nameValueMatch {
|
if !constantTimeStringCompare(hf.Name, f.Name) {
|
||||||
return i, true
|
continue
|
||||||
|
}
|
||||||
|
if i == 0 {
|
||||||
|
i = uint64(idx + 1)
|
||||||
|
}
|
||||||
|
if f.Sensitive {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !constantTimeStringCompare(hf.Value, f.Value) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
i = uint64(idx + 1)
|
||||||
|
nameValueMatch = true
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
j, nameValueMatch := e.dynTab.table.search(f)
|
j, nameValueMatch := e.dynTab.search(f)
|
||||||
if nameValueMatch || (i == 0 && j != 0) {
|
if nameValueMatch || (i == 0 && j != 0) {
|
||||||
return j + uint64(staticTable.len()), nameValueMatch
|
i = j + uint64(len(staticTable))
|
||||||
}
|
}
|
||||||
|
return
|
||||||
return i, false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMaxDynamicTableSize changes the dynamic header table size to v.
|
// SetMaxDynamicTableSize changes the dynamic header table size to v.
|
||||||
|
|
|
@ -102,7 +102,6 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod
|
||||||
emit: emitFunc,
|
emit: emitFunc,
|
||||||
emitEnabled: true,
|
emitEnabled: true,
|
||||||
}
|
}
|
||||||
d.dynTab.table.init()
|
|
||||||
d.dynTab.allowedMaxSize = maxDynamicTableSize
|
d.dynTab.allowedMaxSize = maxDynamicTableSize
|
||||||
d.dynTab.setMaxSize(maxDynamicTableSize)
|
d.dynTab.setMaxSize(maxDynamicTableSize)
|
||||||
return d
|
return d
|
||||||
|
@ -155,9 +154,12 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
|
||||||
}
|
}
|
||||||
|
|
||||||
type dynamicTable struct {
|
type dynamicTable struct {
|
||||||
|
// ents is the FIFO described at
|
||||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
|
// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
|
||||||
table headerFieldTable
|
// The newest (low index) is append at the end, and items are
|
||||||
size uint32 // in bytes
|
// evicted from the front.
|
||||||
|
ents []HeaderField
|
||||||
|
size uint32
|
||||||
maxSize uint32 // current maxSize
|
maxSize uint32 // current maxSize
|
||||||
allowedMaxSize uint32 // maxSize may go up to this, inclusive
|
allowedMaxSize uint32 // maxSize may go up to this, inclusive
|
||||||
}
|
}
|
||||||
|
@ -167,45 +169,95 @@ func (dt *dynamicTable) setMaxSize(v uint32) {
|
||||||
dt.evict()
|
dt.evict()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: change dynamicTable to be a struct with a slice and a size int field,
|
||||||
|
// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Then make add increment the size. maybe the max size should move from Decoder to
|
||||||
|
// dynamicTable and add should return an ok bool if there was enough space.
|
||||||
|
//
|
||||||
|
// Later we'll need a remove operation on dynamicTable.
|
||||||
|
|
||||||
func (dt *dynamicTable) add(f HeaderField) {
|
func (dt *dynamicTable) add(f HeaderField) {
|
||||||
dt.table.addEntry(f)
|
dt.ents = append(dt.ents, f)
|
||||||
dt.size += f.Size()
|
dt.size += f.Size()
|
||||||
dt.evict()
|
dt.evict()
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we're too big, evict old stuff.
|
// If we're too big, evict old stuff (front of the slice)
|
||||||
func (dt *dynamicTable) evict() {
|
func (dt *dynamicTable) evict() {
|
||||||
var n int
|
base := dt.ents // keep base pointer of slice
|
||||||
for dt.size > dt.maxSize && n < dt.table.len() {
|
for dt.size > dt.maxSize {
|
||||||
dt.size -= dt.table.ents[n].Size()
|
dt.size -= dt.ents[0].Size()
|
||||||
n++
|
dt.ents = dt.ents[1:]
|
||||||
}
|
}
|
||||||
dt.table.evictOldest(n)
|
|
||||||
|
// Shift slice contents down if we evicted things.
|
||||||
|
if len(dt.ents) != len(base) {
|
||||||
|
copy(base, dt.ents)
|
||||||
|
dt.ents = base[:len(dt.ents)]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// constantTimeStringCompare compares string a and b in a constant
|
||||||
|
// time manner.
|
||||||
|
func constantTimeStringCompare(a, b string) bool {
|
||||||
|
if len(a) != len(b) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
c := byte(0)
|
||||||
|
|
||||||
|
for i := 0; i < len(a); i++ {
|
||||||
|
c |= a[i] ^ b[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
return c == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search searches f in the table. The return value i is 0 if there is
|
||||||
|
// no name match. If there is name match or name/value match, i is the
|
||||||
|
// index of that entry (1-based). If both name and value match,
|
||||||
|
// nameValueMatch becomes true.
|
||||||
|
func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||||
|
l := len(dt.ents)
|
||||||
|
for j := l - 1; j >= 0; j-- {
|
||||||
|
ent := dt.ents[j]
|
||||||
|
if !constantTimeStringCompare(ent.Name, f.Name) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if i == 0 {
|
||||||
|
i = uint64(l - j)
|
||||||
|
}
|
||||||
|
if f.Sensitive {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !constantTimeStringCompare(ent.Value, f.Value) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
i = uint64(l - j)
|
||||||
|
nameValueMatch = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Decoder) maxTableIndex() int {
|
func (d *Decoder) maxTableIndex() int {
|
||||||
// This should never overflow. RFC 7540 Section 6.5.2 limits the size of
|
return len(d.dynTab.ents) + len(staticTable)
|
||||||
// the dynamic table to 2^32 bytes, where each entry will occupy more than
|
|
||||||
// one byte. Further, the staticTable has a fixed, small length.
|
|
||||||
return d.dynTab.table.len() + staticTable.len()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
|
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
|
||||||
// See Section 2.3.3.
|
if i < 1 {
|
||||||
if i == 0 {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if i <= uint64(staticTable.len()) {
|
|
||||||
return staticTable.ents[i-1], true
|
|
||||||
}
|
|
||||||
if i > uint64(d.maxTableIndex()) {
|
if i > uint64(d.maxTableIndex()) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// In the dynamic table, newer entries have lower indices.
|
if i <= uint64(len(staticTable)) {
|
||||||
// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
|
return staticTable[i-1], true
|
||||||
// the reversed dynamic table.
|
}
|
||||||
dt := d.dynTab.table
|
dents := d.dynTab.ents
|
||||||
return dt.ents[dt.len()-(int(i)-staticTable.len())], true
|
return dents[len(dents)-(int(i)-len(staticTable))], true
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decode decodes an entire block.
|
// Decode decodes an entire block.
|
||||||
|
|
|
@ -4,200 +4,73 @@
|
||||||
|
|
||||||
package hpack
|
package hpack
|
||||||
|
|
||||||
import (
|
func pair(name, value string) HeaderField {
|
||||||
"fmt"
|
return HeaderField{Name: name, Value: value}
|
||||||
)
|
|
||||||
|
|
||||||
// headerFieldTable implements a list of HeaderFields.
|
|
||||||
// This is used to implement the static and dynamic tables.
|
|
||||||
type headerFieldTable struct {
|
|
||||||
// For static tables, entries are never evicted.
|
|
||||||
//
|
|
||||||
// For dynamic tables, entries are evicted from ents[0] and added to the end.
|
|
||||||
// Each entry has a unique id that starts at one and increments for each
|
|
||||||
// entry that is added. This unique id is stable across evictions, meaning
|
|
||||||
// it can be used as a pointer to a specific entry. As in hpack, unique ids
|
|
||||||
// are 1-based. The unique id for ents[k] is k + evictCount + 1.
|
|
||||||
//
|
|
||||||
// Zero is not a valid unique id.
|
|
||||||
//
|
|
||||||
// evictCount should not overflow in any remotely practical situation. In
|
|
||||||
// practice, we will have one dynamic table per HTTP/2 connection. If we
|
|
||||||
// assume a very powerful server that handles 1M QPS per connection and each
|
|
||||||
// request adds (then evicts) 100 entries from the table, it would still take
|
|
||||||
// 2M years for evictCount to overflow.
|
|
||||||
ents []HeaderField
|
|
||||||
evictCount uint64
|
|
||||||
|
|
||||||
// byName maps a HeaderField name to the unique id of the newest entry with
|
|
||||||
// the same name. See above for a definition of "unique id".
|
|
||||||
byName map[string]uint64
|
|
||||||
|
|
||||||
// byNameValue maps a HeaderField name/value pair to the unique id of the newest
|
|
||||||
// entry with the same name and value. See above for a definition of "unique id".
|
|
||||||
byNameValue map[pairNameValue]uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
type pairNameValue struct {
|
|
||||||
name, value string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *headerFieldTable) init() {
|
|
||||||
t.byName = make(map[string]uint64)
|
|
||||||
t.byNameValue = make(map[pairNameValue]uint64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// len reports the number of entries in the table.
|
|
||||||
func (t *headerFieldTable) len() int {
|
|
||||||
return len(t.ents)
|
|
||||||
}
|
|
||||||
|
|
||||||
// addEntry adds a new entry.
|
|
||||||
func (t *headerFieldTable) addEntry(f HeaderField) {
|
|
||||||
id := uint64(t.len()) + t.evictCount + 1
|
|
||||||
t.byName[f.Name] = id
|
|
||||||
t.byNameValue[pairNameValue{f.Name, f.Value}] = id
|
|
||||||
t.ents = append(t.ents, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// evictOldest evicts the n oldest entries in the table.
|
|
||||||
func (t *headerFieldTable) evictOldest(n int) {
|
|
||||||
if n > t.len() {
|
|
||||||
panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
|
|
||||||
}
|
|
||||||
for k := 0; k < n; k++ {
|
|
||||||
f := t.ents[k]
|
|
||||||
id := t.evictCount + uint64(k) + 1
|
|
||||||
if t.byName[f.Name] == id {
|
|
||||||
delete(t.byName, f.Name)
|
|
||||||
}
|
|
||||||
if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
|
|
||||||
delete(t.byNameValue, p)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
copy(t.ents, t.ents[n:])
|
|
||||||
for k := t.len() - n; k < t.len(); k++ {
|
|
||||||
t.ents[k] = HeaderField{} // so strings can be garbage collected
|
|
||||||
}
|
|
||||||
t.ents = t.ents[:t.len()-n]
|
|
||||||
if t.evictCount+uint64(n) < t.evictCount {
|
|
||||||
panic("evictCount overflow")
|
|
||||||
}
|
|
||||||
t.evictCount += uint64(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// search finds f in the table. If there is no match, i is 0.
|
|
||||||
// If both name and value match, i is the matched index and nameValueMatch
|
|
||||||
// becomes true. If only name matches, i points to that index and
|
|
||||||
// nameValueMatch becomes false.
|
|
||||||
//
|
|
||||||
// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
|
|
||||||
// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
|
|
||||||
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
|
|
||||||
// table, the return value i actually refers to the entry t.ents[t.len()-i].
|
|
||||||
//
|
|
||||||
// All tables are assumed to be a dynamic tables except for the global
|
|
||||||
// staticTable pointer.
|
|
||||||
//
|
|
||||||
// See Section 2.3.3.
|
|
||||||
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
|
|
||||||
if !f.Sensitive {
|
|
||||||
if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
|
|
||||||
return t.idToIndex(id), true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if id := t.byName[f.Name]; id != 0 {
|
|
||||||
return t.idToIndex(id), false
|
|
||||||
}
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// idToIndex converts a unique id to an HPACK index.
|
|
||||||
// See Section 2.3.3.
|
|
||||||
func (t *headerFieldTable) idToIndex(id uint64) uint64 {
|
|
||||||
if id <= t.evictCount {
|
|
||||||
panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
|
|
||||||
}
|
|
||||||
k := id - t.evictCount - 1 // convert id to an index t.ents[k]
|
|
||||||
if t != staticTable {
|
|
||||||
return uint64(t.len()) - k // dynamic table
|
|
||||||
}
|
|
||||||
return k + 1
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
|
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
|
||||||
var staticTable = newStaticTable()
|
var staticTable = [...]HeaderField{
|
||||||
var staticTableEntries = [...]HeaderField{
|
pair(":authority", ""), // index 1 (1-based)
|
||||||
{Name: ":authority"},
|
pair(":method", "GET"),
|
||||||
{Name: ":method", Value: "GET"},
|
pair(":method", "POST"),
|
||||||
{Name: ":method", Value: "POST"},
|
pair(":path", "/"),
|
||||||
{Name: ":path", Value: "/"},
|
pair(":path", "/index.html"),
|
||||||
{Name: ":path", Value: "/index.html"},
|
pair(":scheme", "http"),
|
||||||
{Name: ":scheme", Value: "http"},
|
pair(":scheme", "https"),
|
||||||
{Name: ":scheme", Value: "https"},
|
pair(":status", "200"),
|
||||||
{Name: ":status", Value: "200"},
|
pair(":status", "204"),
|
||||||
{Name: ":status", Value: "204"},
|
pair(":status", "206"),
|
||||||
{Name: ":status", Value: "206"},
|
pair(":status", "304"),
|
||||||
{Name: ":status", Value: "304"},
|
pair(":status", "400"),
|
||||||
{Name: ":status", Value: "400"},
|
pair(":status", "404"),
|
||||||
{Name: ":status", Value: "404"},
|
pair(":status", "500"),
|
||||||
{Name: ":status", Value: "500"},
|
pair("accept-charset", ""),
|
||||||
{Name: "accept-charset"},
|
pair("accept-encoding", "gzip, deflate"),
|
||||||
{Name: "accept-encoding", Value: "gzip, deflate"},
|
pair("accept-language", ""),
|
||||||
{Name: "accept-language"},
|
pair("accept-ranges", ""),
|
||||||
{Name: "accept-ranges"},
|
pair("accept", ""),
|
||||||
{Name: "accept"},
|
pair("access-control-allow-origin", ""),
|
||||||
{Name: "access-control-allow-origin"},
|
pair("age", ""),
|
||||||
{Name: "age"},
|
pair("allow", ""),
|
||||||
{Name: "allow"},
|
pair("authorization", ""),
|
||||||
{Name: "authorization"},
|
pair("cache-control", ""),
|
||||||
{Name: "cache-control"},
|
pair("content-disposition", ""),
|
||||||
{Name: "content-disposition"},
|
pair("content-encoding", ""),
|
||||||
{Name: "content-encoding"},
|
pair("content-language", ""),
|
||||||
{Name: "content-language"},
|
pair("content-length", ""),
|
||||||
{Name: "content-length"},
|
pair("content-location", ""),
|
||||||
{Name: "content-location"},
|
pair("content-range", ""),
|
||||||
{Name: "content-range"},
|
pair("content-type", ""),
|
||||||
{Name: "content-type"},
|
pair("cookie", ""),
|
||||||
{Name: "cookie"},
|
pair("date", ""),
|
||||||
{Name: "date"},
|
pair("etag", ""),
|
||||||
{Name: "etag"},
|
pair("expect", ""),
|
||||||
{Name: "expect"},
|
pair("expires", ""),
|
||||||
{Name: "expires"},
|
pair("from", ""),
|
||||||
{Name: "from"},
|
pair("host", ""),
|
||||||
{Name: "host"},
|
pair("if-match", ""),
|
||||||
{Name: "if-match"},
|
pair("if-modified-since", ""),
|
||||||
{Name: "if-modified-since"},
|
pair("if-none-match", ""),
|
||||||
{Name: "if-none-match"},
|
pair("if-range", ""),
|
||||||
{Name: "if-range"},
|
pair("if-unmodified-since", ""),
|
||||||
{Name: "if-unmodified-since"},
|
pair("last-modified", ""),
|
||||||
{Name: "last-modified"},
|
pair("link", ""),
|
||||||
{Name: "link"},
|
pair("location", ""),
|
||||||
{Name: "location"},
|
pair("max-forwards", ""),
|
||||||
{Name: "max-forwards"},
|
pair("proxy-authenticate", ""),
|
||||||
{Name: "proxy-authenticate"},
|
pair("proxy-authorization", ""),
|
||||||
{Name: "proxy-authorization"},
|
pair("range", ""),
|
||||||
{Name: "range"},
|
pair("referer", ""),
|
||||||
{Name: "referer"},
|
pair("refresh", ""),
|
||||||
{Name: "refresh"},
|
pair("retry-after", ""),
|
||||||
{Name: "retry-after"},
|
pair("server", ""),
|
||||||
{Name: "server"},
|
pair("set-cookie", ""),
|
||||||
{Name: "set-cookie"},
|
pair("strict-transport-security", ""),
|
||||||
{Name: "strict-transport-security"},
|
pair("transfer-encoding", ""),
|
||||||
{Name: "transfer-encoding"},
|
pair("user-agent", ""),
|
||||||
{Name: "user-agent"},
|
pair("vary", ""),
|
||||||
{Name: "vary"},
|
pair("via", ""),
|
||||||
{Name: "via"},
|
pair("www-authenticate", ""),
|
||||||
{Name: "www-authenticate"},
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStaticTable() *headerFieldTable {
|
|
||||||
t := &headerFieldTable{}
|
|
||||||
t.init()
|
|
||||||
for _, e := range staticTableEntries[:] {
|
|
||||||
t.addEntry(e)
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var huffmanCodes = [256]uint32{
|
var huffmanCodes = [256]uint32{
|
||||||
|
|
|
@ -376,16 +376,12 @@ func (s *sorter) SortStrings(ss []string) {
|
||||||
// validPseudoPath reports whether v is a valid :path pseudo-header
|
// validPseudoPath reports whether v is a valid :path pseudo-header
|
||||||
// value. It must be either:
|
// value. It must be either:
|
||||||
//
|
//
|
||||||
// *) a non-empty string starting with '/'
|
// *) a non-empty string starting with '/', but not with with "//",
|
||||||
// *) the string '*', for OPTIONS requests.
|
// *) the string '*', for OPTIONS requests.
|
||||||
//
|
//
|
||||||
// For now this is only used a quick check for deciding when to clean
|
// For now this is only used a quick check for deciding when to clean
|
||||||
// up Opaque URLs before sending requests from the Transport.
|
// up Opaque URLs before sending requests from the Transport.
|
||||||
// See golang.org/issue/16847
|
// See golang.org/issue/16847
|
||||||
//
|
|
||||||
// We used to enforce that the path also didn't start with "//", but
|
|
||||||
// Google's GFE accepts such paths and Chrome sends them, so ignore
|
|
||||||
// that part of the spec. See golang.org/issue/19103.
|
|
||||||
func validPseudoPath(v string) bool {
|
func validPseudoPath(v string) bool {
|
||||||
return (len(v) > 0 && v[0] == '/') || v == "*"
|
return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*"
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,6 +7,7 @@
|
||||||
package http2
|
package http2
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"crypto/tls"
|
||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
@ -19,3 +20,27 @@ func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
|
||||||
|
func isBadCipher(cipher uint16) bool {
|
||||||
|
switch cipher {
|
||||||
|
case tls.TLS_RSA_WITH_RC4_128_SHA,
|
||||||
|
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
|
||||||
|
// Reject cipher suites from Appendix A.
|
||||||
|
// "This list includes those cipher suites that do not
|
||||||
|
// offer an ephemeral key exchange and those that are
|
||||||
|
// based on the TLS null, stream or block cipher type"
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -25,5 +25,3 @@ func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func reqBodyIsNoBody(io.ReadCloser) bool { return false }
|
func reqBodyIsNoBody(io.ReadCloser) bool { return false }
|
||||||
|
|
||||||
func go18httpNoBody() io.ReadCloser { return nil } // for tests only
|
|
||||||
|
|
|
@ -1,16 +0,0 @@
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build !go1.9
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
func configureServer19(s *http.Server, conf *Server) error {
|
|
||||||
// not supported prior to go1.9
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -16,7 +16,7 @@ import (
|
||||||
type pipe struct {
|
type pipe struct {
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
c sync.Cond // c.L lazily initialized to &p.mu
|
c sync.Cond // c.L lazily initialized to &p.mu
|
||||||
b pipeBuffer // nil when done reading
|
b pipeBuffer
|
||||||
err error // read error once empty. non-nil means closed.
|
err error // read error once empty. non-nil means closed.
|
||||||
breakErr error // immediate read error (caller doesn't see rest of b)
|
breakErr error // immediate read error (caller doesn't see rest of b)
|
||||||
donec chan struct{} // closed on error
|
donec chan struct{} // closed on error
|
||||||
|
@ -32,9 +32,6 @@ type pipeBuffer interface {
|
||||||
func (p *pipe) Len() int {
|
func (p *pipe) Len() int {
|
||||||
p.mu.Lock()
|
p.mu.Lock()
|
||||||
defer p.mu.Unlock()
|
defer p.mu.Unlock()
|
||||||
if p.b == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return p.b.Len()
|
return p.b.Len()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -50,7 +47,7 @@ func (p *pipe) Read(d []byte) (n int, err error) {
|
||||||
if p.breakErr != nil {
|
if p.breakErr != nil {
|
||||||
return 0, p.breakErr
|
return 0, p.breakErr
|
||||||
}
|
}
|
||||||
if p.b != nil && p.b.Len() > 0 {
|
if p.b.Len() > 0 {
|
||||||
return p.b.Read(d)
|
return p.b.Read(d)
|
||||||
}
|
}
|
||||||
if p.err != nil {
|
if p.err != nil {
|
||||||
|
@ -58,7 +55,6 @@ func (p *pipe) Read(d []byte) (n int, err error) {
|
||||||
p.readFn() // e.g. copy trailers
|
p.readFn() // e.g. copy trailers
|
||||||
p.readFn = nil // not sticky like p.err
|
p.readFn = nil // not sticky like p.err
|
||||||
}
|
}
|
||||||
p.b = nil
|
|
||||||
return 0, p.err
|
return 0, p.err
|
||||||
}
|
}
|
||||||
p.c.Wait()
|
p.c.Wait()
|
||||||
|
@ -79,9 +75,6 @@ func (p *pipe) Write(d []byte) (n int, err error) {
|
||||||
if p.err != nil {
|
if p.err != nil {
|
||||||
return 0, errClosedPipeWrite
|
return 0, errClosedPipeWrite
|
||||||
}
|
}
|
||||||
if p.breakErr != nil {
|
|
||||||
return len(d), nil // discard when there is no reader
|
|
||||||
}
|
|
||||||
return p.b.Write(d)
|
return p.b.Write(d)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -116,9 +109,6 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
p.readFn = fn
|
p.readFn = fn
|
||||||
if dst == &p.breakErr {
|
|
||||||
p.b = nil
|
|
||||||
}
|
|
||||||
*dst = err
|
*dst = err
|
||||||
p.closeDoneLocked()
|
p.closeDoneLocked()
|
||||||
}
|
}
|
||||||
|
|
|
@ -110,41 +110,9 @@ type Server struct {
|
||||||
// activity for the purposes of IdleTimeout.
|
// activity for the purposes of IdleTimeout.
|
||||||
IdleTimeout time.Duration
|
IdleTimeout time.Duration
|
||||||
|
|
||||||
// MaxUploadBufferPerConnection is the size of the initial flow
|
|
||||||
// control window for each connections. The HTTP/2 spec does not
|
|
||||||
// allow this to be smaller than 65535 or larger than 2^32-1.
|
|
||||||
// If the value is outside this range, a default value will be
|
|
||||||
// used instead.
|
|
||||||
MaxUploadBufferPerConnection int32
|
|
||||||
|
|
||||||
// MaxUploadBufferPerStream is the size of the initial flow control
|
|
||||||
// window for each stream. The HTTP/2 spec does not allow this to
|
|
||||||
// be larger than 2^32-1. If the value is zero or larger than the
|
|
||||||
// maximum, a default value will be used instead.
|
|
||||||
MaxUploadBufferPerStream int32
|
|
||||||
|
|
||||||
// NewWriteScheduler constructs a write scheduler for a connection.
|
// NewWriteScheduler constructs a write scheduler for a connection.
|
||||||
// If nil, a default scheduler is chosen.
|
// If nil, a default scheduler is chosen.
|
||||||
NewWriteScheduler func() WriteScheduler
|
NewWriteScheduler func() WriteScheduler
|
||||||
|
|
||||||
// Internal state. This is a pointer (rather than embedded directly)
|
|
||||||
// so that we don't embed a Mutex in this struct, which will make the
|
|
||||||
// struct non-copyable, which might break some callers.
|
|
||||||
state *serverInternalState
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) initialConnRecvWindowSize() int32 {
|
|
||||||
if s.MaxUploadBufferPerConnection > initialWindowSize {
|
|
||||||
return s.MaxUploadBufferPerConnection
|
|
||||||
}
|
|
||||||
return 1 << 20
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) initialStreamRecvWindowSize() int32 {
|
|
||||||
if s.MaxUploadBufferPerStream > 0 {
|
|
||||||
return s.MaxUploadBufferPerStream
|
|
||||||
}
|
|
||||||
return 1 << 20
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) maxReadFrameSize() uint32 {
|
func (s *Server) maxReadFrameSize() uint32 {
|
||||||
|
@ -161,40 +129,6 @@ func (s *Server) maxConcurrentStreams() uint32 {
|
||||||
return defaultMaxStreams
|
return defaultMaxStreams
|
||||||
}
|
}
|
||||||
|
|
||||||
type serverInternalState struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
activeConns map[*serverConn]struct{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *serverInternalState) registerConn(sc *serverConn) {
|
|
||||||
if s == nil {
|
|
||||||
return // if the Server was used without calling ConfigureServer
|
|
||||||
}
|
|
||||||
s.mu.Lock()
|
|
||||||
s.activeConns[sc] = struct{}{}
|
|
||||||
s.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *serverInternalState) unregisterConn(sc *serverConn) {
|
|
||||||
if s == nil {
|
|
||||||
return // if the Server was used without calling ConfigureServer
|
|
||||||
}
|
|
||||||
s.mu.Lock()
|
|
||||||
delete(s.activeConns, sc)
|
|
||||||
s.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *serverInternalState) startGracefulShutdown() {
|
|
||||||
if s == nil {
|
|
||||||
return // if the Server was used without calling ConfigureServer
|
|
||||||
}
|
|
||||||
s.mu.Lock()
|
|
||||||
for sc := range s.activeConns {
|
|
||||||
sc.startGracefulShutdown()
|
|
||||||
}
|
|
||||||
s.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConfigureServer adds HTTP/2 support to a net/http Server.
|
// ConfigureServer adds HTTP/2 support to a net/http Server.
|
||||||
//
|
//
|
||||||
// The configuration conf may be nil.
|
// The configuration conf may be nil.
|
||||||
|
@ -207,13 +141,9 @@ func ConfigureServer(s *http.Server, conf *Server) error {
|
||||||
if conf == nil {
|
if conf == nil {
|
||||||
conf = new(Server)
|
conf = new(Server)
|
||||||
}
|
}
|
||||||
conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
|
|
||||||
if err := configureServer18(s, conf); err != nil {
|
if err := configureServer18(s, conf); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := configureServer19(s, conf); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.TLSConfig == nil {
|
if s.TLSConfig == nil {
|
||||||
s.TLSConfig = new(tls.Config)
|
s.TLSConfig = new(tls.Config)
|
||||||
|
@ -335,27 +265,25 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
|
||||||
streams: make(map[uint32]*stream),
|
streams: make(map[uint32]*stream),
|
||||||
readFrameCh: make(chan readFrameResult),
|
readFrameCh: make(chan readFrameResult),
|
||||||
wantWriteFrameCh: make(chan FrameWriteRequest, 8),
|
wantWriteFrameCh: make(chan FrameWriteRequest, 8),
|
||||||
serveMsgCh: make(chan interface{}, 8),
|
wantStartPushCh: make(chan startPushRequest, 8),
|
||||||
wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
|
wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
|
||||||
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
|
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
|
||||||
doneServing: make(chan struct{}),
|
doneServing: make(chan struct{}),
|
||||||
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
|
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
|
||||||
advMaxStreams: s.maxConcurrentStreams(),
|
advMaxStreams: s.maxConcurrentStreams(),
|
||||||
initialStreamSendWindowSize: initialWindowSize,
|
initialWindowSize: initialWindowSize,
|
||||||
maxFrameSize: initialMaxFrameSize,
|
maxFrameSize: initialMaxFrameSize,
|
||||||
headerTableSize: initialHeaderTableSize,
|
headerTableSize: initialHeaderTableSize,
|
||||||
serveG: newGoroutineLock(),
|
serveG: newGoroutineLock(),
|
||||||
pushEnabled: true,
|
pushEnabled: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
s.state.registerConn(sc)
|
|
||||||
defer s.state.unregisterConn(sc)
|
|
||||||
|
|
||||||
// The net/http package sets the write deadline from the
|
// The net/http package sets the write deadline from the
|
||||||
// http.Server.WriteTimeout during the TLS handshake, but then
|
// http.Server.WriteTimeout during the TLS handshake, but then
|
||||||
// passes the connection off to us with the deadline already set.
|
// passes the connection off to us with the deadline already
|
||||||
// Write deadlines are set per stream in serverConn.newStream.
|
// set. Disarm it here so that it is not applied to additional
|
||||||
// Disarm the net.Conn write deadline here.
|
// streams opened on this connection.
|
||||||
|
// TODO: implement WriteTimeout fully. See Issue 18437.
|
||||||
if sc.hs.WriteTimeout != 0 {
|
if sc.hs.WriteTimeout != 0 {
|
||||||
sc.conn.SetWriteDeadline(time.Time{})
|
sc.conn.SetWriteDeadline(time.Time{})
|
||||||
}
|
}
|
||||||
|
@ -366,9 +294,6 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
|
||||||
sc.writeSched = NewRandomWriteScheduler()
|
sc.writeSched = NewRandomWriteScheduler()
|
||||||
}
|
}
|
||||||
|
|
||||||
// These start at the RFC-specified defaults. If there is a higher
|
|
||||||
// configured value for inflow, that will be updated when we send a
|
|
||||||
// WINDOW_UPDATE shortly after sending SETTINGS.
|
|
||||||
sc.flow.add(initialWindowSize)
|
sc.flow.add(initialWindowSize)
|
||||||
sc.inflow.add(initialWindowSize)
|
sc.inflow.add(initialWindowSize)
|
||||||
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
|
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
|
||||||
|
@ -451,9 +376,10 @@ type serverConn struct {
|
||||||
doneServing chan struct{} // closed when serverConn.serve ends
|
doneServing chan struct{} // closed when serverConn.serve ends
|
||||||
readFrameCh chan readFrameResult // written by serverConn.readFrames
|
readFrameCh chan readFrameResult // written by serverConn.readFrames
|
||||||
wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
|
wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
|
||||||
|
wantStartPushCh chan startPushRequest // from handlers -> serve
|
||||||
wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
|
wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
|
||||||
bodyReadCh chan bodyReadMsg // from handlers -> serve
|
bodyReadCh chan bodyReadMsg // from handlers -> serve
|
||||||
serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
|
testHookCh chan func(int) // code to run on the serve loop
|
||||||
flow flow // conn-wide (not stream-specific) outbound flow control
|
flow flow // conn-wide (not stream-specific) outbound flow control
|
||||||
inflow flow // conn-wide inbound flow control
|
inflow flow // conn-wide inbound flow control
|
||||||
tlsState *tls.ConnectionState // shared by all handlers, like net/http
|
tlsState *tls.ConnectionState // shared by all handlers, like net/http
|
||||||
|
@ -473,7 +399,7 @@ type serverConn struct {
|
||||||
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
|
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
|
||||||
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
|
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
|
||||||
streams map[uint32]*stream
|
streams map[uint32]*stream
|
||||||
initialStreamSendWindowSize int32
|
initialWindowSize int32
|
||||||
maxFrameSize int32
|
maxFrameSize int32
|
||||||
headerTableSize uint32
|
headerTableSize uint32
|
||||||
peerMaxHeaderListSize uint32 // zero means unknown (default)
|
peerMaxHeaderListSize uint32 // zero means unknown (default)
|
||||||
|
@ -485,15 +411,14 @@ type serverConn struct {
|
||||||
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
|
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
|
||||||
needToSendGoAway bool // we need to schedule a GOAWAY frame write
|
needToSendGoAway bool // we need to schedule a GOAWAY frame write
|
||||||
goAwayCode ErrCode
|
goAwayCode ErrCode
|
||||||
|
shutdownTimerCh <-chan time.Time // nil until used
|
||||||
shutdownTimer *time.Timer // nil until used
|
shutdownTimer *time.Timer // nil until used
|
||||||
idleTimer *time.Timer // nil if unused
|
idleTimer *time.Timer // nil if unused
|
||||||
|
idleTimerCh <-chan time.Time // nil if unused
|
||||||
|
|
||||||
// Owned by the writeFrameAsync goroutine:
|
// Owned by the writeFrameAsync goroutine:
|
||||||
headerWriteBuf bytes.Buffer
|
headerWriteBuf bytes.Buffer
|
||||||
hpackEncoder *hpack.Encoder
|
hpackEncoder *hpack.Encoder
|
||||||
|
|
||||||
// Used by startGracefulShutdown.
|
|
||||||
shutdownOnce sync.Once
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sc *serverConn) maxHeaderListSize() uint32 {
|
func (sc *serverConn) maxHeaderListSize() uint32 {
|
||||||
|
@ -541,7 +466,7 @@ type stream struct {
|
||||||
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
|
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
|
||||||
gotTrailerHeader bool // HEADER frame for trailers was seen
|
gotTrailerHeader bool // HEADER frame for trailers was seen
|
||||||
wroteHeaders bool // whether we wrote headers (not status 100)
|
wroteHeaders bool // whether we wrote headers (not status 100)
|
||||||
writeDeadline *time.Timer // nil if unused
|
reqBuf []byte // if non-nil, body pipe buffer to return later at EOF
|
||||||
|
|
||||||
trailer http.Header // accumulated trailers
|
trailer http.Header // accumulated trailers
|
||||||
reqTrailer http.Header // handler's Request.Trailer
|
reqTrailer http.Header // handler's Request.Trailer
|
||||||
|
@ -771,17 +696,15 @@ func (sc *serverConn) serve() {
|
||||||
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
|
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
|
||||||
{SettingMaxConcurrentStreams, sc.advMaxStreams},
|
{SettingMaxConcurrentStreams, sc.advMaxStreams},
|
||||||
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
|
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
|
||||||
{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
|
|
||||||
|
// TODO: more actual settings, notably
|
||||||
|
// SettingInitialWindowSize, but then we also
|
||||||
|
// want to bump up the conn window size the
|
||||||
|
// same amount here right after the settings
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
sc.unackedSettings++
|
sc.unackedSettings++
|
||||||
|
|
||||||
// Each connection starts with intialWindowSize inflow tokens.
|
|
||||||
// If a higher value is configured, we add more tokens.
|
|
||||||
if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
|
|
||||||
sc.sendWindowUpdate(nil, int(diff))
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := sc.readPreface(); err != nil {
|
if err := sc.readPreface(); err != nil {
|
||||||
sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
|
sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
|
||||||
return
|
return
|
||||||
|
@ -794,25 +717,27 @@ func (sc *serverConn) serve() {
|
||||||
sc.setConnState(http.StateIdle)
|
sc.setConnState(http.StateIdle)
|
||||||
|
|
||||||
if sc.srv.IdleTimeout != 0 {
|
if sc.srv.IdleTimeout != 0 {
|
||||||
sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
|
sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout)
|
||||||
defer sc.idleTimer.Stop()
|
defer sc.idleTimer.Stop()
|
||||||
|
sc.idleTimerCh = sc.idleTimer.C
|
||||||
|
}
|
||||||
|
|
||||||
|
var gracefulShutdownCh <-chan struct{}
|
||||||
|
if sc.hs != nil {
|
||||||
|
gracefulShutdownCh = h1ServerShutdownChan(sc.hs)
|
||||||
}
|
}
|
||||||
|
|
||||||
go sc.readFrames() // closed by defer sc.conn.Close above
|
go sc.readFrames() // closed by defer sc.conn.Close above
|
||||||
|
|
||||||
settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
|
settingsTimer := time.NewTimer(firstSettingsTimeout)
|
||||||
defer settingsTimer.Stop()
|
|
||||||
|
|
||||||
loopNum := 0
|
loopNum := 0
|
||||||
for {
|
for {
|
||||||
loopNum++
|
loopNum++
|
||||||
select {
|
select {
|
||||||
case wr := <-sc.wantWriteFrameCh:
|
case wr := <-sc.wantWriteFrameCh:
|
||||||
if se, ok := wr.write.(StreamError); ok {
|
|
||||||
sc.resetStream(se)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
sc.writeFrame(wr)
|
sc.writeFrame(wr)
|
||||||
|
case spr := <-sc.wantStartPushCh:
|
||||||
|
sc.startPush(spr)
|
||||||
case res := <-sc.wroteFrameCh:
|
case res := <-sc.wroteFrameCh:
|
||||||
sc.wroteFrame(res)
|
sc.wroteFrame(res)
|
||||||
case res := <-sc.readFrameCh:
|
case res := <-sc.readFrameCh:
|
||||||
|
@ -820,37 +745,26 @@ func (sc *serverConn) serve() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
res.readMore()
|
res.readMore()
|
||||||
if settingsTimer != nil {
|
if settingsTimer.C != nil {
|
||||||
settingsTimer.Stop()
|
settingsTimer.Stop()
|
||||||
settingsTimer = nil
|
settingsTimer.C = nil
|
||||||
}
|
}
|
||||||
case m := <-sc.bodyReadCh:
|
case m := <-sc.bodyReadCh:
|
||||||
sc.noteBodyRead(m.st, m.n)
|
sc.noteBodyRead(m.st, m.n)
|
||||||
case msg := <-sc.serveMsgCh:
|
case <-settingsTimer.C:
|
||||||
switch v := msg.(type) {
|
|
||||||
case func(int):
|
|
||||||
v(loopNum) // for testing
|
|
||||||
case *serverMessage:
|
|
||||||
switch v {
|
|
||||||
case settingsTimerMsg:
|
|
||||||
sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
|
sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
|
||||||
return
|
return
|
||||||
case idleTimerMsg:
|
case <-gracefulShutdownCh:
|
||||||
sc.vlogf("connection is idle")
|
gracefulShutdownCh = nil
|
||||||
sc.goAway(ErrCodeNo)
|
sc.startGracefulShutdown()
|
||||||
case shutdownTimerMsg:
|
case <-sc.shutdownTimerCh:
|
||||||
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
|
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
|
||||||
return
|
return
|
||||||
case gracefulShutdownMsg:
|
case <-sc.idleTimerCh:
|
||||||
sc.startGracefulShutdownInternal()
|
sc.vlogf("connection is idle")
|
||||||
default:
|
sc.goAway(ErrCodeNo)
|
||||||
panic("unknown timer")
|
case fn := <-sc.testHookCh:
|
||||||
}
|
fn(loopNum)
|
||||||
case *startPushRequest:
|
|
||||||
sc.startPush(v)
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("unexpected type %T", v))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame {
|
if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame {
|
||||||
|
@ -859,36 +773,6 @@ func (sc *serverConn) serve() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
|
|
||||||
select {
|
|
||||||
case <-sc.doneServing:
|
|
||||||
case <-sharedCh:
|
|
||||||
close(privateCh)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type serverMessage int
|
|
||||||
|
|
||||||
// Message values sent to serveMsgCh.
|
|
||||||
var (
|
|
||||||
settingsTimerMsg = new(serverMessage)
|
|
||||||
idleTimerMsg = new(serverMessage)
|
|
||||||
shutdownTimerMsg = new(serverMessage)
|
|
||||||
gracefulShutdownMsg = new(serverMessage)
|
|
||||||
)
|
|
||||||
|
|
||||||
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
|
|
||||||
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
|
|
||||||
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
|
|
||||||
|
|
||||||
func (sc *serverConn) sendServeMsg(msg interface{}) {
|
|
||||||
sc.serveG.checkNotOn() // NOT
|
|
||||||
select {
|
|
||||||
case sc.serveMsgCh <- msg:
|
|
||||||
case <-sc.doneServing:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// readPreface reads the ClientPreface greeting from the peer
|
// readPreface reads the ClientPreface greeting from the peer
|
||||||
// or returns an error on timeout or an invalid greeting.
|
// or returns an error on timeout or an invalid greeting.
|
||||||
func (sc *serverConn) readPreface() error {
|
func (sc *serverConn) readPreface() error {
|
||||||
|
@ -1130,11 +1014,7 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
|
||||||
// stateClosed after the RST_STREAM frame is
|
// stateClosed after the RST_STREAM frame is
|
||||||
// written.
|
// written.
|
||||||
st.state = stateHalfClosedLocal
|
st.state = stateHalfClosedLocal
|
||||||
// Section 8.1: a server MAY request that the client abort
|
sc.resetStream(streamError(st.id, ErrCodeCancel))
|
||||||
// transmission of a request without error by sending a
|
|
||||||
// RST_STREAM with an error code of NO_ERROR after sending
|
|
||||||
// a complete response.
|
|
||||||
sc.resetStream(streamError(st.id, ErrCodeNo))
|
|
||||||
case stateHalfClosedRemote:
|
case stateHalfClosedRemote:
|
||||||
sc.closeStream(st, errHandlerComplete)
|
sc.closeStream(st, errHandlerComplete)
|
||||||
}
|
}
|
||||||
|
@ -1206,19 +1086,10 @@ func (sc *serverConn) scheduleFrameWrite() {
|
||||||
sc.inFrameScheduleLoop = false
|
sc.inFrameScheduleLoop = false
|
||||||
}
|
}
|
||||||
|
|
||||||
// startGracefulShutdown gracefully shuts down a connection. This
|
// startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the
|
||||||
// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
|
// client we're gracefully shutting down. The connection isn't closed
|
||||||
// shutting down. The connection isn't closed until all current
|
// until all current streams are done.
|
||||||
// streams are done.
|
|
||||||
//
|
|
||||||
// startGracefulShutdown returns immediately; it does not wait until
|
|
||||||
// the connection has shut down.
|
|
||||||
func (sc *serverConn) startGracefulShutdown() {
|
func (sc *serverConn) startGracefulShutdown() {
|
||||||
sc.serveG.checkNotOn() // NOT
|
|
||||||
sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sc *serverConn) startGracefulShutdownInternal() {
|
|
||||||
sc.goAwayIn(ErrCodeNo, 0)
|
sc.goAwayIn(ErrCodeNo, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1250,7 +1121,8 @@ func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) {
|
||||||
|
|
||||||
func (sc *serverConn) shutDownIn(d time.Duration) {
|
func (sc *serverConn) shutDownIn(d time.Duration) {
|
||||||
sc.serveG.check()
|
sc.serveG.check()
|
||||||
sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
|
sc.shutdownTimer = time.NewTimer(d)
|
||||||
|
sc.shutdownTimerCh = sc.shutdownTimer.C
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sc *serverConn) resetStream(se StreamError) {
|
func (sc *serverConn) resetStream(se StreamError) {
|
||||||
|
@ -1433,9 +1305,6 @@ func (sc *serverConn) closeStream(st *stream, err error) {
|
||||||
panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
|
panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
|
||||||
}
|
}
|
||||||
st.state = stateClosed
|
st.state = stateClosed
|
||||||
if st.writeDeadline != nil {
|
|
||||||
st.writeDeadline.Stop()
|
|
||||||
}
|
|
||||||
if st.isPushed() {
|
if st.isPushed() {
|
||||||
sc.curPushedStreams--
|
sc.curPushedStreams--
|
||||||
} else {
|
} else {
|
||||||
|
@ -1448,7 +1317,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
|
||||||
sc.idleTimer.Reset(sc.srv.IdleTimeout)
|
sc.idleTimer.Reset(sc.srv.IdleTimeout)
|
||||||
}
|
}
|
||||||
if h1ServerKeepAlivesDisabled(sc.hs) {
|
if h1ServerKeepAlivesDisabled(sc.hs) {
|
||||||
sc.startGracefulShutdownInternal()
|
sc.startGracefulShutdown()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if p := st.body; p != nil {
|
if p := st.body; p != nil {
|
||||||
|
@ -1526,9 +1395,9 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
|
||||||
// adjust the size of all stream flow control windows that it
|
// adjust the size of all stream flow control windows that it
|
||||||
// maintains by the difference between the new value and the
|
// maintains by the difference between the new value and the
|
||||||
// old value."
|
// old value."
|
||||||
old := sc.initialStreamSendWindowSize
|
old := sc.initialWindowSize
|
||||||
sc.initialStreamSendWindowSize = int32(val)
|
sc.initialWindowSize = int32(val)
|
||||||
growth := int32(val) - old // may be negative
|
growth := sc.initialWindowSize - old // may be negative
|
||||||
for _, st := range sc.streams {
|
for _, st := range sc.streams {
|
||||||
if !st.flow.add(growth) {
|
if !st.flow.add(growth) {
|
||||||
// 6.9.2 Initial Flow Control Window Size
|
// 6.9.2 Initial Flow Control Window Size
|
||||||
|
@ -1635,7 +1504,7 @@ func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
|
||||||
} else {
|
} else {
|
||||||
sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
|
sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
|
||||||
}
|
}
|
||||||
sc.startGracefulShutdownInternal()
|
sc.startGracefulShutdown()
|
||||||
// http://tools.ietf.org/html/rfc7540#section-6.8
|
// http://tools.ietf.org/html/rfc7540#section-6.8
|
||||||
// We should not create any new streams, which means we should disable push.
|
// We should not create any new streams, which means we should disable push.
|
||||||
sc.pushEnabled = false
|
sc.pushEnabled = false
|
||||||
|
@ -1674,12 +1543,6 @@ func (st *stream) copyTrailersToHandlerRequest() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
|
|
||||||
// when the stream's WriteTimeout has fired.
|
|
||||||
func (st *stream) onWriteTimeout() {
|
|
||||||
st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
||||||
sc.serveG.check()
|
sc.serveG.check()
|
||||||
id := f.StreamID
|
id := f.StreamID
|
||||||
|
@ -1856,12 +1719,9 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
|
||||||
}
|
}
|
||||||
st.cw.Init()
|
st.cw.Init()
|
||||||
st.flow.conn = &sc.flow // link to conn-level counter
|
st.flow.conn = &sc.flow // link to conn-level counter
|
||||||
st.flow.add(sc.initialStreamSendWindowSize)
|
st.flow.add(sc.initialWindowSize)
|
||||||
st.inflow.conn = &sc.inflow // link to conn-level counter
|
st.inflow.conn = &sc.inflow // link to conn-level counter
|
||||||
st.inflow.add(sc.srv.initialStreamRecvWindowSize())
|
st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
|
||||||
if sc.hs.WriteTimeout != 0 {
|
|
||||||
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
sc.streams[id] = st
|
sc.streams[id] = st
|
||||||
sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
|
sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
|
||||||
|
@ -1925,14 +1785,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
if bodyOpen {
|
if bodyOpen {
|
||||||
|
st.reqBuf = getRequestBodyBuf()
|
||||||
|
req.Body.(*requestBody).pipe = &pipe{
|
||||||
|
b: &fixedBuffer{buf: st.reqBuf},
|
||||||
|
}
|
||||||
|
|
||||||
if vv, ok := rp.header["Content-Length"]; ok {
|
if vv, ok := rp.header["Content-Length"]; ok {
|
||||||
req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
|
req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
|
||||||
} else {
|
} else {
|
||||||
req.ContentLength = -1
|
req.ContentLength = -1
|
||||||
}
|
}
|
||||||
req.Body.(*requestBody).pipe = &pipe{
|
|
||||||
b: &dataBuffer{expected: req.ContentLength},
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return rw, req, nil
|
return rw, req, nil
|
||||||
}
|
}
|
||||||
|
@ -2028,6 +1890,24 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
|
||||||
return rw, req, nil
|
return rw, req, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var reqBodyCache = make(chan []byte, 8)
|
||||||
|
|
||||||
|
func getRequestBodyBuf() []byte {
|
||||||
|
select {
|
||||||
|
case b := <-reqBodyCache:
|
||||||
|
return b
|
||||||
|
default:
|
||||||
|
return make([]byte, initialWindowSize)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func putRequestBodyBuf(b []byte) {
|
||||||
|
select {
|
||||||
|
case reqBodyCache <- b:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Run on its own goroutine.
|
// Run on its own goroutine.
|
||||||
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
|
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
|
||||||
didPanic := true
|
didPanic := true
|
||||||
|
@ -2123,6 +2003,12 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
|
||||||
case <-sc.doneServing:
|
case <-sc.doneServing:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if err == io.EOF {
|
||||||
|
if buf := st.reqBuf; buf != nil {
|
||||||
|
st.reqBuf = nil // shouldn't matter; field unused by other
|
||||||
|
putRequestBodyBuf(buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sc *serverConn) noteBodyRead(st *stream, n int) {
|
func (sc *serverConn) noteBodyRead(st *stream, n int) {
|
||||||
|
@ -2252,7 +2138,6 @@ type responseWriterState struct {
|
||||||
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
|
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
|
||||||
sentHeader bool // have we sent the header frame?
|
sentHeader bool // have we sent the header frame?
|
||||||
handlerDone bool // handler has finished
|
handlerDone bool // handler has finished
|
||||||
dirty bool // a Write failed; don't reuse this responseWriterState
|
|
||||||
|
|
||||||
sentContentLen int64 // non-zero if handler set a Content-Length header
|
sentContentLen int64 // non-zero if handler set a Content-Length header
|
||||||
wroteBytes int64
|
wroteBytes int64
|
||||||
|
@ -2334,7 +2219,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
|
||||||
date: date,
|
date: date,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
rws.dirty = true
|
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
if endStream {
|
if endStream {
|
||||||
|
@ -2356,7 +2240,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
|
||||||
if len(p) > 0 || endStream {
|
if len(p) > 0 || endStream {
|
||||||
// only send a 0 byte DATA frame if we're ending the stream.
|
// only send a 0 byte DATA frame if we're ending the stream.
|
||||||
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
|
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
|
||||||
rws.dirty = true
|
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2368,9 +2251,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
|
||||||
trailers: rws.trailers,
|
trailers: rws.trailers,
|
||||||
endStream: true,
|
endStream: true,
|
||||||
})
|
})
|
||||||
if err != nil {
|
|
||||||
rws.dirty = true
|
|
||||||
}
|
|
||||||
return len(p), err
|
return len(p), err
|
||||||
}
|
}
|
||||||
return len(p), nil
|
return len(p), nil
|
||||||
|
@ -2510,7 +2390,7 @@ func cloneHeader(h http.Header) http.Header {
|
||||||
//
|
//
|
||||||
// * Handler calls w.Write or w.WriteString ->
|
// * Handler calls w.Write or w.WriteString ->
|
||||||
// * -> rws.bw (*bufio.Writer) ->
|
// * -> rws.bw (*bufio.Writer) ->
|
||||||
// * (Handler might call Flush)
|
// * (Handler migth call Flush)
|
||||||
// * -> chunkWriter{rws}
|
// * -> chunkWriter{rws}
|
||||||
// * -> responseWriterState.writeChunk(p []byte)
|
// * -> responseWriterState.writeChunk(p []byte)
|
||||||
// * -> responseWriterState.writeChunk (most of the magic; see comment there)
|
// * -> responseWriterState.writeChunk (most of the magic; see comment there)
|
||||||
|
@ -2549,19 +2429,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
|
||||||
|
|
||||||
func (w *responseWriter) handlerDone() {
|
func (w *responseWriter) handlerDone() {
|
||||||
rws := w.rws
|
rws := w.rws
|
||||||
dirty := rws.dirty
|
|
||||||
rws.handlerDone = true
|
rws.handlerDone = true
|
||||||
w.Flush()
|
w.Flush()
|
||||||
w.rws = nil
|
w.rws = nil
|
||||||
if !dirty {
|
|
||||||
// Only recycle the pool if all prior Write calls to
|
|
||||||
// the serverConn goroutine completed successfully. If
|
|
||||||
// they returned earlier due to resets from the peer
|
|
||||||
// there might still be write goroutines outstanding
|
|
||||||
// from the serverConn referencing the rws memory. See
|
|
||||||
// issue 20704.
|
|
||||||
responseWriterStatePool.Put(rws)
|
responseWriterStatePool.Put(rws)
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Push errors.
|
// Push errors.
|
||||||
|
@ -2643,7 +2514,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error {
|
||||||
return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
|
return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
|
||||||
}
|
}
|
||||||
|
|
||||||
msg := &startPushRequest{
|
msg := startPushRequest{
|
||||||
parent: st,
|
parent: st,
|
||||||
method: opts.Method,
|
method: opts.Method,
|
||||||
url: u,
|
url: u,
|
||||||
|
@ -2656,7 +2527,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error {
|
||||||
return errClientDisconnected
|
return errClientDisconnected
|
||||||
case <-st.cw:
|
case <-st.cw:
|
||||||
return errStreamClosed
|
return errStreamClosed
|
||||||
case sc.serveMsgCh <- msg:
|
case sc.wantStartPushCh <- msg:
|
||||||
}
|
}
|
||||||
|
|
||||||
select {
|
select {
|
||||||
|
@ -2678,7 +2549,7 @@ type startPushRequest struct {
|
||||||
done chan error
|
done chan error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sc *serverConn) startPush(msg *startPushRequest) {
|
func (sc *serverConn) startPush(msg startPushRequest) {
|
||||||
sc.serveG.check()
|
sc.serveG.check()
|
||||||
|
|
||||||
// http://tools.ietf.org/html/rfc7540#section-6.6.
|
// http://tools.ietf.org/html/rfc7540#section-6.6.
|
||||||
|
@ -2717,7 +2588,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
|
||||||
// A server that is unable to establish a new stream identifier can send a GOAWAY
|
// A server that is unable to establish a new stream identifier can send a GOAWAY
|
||||||
// frame so that the client is forced to open a new connection for new streams.
|
// frame so that the client is forced to open a new connection for new streams.
|
||||||
if sc.maxPushPromiseID+2 >= 1<<31 {
|
if sc.maxPushPromiseID+2 >= 1<<31 {
|
||||||
sc.startGracefulShutdownInternal()
|
sc.startGracefulShutdown()
|
||||||
return 0, ErrPushLimitReached
|
return 0, ErrPushLimitReached
|
||||||
}
|
}
|
||||||
sc.maxPushPromiseID += 2
|
sc.maxPushPromiseID += 2
|
||||||
|
@ -2842,6 +2713,31 @@ var badTrailer = map[string]bool{
|
||||||
"Www-Authenticate": true,
|
"Www-Authenticate": true,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// h1ServerShutdownChan returns a channel that will be closed when the
|
||||||
|
// provided *http.Server wants to shut down.
|
||||||
|
//
|
||||||
|
// This is a somewhat hacky way to get at http1 innards. It works
|
||||||
|
// when the http2 code is bundled into the net/http package in the
|
||||||
|
// standard library. The alternatives ended up making the cmd/go tool
|
||||||
|
// depend on http Servers. This is the lightest option for now.
|
||||||
|
// This is tested via the TestServeShutdown* tests in net/http.
|
||||||
|
func h1ServerShutdownChan(hs *http.Server) <-chan struct{} {
|
||||||
|
if fn := testh1ServerShutdownChan; fn != nil {
|
||||||
|
return fn(hs)
|
||||||
|
}
|
||||||
|
var x interface{} = hs
|
||||||
|
type I interface {
|
||||||
|
getDoneChan() <-chan struct{}
|
||||||
|
}
|
||||||
|
if hs, ok := x.(I); ok {
|
||||||
|
return hs.getDoneChan()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// optional test hook for h1ServerShutdownChan.
|
||||||
|
var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{}
|
||||||
|
|
||||||
// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
|
// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
|
||||||
// disabled. See comments on h1ServerShutdownChan above for why
|
// disabled. See comments on h1ServerShutdownChan above for why
|
||||||
// the code is written this way.
|
// the code is written this way.
|
||||||
|
|
|
@ -18,7 +18,6 @@ import (
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
"math"
|
"math"
|
||||||
mathrand "math/rand"
|
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"sort"
|
"sort"
|
||||||
|
@ -165,7 +164,6 @@ type ClientConn struct {
|
||||||
goAwayDebug string // goAway frame's debug data, retained as a string
|
goAwayDebug string // goAway frame's debug data, retained as a string
|
||||||
streams map[uint32]*clientStream // client-initiated
|
streams map[uint32]*clientStream // client-initiated
|
||||||
nextStreamID uint32
|
nextStreamID uint32
|
||||||
pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
|
|
||||||
pings map[[8]byte]chan struct{} // in flight ping data to notification channel
|
pings map[[8]byte]chan struct{} // in flight ping data to notification channel
|
||||||
bw *bufio.Writer
|
bw *bufio.Writer
|
||||||
br *bufio.Reader
|
br *bufio.Reader
|
||||||
|
@ -218,45 +216,35 @@ type clientStream struct {
|
||||||
resTrailer *http.Header // client's Response.Trailer
|
resTrailer *http.Header // client's Response.Trailer
|
||||||
}
|
}
|
||||||
|
|
||||||
// awaitRequestCancel waits for the user to cancel a request or for the done
|
// awaitRequestCancel runs in its own goroutine and waits for the user
|
||||||
// channel to be signaled. A non-nil error is returned only if the request was
|
// to cancel a RoundTrip request, its context to expire, or for the
|
||||||
// canceled.
|
// request to be done (any way it might be removed from the cc.streams
|
||||||
func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
|
// map: peer reset, successful completion, TCP connection breakage,
|
||||||
|
// etc)
|
||||||
|
func (cs *clientStream) awaitRequestCancel(req *http.Request) {
|
||||||
ctx := reqContext(req)
|
ctx := reqContext(req)
|
||||||
if req.Cancel == nil && ctx.Done() == nil {
|
if req.Cancel == nil && ctx.Done() == nil {
|
||||||
return nil
|
return
|
||||||
}
|
}
|
||||||
select {
|
select {
|
||||||
case <-req.Cancel:
|
case <-req.Cancel:
|
||||||
return errRequestCanceled
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
case <-done:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// awaitRequestCancel waits for the user to cancel a request, its context to
|
|
||||||
// expire, or for the request to be done (any way it might be removed from the
|
|
||||||
// cc.streams map: peer reset, successful completion, TCP connection breakage,
|
|
||||||
// etc). If the request is canceled, then cs will be canceled and closed.
|
|
||||||
func (cs *clientStream) awaitRequestCancel(req *http.Request) {
|
|
||||||
if err := awaitRequestCancel(req, cs.done); err != nil {
|
|
||||||
cs.cancelStream()
|
cs.cancelStream()
|
||||||
cs.bufPipe.CloseWithError(err)
|
cs.bufPipe.CloseWithError(errRequestCanceled)
|
||||||
|
case <-ctx.Done():
|
||||||
|
cs.cancelStream()
|
||||||
|
cs.bufPipe.CloseWithError(ctx.Err())
|
||||||
|
case <-cs.done:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) cancelStream() {
|
func (cs *clientStream) cancelStream() {
|
||||||
cc := cs.cc
|
cs.cc.mu.Lock()
|
||||||
cc.mu.Lock()
|
|
||||||
didReset := cs.didReset
|
didReset := cs.didReset
|
||||||
cs.didReset = true
|
cs.didReset = true
|
||||||
cc.mu.Unlock()
|
cs.cc.mu.Unlock()
|
||||||
|
|
||||||
if !didReset {
|
if !didReset {
|
||||||
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
|
cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
|
||||||
cc.forgetStreamID(cs.ID)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -341,7 +329,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
|
||||||
}
|
}
|
||||||
|
|
||||||
addr := authorityAddr(req.URL.Scheme, req.URL.Host)
|
addr := authorityAddr(req.URL.Scheme, req.URL.Host)
|
||||||
for retry := 0; ; retry++ {
|
for {
|
||||||
cc, err := t.connPool().GetClientConn(req, addr)
|
cc, err := t.connPool().GetClientConn(req, addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
|
t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
|
||||||
|
@ -349,26 +337,10 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
|
||||||
}
|
}
|
||||||
traceGotConn(req, cc)
|
traceGotConn(req, cc)
|
||||||
res, err := cc.RoundTrip(req)
|
res, err := cc.RoundTrip(req)
|
||||||
if err != nil && retry <= 6 {
|
if err != nil {
|
||||||
afterBodyWrite := false
|
if req, err = shouldRetryRequest(req, err); err == nil {
|
||||||
if e, ok := err.(afterReqBodyWriteError); ok {
|
|
||||||
err = e
|
|
||||||
afterBodyWrite = true
|
|
||||||
}
|
|
||||||
if req, err = shouldRetryRequest(req, err, afterBodyWrite); err == nil {
|
|
||||||
// After the first retry, do exponential backoff with 10% jitter.
|
|
||||||
if retry == 0 {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
backoff := float64(uint(1) << (uint(retry) - 1))
|
|
||||||
backoff += backoff * (0.1 * mathrand.Float64())
|
|
||||||
select {
|
|
||||||
case <-time.After(time.Second * time.Duration(backoff)):
|
|
||||||
continue
|
|
||||||
case <-reqContext(req).Done():
|
|
||||||
return nil, reqContext(req).Err()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.vlogf("RoundTrip failure: %v", err)
|
t.vlogf("RoundTrip failure: %v", err)
|
||||||
|
@ -390,30 +362,22 @@ func (t *Transport) CloseIdleConnections() {
|
||||||
var (
|
var (
|
||||||
errClientConnClosed = errors.New("http2: client conn is closed")
|
errClientConnClosed = errors.New("http2: client conn is closed")
|
||||||
errClientConnUnusable = errors.New("http2: client conn not usable")
|
errClientConnUnusable = errors.New("http2: client conn not usable")
|
||||||
|
|
||||||
errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
|
errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
|
||||||
|
errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written")
|
||||||
)
|
)
|
||||||
|
|
||||||
// afterReqBodyWriteError is a wrapper around errors returned by ClientConn.RoundTrip.
|
|
||||||
// It is used to signal that err happened after part of Request.Body was sent to the server.
|
|
||||||
type afterReqBodyWriteError struct {
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e afterReqBodyWriteError) Error() string {
|
|
||||||
return e.err.Error() + "; some request body already written"
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldRetryRequest is called by RoundTrip when a request fails to get
|
// shouldRetryRequest is called by RoundTrip when a request fails to get
|
||||||
// response headers. It is always called with a non-nil error.
|
// response headers. It is always called with a non-nil error.
|
||||||
// It returns either a request to retry (either the same request, or a
|
// It returns either a request to retry (either the same request, or a
|
||||||
// modified clone), or an error if the request can't be replayed.
|
// modified clone), or an error if the request can't be replayed.
|
||||||
func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {
|
func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) {
|
||||||
if !canRetryError(err) {
|
switch err {
|
||||||
|
default:
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
case errClientConnUnusable, errClientConnGotGoAway:
|
||||||
if !afterBodyWrite {
|
|
||||||
return req, nil
|
return req, nil
|
||||||
}
|
case errClientConnGotGoAwayAfterSomeReqBody:
|
||||||
// If the Body is nil (or http.NoBody), it's safe to reuse
|
// If the Body is nil (or http.NoBody), it's safe to reuse
|
||||||
// this request and its Body.
|
// this request and its Body.
|
||||||
if req.Body == nil || reqBodyIsNoBody(req.Body) {
|
if req.Body == nil || reqBodyIsNoBody(req.Body) {
|
||||||
|
@ -423,7 +387,7 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt
|
||||||
// func defined.
|
// func defined.
|
||||||
getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody
|
getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody
|
||||||
if getBody == nil {
|
if getBody == nil {
|
||||||
return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
|
return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error")
|
||||||
}
|
}
|
||||||
body, err := getBody()
|
body, err := getBody()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -432,16 +396,7 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt
|
||||||
newReq := *req
|
newReq := *req
|
||||||
newReq.Body = body
|
newReq.Body = body
|
||||||
return &newReq, nil
|
return &newReq, nil
|
||||||
}
|
|
||||||
|
|
||||||
func canRetryError(err error) bool {
|
|
||||||
if err == errClientConnUnusable || err == errClientConnGotGoAway {
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
if se, ok := err.(StreamError); ok {
|
|
||||||
return se.Code == ErrCodeRefusedStream
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
|
func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
|
||||||
|
@ -605,8 +560,6 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// CanTakeNewRequest reports whether the connection can take a new request,
|
|
||||||
// meaning it has not been closed or received or sent a GOAWAY.
|
|
||||||
func (cc *ClientConn) CanTakeNewRequest() bool {
|
func (cc *ClientConn) CanTakeNewRequest() bool {
|
||||||
cc.mu.Lock()
|
cc.mu.Lock()
|
||||||
defer cc.mu.Unlock()
|
defer cc.mu.Unlock()
|
||||||
|
@ -618,7 +571,8 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return cc.goAway == nil && !cc.closed &&
|
return cc.goAway == nil && !cc.closed &&
|
||||||
int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
|
int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
|
||||||
|
cc.nextStreamID < math.MaxInt32
|
||||||
}
|
}
|
||||||
|
|
||||||
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
|
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
|
||||||
|
@ -740,7 +694,7 @@ func checkConnHeaders(req *http.Request) error {
|
||||||
// req.ContentLength, where 0 actually means zero (not unknown) and -1
|
// req.ContentLength, where 0 actually means zero (not unknown) and -1
|
||||||
// means unknown.
|
// means unknown.
|
||||||
func actualContentLength(req *http.Request) int64 {
|
func actualContentLength(req *http.Request) int64 {
|
||||||
if req.Body == nil || reqBodyIsNoBody(req.Body) {
|
if req.Body == nil {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
if req.ContentLength != 0 {
|
if req.ContentLength != 0 {
|
||||||
|
@ -764,14 +718,15 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
hasTrailers := trailers != ""
|
hasTrailers := trailers != ""
|
||||||
|
|
||||||
cc.mu.Lock()
|
cc.mu.Lock()
|
||||||
if err := cc.awaitOpenSlotForRequest(req); err != nil {
|
cc.lastActive = time.Now()
|
||||||
|
if cc.closed || !cc.canTakeNewRequestLocked() {
|
||||||
cc.mu.Unlock()
|
cc.mu.Unlock()
|
||||||
return nil, err
|
return nil, errClientConnUnusable
|
||||||
}
|
}
|
||||||
|
|
||||||
body := req.Body
|
body := req.Body
|
||||||
|
hasBody := body != nil
|
||||||
contentLen := actualContentLength(req)
|
contentLen := actualContentLength(req)
|
||||||
hasBody := contentLen != 0
|
|
||||||
|
|
||||||
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
|
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
|
||||||
var requestedGzip bool
|
var requestedGzip bool
|
||||||
|
@ -861,13 +816,14 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
cs.abortRequestBodyWrite(errStopReqBodyWrite)
|
cs.abortRequestBodyWrite(errStopReqBodyWrite)
|
||||||
}
|
}
|
||||||
if re.err != nil {
|
if re.err != nil {
|
||||||
|
if re.err == errClientConnGotGoAway {
|
||||||
cc.mu.Lock()
|
cc.mu.Lock()
|
||||||
afterBodyWrite := cs.startedWrite
|
if cs.startedWrite {
|
||||||
cc.mu.Unlock()
|
re.err = errClientConnGotGoAwayAfterSomeReqBody
|
||||||
cc.forgetStreamID(cs.ID)
|
|
||||||
if afterBodyWrite {
|
|
||||||
return nil, afterReqBodyWriteError{re.err}
|
|
||||||
}
|
}
|
||||||
|
cc.mu.Unlock()
|
||||||
|
}
|
||||||
|
cc.forgetStreamID(cs.ID)
|
||||||
return nil, re.err
|
return nil, re.err
|
||||||
}
|
}
|
||||||
res.Request = req
|
res.Request = req
|
||||||
|
@ -880,31 +836,31 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
case re := <-readLoopResCh:
|
case re := <-readLoopResCh:
|
||||||
return handleReadLoopResponse(re)
|
return handleReadLoopResponse(re)
|
||||||
case <-respHeaderTimer:
|
case <-respHeaderTimer:
|
||||||
|
cc.forgetStreamID(cs.ID)
|
||||||
if !hasBody || bodyWritten {
|
if !hasBody || bodyWritten {
|
||||||
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
|
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
|
||||||
} else {
|
} else {
|
||||||
bodyWriter.cancel()
|
bodyWriter.cancel()
|
||||||
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
|
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
|
||||||
}
|
}
|
||||||
cc.forgetStreamID(cs.ID)
|
|
||||||
return nil, errTimeout
|
return nil, errTimeout
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
|
cc.forgetStreamID(cs.ID)
|
||||||
if !hasBody || bodyWritten {
|
if !hasBody || bodyWritten {
|
||||||
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
|
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
|
||||||
} else {
|
} else {
|
||||||
bodyWriter.cancel()
|
bodyWriter.cancel()
|
||||||
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
|
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
|
||||||
}
|
}
|
||||||
cc.forgetStreamID(cs.ID)
|
|
||||||
return nil, ctx.Err()
|
return nil, ctx.Err()
|
||||||
case <-req.Cancel:
|
case <-req.Cancel:
|
||||||
|
cc.forgetStreamID(cs.ID)
|
||||||
if !hasBody || bodyWritten {
|
if !hasBody || bodyWritten {
|
||||||
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
|
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
|
||||||
} else {
|
} else {
|
||||||
bodyWriter.cancel()
|
bodyWriter.cancel()
|
||||||
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
|
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
|
||||||
}
|
}
|
||||||
cc.forgetStreamID(cs.ID)
|
|
||||||
return nil, errRequestCanceled
|
return nil, errRequestCanceled
|
||||||
case <-cs.peerReset:
|
case <-cs.peerReset:
|
||||||
// processResetStream already removed the
|
// processResetStream already removed the
|
||||||
|
@ -931,45 +887,6 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams.
|
|
||||||
// Must hold cc.mu.
|
|
||||||
func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
|
|
||||||
var waitingForConn chan struct{}
|
|
||||||
var waitingForConnErr error // guarded by cc.mu
|
|
||||||
for {
|
|
||||||
cc.lastActive = time.Now()
|
|
||||||
if cc.closed || !cc.canTakeNewRequestLocked() {
|
|
||||||
return errClientConnUnusable
|
|
||||||
}
|
|
||||||
if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
|
|
||||||
if waitingForConn != nil {
|
|
||||||
close(waitingForConn)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Unfortunately, we cannot wait on a condition variable and channel at
|
|
||||||
// the same time, so instead, we spin up a goroutine to check if the
|
|
||||||
// request is canceled while we wait for a slot to open in the connection.
|
|
||||||
if waitingForConn == nil {
|
|
||||||
waitingForConn = make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
if err := awaitRequestCancel(req, waitingForConn); err != nil {
|
|
||||||
cc.mu.Lock()
|
|
||||||
waitingForConnErr = err
|
|
||||||
cc.cond.Broadcast()
|
|
||||||
cc.mu.Unlock()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
cc.pendingRequests++
|
|
||||||
cc.cond.Wait()
|
|
||||||
cc.pendingRequests--
|
|
||||||
if waitingForConnErr != nil {
|
|
||||||
return waitingForConnErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// requires cc.wmu be held
|
// requires cc.wmu be held
|
||||||
func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
|
func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
|
||||||
first := true // first frame written (HEADERS is first, then CONTINUATION)
|
first := true // first frame written (HEADERS is first, then CONTINUATION)
|
||||||
|
@ -1329,9 +1246,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
|
||||||
cc.idleTimer.Reset(cc.idleTimeout)
|
cc.idleTimer.Reset(cc.idleTimeout)
|
||||||
}
|
}
|
||||||
close(cs.done)
|
close(cs.done)
|
||||||
// Wake up checkResetOrDone via clientStream.awaitFlowControl and
|
cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
|
||||||
// wake up RoundTrip if there is a pending request.
|
|
||||||
cc.cond.Broadcast()
|
|
||||||
}
|
}
|
||||||
return cs
|
return cs
|
||||||
}
|
}
|
||||||
|
@ -1430,9 +1345,8 @@ func (rl *clientConnReadLoop) run() error {
|
||||||
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
|
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
|
||||||
}
|
}
|
||||||
if se, ok := err.(StreamError); ok {
|
if se, ok := err.(StreamError); ok {
|
||||||
if cs := cc.streamByID(se.StreamID, false); cs != nil {
|
if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
|
||||||
cs.cc.writeStreamReset(cs.ID, se.Code, err)
|
cs.cc.writeStreamReset(cs.ID, se.Code, err)
|
||||||
cs.cc.forgetStreamID(cs.ID)
|
|
||||||
if se.Cause == nil {
|
if se.Cause == nil {
|
||||||
se.Cause = cc.fr.errDetail
|
se.Cause = cc.fr.errDetail
|
||||||
}
|
}
|
||||||
|
@ -1614,7 +1528,8 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
|
||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
|
buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
|
||||||
|
cs.bufPipe = pipe{b: buf}
|
||||||
cs.bytesRemain = res.ContentLength
|
cs.bytesRemain = res.ContentLength
|
||||||
res.Body = transportResponseBody{cs}
|
res.Body = transportResponseBody{cs}
|
||||||
go cs.awaitRequestCancel(cs.req)
|
go cs.awaitRequestCancel(cs.req)
|
||||||
|
@ -1741,7 +1656,6 @@ func (b transportResponseBody) Close() error {
|
||||||
cc.wmu.Lock()
|
cc.wmu.Lock()
|
||||||
if !serverSentStreamEnd {
|
if !serverSentStreamEnd {
|
||||||
cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
|
cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
|
||||||
cs.didReset = true
|
|
||||||
}
|
}
|
||||||
// Return connection-level flow control.
|
// Return connection-level flow control.
|
||||||
if unread > 0 {
|
if unread > 0 {
|
||||||
|
@ -1754,7 +1668,6 @@ func (b transportResponseBody) Close() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
cs.bufPipe.BreakWithError(errClosedResponseBody)
|
cs.bufPipe.BreakWithError(errClosedResponseBody)
|
||||||
cc.forgetStreamID(cs.ID)
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1790,6 +1703,12 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if f.Length > 0 {
|
if f.Length > 0 {
|
||||||
|
if len(data) > 0 && cs.bufPipe.b == nil {
|
||||||
|
// Data frame after it's already closed?
|
||||||
|
cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
|
||||||
|
return ConnectionError(ErrCodeProtocol)
|
||||||
|
}
|
||||||
|
|
||||||
// Check connection-level flow control.
|
// Check connection-level flow control.
|
||||||
cc.mu.Lock()
|
cc.mu.Lock()
|
||||||
if cs.inflow.available() >= int32(f.Length) {
|
if cs.inflow.available() >= int32(f.Length) {
|
||||||
|
@ -1800,27 +1719,16 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
|
||||||
}
|
}
|
||||||
// Return any padded flow control now, since we won't
|
// Return any padded flow control now, since we won't
|
||||||
// refund it later on body reads.
|
// refund it later on body reads.
|
||||||
var refund int
|
if pad := int32(f.Length) - int32(len(data)); pad > 0 {
|
||||||
if pad := int(f.Length) - len(data); pad > 0 {
|
cs.inflow.add(pad)
|
||||||
refund += pad
|
cc.inflow.add(pad)
|
||||||
}
|
|
||||||
// Return len(data) now if the stream is already closed,
|
|
||||||
// since data will never be read.
|
|
||||||
didReset := cs.didReset
|
|
||||||
if didReset {
|
|
||||||
refund += len(data)
|
|
||||||
}
|
|
||||||
if refund > 0 {
|
|
||||||
cc.inflow.add(int32(refund))
|
|
||||||
cc.wmu.Lock()
|
cc.wmu.Lock()
|
||||||
cc.fr.WriteWindowUpdate(0, uint32(refund))
|
cc.fr.WriteWindowUpdate(0, uint32(pad))
|
||||||
if !didReset {
|
cc.fr.WriteWindowUpdate(cs.ID, uint32(pad))
|
||||||
cs.inflow.add(int32(refund))
|
|
||||||
cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
|
|
||||||
}
|
|
||||||
cc.bw.Flush()
|
cc.bw.Flush()
|
||||||
cc.wmu.Unlock()
|
cc.wmu.Unlock()
|
||||||
}
|
}
|
||||||
|
didReset := cs.didReset
|
||||||
cc.mu.Unlock()
|
cc.mu.Unlock()
|
||||||
|
|
||||||
if len(data) > 0 && !didReset {
|
if len(data) > 0 && !didReset {
|
||||||
|
|
|
@ -53,7 +53,7 @@ type PriorityWriteSchedulerConfig struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
|
// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
|
||||||
// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
|
// frames by following HTTP/2 priorities as described in RFC 7340 Section 5.3.
|
||||||
// If cfg is nil, default options are used.
|
// If cfg is nil, default options are used.
|
||||||
func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
|
func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
|
||||||
if cfg == nil {
|
if cfg == nil {
|
||||||
|
|
|
@ -5,15 +5,8 @@ go_library(
|
||||||
srcs = [
|
srcs = [
|
||||||
"idna.go",
|
"idna.go",
|
||||||
"punycode.go",
|
"punycode.go",
|
||||||
"tables.go",
|
|
||||||
"trie.go",
|
|
||||||
"trieval.go",
|
|
||||||
],
|
],
|
||||||
visibility = ["//visibility:public"],
|
visibility = ["//visibility:public"],
|
||||||
deps = [
|
|
||||||
"//vendor/golang.org/x/text/secure/bidirule:go_default_library",
|
|
||||||
"//vendor/golang.org/x/text/unicode/norm:go_default_library",
|
|
||||||
],
|
|
||||||
)
|
)
|
||||||
|
|
||||||
filegroup(
|
filegroup(
|
||||||
|
|
|
@ -1,673 +1,61 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
// Package idna implements IDNA2008 using the compatibility processing
|
// Package idna implements IDNA2008 (Internationalized Domain Names for
|
||||||
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and
|
||||||
// deal with the transition from IDNA2003.
|
// RFC 5894.
|
||||||
//
|
|
||||||
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
|
|
||||||
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
|
||||||
// UTS #46 is defined in http://www.unicode.org/reports/tr46.
|
|
||||||
// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
|
||||||
// differences between these two standards.
|
|
||||||
package idna
|
package idna
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"strings"
|
"strings"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"golang.org/x/text/secure/bidirule"
|
|
||||||
"golang.org/x/text/unicode/norm"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or
|
||||||
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11
|
||||||
// evaluated string as lookup.
|
|
||||||
// TODO: the current error handling is, in my opinion, the least opinionated.
|
|
||||||
// Other strategies are also viable, though:
|
|
||||||
// Option 1) Return an empty string in case of error, but allow the user to
|
|
||||||
// specify explicitly which errors to ignore.
|
|
||||||
// Option 2) Return the partially evaluated string if it is itself a valid
|
|
||||||
// string, otherwise return the empty string in case of error.
|
|
||||||
// Option 3) Option 1 and 2.
|
|
||||||
// Option 4) Always return an empty string for now and implement Option 1 as
|
|
||||||
// needed, and document that the return string may not be empty in case of
|
|
||||||
// error in the future.
|
|
||||||
// I think Option 1 is best, but it is quite opinionated.
|
|
||||||
|
|
||||||
// ToASCII is a wrapper for Punycode.ToASCII.
|
|
||||||
func ToASCII(s string) (string, error) {
|
|
||||||
return Punycode.process(s, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToUnicode is a wrapper for Punycode.ToUnicode.
|
|
||||||
func ToUnicode(s string) (string, error) {
|
|
||||||
return Punycode.process(s, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// An Option configures a Profile at creation time.
|
|
||||||
type Option func(*options)
|
|
||||||
|
|
||||||
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
|
|
||||||
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
|
|
||||||
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
|
|
||||||
// compatibility. It is used by most browsers when resolving domain names. This
|
|
||||||
// option is only meaningful if combined with MapForLookup.
|
|
||||||
func Transitional(transitional bool) Option {
|
|
||||||
return func(o *options) { o.transitional = true }
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
|
||||||
// are longer than allowed by the RFC.
|
|
||||||
func VerifyDNSLength(verify bool) Option {
|
|
||||||
return func(o *options) { o.verifyDNSLength = verify }
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveLeadingDots removes leading label separators. Leading runes that map to
|
|
||||||
// dots, such as U+3002, are removed as well.
|
|
||||||
//
|
|
||||||
// This is the behavior suggested by the UTS #46 and is adopted by some
|
|
||||||
// browsers.
|
|
||||||
func RemoveLeadingDots(remove bool) Option {
|
|
||||||
return func(o *options) { o.removeLeadingDots = remove }
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateLabels sets whether to check the mandatory label validation criteria
|
|
||||||
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
|
|
||||||
// of hyphens ('-'), normalization, validity of runes, and the context rules.
|
|
||||||
func ValidateLabels(enable bool) Option {
|
|
||||||
return func(o *options) {
|
|
||||||
// Don't override existing mappings, but set one that at least checks
|
|
||||||
// normalization if it is not set.
|
|
||||||
if o.mapping == nil && enable {
|
|
||||||
o.mapping = normalize
|
|
||||||
}
|
|
||||||
o.trie = trie
|
|
||||||
o.validateLabels = enable
|
|
||||||
o.fromPuny = validateFromPunycode
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// StrictDomainName limits the set of permissable ASCII characters to those
|
|
||||||
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
|
|
||||||
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
|
|
||||||
//
|
|
||||||
// This option is useful, for instance, for browsers that allow characters
|
|
||||||
// outside this range, for example a '_' (U+005F LOW LINE). See
|
|
||||||
// http://www.rfc-editor.org/std/std3.txt for more details This option
|
|
||||||
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
|
|
||||||
func StrictDomainName(use bool) Option {
|
|
||||||
return func(o *options) {
|
|
||||||
o.trie = trie
|
|
||||||
o.useSTD3Rules = use
|
|
||||||
o.fromPuny = validateFromPunycode
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NOTE: the following options pull in tables. The tables should not be linked
|
|
||||||
// in as long as the options are not used.
|
|
||||||
|
|
||||||
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
|
|
||||||
// that relies on proper validation of labels should include this rule.
|
|
||||||
func BidiRule() Option {
|
|
||||||
return func(o *options) { o.bidirule = bidirule.ValidString }
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateForRegistration sets validation options to verify that a given IDN is
|
|
||||||
// properly formatted for registration as defined by Section 4 of RFC 5891.
|
|
||||||
func ValidateForRegistration() Option {
|
|
||||||
return func(o *options) {
|
|
||||||
o.mapping = validateRegistration
|
|
||||||
StrictDomainName(true)(o)
|
|
||||||
ValidateLabels(true)(o)
|
|
||||||
VerifyDNSLength(true)(o)
|
|
||||||
BidiRule()(o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapForLookup sets validation and mapping options such that a given IDN is
|
|
||||||
// transformed for domain name lookup according to the requirements set out in
|
|
||||||
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
|
|
||||||
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
|
|
||||||
// to add this check.
|
|
||||||
//
|
|
||||||
// The mappings include normalization and mapping case, width and other
|
|
||||||
// compatibility mappings.
|
|
||||||
func MapForLookup() Option {
|
|
||||||
return func(o *options) {
|
|
||||||
o.mapping = validateAndMap
|
|
||||||
StrictDomainName(true)(o)
|
|
||||||
ValidateLabels(true)(o)
|
|
||||||
RemoveLeadingDots(true)(o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type options struct {
|
|
||||||
transitional bool
|
|
||||||
useSTD3Rules bool
|
|
||||||
validateLabels bool
|
|
||||||
verifyDNSLength bool
|
|
||||||
removeLeadingDots bool
|
|
||||||
|
|
||||||
trie *idnaTrie
|
|
||||||
|
|
||||||
// fromPuny calls validation rules when converting A-labels to U-labels.
|
|
||||||
fromPuny func(p *Profile, s string) error
|
|
||||||
|
|
||||||
// mapping implements a validation and mapping step as defined in RFC 5895
|
|
||||||
// or UTS 46, tailored to, for example, domain registration or lookup.
|
|
||||||
mapping func(p *Profile, s string) (string, error)
|
|
||||||
|
|
||||||
// bidirule, if specified, checks whether s conforms to the Bidi Rule
|
|
||||||
// defined in RFC 5893.
|
|
||||||
bidirule func(s string) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Profile defines the configuration of a IDNA mapper.
|
|
||||||
type Profile struct {
|
|
||||||
options
|
|
||||||
}
|
|
||||||
|
|
||||||
func apply(o *options, opts []Option) {
|
|
||||||
for _, f := range opts {
|
|
||||||
f(o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new Profile.
|
|
||||||
//
|
|
||||||
// With no options, the returned Profile is the most permissive and equals the
|
|
||||||
// Punycode Profile. Options can be passed to further restrict the Profile. The
|
|
||||||
// MapForLookup and ValidateForRegistration options set a collection of options,
|
|
||||||
// for lookup and registration purposes respectively, which can be tailored by
|
|
||||||
// adding more fine-grained options, where later options override earlier
|
|
||||||
// options.
|
|
||||||
func New(o ...Option) *Profile {
|
|
||||||
p := &Profile{}
|
|
||||||
apply(&p.options, o)
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
|
||||||
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
|
||||||
// ToASCII("golang") is "golang". If an error is encountered it will return
|
|
||||||
// an error and a (partially) processed result.
|
|
||||||
func (p *Profile) ToASCII(s string) (string, error) {
|
|
||||||
return p.process(s, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
|
||||||
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
|
||||||
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
|
||||||
// an error and a (partially) processed result.
|
|
||||||
func (p *Profile) ToUnicode(s string) (string, error) {
|
|
||||||
pp := *p
|
|
||||||
pp.transitional = false
|
|
||||||
return pp.process(s, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String reports a string with a description of the profile for debugging
|
|
||||||
// purposes. The string format may change with different versions.
|
|
||||||
func (p *Profile) String() string {
|
|
||||||
s := ""
|
|
||||||
if p.transitional {
|
|
||||||
s = "Transitional"
|
|
||||||
} else {
|
|
||||||
s = "NonTransitional"
|
|
||||||
}
|
|
||||||
if p.useSTD3Rules {
|
|
||||||
s += ":UseSTD3Rules"
|
|
||||||
}
|
|
||||||
if p.validateLabels {
|
|
||||||
s += ":ValidateLabels"
|
|
||||||
}
|
|
||||||
if p.verifyDNSLength {
|
|
||||||
s += ":VerifyDNSLength"
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// Punycode is a Profile that does raw punycode processing with a minimum
|
|
||||||
// of validation.
|
|
||||||
Punycode *Profile = punycode
|
|
||||||
|
|
||||||
// Lookup is the recommended profile for looking up domain names, according
|
|
||||||
// to Section 5 of RFC 5891. The exact configuration of this profile may
|
|
||||||
// change over time.
|
|
||||||
Lookup *Profile = lookup
|
|
||||||
|
|
||||||
// Display is the recommended profile for displaying domain names.
|
|
||||||
// The configuration of this profile may change over time.
|
|
||||||
Display *Profile = display
|
|
||||||
|
|
||||||
// Registration is the recommended profile for checking whether a given
|
|
||||||
// IDN is valid for registration, according to Section 4 of RFC 5891.
|
|
||||||
Registration *Profile = registration
|
|
||||||
|
|
||||||
punycode = &Profile{}
|
|
||||||
lookup = &Profile{options{
|
|
||||||
transitional: true,
|
|
||||||
useSTD3Rules: true,
|
|
||||||
validateLabels: true,
|
|
||||||
removeLeadingDots: true,
|
|
||||||
trie: trie,
|
|
||||||
fromPuny: validateFromPunycode,
|
|
||||||
mapping: validateAndMap,
|
|
||||||
bidirule: bidirule.ValidString,
|
|
||||||
}}
|
|
||||||
display = &Profile{options{
|
|
||||||
useSTD3Rules: true,
|
|
||||||
validateLabels: true,
|
|
||||||
removeLeadingDots: true,
|
|
||||||
trie: trie,
|
|
||||||
fromPuny: validateFromPunycode,
|
|
||||||
mapping: validateAndMap,
|
|
||||||
bidirule: bidirule.ValidString,
|
|
||||||
}}
|
|
||||||
registration = &Profile{options{
|
|
||||||
useSTD3Rules: true,
|
|
||||||
validateLabels: true,
|
|
||||||
verifyDNSLength: true,
|
|
||||||
trie: trie,
|
|
||||||
fromPuny: validateFromPunycode,
|
|
||||||
mapping: validateRegistration,
|
|
||||||
bidirule: bidirule.ValidString,
|
|
||||||
}}
|
|
||||||
|
|
||||||
// TODO: profiles
|
|
||||||
// Register: recommended for approving domain names: don't do any mappings
|
|
||||||
// but rather reject on invalid input. Bundle or block deviation characters.
|
|
||||||
)
|
|
||||||
|
|
||||||
type labelError struct{ label, code_ string }
|
|
||||||
|
|
||||||
func (e labelError) code() string { return e.code_ }
|
|
||||||
func (e labelError) Error() string {
|
|
||||||
return fmt.Sprintf("idna: invalid label %q", e.label)
|
|
||||||
}
|
|
||||||
|
|
||||||
type runeError rune
|
|
||||||
|
|
||||||
func (e runeError) code() string { return "P1" }
|
|
||||||
func (e runeError) Error() string {
|
|
||||||
return fmt.Sprintf("idna: disallowed rune %U", e)
|
|
||||||
}
|
|
||||||
|
|
||||||
// process implements the algorithm described in section 4 of UTS #46,
|
|
||||||
// see http://www.unicode.org/reports/tr46.
|
|
||||||
func (p *Profile) process(s string, toASCII bool) (string, error) {
|
|
||||||
var err error
|
|
||||||
if p.mapping != nil {
|
|
||||||
s, err = p.mapping(p, s)
|
|
||||||
}
|
|
||||||
// Remove leading empty labels.
|
|
||||||
if p.removeLeadingDots {
|
|
||||||
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// It seems like we should only create this error on ToASCII, but the
|
|
||||||
// UTS 46 conformance tests suggests we should always check this.
|
|
||||||
if err == nil && p.verifyDNSLength && s == "" {
|
|
||||||
err = &labelError{s, "A4"}
|
|
||||||
}
|
|
||||||
labels := labelIter{orig: s}
|
|
||||||
for ; !labels.done(); labels.next() {
|
|
||||||
label := labels.label()
|
|
||||||
if label == "" {
|
|
||||||
// Empty labels are not okay. The label iterator skips the last
|
|
||||||
// label if it is empty.
|
|
||||||
if err == nil && p.verifyDNSLength {
|
|
||||||
err = &labelError{s, "A4"}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(label, acePrefix) {
|
|
||||||
u, err2 := decode(label[len(acePrefix):])
|
|
||||||
if err2 != nil {
|
|
||||||
if err == nil {
|
|
||||||
err = err2
|
|
||||||
}
|
|
||||||
// Spec says keep the old label.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
labels.set(u)
|
|
||||||
if err == nil && p.validateLabels {
|
|
||||||
err = p.fromPuny(p, u)
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
// This should be called on NonTransitional, according to the
|
|
||||||
// spec, but that currently does not have any effect. Use the
|
|
||||||
// original profile to preserve options.
|
|
||||||
err = p.validateLabel(u)
|
|
||||||
}
|
|
||||||
} else if err == nil {
|
|
||||||
err = p.validateLabel(label)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if toASCII {
|
|
||||||
for labels.reset(); !labels.done(); labels.next() {
|
|
||||||
label := labels.label()
|
|
||||||
if !ascii(label) {
|
|
||||||
a, err2 := encode(acePrefix, label)
|
|
||||||
if err == nil {
|
|
||||||
err = err2
|
|
||||||
}
|
|
||||||
label = a
|
|
||||||
labels.set(a)
|
|
||||||
}
|
|
||||||
n := len(label)
|
|
||||||
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
|
|
||||||
err = &labelError{label, "A4"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s = labels.result()
|
|
||||||
if toASCII && p.verifyDNSLength && err == nil {
|
|
||||||
// Compute the length of the domain name minus the root label and its dot.
|
|
||||||
n := len(s)
|
|
||||||
if n > 0 && s[n-1] == '.' {
|
|
||||||
n--
|
|
||||||
}
|
|
||||||
if len(s) < 1 || n > 253 {
|
|
||||||
err = &labelError{s, "A4"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return s, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func normalize(p *Profile, s string) (string, error) {
|
|
||||||
return norm.NFC.String(s), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateRegistration(p *Profile, s string) (string, error) {
|
|
||||||
if !norm.NFC.IsNormalString(s) {
|
|
||||||
return s, &labelError{s, "V1"}
|
|
||||||
}
|
|
||||||
for i := 0; i < len(s); {
|
|
||||||
v, sz := trie.lookupString(s[i:])
|
|
||||||
// Copy bytes not copied so far.
|
|
||||||
switch p.simplify(info(v).category()) {
|
|
||||||
// TODO: handle the NV8 defined in the Unicode idna data set to allow
|
|
||||||
// for strict conformance to IDNA2008.
|
|
||||||
case valid, deviation:
|
|
||||||
case disallowed, mapped, unknown, ignored:
|
|
||||||
r, _ := utf8.DecodeRuneInString(s[i:])
|
|
||||||
return s, runeError(r)
|
|
||||||
}
|
|
||||||
i += sz
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateAndMap(p *Profile, s string) (string, error) {
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
b []byte
|
|
||||||
k int
|
|
||||||
)
|
|
||||||
for i := 0; i < len(s); {
|
|
||||||
v, sz := trie.lookupString(s[i:])
|
|
||||||
start := i
|
|
||||||
i += sz
|
|
||||||
// Copy bytes not copied so far.
|
|
||||||
switch p.simplify(info(v).category()) {
|
|
||||||
case valid:
|
|
||||||
continue
|
|
||||||
case disallowed:
|
|
||||||
if err == nil {
|
|
||||||
r, _ := utf8.DecodeRuneInString(s[start:])
|
|
||||||
err = runeError(r)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
case mapped, deviation:
|
|
||||||
b = append(b, s[k:start]...)
|
|
||||||
b = info(v).appendMapping(b, s[start:i])
|
|
||||||
case ignored:
|
|
||||||
b = append(b, s[k:start]...)
|
|
||||||
// drop the rune
|
|
||||||
case unknown:
|
|
||||||
b = append(b, s[k:start]...)
|
|
||||||
b = append(b, "\ufffd"...)
|
|
||||||
}
|
|
||||||
k = i
|
|
||||||
}
|
|
||||||
if k == 0 {
|
|
||||||
// No changes so far.
|
|
||||||
s = norm.NFC.String(s)
|
|
||||||
} else {
|
|
||||||
b = append(b, s[k:]...)
|
|
||||||
if norm.NFC.QuickSpan(b) != len(b) {
|
|
||||||
b = norm.NFC.Bytes(b)
|
|
||||||
}
|
|
||||||
// TODO: the punycode converters require strings as input.
|
|
||||||
s = string(b)
|
|
||||||
}
|
|
||||||
return s, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// A labelIter allows iterating over domain name labels.
|
|
||||||
type labelIter struct {
|
|
||||||
orig string
|
|
||||||
slice []string
|
|
||||||
curStart int
|
|
||||||
curEnd int
|
|
||||||
i int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) reset() {
|
|
||||||
l.curStart = 0
|
|
||||||
l.curEnd = 0
|
|
||||||
l.i = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) done() bool {
|
|
||||||
return l.curStart >= len(l.orig)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) result() string {
|
|
||||||
if l.slice != nil {
|
|
||||||
return strings.Join(l.slice, ".")
|
|
||||||
}
|
|
||||||
return l.orig
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) label() string {
|
|
||||||
if l.slice != nil {
|
|
||||||
return l.slice[l.i]
|
|
||||||
}
|
|
||||||
p := strings.IndexByte(l.orig[l.curStart:], '.')
|
|
||||||
l.curEnd = l.curStart + p
|
|
||||||
if p == -1 {
|
|
||||||
l.curEnd = len(l.orig)
|
|
||||||
}
|
|
||||||
return l.orig[l.curStart:l.curEnd]
|
|
||||||
}
|
|
||||||
|
|
||||||
// next sets the value to the next label. It skips the last label if it is empty.
|
|
||||||
func (l *labelIter) next() {
|
|
||||||
l.i++
|
|
||||||
if l.slice != nil {
|
|
||||||
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
|
|
||||||
l.curStart = len(l.orig)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
l.curStart = l.curEnd + 1
|
|
||||||
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
|
|
||||||
l.curStart = len(l.orig)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) set(s string) {
|
|
||||||
if l.slice == nil {
|
|
||||||
l.slice = strings.Split(l.orig, ".")
|
|
||||||
}
|
|
||||||
l.slice[l.i] = s
|
|
||||||
}
|
|
||||||
|
|
||||||
// acePrefix is the ASCII Compatible Encoding prefix.
|
// acePrefix is the ASCII Compatible Encoding prefix.
|
||||||
const acePrefix = "xn--"
|
const acePrefix = "xn--"
|
||||||
|
|
||||||
func (p *Profile) simplify(cat category) category {
|
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
||||||
switch cat {
|
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||||
case disallowedSTD3Mapped:
|
// ToASCII("golang") is "golang".
|
||||||
if p.useSTD3Rules {
|
func ToASCII(s string) (string, error) {
|
||||||
cat = disallowed
|
if ascii(s) {
|
||||||
} else {
|
return s, nil
|
||||||
cat = mapped
|
|
||||||
}
|
}
|
||||||
case disallowedSTD3Valid:
|
labels := strings.Split(s, ".")
|
||||||
if p.useSTD3Rules {
|
for i, label := range labels {
|
||||||
cat = disallowed
|
if !ascii(label) {
|
||||||
} else {
|
a, err := encode(acePrefix, label)
|
||||||
cat = valid
|
if err != nil {
|
||||||
|
return "", err
|
||||||
}
|
}
|
||||||
case deviation:
|
labels[i] = a
|
||||||
if !p.transitional {
|
|
||||||
cat = valid
|
|
||||||
}
|
}
|
||||||
case validNV8, validXV8:
|
|
||||||
// TODO: handle V2008
|
|
||||||
cat = valid
|
|
||||||
}
|
}
|
||||||
return cat
|
return strings.Join(labels, "."), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateFromPunycode(p *Profile, s string) error {
|
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
||||||
if !norm.NFC.IsNormalString(s) {
|
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
||||||
return &labelError{s, "V1"}
|
// ToUnicode("golang") is "golang".
|
||||||
|
func ToUnicode(s string) (string, error) {
|
||||||
|
if !strings.Contains(s, acePrefix) {
|
||||||
|
return s, nil
|
||||||
}
|
}
|
||||||
for i := 0; i < len(s); {
|
labels := strings.Split(s, ".")
|
||||||
v, sz := trie.lookupString(s[i:])
|
for i, label := range labels {
|
||||||
if c := p.simplify(info(v).category()); c != valid && c != deviation {
|
if strings.HasPrefix(label, acePrefix) {
|
||||||
return &labelError{s, "V6"}
|
u, err := decode(label[len(acePrefix):])
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
}
|
}
|
||||||
i += sz
|
labels[i] = u
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
zwnj = "\u200c"
|
|
||||||
zwj = "\u200d"
|
|
||||||
)
|
|
||||||
|
|
||||||
type joinState int8
|
|
||||||
|
|
||||||
const (
|
|
||||||
stateStart joinState = iota
|
|
||||||
stateVirama
|
|
||||||
stateBefore
|
|
||||||
stateBeforeVirama
|
|
||||||
stateAfter
|
|
||||||
stateFAIL
|
|
||||||
)
|
|
||||||
|
|
||||||
var joinStates = [][numJoinTypes]joinState{
|
|
||||||
stateStart: {
|
|
||||||
joiningL: stateBefore,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
joinZWNJ: stateFAIL,
|
|
||||||
joinZWJ: stateFAIL,
|
|
||||||
joinVirama: stateVirama,
|
|
||||||
},
|
|
||||||
stateVirama: {
|
|
||||||
joiningL: stateBefore,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
},
|
|
||||||
stateBefore: {
|
|
||||||
joiningL: stateBefore,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
joiningT: stateBefore,
|
|
||||||
joinZWNJ: stateAfter,
|
|
||||||
joinZWJ: stateFAIL,
|
|
||||||
joinVirama: stateBeforeVirama,
|
|
||||||
},
|
|
||||||
stateBeforeVirama: {
|
|
||||||
joiningL: stateBefore,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
joiningT: stateBefore,
|
|
||||||
},
|
|
||||||
stateAfter: {
|
|
||||||
joiningL: stateFAIL,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
joiningT: stateAfter,
|
|
||||||
joiningR: stateStart,
|
|
||||||
joinZWNJ: stateFAIL,
|
|
||||||
joinZWJ: stateFAIL,
|
|
||||||
joinVirama: stateAfter, // no-op as we can't accept joiners here
|
|
||||||
},
|
|
||||||
stateFAIL: {
|
|
||||||
0: stateFAIL,
|
|
||||||
joiningL: stateFAIL,
|
|
||||||
joiningD: stateFAIL,
|
|
||||||
joiningT: stateFAIL,
|
|
||||||
joiningR: stateFAIL,
|
|
||||||
joinZWNJ: stateFAIL,
|
|
||||||
joinZWJ: stateFAIL,
|
|
||||||
joinVirama: stateFAIL,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are
|
|
||||||
// already implicitly satisfied by the overall implementation.
|
|
||||||
func (p *Profile) validateLabel(s string) error {
|
|
||||||
if s == "" {
|
|
||||||
if p.verifyDNSLength {
|
|
||||||
return &labelError{s, "A4"}
|
|
||||||
}
|
}
|
||||||
return nil
|
return strings.Join(labels, "."), nil
|
||||||
}
|
|
||||||
if p.bidirule != nil && !p.bidirule(s) {
|
|
||||||
return &labelError{s, "B"}
|
|
||||||
}
|
|
||||||
if !p.validateLabels {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
trie := p.trie // p.validateLabels is only set if trie is set.
|
|
||||||
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
|
|
||||||
return &labelError{s, "V2"}
|
|
||||||
}
|
|
||||||
if s[0] == '-' || s[len(s)-1] == '-' {
|
|
||||||
return &labelError{s, "V3"}
|
|
||||||
}
|
|
||||||
// TODO: merge the use of this in the trie.
|
|
||||||
v, sz := trie.lookupString(s)
|
|
||||||
x := info(v)
|
|
||||||
if x.isModifier() {
|
|
||||||
return &labelError{s, "V5"}
|
|
||||||
}
|
|
||||||
// Quickly return in the absence of zero-width (non) joiners.
|
|
||||||
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
st := stateStart
|
|
||||||
for i := 0; ; {
|
|
||||||
jt := x.joinType()
|
|
||||||
if s[i:i+sz] == zwj {
|
|
||||||
jt = joinZWJ
|
|
||||||
} else if s[i:i+sz] == zwnj {
|
|
||||||
jt = joinZWNJ
|
|
||||||
}
|
|
||||||
st = joinStates[st][jt]
|
|
||||||
if x.isViramaModifier() {
|
|
||||||
st = joinStates[st][joinVirama]
|
|
||||||
}
|
|
||||||
if i += sz; i == len(s) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v, sz = trie.lookupString(s[i:])
|
|
||||||
x = info(v)
|
|
||||||
}
|
|
||||||
if st == stateFAIL || st == stateAfter {
|
|
||||||
return &labelError{s, "C"}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func ascii(s string) bool {
|
func ascii(s string) bool {
|
||||||
|
|
|
@ -1,6 +1,4 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
@ -9,6 +7,7 @@ package idna
|
||||||
// This file implements the Punycode algorithm from RFC 3492.
|
// This file implements the Punycode algorithm from RFC 3492.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"strings"
|
"strings"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
@ -28,8 +27,6 @@ const (
|
||||||
tmin int32 = 1
|
tmin int32 = 1
|
||||||
)
|
)
|
||||||
|
|
||||||
func punyError(s string) error { return &labelError{s, "A3"} }
|
|
||||||
|
|
||||||
// decode decodes a string as specified in section 6.2.
|
// decode decodes a string as specified in section 6.2.
|
||||||
func decode(encoded string) (string, error) {
|
func decode(encoded string) (string, error) {
|
||||||
if encoded == "" {
|
if encoded == "" {
|
||||||
|
@ -37,7 +34,7 @@ func decode(encoded string) (string, error) {
|
||||||
}
|
}
|
||||||
pos := 1 + strings.LastIndex(encoded, "-")
|
pos := 1 + strings.LastIndex(encoded, "-")
|
||||||
if pos == 1 {
|
if pos == 1 {
|
||||||
return "", punyError(encoded)
|
return "", fmt.Errorf("idna: invalid label %q", encoded)
|
||||||
}
|
}
|
||||||
if pos == len(encoded) {
|
if pos == len(encoded) {
|
||||||
return encoded[:len(encoded)-1], nil
|
return encoded[:len(encoded)-1], nil
|
||||||
|
@ -53,16 +50,16 @@ func decode(encoded string) (string, error) {
|
||||||
oldI, w := i, int32(1)
|
oldI, w := i, int32(1)
|
||||||
for k := base; ; k += base {
|
for k := base; ; k += base {
|
||||||
if pos == len(encoded) {
|
if pos == len(encoded) {
|
||||||
return "", punyError(encoded)
|
return "", fmt.Errorf("idna: invalid label %q", encoded)
|
||||||
}
|
}
|
||||||
digit, ok := decodeDigit(encoded[pos])
|
digit, ok := decodeDigit(encoded[pos])
|
||||||
if !ok {
|
if !ok {
|
||||||
return "", punyError(encoded)
|
return "", fmt.Errorf("idna: invalid label %q", encoded)
|
||||||
}
|
}
|
||||||
pos++
|
pos++
|
||||||
i += digit * w
|
i += digit * w
|
||||||
if i < 0 {
|
if i < 0 {
|
||||||
return "", punyError(encoded)
|
return "", fmt.Errorf("idna: invalid label %q", encoded)
|
||||||
}
|
}
|
||||||
t := k - bias
|
t := k - bias
|
||||||
if t < tmin {
|
if t < tmin {
|
||||||
|
@ -75,7 +72,7 @@ func decode(encoded string) (string, error) {
|
||||||
}
|
}
|
||||||
w *= base - t
|
w *= base - t
|
||||||
if w >= math.MaxInt32/base {
|
if w >= math.MaxInt32/base {
|
||||||
return "", punyError(encoded)
|
return "", fmt.Errorf("idna: invalid label %q", encoded)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
x := int32(len(output) + 1)
|
x := int32(len(output) + 1)
|
||||||
|
@ -83,7 +80,7 @@ func decode(encoded string) (string, error) {
|
||||||
n += i / x
|
n += i / x
|
||||||
i %= x
|
i %= x
|
||||||
if n > utf8.MaxRune || len(output) >= 1024 {
|
if n > utf8.MaxRune || len(output) >= 1024 {
|
||||||
return "", punyError(encoded)
|
return "", fmt.Errorf("idna: invalid label %q", encoded)
|
||||||
}
|
}
|
||||||
output = append(output, 0)
|
output = append(output, 0)
|
||||||
copy(output[i+1:], output[i:])
|
copy(output[i+1:], output[i:])
|
||||||
|
@ -124,14 +121,14 @@ func encode(prefix, s string) (string, error) {
|
||||||
}
|
}
|
||||||
delta += (m - n) * (h + 1)
|
delta += (m - n) * (h + 1)
|
||||||
if delta < 0 {
|
if delta < 0 {
|
||||||
return "", punyError(s)
|
return "", fmt.Errorf("idna: invalid label %q", s)
|
||||||
}
|
}
|
||||||
n = m
|
n = m
|
||||||
for _, r := range s {
|
for _, r := range s {
|
||||||
if r < n {
|
if r < n {
|
||||||
delta++
|
delta++
|
||||||
if delta < 0 {
|
if delta < 0 {
|
||||||
return "", punyError(s)
|
return "", fmt.Errorf("idna: invalid label %q", s)
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,72 +0,0 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
|
||||||
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package idna
|
|
||||||
|
|
||||||
// appendMapping appends the mapping for the respective rune. isMapped must be
|
|
||||||
// true. A mapping is a categorization of a rune as defined in UTS #46.
|
|
||||||
func (c info) appendMapping(b []byte, s string) []byte {
|
|
||||||
index := int(c >> indexShift)
|
|
||||||
if c&xorBit == 0 {
|
|
||||||
s := mappings[index:]
|
|
||||||
return append(b, s[1:s[0]+1]...)
|
|
||||||
}
|
|
||||||
b = append(b, s...)
|
|
||||||
if c&inlineXOR == inlineXOR {
|
|
||||||
// TODO: support and handle two-byte inline masks
|
|
||||||
b[len(b)-1] ^= byte(index)
|
|
||||||
} else {
|
|
||||||
for p := len(b) - int(xorData[index]); p < len(b); p++ {
|
|
||||||
index++
|
|
||||||
b[p] ^= xorData[index]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sparse block handling code.
|
|
||||||
|
|
||||||
type valueRange struct {
|
|
||||||
value uint16 // header: value:stride
|
|
||||||
lo, hi byte // header: lo:n
|
|
||||||
}
|
|
||||||
|
|
||||||
type sparseBlocks struct {
|
|
||||||
values []valueRange
|
|
||||||
offset []uint16
|
|
||||||
}
|
|
||||||
|
|
||||||
var idnaSparse = sparseBlocks{
|
|
||||||
values: idnaSparseValues[:],
|
|
||||||
offset: idnaSparseOffset[:],
|
|
||||||
}
|
|
||||||
|
|
||||||
// Don't use newIdnaTrie to avoid unconditional linking in of the table.
|
|
||||||
var trie = &idnaTrie{}
|
|
||||||
|
|
||||||
// lookup determines the type of block n and looks up the value for b.
|
|
||||||
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
|
|
||||||
// is a list of ranges with an accompanying value. Given a matching range r,
|
|
||||||
// the value for b is by r.value + (b - r.lo) * stride.
|
|
||||||
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
|
|
||||||
offset := t.offset[n]
|
|
||||||
header := t.values[offset]
|
|
||||||
lo := offset + 1
|
|
||||||
hi := lo + uint16(header.lo)
|
|
||||||
for lo < hi {
|
|
||||||
m := lo + (hi-lo)/2
|
|
||||||
r := t.values[m]
|
|
||||||
if r.lo <= b && b <= r.hi {
|
|
||||||
return r.value + uint16(b-r.lo)*header.value
|
|
||||||
}
|
|
||||||
if b < r.lo {
|
|
||||||
hi = m
|
|
||||||
} else {
|
|
||||||
lo = m + 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
|
@ -1,114 +0,0 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
|
||||||
|
|
||||||
package idna
|
|
||||||
|
|
||||||
// This file contains definitions for interpreting the trie value of the idna
|
|
||||||
// trie generated by "go run gen*.go". It is shared by both the generator
|
|
||||||
// program and the resultant package. Sharing is achieved by the generator
|
|
||||||
// copying gen_trieval.go to trieval.go and changing what's above this comment.
|
|
||||||
|
|
||||||
// info holds information from the IDNA mapping table for a single rune. It is
|
|
||||||
// the value returned by a trie lookup. In most cases, all information fits in
|
|
||||||
// a 16-bit value. For mappings, this value may contain an index into a slice
|
|
||||||
// with the mapped string. Such mappings can consist of the actual mapped value
|
|
||||||
// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
|
|
||||||
// input rune. This technique is used by the cases packages and reduces the
|
|
||||||
// table size significantly.
|
|
||||||
//
|
|
||||||
// The per-rune values have the following format:
|
|
||||||
//
|
|
||||||
// if mapped {
|
|
||||||
// if inlinedXOR {
|
|
||||||
// 15..13 inline XOR marker
|
|
||||||
// 12..11 unused
|
|
||||||
// 10..3 inline XOR mask
|
|
||||||
// } else {
|
|
||||||
// 15..3 index into xor or mapping table
|
|
||||||
// }
|
|
||||||
// } else {
|
|
||||||
// 15..13 unused
|
|
||||||
// 12 modifier (including virama)
|
|
||||||
// 11 virama modifier
|
|
||||||
// 10..8 joining type
|
|
||||||
// 7..3 category type
|
|
||||||
// }
|
|
||||||
// 2 use xor pattern
|
|
||||||
// 1..0 mapped category
|
|
||||||
//
|
|
||||||
// See the definitions below for a more detailed description of the various
|
|
||||||
// bits.
|
|
||||||
type info uint16
|
|
||||||
|
|
||||||
const (
|
|
||||||
catSmallMask = 0x3
|
|
||||||
catBigMask = 0xF8
|
|
||||||
indexShift = 3
|
|
||||||
xorBit = 0x4 // interpret the index as an xor pattern
|
|
||||||
inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined.
|
|
||||||
|
|
||||||
joinShift = 8
|
|
||||||
joinMask = 0x07
|
|
||||||
|
|
||||||
viramaModifier = 0x0800
|
|
||||||
modifier = 0x1000
|
|
||||||
)
|
|
||||||
|
|
||||||
// A category corresponds to a category defined in the IDNA mapping table.
|
|
||||||
type category uint16
|
|
||||||
|
|
||||||
const (
|
|
||||||
unknown category = 0 // not defined currently in unicode.
|
|
||||||
mapped category = 1
|
|
||||||
disallowedSTD3Mapped category = 2
|
|
||||||
deviation category = 3
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
valid category = 0x08
|
|
||||||
validNV8 category = 0x18
|
|
||||||
validXV8 category = 0x28
|
|
||||||
disallowed category = 0x40
|
|
||||||
disallowedSTD3Valid category = 0x80
|
|
||||||
ignored category = 0xC0
|
|
||||||
)
|
|
||||||
|
|
||||||
// join types and additional rune information
|
|
||||||
const (
|
|
||||||
joiningL = (iota + 1)
|
|
||||||
joiningD
|
|
||||||
joiningT
|
|
||||||
joiningR
|
|
||||||
|
|
||||||
//the following types are derived during processing
|
|
||||||
joinZWJ
|
|
||||||
joinZWNJ
|
|
||||||
joinVirama
|
|
||||||
numJoinTypes
|
|
||||||
)
|
|
||||||
|
|
||||||
func (c info) isMapped() bool {
|
|
||||||
return c&0x3 != 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c info) category() category {
|
|
||||||
small := c & catSmallMask
|
|
||||||
if small != 0 {
|
|
||||||
return category(small)
|
|
||||||
}
|
|
||||||
return category(c & catBigMask)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c info) joinType() info {
|
|
||||||
if c.isMapped() {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return (c >> joinShift) & joinMask
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c info) isModifier() bool {
|
|
||||||
return c&(modifier|catSmallMask) == modifier
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c info) isViramaModifier() bool {
|
|
||||||
return c&(viramaModifier|catSmallMask) == viramaModifier
|
|
||||||
}
|
|
|
@ -9,7 +9,7 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
// A PerHost directs connections to a default Dialer unless the host name
|
// A PerHost directs connections to a default Dialer unless the hostname
|
||||||
// requested matches one of a number of exceptions.
|
// requested matches one of a number of exceptions.
|
||||||
type PerHost struct {
|
type PerHost struct {
|
||||||
def, bypass Dialer
|
def, bypass Dialer
|
||||||
|
@ -76,7 +76,7 @@ func (p *PerHost) dialerForRequest(host string) Dialer {
|
||||||
|
|
||||||
// AddFromString parses a string that contains comma-separated values
|
// AddFromString parses a string that contains comma-separated values
|
||||||
// specifying hosts that should use the bypass proxy. Each value is either an
|
// specifying hosts that should use the bypass proxy. Each value is either an
|
||||||
// IP address, a CIDR range, a zone (*.example.com) or a host name
|
// IP address, a CIDR range, a zone (*.example.com) or a hostname
|
||||||
// (localhost). A best effort is made to parse the string and errors are
|
// (localhost). A best effort is made to parse the string and errors are
|
||||||
// ignored.
|
// ignored.
|
||||||
func (p *PerHost) AddFromString(s string) {
|
func (p *PerHost) AddFromString(s string) {
|
||||||
|
@ -131,7 +131,7 @@ func (p *PerHost) AddZone(zone string) {
|
||||||
p.bypassZones = append(p.bypassZones, zone)
|
p.bypassZones = append(p.bypassZones, zone)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddHost specifies a host name that will use the bypass proxy.
|
// AddHost specifies a hostname that will use the bypass proxy.
|
||||||
func (p *PerHost) AddHost(host string) {
|
func (p *PerHost) AddHost(host string) {
|
||||||
if strings.HasSuffix(host, ".") {
|
if strings.HasSuffix(host, ".") {
|
||||||
host = host[:len(host)-1]
|
host = host[:len(host)-1]
|
||||||
|
|
|
@ -11,7 +11,6 @@ import (
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"sync"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// A Dialer is a means to establish a connection.
|
// A Dialer is a means to establish a connection.
|
||||||
|
@ -28,7 +27,7 @@ type Auth struct {
|
||||||
// FromEnvironment returns the dialer specified by the proxy related variables in
|
// FromEnvironment returns the dialer specified by the proxy related variables in
|
||||||
// the environment.
|
// the environment.
|
||||||
func FromEnvironment() Dialer {
|
func FromEnvironment() Dialer {
|
||||||
allProxy := allProxyEnv.Get()
|
allProxy := os.Getenv("all_proxy")
|
||||||
if len(allProxy) == 0 {
|
if len(allProxy) == 0 {
|
||||||
return Direct
|
return Direct
|
||||||
}
|
}
|
||||||
|
@ -42,7 +41,7 @@ func FromEnvironment() Dialer {
|
||||||
return Direct
|
return Direct
|
||||||
}
|
}
|
||||||
|
|
||||||
noProxy := noProxyEnv.Get()
|
noProxy := os.Getenv("no_proxy")
|
||||||
if len(noProxy) == 0 {
|
if len(noProxy) == 0 {
|
||||||
return proxy
|
return proxy
|
||||||
}
|
}
|
||||||
|
@ -93,42 +92,3 @@ func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
|
||||||
|
|
||||||
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
|
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
|
||||||
allProxyEnv = &envOnce{
|
|
||||||
names: []string{"ALL_PROXY", "all_proxy"},
|
|
||||||
}
|
|
||||||
noProxyEnv = &envOnce{
|
|
||||||
names: []string{"NO_PROXY", "no_proxy"},
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// envOnce looks up an environment variable (optionally by multiple
|
|
||||||
// names) once. It mitigates expensive lookups on some platforms
|
|
||||||
// (e.g. Windows).
|
|
||||||
// (Borrowed from net/http/transport.go)
|
|
||||||
type envOnce struct {
|
|
||||||
names []string
|
|
||||||
once sync.Once
|
|
||||||
val string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *envOnce) Get() string {
|
|
||||||
e.once.Do(e.init)
|
|
||||||
return e.val
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *envOnce) init() {
|
|
||||||
for _, n := range e.names {
|
|
||||||
e.val = os.Getenv(n)
|
|
||||||
if e.val != "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// reset is used by tests
|
|
||||||
func (e *envOnce) reset() {
|
|
||||||
e.once = sync.Once{}
|
|
||||||
e.val = ""
|
|
||||||
}
|
|
||||||
|
|
|
@ -72,28 +72,24 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := s.connect(conn, addr); err != nil {
|
closeConn := &conn
|
||||||
conn.Close()
|
defer func() {
|
||||||
return nil, err
|
if closeConn != nil {
|
||||||
|
(*closeConn).Close()
|
||||||
}
|
}
|
||||||
return conn, nil
|
}()
|
||||||
}
|
|
||||||
|
|
||||||
// connect takes an existing connection to a socks5 proxy server,
|
host, portStr, err := net.SplitHostPort(addr)
|
||||||
// and commands the server to extend that connection to target,
|
|
||||||
// which must be a canonical address with a host and port.
|
|
||||||
func (s *socks5) connect(conn net.Conn, target string) error {
|
|
||||||
host, portStr, err := net.SplitHostPort(target)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
port, err := strconv.Atoi(portStr)
|
port, err := strconv.Atoi(portStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.New("proxy: failed to parse port number: " + portStr)
|
return nil, errors.New("proxy: failed to parse port number: " + portStr)
|
||||||
}
|
}
|
||||||
if port < 1 || port > 0xffff {
|
if port < 1 || port > 0xffff {
|
||||||
return errors.New("proxy: port number out of range: " + portStr)
|
return nil, errors.New("proxy: port number out of range: " + portStr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// the size here is just an estimate
|
// the size here is just an estimate
|
||||||
|
@ -107,17 +103,17 @@ func (s *socks5) connect(conn net.Conn, target string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := conn.Write(buf); err != nil {
|
if _, err := conn.Write(buf); err != nil {
|
||||||
return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
return nil, errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||||
return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
return nil, errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||||
}
|
}
|
||||||
if buf[0] != 5 {
|
if buf[0] != 5 {
|
||||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
|
return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
|
||||||
}
|
}
|
||||||
if buf[1] == 0xff {
|
if buf[1] == 0xff {
|
||||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
|
return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
|
||||||
}
|
}
|
||||||
|
|
||||||
if buf[1] == socks5AuthPassword {
|
if buf[1] == socks5AuthPassword {
|
||||||
|
@ -129,15 +125,15 @@ func (s *socks5) connect(conn net.Conn, target string) error {
|
||||||
buf = append(buf, s.password...)
|
buf = append(buf, s.password...)
|
||||||
|
|
||||||
if _, err := conn.Write(buf); err != nil {
|
if _, err := conn.Write(buf); err != nil {
|
||||||
return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
return nil, errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||||
return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
return nil, errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
if buf[1] != 0 {
|
if buf[1] != 0 {
|
||||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
|
return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -154,7 +150,7 @@ func (s *socks5) connect(conn net.Conn, target string) error {
|
||||||
buf = append(buf, ip...)
|
buf = append(buf, ip...)
|
||||||
} else {
|
} else {
|
||||||
if len(host) > 255 {
|
if len(host) > 255 {
|
||||||
return errors.New("proxy: destination host name too long: " + host)
|
return nil, errors.New("proxy: destination hostname too long: " + host)
|
||||||
}
|
}
|
||||||
buf = append(buf, socks5Domain)
|
buf = append(buf, socks5Domain)
|
||||||
buf = append(buf, byte(len(host)))
|
buf = append(buf, byte(len(host)))
|
||||||
|
@ -163,11 +159,11 @@ func (s *socks5) connect(conn net.Conn, target string) error {
|
||||||
buf = append(buf, byte(port>>8), byte(port))
|
buf = append(buf, byte(port>>8), byte(port))
|
||||||
|
|
||||||
if _, err := conn.Write(buf); err != nil {
|
if _, err := conn.Write(buf); err != nil {
|
||||||
return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
return nil, errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||||
return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
return nil, errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
failure := "unknown error"
|
failure := "unknown error"
|
||||||
|
@ -176,7 +172,7 @@ func (s *socks5) connect(conn net.Conn, target string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(failure) > 0 {
|
if len(failure) > 0 {
|
||||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
|
return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
|
||||||
}
|
}
|
||||||
|
|
||||||
bytesToDiscard := 0
|
bytesToDiscard := 0
|
||||||
|
@ -188,11 +184,11 @@ func (s *socks5) connect(conn net.Conn, target string) error {
|
||||||
case socks5Domain:
|
case socks5Domain:
|
||||||
_, err := io.ReadFull(conn, buf[:1])
|
_, err := io.ReadFull(conn, buf[:1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
return nil, errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||||
}
|
}
|
||||||
bytesToDiscard = int(buf[0])
|
bytesToDiscard = int(buf[0])
|
||||||
default:
|
default:
|
||||||
return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
|
return nil, errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
|
||||||
}
|
}
|
||||||
|
|
||||||
if cap(buf) < bytesToDiscard {
|
if cap(buf) < bytesToDiscard {
|
||||||
|
@ -201,13 +197,14 @@ func (s *socks5) connect(conn net.Conn, target string) error {
|
||||||
buf = buf[:bytesToDiscard]
|
buf = buf[:bytesToDiscard]
|
||||||
}
|
}
|
||||||
if _, err := io.ReadFull(conn, buf); err != nil {
|
if _, err := io.ReadFull(conn, buf); err != nil {
|
||||||
return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
return nil, errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Also need to discard the port number
|
// Also need to discard the port number
|
||||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||||
return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
return nil, errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
closeConn = nil
|
||||||
|
return conn, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,8 +6,6 @@ go_library(
|
||||||
"events.go",
|
"events.go",
|
||||||
"histogram.go",
|
"histogram.go",
|
||||||
"trace.go",
|
"trace.go",
|
||||||
"trace_go16.go",
|
|
||||||
"trace_go17.go",
|
|
||||||
],
|
],
|
||||||
visibility = ["//visibility:public"],
|
visibility = ["//visibility:public"],
|
||||||
deps = [
|
deps = [
|
||||||
|
|
|
@ -39,9 +39,9 @@ var buckets = []bucket{
|
||||||
}
|
}
|
||||||
|
|
||||||
// RenderEvents renders the HTML page typically served at /debug/events.
|
// RenderEvents renders the HTML page typically served at /debug/events.
|
||||||
// It does not do any auth checking. The request may be nil.
|
// It does not do any auth checking; see AuthRequest for the default auth check
|
||||||
//
|
// used by the handler registered on http.DefaultServeMux.
|
||||||
// Most users will use the Events handler.
|
// req may be nil.
|
||||||
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
|
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
data := &struct {
|
data := &struct {
|
||||||
|
|
|
@ -77,6 +77,7 @@ import (
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
"golang.org/x/net/internal/timeseries"
|
"golang.org/x/net/internal/timeseries"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -110,18 +111,7 @@ var AuthRequest = func(req *http.Request) (any, sensitive bool) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// TODO(jbd): Serve Traces from /debug/traces in the future?
|
http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) {
|
||||||
// There is no requirement for a request to be present to have traces.
|
|
||||||
http.HandleFunc("/debug/requests", Traces)
|
|
||||||
http.HandleFunc("/debug/events", Events)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Traces responds with traces from the program.
|
|
||||||
// The package initialization registers it in http.DefaultServeMux
|
|
||||||
// at /debug/requests.
|
|
||||||
//
|
|
||||||
// It performs authorization by running AuthRequest.
|
|
||||||
func Traces(w http.ResponseWriter, req *http.Request) {
|
|
||||||
any, sensitive := AuthRequest(req)
|
any, sensitive := AuthRequest(req)
|
||||||
if !any {
|
if !any {
|
||||||
http.Error(w, "not allowed", http.StatusUnauthorized)
|
http.Error(w, "not allowed", http.StatusUnauthorized)
|
||||||
|
@ -129,14 +119,8 @@ func Traces(w http.ResponseWriter, req *http.Request) {
|
||||||
}
|
}
|
||||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||||
Render(w, req, sensitive)
|
Render(w, req, sensitive)
|
||||||
}
|
})
|
||||||
|
http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
|
||||||
// Events responds with a page of events collected by EventLogs.
|
|
||||||
// The package initialization registers it in http.DefaultServeMux
|
|
||||||
// at /debug/events.
|
|
||||||
//
|
|
||||||
// It performs authorization by running AuthRequest.
|
|
||||||
func Events(w http.ResponseWriter, req *http.Request) {
|
|
||||||
any, sensitive := AuthRequest(req)
|
any, sensitive := AuthRequest(req)
|
||||||
if !any {
|
if !any {
|
||||||
http.Error(w, "not allowed", http.StatusUnauthorized)
|
http.Error(w, "not allowed", http.StatusUnauthorized)
|
||||||
|
@ -144,12 +128,13 @@ func Events(w http.ResponseWriter, req *http.Request) {
|
||||||
}
|
}
|
||||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||||
RenderEvents(w, req, sensitive)
|
RenderEvents(w, req, sensitive)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Render renders the HTML page typically served at /debug/requests.
|
// Render renders the HTML page typically served at /debug/requests.
|
||||||
// It does not do any auth checking. The request may be nil.
|
// It does not do any auth checking; see AuthRequest for the default auth check
|
||||||
//
|
// used by the handler registered on http.DefaultServeMux.
|
||||||
// Most users will use the Traces handler.
|
// req may be nil.
|
||||||
func Render(w io.Writer, req *http.Request, sensitive bool) {
|
func Render(w io.Writer, req *http.Request, sensitive bool) {
|
||||||
data := &struct {
|
data := &struct {
|
||||||
Families []string
|
Families []string
|
||||||
|
@ -286,6 +271,18 @@ type contextKeyT string
|
||||||
|
|
||||||
var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
|
var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
|
||||||
|
|
||||||
|
// NewContext returns a copy of the parent context
|
||||||
|
// and associates it with a Trace.
|
||||||
|
func NewContext(ctx context.Context, tr Trace) context.Context {
|
||||||
|
return context.WithValue(ctx, contextKey, tr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromContext returns the Trace bound to the context, if any.
|
||||||
|
func FromContext(ctx context.Context) (tr Trace, ok bool) {
|
||||||
|
tr, ok = ctx.Value(contextKey).(Trace)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// Trace represents an active request.
|
// Trace represents an active request.
|
||||||
type Trace interface {
|
type Trace interface {
|
||||||
// LazyLog adds x to the event log. It will be evaluated each time the
|
// LazyLog adds x to the event log. It will be evaluated each time the
|
||||||
|
|
|
@ -1,21 +0,0 @@
|
||||||
// Copyright 2017 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build !go1.7
|
|
||||||
|
|
||||||
package trace
|
|
||||||
|
|
||||||
import "golang.org/x/net/context"
|
|
||||||
|
|
||||||
// NewContext returns a copy of the parent context
|
|
||||||
// and associates it with a Trace.
|
|
||||||
func NewContext(ctx context.Context, tr Trace) context.Context {
|
|
||||||
return context.WithValue(ctx, contextKey, tr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromContext returns the Trace bound to the context, if any.
|
|
||||||
func FromContext(ctx context.Context) (tr Trace, ok bool) {
|
|
||||||
tr, ok = ctx.Value(contextKey).(Trace)
|
|
||||||
return
|
|
||||||
}
|
|
|
@ -1,21 +0,0 @@
|
||||||
// Copyright 2017 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build go1.7
|
|
||||||
|
|
||||||
package trace
|
|
||||||
|
|
||||||
import "context"
|
|
||||||
|
|
||||||
// NewContext returns a copy of the parent context
|
|
||||||
// and associates it with a Trace.
|
|
||||||
func NewContext(ctx context.Context, tr Trace) context.Context {
|
|
||||||
return context.WithValue(ctx, contextKey, tr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromContext returns the Trace bound to the context, if any.
|
|
||||||
func FromContext(ctx context.Context) (tr Trace, ok bool) {
|
|
||||||
tr, ok = ctx.Value(contextKey).(Trace)
|
|
||||||
return
|
|
||||||
}
|
|
|
@ -11,10 +11,8 @@ go_library(
|
||||||
"tables.go",
|
"tables.go",
|
||||||
"trieval.go",
|
"trieval.go",
|
||||||
],
|
],
|
||||||
cgo = True,
|
|
||||||
visibility = ["//visibility:public"],
|
visibility = ["//visibility:public"],
|
||||||
deps = [
|
deps = [
|
||||||
"//vendor/golang.org/x/text/internal:go_default_library",
|
|
||||||
"//vendor/golang.org/x/text/language:go_default_library",
|
"//vendor/golang.org/x/text/language:go_default_library",
|
||||||
"//vendor/golang.org/x/text/transform:go_default_library",
|
"//vendor/golang.org/x/text/transform:go_default_library",
|
||||||
"//vendor/golang.org/x/text/unicode/norm:go_default_library",
|
"//vendor/golang.org/x/text/unicode/norm:go_default_library",
|
||||||
|
|
|
@ -35,7 +35,7 @@ import (
|
||||||
// A Caser may be stateful and should therefore not be shared between
|
// A Caser may be stateful and should therefore not be shared between
|
||||||
// goroutines.
|
// goroutines.
|
||||||
type Caser struct {
|
type Caser struct {
|
||||||
t transform.SpanningTransformer
|
t transform.Transformer
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bytes returns a new byte slice with the result of converting b to the case
|
// Bytes returns a new byte slice with the result of converting b to the case
|
||||||
|
@ -56,17 +56,12 @@ func (c Caser) String(s string) string {
|
||||||
// Transform.
|
// Transform.
|
||||||
func (c Caser) Reset() { c.t.Reset() }
|
func (c Caser) Reset() { c.t.Reset() }
|
||||||
|
|
||||||
// Transform implements the transform.Transformer interface and transforms the
|
// Transform implements the Transformer interface and transforms the given input
|
||||||
// given input to the case form implemented by c.
|
// to the case form implemented by c.
|
||||||
func (c Caser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (c Caser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
return c.t.Transform(dst, src, atEOF)
|
return c.t.Transform(dst, src, atEOF)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Span implements the transform.SpanningTransformer interface.
|
|
||||||
func (c Caser) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
return c.t.Span(src, atEOF)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Upper returns a Caser for language-specific uppercasing.
|
// Upper returns a Caser for language-specific uppercasing.
|
||||||
func Upper(t language.Tag, opts ...Option) Caser {
|
func Upper(t language.Tag, opts ...Option) Caser {
|
||||||
return Caser{makeUpper(t, getOpts(opts...))}
|
return Caser{makeUpper(t, getOpts(opts...))}
|
||||||
|
@ -88,20 +83,14 @@ func Title(t language.Tag, opts ...Option) Caser {
|
||||||
//
|
//
|
||||||
// Case folding does not normalize the input and may not preserve a normal form.
|
// Case folding does not normalize the input and may not preserve a normal form.
|
||||||
// Use the collate or search package for more convenient and linguistically
|
// Use the collate or search package for more convenient and linguistically
|
||||||
// sound comparisons. Use golang.org/x/text/secure/precis for string comparisons
|
// sound comparisons. Use unicode/precis for string comparisons where security
|
||||||
// where security aspects are a concern.
|
// aspects are a concern.
|
||||||
func Fold(opts ...Option) Caser {
|
func Fold(opts ...Option) Caser {
|
||||||
return Caser{makeFold(getOpts(opts...))}
|
return Caser{makeFold(getOpts(opts...))}
|
||||||
}
|
}
|
||||||
|
|
||||||
// An Option is used to modify the behavior of a Caser.
|
// An Option is used to modify the behavior of a Caser.
|
||||||
type Option func(o options) options
|
type Option func(o *options)
|
||||||
|
|
||||||
// TODO: consider these options to take a boolean as well, like FinalSigma.
|
|
||||||
// The advantage of using this approach is that other providers of a lower-case
|
|
||||||
// algorithm could set different defaults by prefixing a user-provided slice
|
|
||||||
// of options with their own. This is handy, for instance, for the precis
|
|
||||||
// package which would override the default to not handle the Greek final sigma.
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// NoLower disables the lowercasing of non-leading letters for a title
|
// NoLower disables the lowercasing of non-leading letters for a title
|
||||||
|
@ -121,42 +110,20 @@ type options struct {
|
||||||
|
|
||||||
// TODO: segmenter, max ignorable, alternative versions, etc.
|
// TODO: segmenter, max ignorable, alternative versions, etc.
|
||||||
|
|
||||||
ignoreFinalSigma bool
|
noFinalSigma bool // Only used for testing.
|
||||||
}
|
}
|
||||||
|
|
||||||
func getOpts(o ...Option) (res options) {
|
func getOpts(o ...Option) (res options) {
|
||||||
for _, f := range o {
|
for _, f := range o {
|
||||||
res = f(res)
|
f(&res)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func noLower(o options) options {
|
func noLower(o *options) {
|
||||||
o.noLower = true
|
o.noLower = true
|
||||||
return o
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func compact(o options) options {
|
func compact(o *options) {
|
||||||
o.simple = true
|
o.simple = true
|
||||||
return o
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleFinalSigma specifies whether the special handling of Greek final sigma
|
|
||||||
// should be enabled. Unicode prescribes handling the Greek final sigma for all
|
|
||||||
// locales, but standards like IDNA and PRECIS override this default.
|
|
||||||
func HandleFinalSigma(enable bool) Option {
|
|
||||||
if enable {
|
|
||||||
return handleFinalSigma
|
|
||||||
}
|
|
||||||
return ignoreFinalSigma
|
|
||||||
}
|
|
||||||
|
|
||||||
func ignoreFinalSigma(o options) options {
|
|
||||||
o.ignoreFinalSigma = true
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleFinalSigma(o options) options {
|
|
||||||
o.ignoreFinalSigma = false
|
|
||||||
return o
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,7 +4,9 @@
|
||||||
|
|
||||||
package cases
|
package cases
|
||||||
|
|
||||||
import "golang.org/x/text/transform"
|
import (
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
// A context is used for iterating over source bytes, fetching case info and
|
// A context is used for iterating over source bytes, fetching case info and
|
||||||
// writing to a destination buffer.
|
// writing to a destination buffer.
|
||||||
|
@ -54,14 +56,6 @@ func (c *context) ret() (nDst, nSrc int, err error) {
|
||||||
return c.nDst, c.nSrc, transform.ErrShortSrc
|
return c.nDst, c.nSrc, transform.ErrShortSrc
|
||||||
}
|
}
|
||||||
|
|
||||||
// retSpan returns the return values for the Span method. It checks whether
|
|
||||||
// there were insufficient bytes in src to complete and introduces an error
|
|
||||||
// accordingly, if necessary.
|
|
||||||
func (c *context) retSpan() (n int, err error) {
|
|
||||||
_, nSrc, err := c.ret()
|
|
||||||
return nSrc, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkpoint sets the return value buffer points for Transform to the current
|
// checkpoint sets the return value buffer points for Transform to the current
|
||||||
// positions.
|
// positions.
|
||||||
func (c *context) checkpoint() {
|
func (c *context) checkpoint() {
|
||||||
|
@ -206,23 +200,6 @@ func lower(c *context) bool {
|
||||||
return c.copy()
|
return c.copy()
|
||||||
}
|
}
|
||||||
|
|
||||||
func isLower(c *context) bool {
|
|
||||||
ct := c.caseType()
|
|
||||||
if c.info&hasMappingMask == 0 || ct == cLower {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if c.info&exceptionBit == 0 {
|
|
||||||
c.err = transform.ErrEndOfSpan
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
e := exceptions[c.info>>exceptionShift:]
|
|
||||||
if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange {
|
|
||||||
c.err = transform.ErrEndOfSpan
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// upper writes the uppercase version of the current rune to dst.
|
// upper writes the uppercase version of the current rune to dst.
|
||||||
func upper(c *context) bool {
|
func upper(c *context) bool {
|
||||||
ct := c.caseType()
|
ct := c.caseType()
|
||||||
|
@ -249,29 +226,6 @@ func upper(c *context) bool {
|
||||||
return c.copy()
|
return c.copy()
|
||||||
}
|
}
|
||||||
|
|
||||||
// isUpper writes the isUppercase version of the current rune to dst.
|
|
||||||
func isUpper(c *context) bool {
|
|
||||||
ct := c.caseType()
|
|
||||||
if c.info&hasMappingMask == 0 || ct == cUpper {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if c.info&exceptionBit == 0 {
|
|
||||||
c.err = transform.ErrEndOfSpan
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
e := exceptions[c.info>>exceptionShift:]
|
|
||||||
// Get length of first special case mapping.
|
|
||||||
n := (e[1] >> lengthBits) & lengthMask
|
|
||||||
if ct == cTitle {
|
|
||||||
n = e[1] & lengthMask
|
|
||||||
}
|
|
||||||
if n != noChange {
|
|
||||||
c.err = transform.ErrEndOfSpan
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// title writes the title case version of the current rune to dst.
|
// title writes the title case version of the current rune to dst.
|
||||||
func title(c *context) bool {
|
func title(c *context) bool {
|
||||||
ct := c.caseType()
|
ct := c.caseType()
|
||||||
|
@ -303,33 +257,6 @@ func title(c *context) bool {
|
||||||
return c.copy()
|
return c.copy()
|
||||||
}
|
}
|
||||||
|
|
||||||
// isTitle reports whether the current rune is in title case.
|
|
||||||
func isTitle(c *context) bool {
|
|
||||||
ct := c.caseType()
|
|
||||||
if c.info&hasMappingMask == 0 || ct == cTitle {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if c.info&exceptionBit == 0 {
|
|
||||||
if ct == cLower {
|
|
||||||
c.err = transform.ErrEndOfSpan
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Get the exception data.
|
|
||||||
e := exceptions[c.info>>exceptionShift:]
|
|
||||||
if nTitle := e[1] & lengthMask; nTitle != noChange {
|
|
||||||
c.err = transform.ErrEndOfSpan
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
nFirst := (e[1] >> lengthBits) & lengthMask
|
|
||||||
if ct == cLower && nFirst != noChange {
|
|
||||||
c.err = transform.ErrEndOfSpan
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// foldFull writes the foldFull version of the current rune to dst.
|
// foldFull writes the foldFull version of the current rune to dst.
|
||||||
func foldFull(c *context) bool {
|
func foldFull(c *context) bool {
|
||||||
if c.info&hasMappingMask == 0 {
|
if c.info&hasMappingMask == 0 {
|
||||||
|
@ -352,25 +279,3 @@ func foldFull(c *context) bool {
|
||||||
}
|
}
|
||||||
return c.writeString(e[2 : 2+n])
|
return c.writeString(e[2 : 2+n])
|
||||||
}
|
}
|
||||||
|
|
||||||
// isFoldFull reports whether the current run is mapped to foldFull
|
|
||||||
func isFoldFull(c *context) bool {
|
|
||||||
if c.info&hasMappingMask == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
ct := c.caseType()
|
|
||||||
if c.info&exceptionBit == 0 {
|
|
||||||
if ct != cLower || c.info&inverseFoldBit != 0 {
|
|
||||||
c.err = transform.ErrEndOfSpan
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
e := exceptions[c.info>>exceptionShift:]
|
|
||||||
n := e[0] & lengthMask
|
|
||||||
if n == 0 && ct == cLower {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
c.err = transform.ErrEndOfSpan
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
|
@ -18,15 +18,7 @@ func (t *caseFolder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
|
||||||
return c.ret()
|
return c.ret()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *caseFolder) Span(src []byte, atEOF bool) (n int, err error) {
|
func makeFold(o options) transform.Transformer {
|
||||||
c := context{src: src, atEOF: atEOF}
|
|
||||||
for c.next() && isFoldFull(&c) {
|
|
||||||
c.checkpoint()
|
|
||||||
}
|
|
||||||
return c.retSpan()
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeFold(o options) transform.SpanningTransformer {
|
|
||||||
// TODO: Special case folding, through option Language, Special/Turkic, or
|
// TODO: Special case folding, through option Language, Special/Turkic, or
|
||||||
// both.
|
// both.
|
||||||
// TODO: Implement Compact options.
|
// TODO: Implement Compact options.
|
||||||
|
|
|
@ -76,7 +76,7 @@ type breakCategory int
|
||||||
const (
|
const (
|
||||||
breakBreak breakCategory = iota
|
breakBreak breakCategory = iota
|
||||||
breakLetter
|
breakLetter
|
||||||
breakMid
|
breakIgnored
|
||||||
)
|
)
|
||||||
|
|
||||||
// mapping returns the case mapping for the given case type.
|
// mapping returns the case mapping for the given case type.
|
||||||
|
@ -162,14 +162,9 @@ func parseUCD() []runeInfo {
|
||||||
|
|
||||||
// We collapse the word breaking properties onto the categories we need.
|
// We collapse the word breaking properties onto the categories we need.
|
||||||
switch p.String(1) { // TODO: officially we need to canonicalize.
|
switch p.String(1) { // TODO: officially we need to canonicalize.
|
||||||
case "MidLetter", "MidNumLet", "Single_Quote":
|
case "Format", "MidLetter", "MidNumLet", "Single_Quote":
|
||||||
ri.BreakCat = breakMid
|
ri.BreakCat = breakIgnored
|
||||||
if !ri.CaseIgnorable {
|
case "ALetter", "Hebrew_Letter", "Numeric", "Extend", "ExtendNumLet":
|
||||||
// finalSigma relies on the fact that all breakMid runes are
|
|
||||||
// also a Case_Ignorable. Revisit this code when this changes.
|
|
||||||
log.Fatalf("Rune %U, which has a break category mid, is not a case ignorable", ri)
|
|
||||||
}
|
|
||||||
case "ALetter", "Hebrew_Letter", "Numeric", "Extend", "ExtendNumLet", "Format", "ZWJ":
|
|
||||||
ri.BreakCat = breakLetter
|
ri.BreakCat = breakLetter
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
@ -245,11 +240,8 @@ func makeEntry(ri *runeInfo) {
|
||||||
case above: // Above
|
case above: // Above
|
||||||
ccc = cccAbove
|
ccc = cccAbove
|
||||||
}
|
}
|
||||||
switch ri.BreakCat {
|
if ri.BreakCat == breakBreak {
|
||||||
case breakBreak:
|
|
||||||
ccc = cccBreak
|
ccc = cccBreak
|
||||||
case breakMid:
|
|
||||||
ri.entry |= isMidBit
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ri.entry |= ccc
|
ri.entry |= ccc
|
||||||
|
@ -596,7 +588,7 @@ func verifyProperties(chars []runeInfo) {
|
||||||
// decomposition is greater than U+00FF, the rune is always
|
// decomposition is greater than U+00FF, the rune is always
|
||||||
// great and not a modifier.
|
// great and not a modifier.
|
||||||
if f := runes[0]; unicode.IsMark(f) || f > 0xFF && !unicode.Is(unicode.Greek, f) {
|
if f := runes[0]; unicode.IsMark(f) || f > 0xFF && !unicode.Is(unicode.Greek, f) {
|
||||||
log.Fatalf("%U: expected first rune of Greek decomposition to be letter, found %U", r, f)
|
log.Fatalf("%U: expeced first rune of Greek decomposition to be letter, found %U", r, f)
|
||||||
}
|
}
|
||||||
// A.6.2: Any follow-up rune in a Greek decomposition is a
|
// A.6.2: Any follow-up rune in a Greek decomposition is a
|
||||||
// modifier of which the first should be gobbled in
|
// modifier of which the first should be gobbled in
|
||||||
|
@ -605,7 +597,7 @@ func verifyProperties(chars []runeInfo) {
|
||||||
switch m {
|
switch m {
|
||||||
case 0x0313, 0x0314, 0x0301, 0x0300, 0x0306, 0x0342, 0x0308, 0x0304, 0x345:
|
case 0x0313, 0x0314, 0x0301, 0x0300, 0x0306, 0x0342, 0x0308, 0x0304, 0x345:
|
||||||
default:
|
default:
|
||||||
log.Fatalf("%U: modifier %U is outside of expected Greek modifier set", r, m)
|
log.Fatalf("%U: modifier %U is outside of expeced Greek modifier set", r, m)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -698,7 +690,7 @@ func genTablesTest() {
|
||||||
parse("auxiliary/WordBreakProperty.txt", func(p *ucd.Parser) {
|
parse("auxiliary/WordBreakProperty.txt", func(p *ucd.Parser) {
|
||||||
switch p.String(1) {
|
switch p.String(1) {
|
||||||
case "Extend", "Format", "MidLetter", "MidNumLet", "Single_Quote",
|
case "Extend", "Format", "MidLetter", "MidNumLet", "Single_Quote",
|
||||||
"ALetter", "Hebrew_Letter", "Numeric", "ExtendNumLet", "ZWJ":
|
"ALetter", "Hebrew_Letter", "Numeric", "ExtendNumLet":
|
||||||
notBreak[p.Rune(0)] = true
|
notBreak[p.Rune(0)] = true
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
|
@ -26,7 +26,6 @@ package main
|
||||||
// Only 13..8 are used for XOR patterns.
|
// Only 13..8 are used for XOR patterns.
|
||||||
// 7 inverseFold (fold to upper, not to lower)
|
// 7 inverseFold (fold to upper, not to lower)
|
||||||
// 6 index: interpret the XOR pattern as an index
|
// 6 index: interpret the XOR pattern as an index
|
||||||
// or isMid if case mode is cIgnorableUncased.
|
|
||||||
// 5..4 CCC: zero (normal or break), above or other
|
// 5..4 CCC: zero (normal or break), above or other
|
||||||
// }
|
// }
|
||||||
// 3 exception: interpret this value as an exception index
|
// 3 exception: interpret this value as an exception index
|
||||||
|
@ -49,7 +48,6 @@ const (
|
||||||
ignorableValue = 0x0004
|
ignorableValue = 0x0004
|
||||||
|
|
||||||
inverseFoldBit = 1 << 7
|
inverseFoldBit = 1 << 7
|
||||||
isMidBit = 1 << 6
|
|
||||||
|
|
||||||
exceptionBit = 1 << 3
|
exceptionBit = 1 << 3
|
||||||
exceptionShift = 5
|
exceptionShift = 5
|
||||||
|
@ -59,7 +57,7 @@ const (
|
||||||
xorShift = 8
|
xorShift = 8
|
||||||
|
|
||||||
// There is no mapping if all xor bits and the exception bit are zero.
|
// There is no mapping if all xor bits and the exception bit are zero.
|
||||||
hasMappingMask = 0xff80 | exceptionBit
|
hasMappingMask = 0xffc0 | exceptionBit
|
||||||
)
|
)
|
||||||
|
|
||||||
// The case mode bits encodes the case type of a rune. This includes uncased,
|
// The case mode bits encodes the case type of a rune. This includes uncased,
|
||||||
|
@ -97,6 +95,10 @@ func (c info) isCaseIgnorable() bool {
|
||||||
return c&ignorableMask == ignorableValue
|
return c&ignorableMask == ignorableValue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c info) isCaseIgnorableAndNonBreakStarter() bool {
|
||||||
|
return c&(fullCasedMask|cccMask) == (ignorableValue | cccZero)
|
||||||
|
}
|
||||||
|
|
||||||
func (c info) isNotCasedAndNotCaseIgnorable() bool {
|
func (c info) isNotCasedAndNotCaseIgnorable() bool {
|
||||||
return c&fullCasedMask == 0
|
return c&fullCasedMask == 0
|
||||||
}
|
}
|
||||||
|
@ -105,10 +107,6 @@ func (c info) isCaseIgnorableAndNotCased() bool {
|
||||||
return c&fullCasedMask == cIgnorableUncased
|
return c&fullCasedMask == cIgnorableUncased
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c info) isMid() bool {
|
|
||||||
return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased
|
|
||||||
}
|
|
||||||
|
|
||||||
// The case mapping implementation will need to know about various Canonical
|
// The case mapping implementation will need to know about various Canonical
|
||||||
// Combining Class (CCC) values. We encode two of these in the trie value:
|
// Combining Class (CCC) values. We encode two of these in the trie value:
|
||||||
// cccZero (0) and cccAbove (230). If the value is cccOther, it means that
|
// cccZero (0) and cccAbove (230). If the value is cccOther, it means that
|
||||||
|
|
|
@ -1,61 +0,0 @@
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build icu
|
|
||||||
|
|
||||||
package cases
|
|
||||||
|
|
||||||
// Ideally these functions would be defined in a test file, but go test doesn't
|
|
||||||
// allow CGO in tests. The build tag should ensure either way that these
|
|
||||||
// functions will not end up in the package.
|
|
||||||
|
|
||||||
// TODO: Ensure that the correct ICU version is set.
|
|
||||||
|
|
||||||
/*
|
|
||||||
#cgo LDFLAGS: -licui18n.57 -licuuc.57
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <unicode/ustring.h>
|
|
||||||
#include <unicode/utypes.h>
|
|
||||||
#include <unicode/localpointer.h>
|
|
||||||
#include <unicode/ucasemap.h>
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
|
|
||||||
import "unsafe"
|
|
||||||
|
|
||||||
func doICU(tag, caser, input string) string {
|
|
||||||
err := C.UErrorCode(0)
|
|
||||||
loc := C.CString(tag)
|
|
||||||
cm := C.ucasemap_open(loc, C.uint32_t(0), &err)
|
|
||||||
|
|
||||||
buf := make([]byte, len(input)*4)
|
|
||||||
dst := (*C.char)(unsafe.Pointer(&buf[0]))
|
|
||||||
src := C.CString(input)
|
|
||||||
|
|
||||||
cn := C.int32_t(0)
|
|
||||||
|
|
||||||
switch caser {
|
|
||||||
case "fold":
|
|
||||||
cn = C.ucasemap_utf8FoldCase(cm,
|
|
||||||
dst, C.int32_t(len(buf)),
|
|
||||||
src, C.int32_t(len(input)),
|
|
||||||
&err)
|
|
||||||
case "lower":
|
|
||||||
cn = C.ucasemap_utf8ToLower(cm,
|
|
||||||
dst, C.int32_t(len(buf)),
|
|
||||||
src, C.int32_t(len(input)),
|
|
||||||
&err)
|
|
||||||
case "upper":
|
|
||||||
cn = C.ucasemap_utf8ToUpper(cm,
|
|
||||||
dst, C.int32_t(len(buf)),
|
|
||||||
src, C.int32_t(len(input)),
|
|
||||||
&err)
|
|
||||||
case "title":
|
|
||||||
cn = C.ucasemap_utf8ToTitle(cm,
|
|
||||||
dst, C.int32_t(len(buf)),
|
|
||||||
src, C.int32_t(len(input)),
|
|
||||||
&err)
|
|
||||||
}
|
|
||||||
return string(buf[:cn])
|
|
||||||
}
|
|
|
@ -28,6 +28,9 @@ func (c info) cccType() info {
|
||||||
// only makes sense, though, if the performance and/or space penalty of using
|
// only makes sense, though, if the performance and/or space penalty of using
|
||||||
// the generic breaker is big. Extra data will only be needed for non-cased
|
// the generic breaker is big. Extra data will only be needed for non-cased
|
||||||
// runes, which means there are sufficient bits left in the caseType.
|
// runes, which means there are sufficient bits left in the caseType.
|
||||||
|
// Also note that the standard breaking algorithm doesn't always make sense
|
||||||
|
// for title casing. For example, a4a -> A4a, but a"4a -> A"4A (where " stands
|
||||||
|
// for modifier \u0308).
|
||||||
// ICU prohibits breaking in such cases as well.
|
// ICU prohibits breaking in such cases as well.
|
||||||
|
|
||||||
// For the purpose of title casing we use an approximation of the Unicode Word
|
// For the purpose of title casing we use an approximation of the Unicode Word
|
||||||
|
@ -38,19 +41,17 @@ func (c info) cccType() info {
|
||||||
// categories, with associated rules:
|
// categories, with associated rules:
|
||||||
//
|
//
|
||||||
// 1) Letter:
|
// 1) Letter:
|
||||||
// ALetter, Hebrew_Letter, Numeric, ExtendNumLet, Extend, Format_FE, ZWJ.
|
// ALetter, Hebrew_Letter, Numeric, ExtendNumLet, Extend.
|
||||||
// Rule: Never break between consecutive runes of this category.
|
// Rule: Never break between consecutive runes of this category.
|
||||||
//
|
//
|
||||||
// 2) Mid:
|
// 2) Mid:
|
||||||
// MidLetter, MidNumLet, Single_Quote.
|
// Format, MidLetter, MidNumLet, Single_Quote.
|
||||||
// (Cf. case-ignorable: MidLetter, MidNumLet, Single_Quote or cat is Mn,
|
// (Cf. case-ignorable: MidLetter, MidNumLet or cat is Mn, Me, Cf, Lm or Sk).
|
||||||
// Me, Cf, Lm or Sk).
|
|
||||||
// Rule: Don't break between Letter and Mid, but break between two Mids.
|
// Rule: Don't break between Letter and Mid, but break between two Mids.
|
||||||
//
|
//
|
||||||
// 3) Break:
|
// 3) Break:
|
||||||
// Any other category: NewLine, MidNum, CR, LF, Double_Quote, Katakana, and
|
// Any other category, including NewLine, CR, LF and Double_Quote. These
|
||||||
// Other.
|
// categories should always result in a break between two cased letters.
|
||||||
// These categories should always result in a break between two cased letters.
|
|
||||||
// Rule: Always break.
|
// Rule: Always break.
|
||||||
//
|
//
|
||||||
// Note 1: the Katakana and MidNum categories can, in esoteric cases, result in
|
// Note 1: the Katakana and MidNum categories can, in esoteric cases, result in
|
||||||
|
|
|
@ -13,7 +13,6 @@ import (
|
||||||
"unicode"
|
"unicode"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"golang.org/x/text/internal"
|
|
||||||
"golang.org/x/text/language"
|
"golang.org/x/text/language"
|
||||||
"golang.org/x/text/transform"
|
"golang.org/x/text/transform"
|
||||||
"golang.org/x/text/unicode/norm"
|
"golang.org/x/text/unicode/norm"
|
||||||
|
@ -25,11 +24,6 @@ import (
|
||||||
// dst so far won't need changing as we see more source bytes.
|
// dst so far won't need changing as we see more source bytes.
|
||||||
type mapFunc func(*context) bool
|
type mapFunc func(*context) bool
|
||||||
|
|
||||||
// A spanFunc takes a context set to the current rune and returns whether this
|
|
||||||
// rune would be altered when written to the output. It may advance the context
|
|
||||||
// to the next rune. It returns whether a checkpoint is possible.
|
|
||||||
type spanFunc func(*context) bool
|
|
||||||
|
|
||||||
// maxIgnorable defines the maximum number of ignorables to consider for
|
// maxIgnorable defines the maximum number of ignorables to consider for
|
||||||
// lookahead operations.
|
// lookahead operations.
|
||||||
const maxIgnorable = 30
|
const maxIgnorable = 30
|
||||||
|
@ -42,12 +36,12 @@ func init() {
|
||||||
for _, s := range strings.Split(supported, " ") {
|
for _, s := range strings.Split(supported, " ") {
|
||||||
tags = append(tags, language.MustParse(s))
|
tags = append(tags, language.MustParse(s))
|
||||||
}
|
}
|
||||||
matcher = internal.NewInheritanceMatcher(tags)
|
matcher = language.NewMatcher(tags)
|
||||||
Supported = language.NewCoverage(tags)
|
Supported = language.NewCoverage(tags)
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
matcher *internal.InheritanceMatcher
|
matcher language.Matcher
|
||||||
|
|
||||||
Supported language.Coverage
|
Supported language.Coverage
|
||||||
|
|
||||||
|
@ -56,69 +50,56 @@ var (
|
||||||
|
|
||||||
// Some uppercase mappers are stateless, so we can precompute the
|
// Some uppercase mappers are stateless, so we can precompute the
|
||||||
// Transformers and save a bit on runtime allocations.
|
// Transformers and save a bit on runtime allocations.
|
||||||
upperFunc = []struct {
|
upperFunc = []mapFunc{
|
||||||
upper mapFunc
|
|
||||||
span spanFunc
|
|
||||||
}{
|
|
||||||
{nil, nil}, // und
|
|
||||||
{nil, nil}, // af
|
|
||||||
{aztrUpper(upper), isUpper}, // az
|
|
||||||
{elUpper, noSpan}, // el
|
|
||||||
{ltUpper(upper), noSpan}, // lt
|
|
||||||
{nil, nil}, // nl
|
|
||||||
{aztrUpper(upper), isUpper}, // tr
|
|
||||||
}
|
|
||||||
|
|
||||||
undUpper transform.SpanningTransformer = &undUpperCaser{}
|
|
||||||
undLower transform.SpanningTransformer = &undLowerCaser{}
|
|
||||||
undLowerIgnoreSigma transform.SpanningTransformer = &undLowerIgnoreSigmaCaser{}
|
|
||||||
|
|
||||||
lowerFunc = []mapFunc{
|
|
||||||
nil, // und
|
nil, // und
|
||||||
nil, // af
|
nil, // af
|
||||||
aztrLower, // az
|
aztrUpper(upper), // az
|
||||||
nil, // el
|
elUpper, // el
|
||||||
ltLower, // lt
|
ltUpper(upper), // lt
|
||||||
nil, // nl
|
nil, // nl
|
||||||
|
aztrUpper(upper), // tr
|
||||||
|
}
|
||||||
|
|
||||||
|
undUpper transform.Transformer = &undUpperCaser{}
|
||||||
|
|
||||||
|
lowerFunc = []mapFunc{
|
||||||
|
lower, // und
|
||||||
|
lower, // af
|
||||||
|
aztrLower, // az
|
||||||
|
lower, // el
|
||||||
|
ltLower, // lt
|
||||||
|
lower, // nl
|
||||||
aztrLower, // tr
|
aztrLower, // tr
|
||||||
}
|
}
|
||||||
|
|
||||||
titleInfos = []struct {
|
titleInfos = []struct {
|
||||||
title mapFunc
|
title, lower mapFunc
|
||||||
lower mapFunc
|
|
||||||
titleSpan spanFunc
|
|
||||||
rewrite func(*context)
|
rewrite func(*context)
|
||||||
}{
|
}{
|
||||||
{title, lower, isTitle, nil}, // und
|
{title, lower, nil}, // und
|
||||||
{title, lower, isTitle, afnlRewrite}, // af
|
{title, lower, afnlRewrite}, // af
|
||||||
{aztrUpper(title), aztrLower, isTitle, nil}, // az
|
{aztrUpper(title), aztrLower, nil}, // az
|
||||||
{title, lower, isTitle, nil}, // el
|
{title, lower, nil}, // el
|
||||||
{ltUpper(title), ltLower, noSpan, nil}, // lt
|
{ltUpper(title), ltLower, nil}, // lt
|
||||||
{nlTitle, lower, nlTitleSpan, afnlRewrite}, // nl
|
{nlTitle, lower, afnlRewrite}, // nl
|
||||||
{aztrUpper(title), aztrLower, isTitle, nil}, // tr
|
{aztrUpper(title), aztrLower, nil}, // tr
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func makeUpper(t language.Tag, o options) transform.SpanningTransformer {
|
func makeUpper(t language.Tag, o options) transform.Transformer {
|
||||||
_, i, _ := matcher.Match(t)
|
_, i, _ := matcher.Match(t)
|
||||||
f := upperFunc[i].upper
|
f := upperFunc[i]
|
||||||
if f == nil {
|
if f == nil {
|
||||||
return undUpper
|
return undUpper
|
||||||
}
|
}
|
||||||
return &simpleCaser{f: f, span: upperFunc[i].span}
|
return &simpleCaser{f: f}
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeLower(t language.Tag, o options) transform.SpanningTransformer {
|
func makeLower(t language.Tag, o options) transform.Transformer {
|
||||||
_, i, _ := matcher.Match(t)
|
_, i, _ := matcher.Match(t)
|
||||||
f := lowerFunc[i]
|
f := lowerFunc[i]
|
||||||
if f == nil {
|
if o.noFinalSigma {
|
||||||
if o.ignoreFinalSigma {
|
return &simpleCaser{f: f}
|
||||||
return undLowerIgnoreSigma
|
|
||||||
}
|
|
||||||
return undLower
|
|
||||||
}
|
|
||||||
if o.ignoreFinalSigma {
|
|
||||||
return &simpleCaser{f: f, span: isLower}
|
|
||||||
}
|
}
|
||||||
return &lowerCaser{
|
return &lowerCaser{
|
||||||
first: f,
|
first: f,
|
||||||
|
@ -126,28 +107,22 @@ func makeLower(t language.Tag, o options) transform.SpanningTransformer {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeTitle(t language.Tag, o options) transform.SpanningTransformer {
|
func makeTitle(t language.Tag, o options) transform.Transformer {
|
||||||
_, i, _ := matcher.Match(t)
|
_, i, _ := matcher.Match(t)
|
||||||
x := &titleInfos[i]
|
x := &titleInfos[i]
|
||||||
lower := x.lower
|
lower := x.lower
|
||||||
if o.noLower {
|
if o.noLower {
|
||||||
lower = (*context).copy
|
lower = (*context).copy
|
||||||
} else if !o.ignoreFinalSigma {
|
} else if !o.noFinalSigma {
|
||||||
lower = finalSigma(lower)
|
lower = finalSigma(lower)
|
||||||
}
|
}
|
||||||
return &titleCaser{
|
return &titleCaser{
|
||||||
title: x.title,
|
title: x.title,
|
||||||
lower: lower,
|
lower: lower,
|
||||||
titleSpan: x.titleSpan,
|
|
||||||
rewrite: x.rewrite,
|
rewrite: x.rewrite,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func noSpan(c *context) bool {
|
|
||||||
c.err = transform.ErrEndOfSpan
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: consider a similar special case for the fast majority lower case. This
|
// TODO: consider a similar special case for the fast majority lower case. This
|
||||||
// is a bit more involved so will require some more precise benchmarking to
|
// is a bit more involved so will require some more precise benchmarking to
|
||||||
// justify it.
|
// justify it.
|
||||||
|
@ -157,7 +132,7 @@ type undUpperCaser struct{ transform.NopResetter }
|
||||||
// undUpperCaser implements the Transformer interface for doing an upper case
|
// undUpperCaser implements the Transformer interface for doing an upper case
|
||||||
// mapping for the root locale (und). It eliminates the need for an allocation
|
// mapping for the root locale (und). It eliminates the need for an allocation
|
||||||
// as it prevents escaping by not using function pointers.
|
// as it prevents escaping by not using function pointers.
|
||||||
func (t undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (t *undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
c := context{dst: dst, src: src, atEOF: atEOF}
|
c := context{dst: dst, src: src, atEOF: atEOF}
|
||||||
for c.next() {
|
for c.next() {
|
||||||
upper(&c)
|
upper(&c)
|
||||||
|
@ -166,117 +141,26 @@ func (t undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, e
|
||||||
return c.ret()
|
return c.ret()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t undUpperCaser) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
c := context{src: src, atEOF: atEOF}
|
|
||||||
for c.next() && isUpper(&c) {
|
|
||||||
c.checkpoint()
|
|
||||||
}
|
|
||||||
return c.retSpan()
|
|
||||||
}
|
|
||||||
|
|
||||||
// undLowerIgnoreSigmaCaser implements the Transformer interface for doing
|
|
||||||
// a lower case mapping for the root locale (und) ignoring final sigma
|
|
||||||
// handling. This casing algorithm is used in some performance-critical packages
|
|
||||||
// like secure/precis and x/net/http/idna, which warrants its special-casing.
|
|
||||||
type undLowerIgnoreSigmaCaser struct{ transform.NopResetter }
|
|
||||||
|
|
||||||
func (t undLowerIgnoreSigmaCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
|
||||||
c := context{dst: dst, src: src, atEOF: atEOF}
|
|
||||||
for c.next() && lower(&c) {
|
|
||||||
c.checkpoint()
|
|
||||||
}
|
|
||||||
return c.ret()
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Span implements a generic lower-casing. This is possible as isLower works
|
|
||||||
// for all lowercasing variants. All lowercase variants only vary in how they
|
|
||||||
// transform a non-lowercase letter. They will never change an already lowercase
|
|
||||||
// letter. In addition, there is no state.
|
|
||||||
func (t undLowerIgnoreSigmaCaser) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
c := context{src: src, atEOF: atEOF}
|
|
||||||
for c.next() && isLower(&c) {
|
|
||||||
c.checkpoint()
|
|
||||||
}
|
|
||||||
return c.retSpan()
|
|
||||||
}
|
|
||||||
|
|
||||||
type simpleCaser struct {
|
type simpleCaser struct {
|
||||||
context
|
context
|
||||||
f mapFunc
|
f mapFunc
|
||||||
span spanFunc
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// simpleCaser implements the Transformer interface for doing a case operation
|
// simpleCaser implements the Transformer interface for doing a case operation
|
||||||
// on a rune-by-rune basis.
|
// on a rune-by-rune basis.
|
||||||
func (t *simpleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (t *simpleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
c := context{dst: dst, src: src, atEOF: atEOF}
|
t.context = context{dst: dst, src: src, atEOF: atEOF}
|
||||||
for c.next() && t.f(&c) {
|
c := &t.context
|
||||||
|
for c.next() && t.f(c) {
|
||||||
c.checkpoint()
|
c.checkpoint()
|
||||||
}
|
}
|
||||||
return c.ret()
|
return c.ret()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *simpleCaser) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
c := context{src: src, atEOF: atEOF}
|
|
||||||
for c.next() && t.span(&c) {
|
|
||||||
c.checkpoint()
|
|
||||||
}
|
|
||||||
return c.retSpan()
|
|
||||||
}
|
|
||||||
|
|
||||||
// undLowerCaser implements the Transformer interface for doing a lower case
|
|
||||||
// mapping for the root locale (und) ignoring final sigma handling. This casing
|
|
||||||
// algorithm is used in some performance-critical packages like secure/precis
|
|
||||||
// and x/net/http/idna, which warrants its special-casing.
|
|
||||||
type undLowerCaser struct{ transform.NopResetter }
|
|
||||||
|
|
||||||
func (t undLowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
|
||||||
c := context{dst: dst, src: src, atEOF: atEOF}
|
|
||||||
|
|
||||||
for isInterWord := true; c.next(); {
|
|
||||||
if isInterWord {
|
|
||||||
if c.info.isCased() {
|
|
||||||
if !lower(&c) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
isInterWord = false
|
|
||||||
} else if !c.copy() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if c.info.isNotCasedAndNotCaseIgnorable() {
|
|
||||||
if !c.copy() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
isInterWord = true
|
|
||||||
} else if !c.hasPrefix("Σ") {
|
|
||||||
if !lower(&c) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else if !finalSigmaBody(&c) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.checkpoint()
|
|
||||||
}
|
|
||||||
return c.ret()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t undLowerCaser) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
c := context{src: src, atEOF: atEOF}
|
|
||||||
for c.next() && isLower(&c) {
|
|
||||||
c.checkpoint()
|
|
||||||
}
|
|
||||||
return c.retSpan()
|
|
||||||
}
|
|
||||||
|
|
||||||
// lowerCaser implements the Transformer interface. The default Unicode lower
|
// lowerCaser implements the Transformer interface. The default Unicode lower
|
||||||
// casing requires different treatment for the first and subsequent characters
|
// casing requires different treatment for the first and subsequent characters
|
||||||
// of a word, most notably to handle the Greek final Sigma.
|
// of a word, most notably to handle the Greek final Sigma.
|
||||||
type lowerCaser struct {
|
type lowerCaser struct {
|
||||||
undLowerIgnoreSigmaCaser
|
|
||||||
|
|
||||||
context
|
context
|
||||||
|
|
||||||
first, midWord mapFunc
|
first, midWord mapFunc
|
||||||
|
@ -318,9 +202,7 @@ type titleCaser struct {
|
||||||
context
|
context
|
||||||
|
|
||||||
// rune mappings used by the actual casing algorithms.
|
// rune mappings used by the actual casing algorithms.
|
||||||
title mapFunc
|
title, lower mapFunc
|
||||||
lower mapFunc
|
|
||||||
titleSpan spanFunc
|
|
||||||
|
|
||||||
rewrite func(*context)
|
rewrite func(*context)
|
||||||
}
|
}
|
||||||
|
@ -346,10 +228,10 @@ func (t *titleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
|
||||||
t.rewrite(c)
|
t.rewrite(c)
|
||||||
}
|
}
|
||||||
|
|
||||||
wasMid := p.isMid()
|
wasMid := p.isCaseIgnorableAndNonBreakStarter()
|
||||||
// Break out of this loop on failure to ensure we do not modify the
|
// Break out of this loop on failure to ensure we do not modify the
|
||||||
// state incorrectly.
|
// state incorrectly.
|
||||||
if p.isCased() {
|
if p.isCased() && !p.isCaseIgnorableAndNotCased() {
|
||||||
if !c.isMidWord {
|
if !c.isMidWord {
|
||||||
if !t.title(c) {
|
if !t.title(c) {
|
||||||
break
|
break
|
||||||
|
@ -360,86 +242,33 @@ func (t *titleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
|
||||||
}
|
}
|
||||||
} else if !c.copy() {
|
} else if !c.copy() {
|
||||||
break
|
break
|
||||||
} else if p.isBreak() {
|
}
|
||||||
|
|
||||||
|
// TODO: make this an "else if" if we can prove that no rune that does
|
||||||
|
// not match the first condition of the if statement can be a break.
|
||||||
|
if p.isBreak() {
|
||||||
c.isMidWord = false
|
c.isMidWord = false
|
||||||
}
|
}
|
||||||
|
|
||||||
// As we save the state of the transformer, it is safe to call
|
// As we save the state of the transformer, it is safe to call
|
||||||
// checkpoint after any successful write.
|
// checkpoint after any successful write.
|
||||||
if !(c.isMidWord && wasMid) {
|
|
||||||
c.checkpoint()
|
c.checkpoint()
|
||||||
}
|
|
||||||
|
|
||||||
if !c.next() {
|
if !c.next() {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if wasMid && c.info.isMid() {
|
if wasMid && c.info.isCaseIgnorableAndNonBreakStarter() {
|
||||||
c.isMidWord = false
|
c.isMidWord = false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return c.ret()
|
return c.ret()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *titleCaser) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
t.context = context{src: src, atEOF: atEOF, isMidWord: t.isMidWord}
|
|
||||||
c := &t.context
|
|
||||||
|
|
||||||
if !c.next() {
|
|
||||||
return c.retSpan()
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
p := c.info
|
|
||||||
if t.rewrite != nil {
|
|
||||||
t.rewrite(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
wasMid := p.isMid()
|
|
||||||
// Break out of this loop on failure to ensure we do not modify the
|
|
||||||
// state incorrectly.
|
|
||||||
if p.isCased() {
|
|
||||||
if !c.isMidWord {
|
|
||||||
if !t.titleSpan(c) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
c.isMidWord = true
|
|
||||||
} else if !isLower(c) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else if p.isBreak() {
|
|
||||||
c.isMidWord = false
|
|
||||||
}
|
|
||||||
// As we save the state of the transformer, it is safe to call
|
|
||||||
// checkpoint after any successful write.
|
|
||||||
if !(c.isMidWord && wasMid) {
|
|
||||||
c.checkpoint()
|
|
||||||
}
|
|
||||||
|
|
||||||
if !c.next() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if wasMid && c.info.isMid() {
|
|
||||||
c.isMidWord = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return c.retSpan()
|
|
||||||
}
|
|
||||||
|
|
||||||
// finalSigma adds Greek final Sigma handing to another casing function. It
|
// finalSigma adds Greek final Sigma handing to another casing function. It
|
||||||
// determines whether a lowercased sigma should be σ or ς, by looking ahead for
|
// determines whether a lowercased sigma should be σ or ς, by looking ahead for
|
||||||
// case-ignorables and a cased letters.
|
// case-ignorables and a cased letters.
|
||||||
func finalSigma(f mapFunc) mapFunc {
|
func finalSigma(f mapFunc) mapFunc {
|
||||||
return func(c *context) bool {
|
return func(c *context) bool {
|
||||||
if !c.hasPrefix("Σ") {
|
|
||||||
return f(c)
|
|
||||||
}
|
|
||||||
return finalSigmaBody(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func finalSigmaBody(c *context) bool {
|
|
||||||
// Current rune must be ∑.
|
|
||||||
|
|
||||||
// ::NFD();
|
// ::NFD();
|
||||||
// # 03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA
|
// # 03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA
|
||||||
// Σ } [:case-ignorable:]* [:cased:] → σ;
|
// Σ } [:case-ignorable:]* [:cased:] → σ;
|
||||||
|
@ -447,29 +276,19 @@ func finalSigmaBody(c *context) bool {
|
||||||
// ::Any-Lower;
|
// ::Any-Lower;
|
||||||
// ::NFC();
|
// ::NFC();
|
||||||
|
|
||||||
|
if !c.hasPrefix("Σ") {
|
||||||
|
return f(c)
|
||||||
|
}
|
||||||
|
|
||||||
p := c.pDst
|
p := c.pDst
|
||||||
c.writeString("ς")
|
c.writeString("ς")
|
||||||
|
|
||||||
// TODO: we should do this here, but right now this will never have an
|
|
||||||
// effect as this is called when the prefix is Sigma, whereas Dutch and
|
|
||||||
// Afrikaans only test for an apostrophe.
|
|
||||||
//
|
|
||||||
// if t.rewrite != nil {
|
|
||||||
// t.rewrite(c)
|
|
||||||
// }
|
|
||||||
|
|
||||||
// We need to do one more iteration after maxIgnorable, as a cased
|
// We need to do one more iteration after maxIgnorable, as a cased
|
||||||
// letter is not an ignorable and may modify the result.
|
// letter is not an ignorable and may modify the result.
|
||||||
wasMid := false
|
|
||||||
for i := 0; i < maxIgnorable+1; i++ {
|
for i := 0; i < maxIgnorable+1; i++ {
|
||||||
if !c.next() {
|
if !c.next() {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if !c.info.isCaseIgnorable() {
|
if !c.info.isCaseIgnorable() {
|
||||||
// All Midword runes are also case ignorable, so we are
|
|
||||||
// guaranteed to have a letter or word break here. As we are
|
|
||||||
// unreading the run, there is no need to unset c.isMidWord;
|
|
||||||
// the title caser will handle this.
|
|
||||||
if c.info.isCased() {
|
if c.info.isCased() {
|
||||||
// p+1 is guaranteed to be in bounds: if writing ς was
|
// p+1 is guaranteed to be in bounds: if writing ς was
|
||||||
// successful, p+1 will contain the second byte of ς. If not,
|
// successful, p+1 will contain the second byte of ς. If not,
|
||||||
|
@ -481,18 +300,13 @@ func finalSigmaBody(c *context) bool {
|
||||||
}
|
}
|
||||||
// A case ignorable may also introduce a word break, so we may need
|
// A case ignorable may also introduce a word break, so we may need
|
||||||
// to continue searching even after detecting a break.
|
// to continue searching even after detecting a break.
|
||||||
isMid := c.info.isMid()
|
c.isMidWord = c.isMidWord && !c.info.isBreak()
|
||||||
if (wasMid && isMid) || c.info.isBreak() {
|
|
||||||
c.isMidWord = false
|
|
||||||
}
|
|
||||||
wasMid = isMid
|
|
||||||
c.copy()
|
c.copy()
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// finalSigmaSpan would be the same as isLower.
|
|
||||||
|
|
||||||
// elUpper implements Greek upper casing, which entails removing a predefined
|
// elUpper implements Greek upper casing, which entails removing a predefined
|
||||||
// set of non-blocked modifiers. Note that these accents should not be removed
|
// set of non-blocked modifiers. Note that these accents should not be removed
|
||||||
// for title casing!
|
// for title casing!
|
||||||
|
@ -562,8 +376,6 @@ func elUpper(c *context) bool {
|
||||||
return i == maxIgnorable
|
return i == maxIgnorable
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: implement elUpperSpan (low-priority: complex and infrequent).
|
|
||||||
|
|
||||||
func ltLower(c *context) bool {
|
func ltLower(c *context) bool {
|
||||||
// From CLDR:
|
// From CLDR:
|
||||||
// # Introduce an explicit dot above when lowercasing capital I's and J's
|
// # Introduce an explicit dot above when lowercasing capital I's and J's
|
||||||
|
@ -578,10 +390,10 @@ func ltLower(c *context) bool {
|
||||||
// ::NFD();
|
// ::NFD();
|
||||||
// I } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0307;
|
// I } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0307;
|
||||||
// J } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → j \u0307;
|
// J } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → j \u0307;
|
||||||
// I \u0328 (Į) } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0328 \u0307;
|
// Į } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → į \u0307;
|
||||||
// I \u0300 (Ì) → i \u0307 \u0300;
|
// Ì → i \u0307 \u0300;
|
||||||
// I \u0301 (Í) → i \u0307 \u0301;
|
// Í → i \u0307 \u0301;
|
||||||
// I \u0303 (Ĩ) → i \u0307 \u0303;
|
// Ĩ → i \u0307 \u0303;
|
||||||
// ::Any-Lower();
|
// ::Any-Lower();
|
||||||
// ::NFC();
|
// ::NFC();
|
||||||
|
|
||||||
|
@ -633,16 +445,9 @@ func ltLower(c *context) bool {
|
||||||
return i == maxIgnorable
|
return i == maxIgnorable
|
||||||
}
|
}
|
||||||
|
|
||||||
// ltLowerSpan would be the same as isLower.
|
|
||||||
|
|
||||||
func ltUpper(f mapFunc) mapFunc {
|
func ltUpper(f mapFunc) mapFunc {
|
||||||
return func(c *context) bool {
|
return func(c *context) bool {
|
||||||
// Unicode:
|
|
||||||
// 0307; 0307; ; ; lt After_Soft_Dotted; # COMBINING DOT ABOVE
|
|
||||||
//
|
|
||||||
// From CLDR:
|
// From CLDR:
|
||||||
// # Remove \u0307 following soft-dotteds (i, j, and the like), with possible
|
|
||||||
// # intervening non-230 marks.
|
|
||||||
// ::NFD();
|
// ::NFD();
|
||||||
// [:Soft_Dotted:] [^[:ccc=Not_Reordered:][:ccc=Above:]]* { \u0307 → ;
|
// [:Soft_Dotted:] [^[:ccc=Not_Reordered:][:ccc=Above:]]* { \u0307 → ;
|
||||||
// ::Any-Upper();
|
// ::Any-Upper();
|
||||||
|
@ -706,8 +511,6 @@ func ltUpper(f mapFunc) mapFunc {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: implement ltUpperSpan (low priority: complex and infrequent).
|
|
||||||
|
|
||||||
func aztrUpper(f mapFunc) mapFunc {
|
func aztrUpper(f mapFunc) mapFunc {
|
||||||
return func(c *context) bool {
|
return func(c *context) bool {
|
||||||
// i→İ;
|
// i→İ;
|
||||||
|
@ -768,8 +571,6 @@ Loop:
|
||||||
return c.writeString("ı") && c.writeBytes(c.src[start:c.pSrc+c.sz]) && done
|
return c.writeString("ı") && c.writeBytes(c.src[start:c.pSrc+c.sz]) && done
|
||||||
}
|
}
|
||||||
|
|
||||||
// aztrLowerSpan would be the same as isLower.
|
|
||||||
|
|
||||||
func nlTitle(c *context) bool {
|
func nlTitle(c *context) bool {
|
||||||
// From CLDR:
|
// From CLDR:
|
||||||
// # Special titlecasing for Dutch initial "ij".
|
// # Special titlecasing for Dutch initial "ij".
|
||||||
|
@ -790,24 +591,6 @@ func nlTitle(c *context) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func nlTitleSpan(c *context) bool {
|
|
||||||
// From CLDR:
|
|
||||||
// # Special titlecasing for Dutch initial "ij".
|
|
||||||
// ::Any-Title();
|
|
||||||
// # Fix up Ij at the beginning of a "word" (per Any-Title, notUAX #29)
|
|
||||||
// [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? { Ij } → IJ ;
|
|
||||||
if c.src[c.pSrc] != 'I' {
|
|
||||||
return isTitle(c)
|
|
||||||
}
|
|
||||||
if !c.next() || c.src[c.pSrc] == 'j' {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if c.src[c.pSrc] != 'J' {
|
|
||||||
c.unreadRune()
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Not part of CLDR, but see http://unicode.org/cldr/trac/ticket/7078.
|
// Not part of CLDR, but see http://unicode.org/cldr/trac/ticket/7078.
|
||||||
func afnlRewrite(c *context) {
|
func afnlRewrite(c *context) {
|
||||||
if c.hasPrefix("'") || c.hasPrefix("’") {
|
if c.hasPrefix("'") || c.hasPrefix("’") {
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,4 +1,4 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
// This file was generated by go generate; DO NOT EDIT
|
||||||
|
|
||||||
package cases
|
package cases
|
||||||
|
|
||||||
|
@ -22,7 +22,6 @@ package cases
|
||||||
// Only 13..8 are used for XOR patterns.
|
// Only 13..8 are used for XOR patterns.
|
||||||
// 7 inverseFold (fold to upper, not to lower)
|
// 7 inverseFold (fold to upper, not to lower)
|
||||||
// 6 index: interpret the XOR pattern as an index
|
// 6 index: interpret the XOR pattern as an index
|
||||||
// or isMid if case mode is cIgnorableUncased.
|
|
||||||
// 5..4 CCC: zero (normal or break), above or other
|
// 5..4 CCC: zero (normal or break), above or other
|
||||||
// }
|
// }
|
||||||
// 3 exception: interpret this value as an exception index
|
// 3 exception: interpret this value as an exception index
|
||||||
|
@ -45,7 +44,6 @@ const (
|
||||||
ignorableValue = 0x0004
|
ignorableValue = 0x0004
|
||||||
|
|
||||||
inverseFoldBit = 1 << 7
|
inverseFoldBit = 1 << 7
|
||||||
isMidBit = 1 << 6
|
|
||||||
|
|
||||||
exceptionBit = 1 << 3
|
exceptionBit = 1 << 3
|
||||||
exceptionShift = 5
|
exceptionShift = 5
|
||||||
|
@ -55,7 +53,7 @@ const (
|
||||||
xorShift = 8
|
xorShift = 8
|
||||||
|
|
||||||
// There is no mapping if all xor bits and the exception bit are zero.
|
// There is no mapping if all xor bits and the exception bit are zero.
|
||||||
hasMappingMask = 0xff80 | exceptionBit
|
hasMappingMask = 0xffc0 | exceptionBit
|
||||||
)
|
)
|
||||||
|
|
||||||
// The case mode bits encodes the case type of a rune. This includes uncased,
|
// The case mode bits encodes the case type of a rune. This includes uncased,
|
||||||
|
@ -93,6 +91,10 @@ func (c info) isCaseIgnorable() bool {
|
||||||
return c&ignorableMask == ignorableValue
|
return c&ignorableMask == ignorableValue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c info) isCaseIgnorableAndNonBreakStarter() bool {
|
||||||
|
return c&(fullCasedMask|cccMask) == (ignorableValue | cccZero)
|
||||||
|
}
|
||||||
|
|
||||||
func (c info) isNotCasedAndNotCaseIgnorable() bool {
|
func (c info) isNotCasedAndNotCaseIgnorable() bool {
|
||||||
return c&fullCasedMask == 0
|
return c&fullCasedMask == 0
|
||||||
}
|
}
|
||||||
|
@ -101,10 +103,6 @@ func (c info) isCaseIgnorableAndNotCased() bool {
|
||||||
return c&fullCasedMask == cIgnorableUncased
|
return c&fullCasedMask == cIgnorableUncased
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c info) isMid() bool {
|
|
||||||
return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased
|
|
||||||
}
|
|
||||||
|
|
||||||
// The case mapping implementation will need to know about various Canonical
|
// The case mapping implementation will need to know about various Canonical
|
||||||
// Combining Class (CCC) values. We encode two of these in the trie value:
|
// Combining Class (CCC) values. We encode two of these in the trie value:
|
||||||
// cccZero (0) and cccAbove (230). If the value is cccOther, it means that
|
// cccZero (0) and cccAbove (230). If the value is cccOther, it means that
|
||||||
|
|
|
@ -52,7 +52,7 @@ type Decoder struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bytes converts the given encoded bytes to UTF-8. It returns the converted
|
// Bytes converts the given encoded bytes to UTF-8. It returns the converted
|
||||||
// bytes or nil, err if any error occurred.
|
// bytes or 0, err if any error occurred.
|
||||||
func (d *Decoder) Bytes(b []byte) ([]byte, error) {
|
func (d *Decoder) Bytes(b []byte) ([]byte, error) {
|
||||||
b, _, err := transform.Bytes(d, b)
|
b, _, err := transform.Bytes(d, b)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -62,7 +62,7 @@ func (d *Decoder) Bytes(b []byte) ([]byte, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// String converts the given encoded string to UTF-8. It returns the converted
|
// String converts the given encoded string to UTF-8. It returns the converted
|
||||||
// string or "", err if any error occurred.
|
// string or 0, err if any error occurred.
|
||||||
func (d *Decoder) String(s string) (string, error) {
|
func (d *Decoder) String(s string) (string, error) {
|
||||||
s, _, err := transform.String(d, s)
|
s, _, err := transform.String(d, s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -95,7 +95,7 @@ type Encoder struct {
|
||||||
_ struct{}
|
_ struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if
|
// Bytes converts bytes from UTF-8. It returns the converted bytes or 0, err if
|
||||||
// any error occurred.
|
// any error occurred.
|
||||||
func (e *Encoder) Bytes(b []byte) ([]byte, error) {
|
func (e *Encoder) Bytes(b []byte) ([]byte, error) {
|
||||||
b, _, err := transform.Bytes(e, b)
|
b, _, err := transform.Bytes(e, b)
|
||||||
|
@ -106,7 +106,7 @@ func (e *Encoder) Bytes(b []byte) ([]byte, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// String converts a string from UTF-8. It returns the converted string or
|
// String converts a string from UTF-8. It returns the converted string or
|
||||||
// "", err if any error occurred.
|
// 0, err if any error occurred.
|
||||||
func (e *Encoder) String(s string) (string, error) {
|
func (e *Encoder) String(s string) (string, error) {
|
||||||
s, _, err := transform.String(e, s)
|
s, _, err := transform.String(e, s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -36,8 +36,8 @@ package identifier
|
||||||
// - http://www.ietf.org/rfc/rfc2978.txt
|
// - http://www.ietf.org/rfc/rfc2978.txt
|
||||||
// - http://www.unicode.org/reports/tr22/
|
// - http://www.unicode.org/reports/tr22/
|
||||||
// - http://www.w3.org/TR/encoding/
|
// - http://www.w3.org/TR/encoding/
|
||||||
|
// - http://www.w3.org/TR/encoding/indexes/encodings.json
|
||||||
// - https://encoding.spec.whatwg.org/
|
// - https://encoding.spec.whatwg.org/
|
||||||
// - https://encoding.spec.whatwg.org/encodings.json
|
|
||||||
// - https://tools.ietf.org/html/rfc6657#section-5
|
// - https://tools.ietf.org/html/rfc6657#section-5
|
||||||
|
|
||||||
// Interface can be implemented by Encodings to define the CCS or CES for which
|
// Interface can be implemented by Encodings to define the CCS or CES for which
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
// This file was generated by go generate; DO NOT EDIT
|
||||||
|
|
||||||
package identifier
|
package identifier
|
||||||
|
|
||||||
|
|
|
@ -1,36 +0,0 @@
|
||||||
package(default_visibility = ["//visibility:public"])
|
|
||||||
|
|
||||||
licenses(["notice"])
|
|
||||||
|
|
||||||
load(
|
|
||||||
"@io_bazel_rules_go//go:def.bzl",
|
|
||||||
"go_library",
|
|
||||||
)
|
|
||||||
|
|
||||||
go_library(
|
|
||||||
name = "go_default_library",
|
|
||||||
srcs = [
|
|
||||||
"internal.go",
|
|
||||||
"match.go",
|
|
||||||
"tables.go",
|
|
||||||
],
|
|
||||||
tags = ["automanaged"],
|
|
||||||
deps = ["//vendor/golang.org/x/text/language:go_default_library"],
|
|
||||||
)
|
|
||||||
|
|
||||||
filegroup(
|
|
||||||
name = "package-srcs",
|
|
||||||
srcs = glob(["**"]),
|
|
||||||
tags = ["automanaged"],
|
|
||||||
visibility = ["//visibility:private"],
|
|
||||||
)
|
|
||||||
|
|
||||||
filegroup(
|
|
||||||
name = "all-srcs",
|
|
||||||
srcs = [
|
|
||||||
":package-srcs",
|
|
||||||
"//vendor/golang.org/x/text/internal/tag:all-srcs",
|
|
||||||
"//vendor/golang.org/x/text/internal/utf8internal:all-srcs",
|
|
||||||
],
|
|
||||||
tags = ["automanaged"],
|
|
||||||
)
|
|
|
@ -1,52 +0,0 @@
|
||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
|
|
||||||
"golang.org/x/text/internal/gen"
|
|
||||||
"golang.org/x/text/language"
|
|
||||||
"golang.org/x/text/unicode/cldr"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
r := gen.OpenCLDRCoreZip()
|
|
||||||
defer r.Close()
|
|
||||||
|
|
||||||
d := &cldr.Decoder{}
|
|
||||||
data, err := d.DecodeZip(r)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("DecodeZip: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
w := gen.NewCodeWriter()
|
|
||||||
defer w.WriteGoFile("tables.go", "internal")
|
|
||||||
|
|
||||||
// Create parents table.
|
|
||||||
parents := make([]uint16, language.NumCompactTags)
|
|
||||||
for _, loc := range data.Locales() {
|
|
||||||
tag := language.MustParse(loc)
|
|
||||||
index, ok := language.CompactIndex(tag)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
parentIndex := 0 // und
|
|
||||||
for p := tag.Parent(); p != language.Und; p = p.Parent() {
|
|
||||||
if x, ok := language.CompactIndex(p); ok {
|
|
||||||
parentIndex = x
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
parents[index] = uint16(parentIndex)
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteComment(`
|
|
||||||
Parent maps a compact index of a tag to the compact index of the parent of
|
|
||||||
this tag.`)
|
|
||||||
w.WriteVar("Parent", parents)
|
|
||||||
}
|
|
|
@ -1,51 +0,0 @@
|
||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
//go:generate go run gen.go
|
|
||||||
|
|
||||||
// Package internal contains non-exported functionality that are used by
|
|
||||||
// packages in the text repository.
|
|
||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sort"
|
|
||||||
|
|
||||||
"golang.org/x/text/language"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SortTags sorts tags in place.
|
|
||||||
func SortTags(tags []language.Tag) {
|
|
||||||
sort.Sort(sorter(tags))
|
|
||||||
}
|
|
||||||
|
|
||||||
type sorter []language.Tag
|
|
||||||
|
|
||||||
func (s sorter) Len() int {
|
|
||||||
return len(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s sorter) Swap(i, j int) {
|
|
||||||
s[i], s[j] = s[j], s[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s sorter) Less(i, j int) bool {
|
|
||||||
return s[i].String() < s[j].String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// UniqueTags sorts and filters duplicate tags in place and returns a slice with
|
|
||||||
// only unique tags.
|
|
||||||
func UniqueTags(tags []language.Tag) []language.Tag {
|
|
||||||
if len(tags) <= 1 {
|
|
||||||
return tags
|
|
||||||
}
|
|
||||||
SortTags(tags)
|
|
||||||
k := 0
|
|
||||||
for i := 1; i < len(tags); i++ {
|
|
||||||
if tags[k].String() < tags[i].String() {
|
|
||||||
k++
|
|
||||||
tags[k] = tags[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tags[:k+1]
|
|
||||||
}
|
|
|
@ -1,67 +0,0 @@
|
||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package internal
|
|
||||||
|
|
||||||
// This file contains matchers that implement CLDR inheritance.
|
|
||||||
//
|
|
||||||
// See http://unicode.org/reports/tr35/#Locale_Inheritance.
|
|
||||||
//
|
|
||||||
// Some of the inheritance described in this document is already handled by
|
|
||||||
// the cldr package.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"golang.org/x/text/language"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO: consider if (some of the) matching algorithm needs to be public after
|
|
||||||
// getting some feel about what is generic and what is specific.
|
|
||||||
|
|
||||||
// NewInheritanceMatcher returns a matcher that matches based on the inheritance
|
|
||||||
// chain.
|
|
||||||
//
|
|
||||||
// The matcher uses canonicalization and the parent relationship to find a
|
|
||||||
// match. The resulting match will always be either Und or a language with the
|
|
||||||
// same language and script as the requested language. It will not match
|
|
||||||
// languages for which there is understood to be mutual or one-directional
|
|
||||||
// intelligibility.
|
|
||||||
//
|
|
||||||
// A Match will indicate an Exact match if the language matches after
|
|
||||||
// canonicalization and High if the matched tag is a parent.
|
|
||||||
func NewInheritanceMatcher(t []language.Tag) *InheritanceMatcher {
|
|
||||||
tags := &InheritanceMatcher{make(map[language.Tag]int)}
|
|
||||||
for i, tag := range t {
|
|
||||||
ct, err := language.All.Canonicalize(tag)
|
|
||||||
if err != nil {
|
|
||||||
ct = tag
|
|
||||||
}
|
|
||||||
tags.index[ct] = i
|
|
||||||
}
|
|
||||||
return tags
|
|
||||||
}
|
|
||||||
|
|
||||||
type InheritanceMatcher struct {
|
|
||||||
index map[language.Tag]int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m InheritanceMatcher) Match(want ...language.Tag) (language.Tag, int, language.Confidence) {
|
|
||||||
for _, t := range want {
|
|
||||||
ct, err := language.All.Canonicalize(t)
|
|
||||||
if err != nil {
|
|
||||||
ct = t
|
|
||||||
}
|
|
||||||
conf := language.Exact
|
|
||||||
for {
|
|
||||||
if index, ok := m.index[ct]; ok {
|
|
||||||
return ct, index, conf
|
|
||||||
}
|
|
||||||
if ct == language.Und {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
ct = ct.Parent()
|
|
||||||
conf = language.High
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return language.Und, 0, language.No
|
|
||||||
}
|
|
|
@ -1,117 +0,0 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
|
||||||
|
|
||||||
package internal
|
|
||||||
|
|
||||||
// Parent maps a compact index of a tag to the compact index of the parent of
|
|
||||||
// this tag.
|
|
||||||
var Parent = []uint16{ // 754 elements
|
|
||||||
// Entry 0 - 3F
|
|
||||||
0x0000, 0x0053, 0x00e5, 0x0000, 0x0003, 0x0003, 0x0000, 0x0006,
|
|
||||||
0x0000, 0x0008, 0x0000, 0x000a, 0x0000, 0x000c, 0x000c, 0x000c,
|
|
||||||
0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
|
|
||||||
0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
|
|
||||||
0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
|
|
||||||
0x000c, 0x0000, 0x0000, 0x002a, 0x0000, 0x002c, 0x0000, 0x002e,
|
|
||||||
0x0000, 0x0000, 0x0031, 0x0030, 0x0030, 0x0000, 0x0035, 0x0000,
|
|
||||||
0x0037, 0x0000, 0x0039, 0x0000, 0x003b, 0x0000, 0x003d, 0x0000,
|
|
||||||
// Entry 40 - 7F
|
|
||||||
0x0000, 0x0040, 0x0000, 0x0042, 0x0042, 0x0000, 0x0045, 0x0045,
|
|
||||||
0x0000, 0x0048, 0x0000, 0x004a, 0x0000, 0x0000, 0x004d, 0x004c,
|
|
||||||
0x004c, 0x0000, 0x0051, 0x0051, 0x0051, 0x0051, 0x0000, 0x0056,
|
|
||||||
0x0000, 0x0058, 0x0000, 0x005a, 0x0000, 0x005c, 0x005c, 0x0000,
|
|
||||||
0x005f, 0x0000, 0x0061, 0x0000, 0x0063, 0x0000, 0x0065, 0x0065,
|
|
||||||
0x0000, 0x0068, 0x0000, 0x006a, 0x006a, 0x006a, 0x006a, 0x006a,
|
|
||||||
0x006a, 0x006a, 0x0000, 0x0072, 0x0000, 0x0074, 0x0000, 0x0076,
|
|
||||||
0x0000, 0x0000, 0x0079, 0x0000, 0x007b, 0x0000, 0x007d, 0x0000,
|
|
||||||
// Entry 80 - BF
|
|
||||||
0x007f, 0x007f, 0x0000, 0x0082, 0x0082, 0x0000, 0x0085, 0x0086,
|
|
||||||
0x0086, 0x0086, 0x0085, 0x0087, 0x0086, 0x0086, 0x0086, 0x0085,
|
|
||||||
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, 0x0086,
|
|
||||||
0x0086, 0x0086, 0x0086, 0x0087, 0x0086, 0x0087, 0x0086, 0x0086,
|
|
||||||
0x0087, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
|
|
||||||
0x0086, 0x0086, 0x0085, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
|
|
||||||
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
|
|
||||||
0x0086, 0x0086, 0x0086, 0x0086, 0x0085, 0x0086, 0x0085, 0x0086,
|
|
||||||
// Entry C0 - FF
|
|
||||||
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087,
|
|
||||||
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0085,
|
|
||||||
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, 0x0086, 0x0086,
|
|
||||||
0x0087, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
|
|
||||||
0x0086, 0x0086, 0x0086, 0x0086, 0x0085, 0x0085, 0x0086, 0x0086,
|
|
||||||
0x0085, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0000, 0x00ee,
|
|
||||||
0x0000, 0x00f0, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1,
|
|
||||||
0x00f1, 0x00f1, 0x00f1, 0x00f0, 0x00f1, 0x00f0, 0x00f0, 0x00f1,
|
|
||||||
// Entry 100 - 13F
|
|
||||||
0x00f1, 0x00f0, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f0, 0x00f1,
|
|
||||||
0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x0000, 0x010d, 0x0000,
|
|
||||||
0x010f, 0x0000, 0x0111, 0x0000, 0x0113, 0x0113, 0x0000, 0x0116,
|
|
||||||
0x0116, 0x0116, 0x0116, 0x0000, 0x011b, 0x0000, 0x011d, 0x0000,
|
|
||||||
0x011f, 0x011f, 0x0000, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122,
|
|
||||||
0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122,
|
|
||||||
0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122,
|
|
||||||
0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122,
|
|
||||||
// Entry 140 - 17F
|
|
||||||
0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122,
|
|
||||||
0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122,
|
|
||||||
0x0122, 0x0000, 0x0151, 0x0000, 0x0153, 0x0000, 0x0155, 0x0000,
|
|
||||||
0x0157, 0x0000, 0x0159, 0x0000, 0x015b, 0x015b, 0x015b, 0x0000,
|
|
||||||
0x015f, 0x0000, 0x0000, 0x0162, 0x0000, 0x0164, 0x0000, 0x0166,
|
|
||||||
0x0166, 0x0166, 0x0000, 0x016a, 0x0000, 0x016c, 0x0000, 0x016e,
|
|
||||||
0x0000, 0x0170, 0x0170, 0x0000, 0x0173, 0x0000, 0x0175, 0x0000,
|
|
||||||
0x0177, 0x0000, 0x0179, 0x0000, 0x017b, 0x0000, 0x017d, 0x0000,
|
|
||||||
// Entry 180 - 1BF
|
|
||||||
0x017f, 0x0000, 0x0181, 0x0181, 0x0181, 0x0181, 0x0000, 0x0000,
|
|
||||||
0x0187, 0x0000, 0x0000, 0x018a, 0x0000, 0x018c, 0x0000, 0x0000,
|
|
||||||
0x018f, 0x0000, 0x0191, 0x0000, 0x0000, 0x0194, 0x0000, 0x0000,
|
|
||||||
0x0197, 0x0000, 0x0199, 0x0000, 0x019b, 0x0000, 0x019d, 0x0000,
|
|
||||||
0x019f, 0x0000, 0x01a1, 0x0000, 0x01a3, 0x0000, 0x01a5, 0x0000,
|
|
||||||
0x01a7, 0x0000, 0x01a9, 0x0000, 0x01ab, 0x01ab, 0x0000, 0x01ae,
|
|
||||||
0x0000, 0x01b0, 0x0000, 0x01b2, 0x0000, 0x01b4, 0x0000, 0x01b6,
|
|
||||||
0x0000, 0x0000, 0x01b9, 0x0000, 0x01bb, 0x0000, 0x01bd, 0x0000,
|
|
||||||
// Entry 1C0 - 1FF
|
|
||||||
0x01bf, 0x0000, 0x01c1, 0x0000, 0x01c3, 0x0000, 0x01c5, 0x01c5,
|
|
||||||
0x01c5, 0x01c5, 0x0000, 0x01ca, 0x0000, 0x01cc, 0x01cc, 0x0000,
|
|
||||||
0x01cf, 0x0000, 0x01d1, 0x0000, 0x01d3, 0x0000, 0x01d5, 0x0000,
|
|
||||||
0x01d7, 0x0000, 0x01d9, 0x01d9, 0x0000, 0x01dc, 0x0000, 0x01de,
|
|
||||||
0x0000, 0x01e0, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6,
|
|
||||||
0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x0000, 0x01ee,
|
|
||||||
0x01ee, 0x01ee, 0x0000, 0x01f2, 0x0000, 0x01f4, 0x0000, 0x01f6,
|
|
||||||
0x0000, 0x01f8, 0x0000, 0x0000, 0x01fb, 0x0000, 0x01fd, 0x01fd,
|
|
||||||
// Entry 200 - 23F
|
|
||||||
0x0000, 0x0200, 0x0000, 0x0202, 0x0202, 0x0000, 0x0205, 0x0205,
|
|
||||||
0x0000, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208,
|
|
||||||
0x0000, 0x0210, 0x0000, 0x0212, 0x0000, 0x0214, 0x0000, 0x0000,
|
|
||||||
0x0000, 0x0000, 0x0000, 0x021a, 0x0000, 0x0000, 0x021d, 0x0000,
|
|
||||||
0x021f, 0x021f, 0x0000, 0x0222, 0x0000, 0x0224, 0x0224, 0x0000,
|
|
||||||
0x0000, 0x0228, 0x0227, 0x0227, 0x0000, 0x0000, 0x022d, 0x0000,
|
|
||||||
0x022f, 0x0000, 0x0231, 0x0000, 0x023d, 0x0233, 0x023d, 0x023d,
|
|
||||||
0x023d, 0x023d, 0x023d, 0x023d, 0x023d, 0x0233, 0x023d, 0x023d,
|
|
||||||
// Entry 240 - 27F
|
|
||||||
0x0000, 0x0240, 0x0240, 0x0240, 0x0000, 0x0244, 0x0000, 0x0246,
|
|
||||||
0x0000, 0x0248, 0x0248, 0x0000, 0x024b, 0x0000, 0x024d, 0x024d,
|
|
||||||
0x024d, 0x024d, 0x024d, 0x024d, 0x0000, 0x0254, 0x0000, 0x0256,
|
|
||||||
0x0000, 0x0258, 0x0000, 0x025a, 0x0000, 0x025c, 0x0000, 0x0000,
|
|
||||||
0x025f, 0x025f, 0x025f, 0x0000, 0x0263, 0x0000, 0x0265, 0x0000,
|
|
||||||
0x0267, 0x0000, 0x0000, 0x026a, 0x0269, 0x0269, 0x0000, 0x026e,
|
|
||||||
0x0000, 0x0270, 0x0000, 0x0272, 0x0000, 0x0000, 0x0000, 0x0000,
|
|
||||||
0x0277, 0x0000, 0x0000, 0x027a, 0x0000, 0x027c, 0x027c, 0x027c,
|
|
||||||
// Entry 280 - 2BF
|
|
||||||
0x027c, 0x0000, 0x0281, 0x0281, 0x0281, 0x0000, 0x0285, 0x0285,
|
|
||||||
0x0285, 0x0285, 0x0285, 0x0000, 0x028b, 0x028b, 0x028b, 0x028b,
|
|
||||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0293, 0x0293, 0x0293, 0x0000,
|
|
||||||
0x0297, 0x0297, 0x0297, 0x0297, 0x0000, 0x0000, 0x029d, 0x029d,
|
|
||||||
0x029d, 0x029d, 0x0000, 0x02a2, 0x0000, 0x02a4, 0x02a4, 0x0000,
|
|
||||||
0x02a7, 0x0000, 0x02a9, 0x02a9, 0x0000, 0x0000, 0x02ad, 0x0000,
|
|
||||||
0x0000, 0x02b0, 0x0000, 0x02b2, 0x02b2, 0x0000, 0x0000, 0x02b6,
|
|
||||||
0x0000, 0x02b8, 0x0000, 0x02ba, 0x0000, 0x02bc, 0x0000, 0x02be,
|
|
||||||
// Entry 2C0 - 2FF
|
|
||||||
0x02be, 0x0000, 0x0000, 0x02c2, 0x0000, 0x02c4, 0x02c1, 0x02c1,
|
|
||||||
0x0000, 0x0000, 0x02c9, 0x02c8, 0x02c8, 0x0000, 0x0000, 0x02ce,
|
|
||||||
0x0000, 0x02d0, 0x0000, 0x02d2, 0x0000, 0x0000, 0x02d5, 0x0000,
|
|
||||||
0x0000, 0x0000, 0x02d9, 0x0000, 0x02db, 0x0000, 0x02dd, 0x0000,
|
|
||||||
0x02df, 0x02df, 0x0000, 0x02e2, 0x0000, 0x02e4, 0x0000, 0x02e6,
|
|
||||||
0x02e6, 0x02e6, 0x02e6, 0x02e6, 0x0000, 0x02ec, 0x02ed, 0x02ec,
|
|
||||||
0x0000, 0x02f0,
|
|
||||||
} // Size: 1532 bytes
|
|
||||||
|
|
||||||
// Total table size 1532 bytes (1KiB); checksum: 90718A2
|
|
|
@ -1,4 +1,4 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
// This file was generated by go generate; DO NOT EDIT
|
||||||
|
|
||||||
package language
|
package language
|
||||||
|
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -2,7 +2,7 @@
|
||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
//go:generate go run gen.go gen_common.go -output tables.go
|
//go:generate go run maketables.go gen_common.go -output tables.go
|
||||||
//go:generate go run gen_index.go
|
//go:generate go run gen_index.go
|
||||||
|
|
||||||
// Package language implements BCP 47 language tags and related functionality.
|
// Package language implements BCP 47 language tags and related functionality.
|
||||||
|
@ -131,13 +131,6 @@ const (
|
||||||
type Tag struct {
|
type Tag struct {
|
||||||
lang langID
|
lang langID
|
||||||
region regionID
|
region regionID
|
||||||
// TODO: we will soon run out of positions for script. Idea: instead of
|
|
||||||
// storing lang, region, and script codes, store only the compact index and
|
|
||||||
// have a lookup table from this code to its expansion. This greatly speeds
|
|
||||||
// up table lookup, speed up common variant cases.
|
|
||||||
// This will also immediately free up 3 extra bytes. Also, the pVariant
|
|
||||||
// field can now be moved to the lookup table, as the compact index uniquely
|
|
||||||
// determines the offset of a possible variant.
|
|
||||||
script scriptID
|
script scriptID
|
||||||
pVariant byte // offset in str, includes preceding '-'
|
pVariant byte // offset in str, includes preceding '-'
|
||||||
pExt uint16 // offset of first extension, includes preceding '-'
|
pExt uint16 // offset of first extension, includes preceding '-'
|
||||||
|
@ -600,7 +593,7 @@ func (t Tag) Extension(x byte) (ext Extension, ok bool) {
|
||||||
return Extension{ext}, true
|
return Extension{ext}, true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return Extension{}, false
|
return Extension{string(x)}, false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Extensions returns all extensions of t.
|
// Extensions returns all extensions of t.
|
||||||
|
|
202
vendor/golang.org/x/text/language/gen.go → vendor/golang.org/x/text/language/maketables.go
generated
vendored
202
vendor/golang.org/x/text/language/gen.go → vendor/golang.org/x/text/language/maketables.go
generated
vendored
|
@ -678,8 +678,6 @@ func (b *builder) parseIndices() {
|
||||||
b.locale.parse(meta.DefaultContent.Locales)
|
b.locale.parse(meta.DefaultContent.Locales)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: region inclusion data will probably not be use used in future matchers.
|
|
||||||
|
|
||||||
func (b *builder) computeRegionGroups() {
|
func (b *builder) computeRegionGroups() {
|
||||||
b.groups = make(map[int]index)
|
b.groups = make(map[int]index)
|
||||||
|
|
||||||
|
@ -688,11 +686,6 @@ func (b *builder) computeRegionGroups() {
|
||||||
b.groups[i] = index(len(b.groups))
|
b.groups[i] = index(len(b.groups))
|
||||||
}
|
}
|
||||||
for _, g := range b.supp.TerritoryContainment.Group {
|
for _, g := range b.supp.TerritoryContainment.Group {
|
||||||
// Skip UN and EURO zone as they are flattening the containment
|
|
||||||
// relationship.
|
|
||||||
if g.Type == "EZ" || g.Type == "UN" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
group := b.region.index(g.Type)
|
group := b.region.index(g.Type)
|
||||||
if _, ok := b.groups[group]; !ok {
|
if _, ok := b.groups[group]; !ok {
|
||||||
b.groups[group] = index(len(b.groups))
|
b.groups[group] = index(len(b.groups))
|
||||||
|
@ -789,7 +782,6 @@ func (b *builder) writeLanguage() {
|
||||||
lang.updateLater("tw", "twi")
|
lang.updateLater("tw", "twi")
|
||||||
lang.updateLater("nb", "nob")
|
lang.updateLater("nb", "nob")
|
||||||
lang.updateLater("ak", "aka")
|
lang.updateLater("ak", "aka")
|
||||||
lang.updateLater("bh", "bih")
|
|
||||||
|
|
||||||
// Ensure that each 2-letter code is matched with a 3-letter code.
|
// Ensure that each 2-letter code is matched with a 3-letter code.
|
||||||
for _, v := range lang.s[1:] {
|
for _, v := range lang.s[1:] {
|
||||||
|
@ -806,10 +798,10 @@ func (b *builder) writeLanguage() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Complete canonicalized language tags.
|
// Complete canonialized language tags.
|
||||||
lang.freeze()
|
lang.freeze()
|
||||||
for i, v := range lang.s {
|
for i, v := range lang.s {
|
||||||
// We can avoid these manual entries by using the IANA registry directly.
|
// We can avoid these manual entries by using the IANI registry directly.
|
||||||
// Seems easier to update the list manually, as changes are rare.
|
// Seems easier to update the list manually, as changes are rare.
|
||||||
// The panic in this loop will trigger if we miss an entry.
|
// The panic in this loop will trigger if we miss an entry.
|
||||||
add := ""
|
add := ""
|
||||||
|
@ -916,7 +908,7 @@ func (b *builder) writeRegion() {
|
||||||
i := b.region.index(s)
|
i := b.region.index(s)
|
||||||
for _, d := range e.description {
|
for _, d := range e.description {
|
||||||
if strings.Contains(d, "Private use") {
|
if strings.Contains(d, "Private use") {
|
||||||
regionTypes[i] = iso3166UserAssigned
|
regionTypes[i] = iso3166UserAssgined
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
regionTypes[i] |= bcp47Region
|
regionTypes[i] |= bcp47Region
|
||||||
|
@ -1073,7 +1065,7 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
iso3166UserAssigned = 1 << iota
|
iso3166UserAssgined = 1 << iota
|
||||||
ccTLD
|
ccTLD
|
||||||
bcp47Region
|
bcp47Region
|
||||||
)
|
)
|
||||||
|
@ -1363,23 +1355,42 @@ func (b *builder) writeLikelyData() {
|
||||||
|
|
||||||
type mutualIntelligibility struct {
|
type mutualIntelligibility struct {
|
||||||
want, have uint16
|
want, have uint16
|
||||||
distance uint8
|
conf uint8
|
||||||
oneway bool
|
oneway bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type scriptIntelligibility struct {
|
type scriptIntelligibility struct {
|
||||||
wantLang, haveLang uint16
|
lang uint16 // langID or 0 if *
|
||||||
wantScript, haveScript uint8
|
want, have uint8
|
||||||
distance uint8
|
conf uint8
|
||||||
// Always oneway
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type regionIntelligibility struct {
|
type sortByConf []mutualIntelligibility
|
||||||
lang uint16 // compact language id
|
|
||||||
script uint8 // 0 means any
|
func (l sortByConf) Less(a, b int) bool {
|
||||||
group uint8 // 0 means any; if bit 7 is set it means inverse
|
return l[a].conf > l[b].conf
|
||||||
distance uint8
|
}
|
||||||
// Always twoway.
|
|
||||||
|
func (l sortByConf) Swap(a, b int) {
|
||||||
|
l[a], l[b] = l[b], l[a]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l sortByConf) Len() int {
|
||||||
|
return len(l)
|
||||||
|
}
|
||||||
|
|
||||||
|
// toConf converts a percentage value [0, 100] to a confidence class.
|
||||||
|
func toConf(pct uint8) uint8 {
|
||||||
|
switch {
|
||||||
|
case pct == 100:
|
||||||
|
return 3 // Exact
|
||||||
|
case pct >= 90:
|
||||||
|
return 2 // High
|
||||||
|
case pct > 50:
|
||||||
|
return 1 // Low
|
||||||
|
default:
|
||||||
|
return 0 // No
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeMatchData writes tables with languages and scripts for which there is
|
// writeMatchData writes tables with languages and scripts for which there is
|
||||||
|
@ -1389,50 +1400,13 @@ type regionIntelligibility struct {
|
||||||
// We also drop all region-related data as we use a different algorithm to
|
// We also drop all region-related data as we use a different algorithm to
|
||||||
// determine region equivalence.
|
// determine region equivalence.
|
||||||
func (b *builder) writeMatchData() {
|
func (b *builder) writeMatchData() {
|
||||||
lm := b.supp.LanguageMatching.LanguageMatches
|
|
||||||
cldr.MakeSlice(&lm).SelectAnyOf("type", "written_new")
|
|
||||||
|
|
||||||
regionHierarchy := map[string][]string{}
|
|
||||||
for _, g := range b.supp.TerritoryContainment.Group {
|
|
||||||
regions := strings.Split(g.Contains, " ")
|
|
||||||
regionHierarchy[g.Type] = append(regionHierarchy[g.Type], regions...)
|
|
||||||
}
|
|
||||||
regionToGroups := make([]uint8, len(b.region.s))
|
|
||||||
|
|
||||||
idToIndex := map[string]uint8{}
|
|
||||||
for i, mv := range lm[0].MatchVariable {
|
|
||||||
if i > 6 {
|
|
||||||
log.Fatalf("Too many groups: %d", i)
|
|
||||||
}
|
|
||||||
idToIndex[mv.Id] = uint8(i + 1)
|
|
||||||
// TODO: also handle '-'
|
|
||||||
for _, r := range strings.Split(mv.Value, "+") {
|
|
||||||
todo := []string{r}
|
|
||||||
for k := 0; k < len(todo); k++ {
|
|
||||||
r := todo[k]
|
|
||||||
regionToGroups[b.region.index(r)] |= 1 << uint8(i)
|
|
||||||
todo = append(todo, regionHierarchy[r]...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
b.writeSlice("regionToGroups", regionToGroups)
|
|
||||||
|
|
||||||
b.writeType(mutualIntelligibility{})
|
b.writeType(mutualIntelligibility{})
|
||||||
b.writeType(scriptIntelligibility{})
|
b.writeType(scriptIntelligibility{})
|
||||||
b.writeType(regionIntelligibility{})
|
lm := b.supp.LanguageMatching.LanguageMatches
|
||||||
|
cldr.MakeSlice(&lm).SelectAnyOf("type", "written")
|
||||||
|
|
||||||
matchLang := []mutualIntelligibility{{
|
matchLang := []mutualIntelligibility{}
|
||||||
// TODO: remove once CLDR is fixed.
|
|
||||||
want: uint16(b.langIndex("sr")),
|
|
||||||
have: uint16(b.langIndex("hr")),
|
|
||||||
distance: uint8(5),
|
|
||||||
}, {
|
|
||||||
want: uint16(b.langIndex("sr")),
|
|
||||||
have: uint16(b.langIndex("bs")),
|
|
||||||
distance: uint8(5),
|
|
||||||
}}
|
|
||||||
matchScript := []scriptIntelligibility{}
|
matchScript := []scriptIntelligibility{}
|
||||||
matchRegion := []regionIntelligibility{}
|
|
||||||
// Convert the languageMatch entries in lists keyed by desired language.
|
// Convert the languageMatch entries in lists keyed by desired language.
|
||||||
for _, m := range lm[0].LanguageMatch {
|
for _, m := range lm[0].LanguageMatch {
|
||||||
// Different versions of CLDR use different separators.
|
// Different versions of CLDR use different separators.
|
||||||
|
@ -1440,38 +1414,33 @@ func (b *builder) writeMatchData() {
|
||||||
supported := strings.Replace(m.Supported, "-", "_", -1)
|
supported := strings.Replace(m.Supported, "-", "_", -1)
|
||||||
d := strings.Split(desired, "_")
|
d := strings.Split(desired, "_")
|
||||||
s := strings.Split(supported, "_")
|
s := strings.Split(supported, "_")
|
||||||
if len(d) != len(s) {
|
if len(d) != len(s) || len(d) > 2 {
|
||||||
log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
|
// Skip all entries with regions and work around CLDR bug.
|
||||||
continue
|
|
||||||
}
|
|
||||||
distance, _ := strconv.ParseInt(m.Distance, 10, 8)
|
|
||||||
switch len(d) {
|
|
||||||
case 2:
|
|
||||||
if desired == supported && desired == "*_*" {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
pct, _ := strconv.ParseInt(m.Percent, 10, 8)
|
||||||
|
if len(d) == 2 && d[0] == s[0] && len(d[1]) == 4 {
|
||||||
// language-script pair.
|
// language-script pair.
|
||||||
|
lang := uint16(0)
|
||||||
|
if d[0] != "*" {
|
||||||
|
lang = uint16(b.langIndex(d[0]))
|
||||||
|
}
|
||||||
matchScript = append(matchScript, scriptIntelligibility{
|
matchScript = append(matchScript, scriptIntelligibility{
|
||||||
wantLang: uint16(b.langIndex(d[0])),
|
lang: lang,
|
||||||
haveLang: uint16(b.langIndex(s[0])),
|
want: uint8(b.script.index(d[1])),
|
||||||
wantScript: uint8(b.script.index(d[1])),
|
have: uint8(b.script.index(s[1])),
|
||||||
haveScript: uint8(b.script.index(s[1])),
|
conf: toConf(uint8(pct)),
|
||||||
distance: uint8(distance),
|
|
||||||
})
|
})
|
||||||
if m.Oneway != "true" {
|
if m.Oneway != "true" {
|
||||||
matchScript = append(matchScript, scriptIntelligibility{
|
matchScript = append(matchScript, scriptIntelligibility{
|
||||||
wantLang: uint16(b.langIndex(s[0])),
|
lang: lang,
|
||||||
haveLang: uint16(b.langIndex(d[0])),
|
want: uint8(b.script.index(s[1])),
|
||||||
wantScript: uint8(b.script.index(s[1])),
|
have: uint8(b.script.index(d[1])),
|
||||||
haveScript: uint8(b.script.index(d[1])),
|
conf: toConf(uint8(pct)),
|
||||||
distance: uint8(distance),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
case 1:
|
} else if len(d) == 1 && d[0] != "*" {
|
||||||
if desired == supported && desired == "*" {
|
if pct == 100 {
|
||||||
continue
|
|
||||||
}
|
|
||||||
if distance == 1 {
|
|
||||||
// nb == no is already handled by macro mapping. Check there
|
// nb == no is already handled by macro mapping. Check there
|
||||||
// really is only this case.
|
// really is only this case.
|
||||||
if d[0] != "no" || s[0] != "nb" {
|
if d[0] != "no" || s[0] != "nb" {
|
||||||
|
@ -1479,57 +1448,28 @@ func (b *builder) writeMatchData() {
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// TODO: consider dropping oneway field and just doubling the entry.
|
|
||||||
matchLang = append(matchLang, mutualIntelligibility{
|
matchLang = append(matchLang, mutualIntelligibility{
|
||||||
want: uint16(b.langIndex(d[0])),
|
want: uint16(b.langIndex(d[0])),
|
||||||
have: uint16(b.langIndex(s[0])),
|
have: uint16(b.langIndex(s[0])),
|
||||||
distance: uint8(distance),
|
conf: uint8(pct),
|
||||||
oneway: m.Oneway == "true",
|
oneway: m.Oneway == "true",
|
||||||
})
|
})
|
||||||
case 3:
|
} else {
|
||||||
if desired == supported && desired == "*_*_*" {
|
// TODO: Handle other mappings.
|
||||||
continue
|
a := []string{"*;*", "*_*;*_*", "es_MX;es_419"}
|
||||||
}
|
s := strings.Join([]string{desired, supported}, ";")
|
||||||
if desired != supported { // (Weird but correct.)
|
if i := sort.SearchStrings(a, s); i == len(a) || a[i] != s {
|
||||||
log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
|
log.Printf("%q not handled", s)
|
||||||
continue
|
|
||||||
}
|
|
||||||
ri := regionIntelligibility{
|
|
||||||
lang: b.langIndex(d[0]),
|
|
||||||
distance: uint8(distance),
|
|
||||||
}
|
|
||||||
if d[1] != "*" {
|
|
||||||
ri.script = uint8(b.script.index(d[1]))
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case d[2] == "*":
|
|
||||||
ri.group = 0x80 // not contained in anything
|
|
||||||
case strings.HasPrefix(d[2], "$!"):
|
|
||||||
ri.group = 0x80
|
|
||||||
d[2] = "$" + d[2][len("$!"):]
|
|
||||||
fallthrough
|
|
||||||
case strings.HasPrefix(d[2], "$"):
|
|
||||||
ri.group |= idToIndex[d[2]]
|
|
||||||
}
|
|
||||||
matchRegion = append(matchRegion, ri)
|
|
||||||
default:
|
|
||||||
log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
sort.SliceStable(matchLang, func(i, j int) bool {
|
}
|
||||||
return matchLang[i].distance < matchLang[j].distance
|
sort.Stable(sortByConf(matchLang))
|
||||||
})
|
// collapse percentage into confidence classes
|
||||||
|
for i, m := range matchLang {
|
||||||
|
matchLang[i].conf = toConf(m.conf)
|
||||||
|
}
|
||||||
b.writeSlice("matchLang", matchLang)
|
b.writeSlice("matchLang", matchLang)
|
||||||
|
|
||||||
sort.SliceStable(matchScript, func(i, j int) bool {
|
|
||||||
return matchScript[i].distance < matchScript[j].distance
|
|
||||||
})
|
|
||||||
b.writeSlice("matchScript", matchScript)
|
b.writeSlice("matchScript", matchScript)
|
||||||
|
|
||||||
sort.SliceStable(matchRegion, func(i, j int) bool {
|
|
||||||
return matchRegion[i].distance < matchRegion[j].distance
|
|
||||||
})
|
|
||||||
b.writeSlice("matchRegion", matchRegion)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *builder) writeRegionInclusionData() {
|
func (b *builder) writeRegionInclusionData() {
|
||||||
|
@ -1542,11 +1482,6 @@ func (b *builder) writeRegionInclusionData() {
|
||||||
containment = make(map[index][]index)
|
containment = make(map[index][]index)
|
||||||
)
|
)
|
||||||
for _, g := range b.supp.TerritoryContainment.Group {
|
for _, g := range b.supp.TerritoryContainment.Group {
|
||||||
// Skip UN and EURO zone as they are flattening the containment
|
|
||||||
// relationship.
|
|
||||||
if g.Type == "EZ" || g.Type == "UN" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
group := b.region.index(g.Type)
|
group := b.region.index(g.Type)
|
||||||
groupIdx := b.groups[group]
|
groupIdx := b.groups[group]
|
||||||
for _, mem := range strings.Split(g.Contains, " ") {
|
for _, mem := range strings.Split(g.Contains, " ") {
|
||||||
|
@ -1573,6 +1508,7 @@ func (b *builder) writeRegionInclusionData() {
|
||||||
for _, v := range l {
|
for _, v := range l {
|
||||||
regionContainment[g] |= 1 << v
|
regionContainment[g] |= 1 << v
|
||||||
}
|
}
|
||||||
|
// log.Printf("%d: %X", g, regionContainment[g])
|
||||||
}
|
}
|
||||||
b.writeSlice("regionContainment", regionContainment)
|
b.writeSlice("regionContainment", regionContainment)
|
||||||
|
|
|
@ -6,16 +6,6 @@ package language
|
||||||
|
|
||||||
import "errors"
|
import "errors"
|
||||||
|
|
||||||
// A MatchOption configures a Matcher.
|
|
||||||
type MatchOption func(*matcher)
|
|
||||||
|
|
||||||
// PreferSameScript will, in the absence of a match, result in the first
|
|
||||||
// preferred tag with the same script as a supported tag to match this supported
|
|
||||||
// tag. The default is currently true, but this may change in the future.
|
|
||||||
func PreferSameScript(preferSame bool) MatchOption {
|
|
||||||
return func(m *matcher) { m.preferSameScript = preferSame }
|
|
||||||
}
|
|
||||||
|
|
||||||
// Matcher is the interface that wraps the Match method.
|
// Matcher is the interface that wraps the Match method.
|
||||||
//
|
//
|
||||||
// Match returns the best match for any of the given tags, along with
|
// Match returns the best match for any of the given tags, along with
|
||||||
|
@ -46,44 +36,23 @@ func Comprehends(speaker, alternative Tag) Confidence {
|
||||||
// matched tag in t, but is augmented with the Unicode extension ('u')of the
|
// matched tag in t, but is augmented with the Unicode extension ('u')of the
|
||||||
// corresponding preferred tag. This allows user locale options to be passed
|
// corresponding preferred tag. This allows user locale options to be passed
|
||||||
// transparently.
|
// transparently.
|
||||||
func NewMatcher(t []Tag, options ...MatchOption) Matcher {
|
func NewMatcher(t []Tag) Matcher {
|
||||||
return newMatcher(t, options)
|
return newMatcher(t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
|
func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
|
||||||
match, w, c := m.getBest(want...)
|
match, w, c := m.getBest(want...)
|
||||||
if match != nil {
|
if match == nil {
|
||||||
t, index = match.tag, match.index
|
|
||||||
} else {
|
|
||||||
// TODO: this should be an option
|
|
||||||
t = m.default_.tag
|
t = m.default_.tag
|
||||||
if m.preferSameScript {
|
} else {
|
||||||
outer:
|
t, index = match.tag, match.index
|
||||||
for _, w := range want {
|
|
||||||
script, _ := w.Script()
|
|
||||||
if script.scriptID == 0 {
|
|
||||||
// Don't do anything if there is no script, such as with
|
|
||||||
// private subtags.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for i, h := range m.supported {
|
|
||||||
if script.scriptID == h.maxScript {
|
|
||||||
t, index = h.tag, i
|
|
||||||
break outer
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// TODO: select first language tag based on script.
|
|
||||||
}
|
|
||||||
if w.region != 0 && t.region != 0 && t.region.contains(w.region) {
|
|
||||||
t, _ = Raw.Compose(t, Region{w.region})
|
|
||||||
}
|
}
|
||||||
// Copy options from the user-provided tag into the result tag. This is hard
|
// Copy options from the user-provided tag into the result tag. This is hard
|
||||||
// to do after the fact, so we do it here.
|
// to do after the fact, so we do it here.
|
||||||
// TODO: add in alternative variants to -u-va-.
|
// TODO: consider also adding in variants that are compatible with the
|
||||||
// TODO: add preferred region to -u-rg-.
|
// matched language.
|
||||||
// TODO: add other extensions. Merge with existing extensions.
|
// TODO: Add back region if it is non-ambiguous? Or create another tag to
|
||||||
|
// preserve the region?
|
||||||
if u, ok := w.Extension('u'); ok {
|
if u, ok := w.Extension('u'); ok {
|
||||||
t, _ = Raw.Compose(t, u)
|
t, _ = Raw.Compose(t, u)
|
||||||
}
|
}
|
||||||
|
@ -420,17 +389,15 @@ func minimizeTags(t Tag) (Tag, error) {
|
||||||
// matcher keeps a set of supported language tags, indexed by language.
|
// matcher keeps a set of supported language tags, indexed by language.
|
||||||
type matcher struct {
|
type matcher struct {
|
||||||
default_ *haveTag
|
default_ *haveTag
|
||||||
supported []*haveTag
|
|
||||||
index map[langID]*matchHeader
|
index map[langID]*matchHeader
|
||||||
passSettings bool
|
passSettings bool
|
||||||
preferSameScript bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// matchHeader has the lists of tags for exact matches and matches based on
|
// matchHeader has the lists of tags for exact matches and matches based on
|
||||||
// maximized and canonicalized tags for a given language.
|
// maximized and canonicalized tags for a given language.
|
||||||
type matchHeader struct {
|
type matchHeader struct {
|
||||||
exact []*haveTag
|
exact []haveTag
|
||||||
max []*haveTag
|
max []haveTag
|
||||||
}
|
}
|
||||||
|
|
||||||
// haveTag holds a supported Tag and its maximized script and region. The maximized
|
// haveTag holds a supported Tag and its maximized script and region. The maximized
|
||||||
|
@ -473,10 +440,8 @@ func makeHaveTag(tag Tag, index int) (haveTag, langID) {
|
||||||
// script to map to another and we rely on this to keep the code simple.
|
// script to map to another and we rely on this to keep the code simple.
|
||||||
func altScript(l langID, s scriptID) scriptID {
|
func altScript(l langID, s scriptID) scriptID {
|
||||||
for _, alt := range matchScript {
|
for _, alt := range matchScript {
|
||||||
// TODO: also match cases where language is not the same.
|
if (alt.lang == 0 || langID(alt.lang) == l) && scriptID(alt.have) == s {
|
||||||
if (langID(alt.wantLang) == l || langID(alt.haveLang) == l) &&
|
return scriptID(alt.want)
|
||||||
scriptID(alt.haveScript) == s {
|
|
||||||
return scriptID(alt.wantScript)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return 0
|
return 0
|
||||||
|
@ -492,7 +457,7 @@ func (h *matchHeader) addIfNew(n haveTag, exact bool) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if exact {
|
if exact {
|
||||||
h.exact = append(h.exact, &n)
|
h.exact = append(h.exact, n)
|
||||||
}
|
}
|
||||||
// Allow duplicate maximized tags, but create a linked list to allow quickly
|
// Allow duplicate maximized tags, but create a linked list to allow quickly
|
||||||
// comparing the equivalents and bail out.
|
// comparing the equivalents and bail out.
|
||||||
|
@ -507,7 +472,7 @@ func (h *matchHeader) addIfNew(n haveTag, exact bool) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
h.max = append(h.max, &n)
|
h.max = append(h.max, n)
|
||||||
}
|
}
|
||||||
|
|
||||||
// header returns the matchHeader for the given language. It creates one if
|
// header returns the matchHeader for the given language. It creates one if
|
||||||
|
@ -521,26 +486,12 @@ func (m *matcher) header(l langID) *matchHeader {
|
||||||
return h
|
return h
|
||||||
}
|
}
|
||||||
|
|
||||||
func toConf(d uint8) Confidence {
|
|
||||||
if d <= 10 {
|
|
||||||
return High
|
|
||||||
}
|
|
||||||
if d < 30 {
|
|
||||||
return Low
|
|
||||||
}
|
|
||||||
return No
|
|
||||||
}
|
|
||||||
|
|
||||||
// newMatcher builds an index for the given supported tags and returns it as
|
// newMatcher builds an index for the given supported tags and returns it as
|
||||||
// a matcher. It also expands the index by considering various equivalence classes
|
// a matcher. It also expands the index by considering various equivalence classes
|
||||||
// for a given tag.
|
// for a given tag.
|
||||||
func newMatcher(supported []Tag, options []MatchOption) *matcher {
|
func newMatcher(supported []Tag) *matcher {
|
||||||
m := &matcher{
|
m := &matcher{
|
||||||
index: make(map[langID]*matchHeader),
|
index: make(map[langID]*matchHeader),
|
||||||
preferSameScript: true,
|
|
||||||
}
|
|
||||||
for _, o := range options {
|
|
||||||
o(m)
|
|
||||||
}
|
}
|
||||||
if len(supported) == 0 {
|
if len(supported) == 0 {
|
||||||
m.default_ = &haveTag{}
|
m.default_ = &haveTag{}
|
||||||
|
@ -551,9 +502,8 @@ func newMatcher(supported []Tag, options []MatchOption) *matcher {
|
||||||
for i, tag := range supported {
|
for i, tag := range supported {
|
||||||
pair, _ := makeHaveTag(tag, i)
|
pair, _ := makeHaveTag(tag, i)
|
||||||
m.header(tag.lang).addIfNew(pair, true)
|
m.header(tag.lang).addIfNew(pair, true)
|
||||||
m.supported = append(m.supported, &pair)
|
|
||||||
}
|
}
|
||||||
m.default_ = m.header(supported[0].lang).exact[0]
|
m.default_ = &m.header(supported[0].lang).exact[0]
|
||||||
for i, tag := range supported {
|
for i, tag := range supported {
|
||||||
pair, max := makeHaveTag(tag, i)
|
pair, max := makeHaveTag(tag, i)
|
||||||
if max != tag.lang {
|
if max != tag.lang {
|
||||||
|
@ -561,9 +511,6 @@ func newMatcher(supported []Tag, options []MatchOption) *matcher {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: include alt script.
|
|
||||||
// - don't replace regions, but allow regions to be made more specific.
|
|
||||||
|
|
||||||
// update is used to add indexes in the map for equivalent languages.
|
// update is used to add indexes in the map for equivalent languages.
|
||||||
// If force is true, the update will also apply to derived entries. To
|
// If force is true, the update will also apply to derived entries. To
|
||||||
// avoid applying a "transitive closure", use false.
|
// avoid applying a "transitive closure", use false.
|
||||||
|
@ -573,8 +520,7 @@ func newMatcher(supported []Tag, options []MatchOption) *matcher {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
hw := m.header(langID(want))
|
hw := m.header(langID(want))
|
||||||
for _, ht := range hh.max {
|
for _, v := range hh.max {
|
||||||
v := *ht
|
|
||||||
if conf < v.conf {
|
if conf < v.conf {
|
||||||
v.conf = conf
|
v.conf = conf
|
||||||
}
|
}
|
||||||
|
@ -590,9 +536,9 @@ func newMatcher(supported []Tag, options []MatchOption) *matcher {
|
||||||
// Add entries for languages with mutual intelligibility as defined by CLDR's
|
// Add entries for languages with mutual intelligibility as defined by CLDR's
|
||||||
// languageMatch data.
|
// languageMatch data.
|
||||||
for _, ml := range matchLang {
|
for _, ml := range matchLang {
|
||||||
update(ml.want, ml.have, toConf(ml.distance), false)
|
update(ml.want, ml.have, Confidence(ml.conf), false)
|
||||||
if !ml.oneway {
|
if !ml.oneway {
|
||||||
update(ml.have, ml.want, toConf(ml.distance), false)
|
update(ml.have, ml.want, Confidence(ml.conf), false)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -634,7 +580,7 @@ func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for i := range h.exact {
|
for i := range h.exact {
|
||||||
have := h.exact[i]
|
have := &h.exact[i]
|
||||||
if have.tag.equalsRest(w) {
|
if have.tag.equalsRest(w) {
|
||||||
return have, w, Exact
|
return have, w, Exact
|
||||||
}
|
}
|
||||||
|
@ -645,7 +591,7 @@ func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
|
||||||
// Base language is not defined.
|
// Base language is not defined.
|
||||||
if h != nil {
|
if h != nil {
|
||||||
for i := range h.exact {
|
for i := range h.exact {
|
||||||
have := h.exact[i]
|
have := &h.exact[i]
|
||||||
if have.tag.equalsRest(w) {
|
if have.tag.equalsRest(w) {
|
||||||
return have, w, Exact
|
return have, w, Exact
|
||||||
}
|
}
|
||||||
|
@ -663,11 +609,11 @@ func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
|
||||||
}
|
}
|
||||||
// Check for match based on maximized tag.
|
// Check for match based on maximized tag.
|
||||||
for i := range h.max {
|
for i := range h.max {
|
||||||
have := h.max[i]
|
have := &h.max[i]
|
||||||
best.update(have, w, max.script, max.region)
|
best.update(have, w, max.script, max.region)
|
||||||
if best.conf == Exact {
|
if best.conf == Exact {
|
||||||
for have.nextMax != 0 {
|
for have.nextMax != 0 {
|
||||||
have = h.max[have.nextMax]
|
have = &h.max[have.nextMax]
|
||||||
best.update(have, w, max.script, max.region)
|
best.update(have, w, max.script, max.region)
|
||||||
}
|
}
|
||||||
return best.have, best.want, High
|
return best.have, best.want, High
|
||||||
|
@ -691,7 +637,6 @@ type bestMatch struct {
|
||||||
// Cached results from applying tie-breaking rules.
|
// Cached results from applying tie-breaking rules.
|
||||||
origLang bool
|
origLang bool
|
||||||
origReg bool
|
origReg bool
|
||||||
regGroupDist uint8
|
|
||||||
regDist uint8
|
regDist uint8
|
||||||
origScript bool
|
origScript bool
|
||||||
parentDist uint8 // 255 if have is not an ancestor of want tag.
|
parentDist uint8 // 255 if have is not an ancestor of want tag.
|
||||||
|
@ -748,14 +693,6 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
|
||||||
beaten = true
|
beaten = true
|
||||||
}
|
}
|
||||||
|
|
||||||
regGroupDist := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.lang)
|
|
||||||
if !beaten && m.regGroupDist != regGroupDist {
|
|
||||||
if regGroupDist > m.regGroupDist {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
beaten = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// We prefer if the pre-maximized region was specified and identical.
|
// We prefer if the pre-maximized region was specified and identical.
|
||||||
origReg := have.tag.region == tag.region && tag.region != 0
|
origReg := have.tag.region == tag.region && tag.region != 0
|
||||||
if !beaten && m.origReg != origReg {
|
if !beaten && m.origReg != origReg {
|
||||||
|
@ -765,22 +702,8 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
|
||||||
beaten = true
|
beaten = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: remove the region distance rule. Region distance has been replaced
|
// Next we prefer smaller distances between regions, as defined by regionDist.
|
||||||
// by the region grouping rule. For now we leave it as it still seems to
|
regDist := regionDist(have.maxRegion, maxRegion, tag.lang)
|
||||||
// have a net positive effect when applied after the grouping rule.
|
|
||||||
// Possible solutions:
|
|
||||||
// - apply the primary locale rule first to effectively disable region
|
|
||||||
// region distance if groups are defined.
|
|
||||||
// - express the following errors in terms of grouping (if possible)
|
|
||||||
// - find another method of handling the following cases.
|
|
||||||
// maximization of legacy: find mo in
|
|
||||||
// "sr-Cyrl, sr-Latn, ro, ro-MD": have ro; want ro-MD (High)
|
|
||||||
// region distance French: find fr-US in
|
|
||||||
// "en, fr, fr-CA, fr-CH": have fr; want fr-CA (High)
|
|
||||||
|
|
||||||
// Next we prefer smaller distances between regions, as defined by
|
|
||||||
// regionDist.
|
|
||||||
regDist := uint8(regionDistance(have.maxRegion, maxRegion))
|
|
||||||
if !beaten && m.regDist != regDist {
|
if !beaten && m.regDist != regDist {
|
||||||
if regDist > m.regDist {
|
if regDist > m.regDist {
|
||||||
return
|
return
|
||||||
|
@ -798,9 +721,6 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finally we prefer tags which have a closer parent relationship.
|
// Finally we prefer tags which have a closer parent relationship.
|
||||||
// TODO: the parent relationship no longer seems necessary. It doesn't hurt
|
|
||||||
// to leave it in as the final tie-breaker, though, especially until the
|
|
||||||
// grouping data has further matured.
|
|
||||||
parentDist := parentDistance(have.tag.region, tag)
|
parentDist := parentDistance(have.tag.region, tag)
|
||||||
if !beaten && m.parentDist != parentDist {
|
if !beaten && m.parentDist != parentDist {
|
||||||
if parentDist > m.parentDist {
|
if parentDist > m.parentDist {
|
||||||
|
@ -817,7 +737,6 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
|
||||||
m.origLang = origLang
|
m.origLang = origLang
|
||||||
m.origReg = origReg
|
m.origReg = origReg
|
||||||
m.origScript = origScript
|
m.origScript = origScript
|
||||||
m.regGroupDist = regGroupDist
|
|
||||||
m.regDist = regDist
|
m.regDist = regDist
|
||||||
m.parentDist = parentDist
|
m.parentDist = parentDist
|
||||||
}
|
}
|
||||||
|
@ -840,27 +759,15 @@ func parentDistance(haveRegion regionID, tag Tag) uint8 {
|
||||||
return d
|
return d
|
||||||
}
|
}
|
||||||
|
|
||||||
// regionGroupDist computes the distance between two regions based on their
|
// regionDist wraps regionDistance with some exceptions to the algorithmic distance.
|
||||||
// CLDR grouping.
|
func regionDist(a, b regionID, lang langID) uint8 {
|
||||||
func regionGroupDist(a, b regionID, script scriptID, lang langID) uint8 {
|
if lang == _en {
|
||||||
aGroup := uint(regionToGroups[a]) << 1
|
// Two variants of non-US English are close to each other, regardless of distance.
|
||||||
bGroup := uint(regionToGroups[b]) << 1
|
if a != _US && b != _US {
|
||||||
for _, ri := range matchRegion {
|
return 2
|
||||||
if langID(ri.lang) == lang && (ri.script == 0 || scriptID(ri.script) == script) {
|
|
||||||
group := uint(1 << (ri.group &^ 0x80))
|
|
||||||
if 0x80&ri.group == 0 {
|
|
||||||
if aGroup&bGroup&group != 0 { // Both regions are in the group.
|
|
||||||
return ri.distance
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if (aGroup|bGroup)&group == 0 { // Both regions are not in the group.
|
|
||||||
return ri.distance
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
return uint8(regionDistance(a, b))
|
||||||
}
|
|
||||||
const defaultDistance = 4
|
|
||||||
return defaultDistance
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// regionDistance computes the distance between two regions based on the
|
// regionDistance computes the distance between two regions based on the
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -41,35 +41,20 @@ func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
|
||||||
if tNotIn == nil {
|
if tNotIn == nil {
|
||||||
tNotIn = transform.Nop
|
tNotIn = transform.Nop
|
||||||
}
|
}
|
||||||
sIn, ok := tIn.(transform.SpanningTransformer)
|
|
||||||
if !ok {
|
|
||||||
sIn = dummySpan{tIn}
|
|
||||||
}
|
|
||||||
sNotIn, ok := tNotIn.(transform.SpanningTransformer)
|
|
||||||
if !ok {
|
|
||||||
sNotIn = dummySpan{tNotIn}
|
|
||||||
}
|
|
||||||
|
|
||||||
a := &cond{
|
a := &cond{
|
||||||
tIn: sIn,
|
tIn: tIn,
|
||||||
tNotIn: sNotIn,
|
tNotIn: tNotIn,
|
||||||
f: s.Contains,
|
f: s.Contains,
|
||||||
}
|
}
|
||||||
a.Reset()
|
a.Reset()
|
||||||
return Transformer{a}
|
return Transformer{a}
|
||||||
}
|
}
|
||||||
|
|
||||||
type dummySpan struct{ transform.Transformer }
|
|
||||||
|
|
||||||
func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
return 0, transform.ErrEndOfSpan
|
|
||||||
}
|
|
||||||
|
|
||||||
type cond struct {
|
type cond struct {
|
||||||
tIn, tNotIn transform.SpanningTransformer
|
tIn, tNotIn transform.Transformer
|
||||||
f func(rune) bool
|
f func(rune) bool
|
||||||
check func(rune) bool // current check to perform
|
check func(rune) bool // current check to perform
|
||||||
t transform.SpanningTransformer // current transformer to use
|
t transform.Transformer // current transformer to use
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reset implements transform.Transformer.
|
// Reset implements transform.Transformer.
|
||||||
|
@ -99,51 +84,6 @@ func (t *cond) isNot(r rune) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// This implementation of Span doesn't help all too much, but it needs to be
|
|
||||||
// there to satisfy this package's Transformer interface.
|
|
||||||
// TODO: there are certainly room for improvements, though. For example, if
|
|
||||||
// t.t == transform.Nop (which will a common occurrence) it will save a bundle
|
|
||||||
// to special-case that loop.
|
|
||||||
func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
p := 0
|
|
||||||
for n < len(src) && err == nil {
|
|
||||||
// Don't process too much at a time as the Spanner that will be
|
|
||||||
// called on this block may terminate early.
|
|
||||||
const maxChunk = 4096
|
|
||||||
max := len(src)
|
|
||||||
if v := n + maxChunk; v < max {
|
|
||||||
max = v
|
|
||||||
}
|
|
||||||
atEnd := false
|
|
||||||
size := 0
|
|
||||||
current := t.t
|
|
||||||
for ; p < max; p += size {
|
|
||||||
r := rune(src[p])
|
|
||||||
if r < utf8.RuneSelf {
|
|
||||||
size = 1
|
|
||||||
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
|
|
||||||
if !atEOF && !utf8.FullRune(src[p:]) {
|
|
||||||
err = transform.ErrShortSrc
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !t.check(r) {
|
|
||||||
// The next rune will be the start of a new run.
|
|
||||||
atEnd = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
|
|
||||||
n += n2
|
|
||||||
if err2 != nil {
|
|
||||||
return n, err2
|
|
||||||
}
|
|
||||||
// At this point either err != nil or t.check will pass for the rune at p.
|
|
||||||
p = n + size
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
p := 0
|
p := 0
|
||||||
for nSrc < len(src) && err == nil {
|
for nSrc < len(src) && err == nil {
|
||||||
|
@ -159,10 +99,9 @@ func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error
|
||||||
size := 0
|
size := 0
|
||||||
current := t.t
|
current := t.t
|
||||||
for ; p < max; p += size {
|
for ; p < max; p += size {
|
||||||
r := rune(src[p])
|
var r rune
|
||||||
if r < utf8.RuneSelf {
|
r, size = utf8.DecodeRune(src[p:])
|
||||||
size = 1
|
if r == utf8.RuneError && size == 1 {
|
||||||
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
|
|
||||||
if !atEOF && !utf8.FullRune(src[p:]) {
|
if !atEOF && !utf8.FullRune(src[p:]) {
|
||||||
err = transform.ErrShortSrc
|
err = transform.ErrShortSrc
|
||||||
break
|
break
|
||||||
|
|
|
@ -46,19 +46,9 @@ func Predicate(f func(rune) bool) Set {
|
||||||
|
|
||||||
// Transformer implements the transform.Transformer interface.
|
// Transformer implements the transform.Transformer interface.
|
||||||
type Transformer struct {
|
type Transformer struct {
|
||||||
t transform.SpanningTransformer
|
transform.Transformer
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
|
||||||
return t.t.Transform(dst, src, atEOF)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
|
|
||||||
return t.t.Span(b, atEOF)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Transformer) Reset() { t.t.Reset() }
|
|
||||||
|
|
||||||
// Bytes returns a new byte slice with the result of converting b using t. It
|
// Bytes returns a new byte slice with the result of converting b using t. It
|
||||||
// calls Reset on t. It returns nil if any error was found. This can only happen
|
// calls Reset on t. It returns nil if any error was found. This can only happen
|
||||||
// if an error-producing Transformer is passed to If.
|
// if an error-producing Transformer is passed to If.
|
||||||
|
@ -106,35 +96,15 @@ type remove func(r rune) bool
|
||||||
|
|
||||||
func (remove) Reset() {}
|
func (remove) Reset() {}
|
||||||
|
|
||||||
// Span implements transform.Spanner.
|
|
||||||
func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
for r, size := rune(0), 0; n < len(src); {
|
|
||||||
if r = rune(src[n]); r < utf8.RuneSelf {
|
|
||||||
size = 1
|
|
||||||
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
|
|
||||||
// Invalid rune.
|
|
||||||
if !atEOF && !utf8.FullRune(src[n:]) {
|
|
||||||
err = transform.ErrShortSrc
|
|
||||||
} else {
|
|
||||||
err = transform.ErrEndOfSpan
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if t(r) {
|
|
||||||
err = transform.ErrEndOfSpan
|
|
||||||
break
|
|
||||||
}
|
|
||||||
n += size
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Transform implements transform.Transformer.
|
// Transform implements transform.Transformer.
|
||||||
func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
for r, size := rune(0), 0; nSrc < len(src); {
|
for r, size := rune(0), 0; nSrc < len(src); {
|
||||||
if r = rune(src[nSrc]); r < utf8.RuneSelf {
|
if r = rune(src[nSrc]); r < utf8.RuneSelf {
|
||||||
size = 1
|
size = 1
|
||||||
} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
|
} else {
|
||||||
|
r, size = utf8.DecodeRune(src[nSrc:])
|
||||||
|
|
||||||
|
if size == 1 {
|
||||||
// Invalid rune.
|
// Invalid rune.
|
||||||
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
err = transform.ErrShortSrc
|
err = transform.ErrShortSrc
|
||||||
|
@ -157,6 +127,8 @@ func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err erro
|
||||||
nSrc++
|
nSrc++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if t(r) {
|
if t(r) {
|
||||||
nSrc += size
|
nSrc += size
|
||||||
continue
|
continue
|
||||||
|
@ -185,28 +157,6 @@ type mapper func(rune) rune
|
||||||
|
|
||||||
func (mapper) Reset() {}
|
func (mapper) Reset() {}
|
||||||
|
|
||||||
// Span implements transform.Spanner.
|
|
||||||
func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
for r, size := rune(0), 0; n < len(src); n += size {
|
|
||||||
if r = rune(src[n]); r < utf8.RuneSelf {
|
|
||||||
size = 1
|
|
||||||
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
|
|
||||||
// Invalid rune.
|
|
||||||
if !atEOF && !utf8.FullRune(src[n:]) {
|
|
||||||
err = transform.ErrShortSrc
|
|
||||||
} else {
|
|
||||||
err = transform.ErrEndOfSpan
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if t(r) != r {
|
|
||||||
err = transform.ErrEndOfSpan
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Transform implements transform.Transformer.
|
// Transform implements transform.Transformer.
|
||||||
func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
var replacement rune
|
var replacement rune
|
||||||
|
@ -280,51 +230,24 @@ func ReplaceIllFormed() Transformer {
|
||||||
|
|
||||||
type replaceIllFormed struct{ transform.NopResetter }
|
type replaceIllFormed struct{ transform.NopResetter }
|
||||||
|
|
||||||
func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
for n < len(src) {
|
|
||||||
// ASCII fast path.
|
|
||||||
if src[n] < utf8.RuneSelf {
|
|
||||||
n++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
r, size := utf8.DecodeRune(src[n:])
|
|
||||||
|
|
||||||
// Look for a valid non-ASCII rune.
|
|
||||||
if r != utf8.RuneError || size != 1 {
|
|
||||||
n += size
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Look for short source data.
|
|
||||||
if !atEOF && !utf8.FullRune(src[n:]) {
|
|
||||||
err = transform.ErrShortSrc
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// We have an invalid rune.
|
|
||||||
err = transform.ErrEndOfSpan
|
|
||||||
break
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
for nSrc < len(src) {
|
for nSrc < len(src) {
|
||||||
// ASCII fast path.
|
r, size := utf8.DecodeRune(src[nSrc:])
|
||||||
if r := src[nSrc]; r < utf8.RuneSelf {
|
|
||||||
|
// Look for an ASCII rune.
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
if nDst == len(dst) {
|
if nDst == len(dst) {
|
||||||
err = transform.ErrShortDst
|
err = transform.ErrShortDst
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
dst[nDst] = r
|
dst[nDst] = byte(r)
|
||||||
nDst++
|
nDst++
|
||||||
nSrc++
|
nSrc++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Look for a valid non-ASCII rune.
|
// Look for a valid non-ASCII rune.
|
||||||
if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
|
if r != utf8.RuneError || size != 1 {
|
||||||
if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
|
if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
|
||||||
err = transform.ErrShortDst
|
err = transform.ErrShortDst
|
||||||
break
|
break
|
||||||
|
|
|
@ -123,64 +123,34 @@ var transitions = [...][2]ruleTransition{
|
||||||
// vice versa.
|
// vice versa.
|
||||||
const exclusiveRTL = uint16(1<<bidi.EN | 1<<bidi.AN)
|
const exclusiveRTL = uint16(1<<bidi.EN | 1<<bidi.AN)
|
||||||
|
|
||||||
// From RFC 5893
|
// Direction reports the direction of the given label as defined by RFC 5893 or
|
||||||
// An RTL label is a label that contains at least one character of type
|
// an error if b is not a valid label according to the Bidi Rule.
|
||||||
// R, AL, or AN.
|
func Direction(b []byte) (bidi.Direction, error) {
|
||||||
//
|
t := Transformer{}
|
||||||
// An LTR label is any label that is not an RTL label.
|
if n, ok := t.advance(b); ok && n == len(b) {
|
||||||
|
switch t.state {
|
||||||
// Direction reports the direction of the given label as defined by RFC 5893.
|
case ruleLTRFinal, ruleInitial:
|
||||||
// The Bidi Rule does not have to be applied to labels of the category
|
return bidi.LeftToRight, nil
|
||||||
// LeftToRight.
|
case ruleRTLFinal:
|
||||||
func Direction(b []byte) bidi.Direction {
|
return bidi.RightToLeft, nil
|
||||||
for i := 0; i < len(b); {
|
|
||||||
e, sz := bidi.Lookup(b[i:])
|
|
||||||
if sz == 0 {
|
|
||||||
i++
|
|
||||||
}
|
}
|
||||||
c := e.Class()
|
|
||||||
if c == bidi.R || c == bidi.AL || c == bidi.AN {
|
|
||||||
return bidi.RightToLeft
|
|
||||||
}
|
}
|
||||||
i += sz
|
return bidi.Neutral, ErrInvalid
|
||||||
}
|
|
||||||
return bidi.LeftToRight
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DirectionString reports the direction of the given label as defined by RFC
|
// DirectionString reports the direction of the given label as defined by RFC
|
||||||
// 5893. The Bidi Rule does not have to be applied to labels of the category
|
// 5893 or an error if s is not a valid label according to the Bidi Rule.
|
||||||
// LeftToRight.
|
func DirectionString(s string) (bidi.Direction, error) {
|
||||||
func DirectionString(s string) bidi.Direction {
|
t := Transformer{}
|
||||||
for i := 0; i < len(s); {
|
if n, ok := t.advanceString(s); ok && n == len(s) {
|
||||||
e, sz := bidi.LookupString(s[i:])
|
switch t.state {
|
||||||
if sz == 0 {
|
case ruleLTRFinal, ruleInitial:
|
||||||
i++
|
return bidi.LeftToRight, nil
|
||||||
|
case ruleRTLFinal:
|
||||||
|
return bidi.RightToLeft, nil
|
||||||
}
|
}
|
||||||
c := e.Class()
|
|
||||||
if c == bidi.R || c == bidi.AL || c == bidi.AN {
|
|
||||||
return bidi.RightToLeft
|
|
||||||
}
|
}
|
||||||
i += sz
|
return bidi.Neutral, ErrInvalid
|
||||||
}
|
|
||||||
return bidi.LeftToRight
|
|
||||||
}
|
|
||||||
|
|
||||||
// Valid reports whether b conforms to the BiDi rule.
|
|
||||||
func Valid(b []byte) bool {
|
|
||||||
var t Transformer
|
|
||||||
if n, ok := t.advance(b); !ok || n < len(b) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return t.isFinal()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidString reports whether s conforms to the BiDi rule.
|
|
||||||
func ValidString(s string) bool {
|
|
||||||
var t Transformer
|
|
||||||
if n, ok := t.advanceString(s); !ok || n < len(s) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return t.isFinal()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// New returns a Transformer that verifies that input adheres to the Bidi Rule.
|
// New returns a Transformer that verifies that input adheres to the Bidi Rule.
|
||||||
|
@ -191,24 +161,9 @@ func New() *Transformer {
|
||||||
// Transformer implements transform.Transform.
|
// Transformer implements transform.Transform.
|
||||||
type Transformer struct {
|
type Transformer struct {
|
||||||
state ruleState
|
state ruleState
|
||||||
hasRTL bool
|
|
||||||
seen uint16
|
seen uint16
|
||||||
}
|
}
|
||||||
|
|
||||||
// A rule can only be violated for "Bidi Domain names", meaning if one of the
|
|
||||||
// following categories has been observed.
|
|
||||||
func (t *Transformer) isRTL() bool {
|
|
||||||
const isRTL = 1<<bidi.R | 1<<bidi.AL | 1<<bidi.AN
|
|
||||||
return t.seen&isRTL != 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Transformer) isFinal() bool {
|
|
||||||
if !t.isRTL() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return t.state == ruleLTRFinal || t.state == ruleRTLFinal || t.state == ruleInitial
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset implements transform.Transformer.
|
// Reset implements transform.Transformer.
|
||||||
func (t *Transformer) Reset() { *t = Transformer{} }
|
func (t *Transformer) Reset() { *t = Transformer{} }
|
||||||
|
|
||||||
|
@ -230,7 +185,7 @@ func (t *Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, er
|
||||||
|
|
||||||
// Span returns the first n bytes of src that conform to the Bidi rule.
|
// Span returns the first n bytes of src that conform to the Bidi rule.
|
||||||
func (t *Transformer) Span(src []byte, atEOF bool) (n int, err error) {
|
func (t *Transformer) Span(src []byte, atEOF bool) (n int, err error) {
|
||||||
if t.state == ruleInvalid && t.isRTL() {
|
if t.state == ruleInvalid {
|
||||||
return 0, ErrInvalid
|
return 0, ErrInvalid
|
||||||
}
|
}
|
||||||
n, ok := t.advance(src)
|
n, ok := t.advance(src)
|
||||||
|
@ -243,7 +198,7 @@ func (t *Transformer) Span(src []byte, atEOF bool) (n int, err error) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
err = ErrInvalid
|
err = ErrInvalid
|
||||||
case !t.isFinal():
|
case t.state != ruleLTRFinal && t.state != ruleRTLFinal && t.state != ruleInitial:
|
||||||
err = ErrInvalid
|
err = ErrInvalid
|
||||||
}
|
}
|
||||||
return n, err
|
return n, err
|
||||||
|
@ -270,15 +225,12 @@ func (t *Transformer) advance(s []byte) (n int, ok bool) {
|
||||||
e, sz = bidi.Lookup(s[n:])
|
e, sz = bidi.Lookup(s[n:])
|
||||||
if sz <= 1 {
|
if sz <= 1 {
|
||||||
if sz == 1 {
|
if sz == 1 {
|
||||||
// We always consider invalid UTF-8 to be invalid, even if
|
return n, false // invalid UTF-8
|
||||||
// the string has not yet been determined to be RTL.
|
|
||||||
// TODO: is this correct?
|
|
||||||
return n, false
|
|
||||||
}
|
}
|
||||||
return n, true // incomplete UTF-8 encoding
|
return n, true // incomplete UTF-8 encoding
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// TODO: using CompactClass would result in noticeable speedup.
|
// TODO: using CompactClass results in noticeable speedup.
|
||||||
// See unicode/bidi/prop.go:Properties.CompactClass.
|
// See unicode/bidi/prop.go:Properties.CompactClass.
|
||||||
c := uint16(1 << e.Class())
|
c := uint16(1 << e.Class())
|
||||||
t.seen |= c
|
t.seen |= c
|
||||||
|
@ -293,10 +245,8 @@ func (t *Transformer) advance(s []byte) (n int, ok bool) {
|
||||||
t.state = tr[1].next
|
t.state = tr[1].next
|
||||||
default:
|
default:
|
||||||
t.state = ruleInvalid
|
t.state = ruleInvalid
|
||||||
if t.isRTL() {
|
|
||||||
return n, false
|
return n, false
|
||||||
}
|
}
|
||||||
}
|
|
||||||
n += sz
|
n += sz
|
||||||
}
|
}
|
||||||
return n, true
|
return n, true
|
||||||
|
@ -332,10 +282,8 @@ func (t *Transformer) advanceString(s string) (n int, ok bool) {
|
||||||
t.state = tr[1].next
|
t.state = tr[1].next
|
||||||
default:
|
default:
|
||||||
t.state = ruleInvalid
|
t.state = ruleInvalid
|
||||||
if t.isRTL() {
|
|
||||||
return n, false
|
return n, false
|
||||||
}
|
}
|
||||||
}
|
|
||||||
n += sz
|
n += sz
|
||||||
}
|
}
|
||||||
return n, true
|
return n, true
|
||||||
|
|
|
@ -17,7 +17,6 @@ go_library(
|
||||||
visibility = ["//visibility:public"],
|
visibility = ["//visibility:public"],
|
||||||
deps = [
|
deps = [
|
||||||
"//vendor/golang.org/x/text/cases:go_default_library",
|
"//vendor/golang.org/x/text/cases:go_default_library",
|
||||||
"//vendor/golang.org/x/text/language:go_default_library",
|
|
||||||
"//vendor/golang.org/x/text/runes:go_default_library",
|
"//vendor/golang.org/x/text/runes:go_default_library",
|
||||||
"//vendor/golang.org/x/text/secure/bidirule:go_default_library",
|
"//vendor/golang.org/x/text/secure/bidirule:go_default_library",
|
||||||
"//vendor/golang.org/x/text/transform:go_default_library",
|
"//vendor/golang.org/x/text/transform:go_default_library",
|
||||||
|
|
|
@ -6,10 +6,10 @@ package precis
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"golang.org/x/text/cases"
|
"golang.org/x/text/cases"
|
||||||
"golang.org/x/text/language"
|
|
||||||
"golang.org/x/text/runes"
|
"golang.org/x/text/runes"
|
||||||
"golang.org/x/text/transform"
|
"golang.org/x/text/transform"
|
||||||
"golang.org/x/text/unicode/norm"
|
"golang.org/x/text/unicode/norm"
|
||||||
|
"golang.org/x/text/width"
|
||||||
)
|
)
|
||||||
|
|
||||||
// An Option is used to define the behavior and rules of a Profile.
|
// An Option is used to define the behavior and rules of a Profile.
|
||||||
|
@ -20,12 +20,11 @@ type options struct {
|
||||||
foldWidth bool
|
foldWidth bool
|
||||||
|
|
||||||
// Enforcement options
|
// Enforcement options
|
||||||
asciiLower bool
|
cases transform.Transformer
|
||||||
cases transform.SpanningTransformer
|
|
||||||
disallow runes.Set
|
disallow runes.Set
|
||||||
norm transform.SpanningTransformer
|
norm norm.Form
|
||||||
additional []func() transform.SpanningTransformer
|
additional []func() transform.Transformer
|
||||||
width transform.SpanningTransformer
|
width *width.Transformer
|
||||||
disallowEmpty bool
|
disallowEmpty bool
|
||||||
bidiRule bool
|
bidiRule bool
|
||||||
|
|
||||||
|
@ -37,11 +36,6 @@ func getOpts(o ...Option) (res options) {
|
||||||
for _, f := range o {
|
for _, f := range o {
|
||||||
f(&res)
|
f(&res)
|
||||||
}
|
}
|
||||||
// Using a SpanningTransformer, instead of norm.Form prevents an allocation
|
|
||||||
// down the road.
|
|
||||||
if res.norm == nil {
|
|
||||||
res.norm = norm.NFC
|
|
||||||
}
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -80,36 +74,11 @@ var (
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO: move this logic to package transform
|
|
||||||
|
|
||||||
type spanWrap struct{ transform.Transformer }
|
|
||||||
|
|
||||||
func (s spanWrap) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
return 0, transform.ErrEndOfSpan
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: allow different types? For instance:
|
|
||||||
// func() transform.Transformer
|
|
||||||
// func() transform.SpanningTransformer
|
|
||||||
// func([]byte) bool // validation only
|
|
||||||
//
|
|
||||||
// Also, would be great if we could detect if a transformer is reentrant.
|
|
||||||
|
|
||||||
// The AdditionalMapping option defines the additional mapping rule for the
|
// The AdditionalMapping option defines the additional mapping rule for the
|
||||||
// Profile by applying Transformer's in sequence.
|
// Profile by applying Transformer's in sequence.
|
||||||
func AdditionalMapping(t ...func() transform.Transformer) Option {
|
func AdditionalMapping(t ...func() transform.Transformer) Option {
|
||||||
return func(o *options) {
|
return func(o *options) {
|
||||||
for _, f := range t {
|
o.additional = t
|
||||||
sf := func() transform.SpanningTransformer {
|
|
||||||
return f().(transform.SpanningTransformer)
|
|
||||||
}
|
|
||||||
if _, ok := f().(transform.SpanningTransformer); !ok {
|
|
||||||
sf = func() transform.SpanningTransformer {
|
|
||||||
return spanWrap{f()}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
o.additional = append(o.additional, sf)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -124,26 +93,10 @@ func Norm(f norm.Form) Option {
|
||||||
// provided to determine the type of case folding used.
|
// provided to determine the type of case folding used.
|
||||||
func FoldCase(opts ...cases.Option) Option {
|
func FoldCase(opts ...cases.Option) Option {
|
||||||
return func(o *options) {
|
return func(o *options) {
|
||||||
o.asciiLower = true
|
|
||||||
o.cases = cases.Fold(opts...)
|
o.cases = cases.Fold(opts...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// The LowerCase option defines a Profile's case mapping rule. Options can be
|
|
||||||
// provided to determine the type of case folding used.
|
|
||||||
func LowerCase(opts ...cases.Option) Option {
|
|
||||||
return func(o *options) {
|
|
||||||
o.asciiLower = true
|
|
||||||
if len(opts) == 0 {
|
|
||||||
o.cases = cases.Lower(language.Und, cases.HandleFinalSigma(false))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
opts = append([]cases.Option{cases.HandleFinalSigma(false)}, opts...)
|
|
||||||
o.cases = cases.Lower(language.Und, opts...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The Disallow option further restricts a Profile's allowed characters beyond
|
// The Disallow option further restricts a Profile's allowed characters beyond
|
||||||
// what is disallowed by the underlying string class.
|
// what is disallowed by the underlying string class.
|
||||||
func Disallow(set runes.Set) Option {
|
func Disallow(set runes.Set) Option {
|
||||||
|
|
|
@ -5,12 +5,9 @@
|
||||||
package precis
|
package precis
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"errors"
|
"errors"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"golang.org/x/text/cases"
|
|
||||||
"golang.org/x/text/language"
|
|
||||||
"golang.org/x/text/runes"
|
"golang.org/x/text/runes"
|
||||||
"golang.org/x/text/secure/bidirule"
|
"golang.org/x/text/secure/bidirule"
|
||||||
"golang.org/x/text/transform"
|
"golang.org/x/text/transform"
|
||||||
|
@ -93,80 +90,32 @@ type buffers struct {
|
||||||
next int
|
next int
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *buffers) apply(t transform.SpanningTransformer) (err error) {
|
func (b *buffers) init(n int) {
|
||||||
n, err := t.Span(b.src, true)
|
b.buf[0] = make([]byte, 0, n)
|
||||||
if err != transform.ErrEndOfSpan {
|
b.buf[1] = make([]byte, 0, n)
|
||||||
return err
|
}
|
||||||
}
|
|
||||||
|
func (b *buffers) apply(t transform.Transformer) (err error) {
|
||||||
|
// TODO: use Span, once available.
|
||||||
x := b.next & 1
|
x := b.next & 1
|
||||||
if b.buf[x] == nil {
|
b.src, _, err = transform.Append(t, b.buf[x][:0], b.src)
|
||||||
b.buf[x] = make([]byte, 0, 8+len(b.src)+len(b.src)>>2)
|
|
||||||
}
|
|
||||||
span := append(b.buf[x][:0], b.src[:n]...)
|
|
||||||
b.src, _, err = transform.Append(t, span, b.src[n:])
|
|
||||||
b.buf[x] = b.src
|
b.buf[x] = b.src
|
||||||
b.next++
|
b.next++
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pre-allocate transformers when possible. In some cases this avoids allocation.
|
func (b *buffers) enforce(p *Profile, src []byte) (str []byte, err error) {
|
||||||
var (
|
|
||||||
foldWidthT transform.SpanningTransformer = width.Fold
|
|
||||||
lowerCaseT transform.SpanningTransformer = cases.Lower(language.Und, cases.HandleFinalSigma(false))
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO: make this a method on profile.
|
|
||||||
|
|
||||||
func (b *buffers) enforce(p *Profile, src []byte, comparing bool) (str []byte, err error) {
|
|
||||||
b.src = src
|
b.src = src
|
||||||
|
|
||||||
ascii := true
|
|
||||||
for _, c := range src {
|
|
||||||
if c >= utf8.RuneSelf {
|
|
||||||
ascii = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// ASCII fast path.
|
|
||||||
if ascii {
|
|
||||||
for _, f := range p.options.additional {
|
|
||||||
if err = b.apply(f()); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case p.options.asciiLower || (comparing && p.options.ignorecase):
|
|
||||||
for i, c := range b.src {
|
|
||||||
if 'A' <= c && c <= 'Z' {
|
|
||||||
b.src[i] = c ^ 1<<5
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case p.options.cases != nil:
|
|
||||||
b.apply(p.options.cases)
|
|
||||||
}
|
|
||||||
c := checker{p: p}
|
|
||||||
if _, err := c.span(b.src, true); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if p.disallow != nil {
|
|
||||||
for _, c := range b.src {
|
|
||||||
if p.disallow.Contains(rune(c)) {
|
|
||||||
return nil, errDisallowedRune
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if p.options.disallowEmpty && len(b.src) == 0 {
|
|
||||||
return nil, errEmptyString
|
|
||||||
}
|
|
||||||
return b.src, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// These transforms are applied in the order defined in
|
// These transforms are applied in the order defined in
|
||||||
// https://tools.ietf.org/html/rfc7564#section-7
|
// https://tools.ietf.org/html/rfc7564#section-7
|
||||||
|
|
||||||
// TODO: allow different width transforms options.
|
// TODO: allow different width transforms options.
|
||||||
if p.options.foldWidth || (p.options.ignorecase && comparing) {
|
if p.options.foldWidth {
|
||||||
b.apply(foldWidthT)
|
// TODO: use Span, once available.
|
||||||
|
if err = b.apply(width.Fold); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
for _, f := range p.options.additional {
|
for _, f := range p.options.additional {
|
||||||
if err = b.apply(f()); err != nil {
|
if err = b.apply(f()); err != nil {
|
||||||
|
@ -174,14 +123,24 @@ func (b *buffers) enforce(p *Profile, src []byte, comparing bool) (str []byte, e
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if p.options.cases != nil {
|
if p.options.cases != nil {
|
||||||
b.apply(p.options.cases)
|
if err = b.apply(p.options.cases); err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
if comparing && p.options.ignorecase {
|
|
||||||
b.apply(lowerCaseT)
|
|
||||||
}
|
}
|
||||||
b.apply(p.norm)
|
if n := p.norm.QuickSpan(b.src); n < len(b.src) {
|
||||||
if p.options.bidiRule && !bidirule.Valid(b.src) {
|
x := b.next & 1
|
||||||
return nil, bidirule.ErrInvalid
|
n = copy(b.buf[x], b.src[:n])
|
||||||
|
b.src, _, err = transform.Append(p.norm, b.buf[x][:n], b.src[n:])
|
||||||
|
b.buf[x] = b.src
|
||||||
|
b.next++
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if p.options.bidiRule {
|
||||||
|
if err := b.apply(bidirule.New()); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
c := checker{p: p}
|
c := checker{p: p}
|
||||||
if _, err := c.span(b.src, true); err != nil {
|
if _, err := c.span(b.src, true); err != nil {
|
||||||
|
@ -196,6 +155,9 @@ func (b *buffers) enforce(p *Profile, src []byte, comparing bool) (str []byte, e
|
||||||
i += size
|
i += size
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: Add the disallow empty rule with a dummy transformer?
|
||||||
|
|
||||||
if p.options.disallowEmpty && len(b.src) == 0 {
|
if p.options.disallowEmpty && len(b.src) == 0 {
|
||||||
return nil, errEmptyString
|
return nil, errEmptyString
|
||||||
}
|
}
|
||||||
|
@ -206,16 +168,19 @@ func (b *buffers) enforce(p *Profile, src []byte, comparing bool) (str []byte, e
|
||||||
// It returns an error if the input string is invalid.
|
// It returns an error if the input string is invalid.
|
||||||
func (p *Profile) Append(dst, src []byte) ([]byte, error) {
|
func (p *Profile) Append(dst, src []byte) ([]byte, error) {
|
||||||
var buf buffers
|
var buf buffers
|
||||||
b, err := buf.enforce(p, src, false)
|
buf.init(8 + len(src) + len(src)>>2)
|
||||||
|
b, err := buf.enforce(p, src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return append(dst, b...), nil
|
return append(dst, b...), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func processBytes(p *Profile, b []byte, key bool) ([]byte, error) {
|
// Bytes returns a new byte slice with the result of applying the profile to b.
|
||||||
|
func (p *Profile) Bytes(b []byte) ([]byte, error) {
|
||||||
var buf buffers
|
var buf buffers
|
||||||
b, err := buf.enforce(p, b, key)
|
buf.init(8 + len(b) + len(b)>>2)
|
||||||
|
b, err := buf.enforce(p, b)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -227,62 +192,39 @@ func processBytes(p *Profile, b []byte, key bool) ([]byte, error) {
|
||||||
return b, nil
|
return b, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bytes returns a new byte slice with the result of applying the profile to b.
|
// String returns a string with the result of applying the profile to s.
|
||||||
func (p *Profile) Bytes(b []byte) ([]byte, error) {
|
func (p *Profile) String(s string) (string, error) {
|
||||||
return processBytes(p, b, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendCompareKey appends the result of applying p to src (including any
|
|
||||||
// optional rules to make strings comparable or useful in a map key such as
|
|
||||||
// applying lowercasing) writing the result to dst. It returns an error if the
|
|
||||||
// input string is invalid.
|
|
||||||
func (p *Profile) AppendCompareKey(dst, src []byte) ([]byte, error) {
|
|
||||||
var buf buffers
|
var buf buffers
|
||||||
b, err := buf.enforce(p, src, true)
|
buf.init(8 + len(s) + len(s)>>2)
|
||||||
if err != nil {
|
b, err := buf.enforce(p, []byte(s))
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return append(dst, b...), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func processString(p *Profile, s string, key bool) (string, error) {
|
|
||||||
var buf buffers
|
|
||||||
b, err := buf.enforce(p, []byte(s), key)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
return string(b), nil
|
return string(b), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// String returns a string with the result of applying the profile to s.
|
|
||||||
func (p *Profile) String(s string) (string, error) {
|
|
||||||
return processString(p, s, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CompareKey returns a string that can be used for comparison, hashing, or
|
|
||||||
// collation.
|
|
||||||
func (p *Profile) CompareKey(s string) (string, error) {
|
|
||||||
return processString(p, s, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compare enforces both strings, and then compares them for bit-string identity
|
// Compare enforces both strings, and then compares them for bit-string identity
|
||||||
// (byte-for-byte equality). If either string cannot be enforced, the comparison
|
// (byte-for-byte equality). If either string cannot be enforced, the comparison
|
||||||
// is false.
|
// is false.
|
||||||
func (p *Profile) Compare(a, b string) bool {
|
func (p *Profile) Compare(a, b string) bool {
|
||||||
var buf buffers
|
a, err := p.String(a)
|
||||||
|
if err != nil {
|
||||||
akey, err := buf.enforce(p, []byte(a), true)
|
return false
|
||||||
|
}
|
||||||
|
b, err = p.String(b)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
buf = buffers{}
|
// TODO: This is out of order. Need to extract the transformation logic and
|
||||||
bkey, err := buf.enforce(p, []byte(b), true)
|
// put this in where the normal case folding would go (but only for
|
||||||
if err != nil {
|
// comparison).
|
||||||
return false
|
if p.options.ignorecase {
|
||||||
|
a = width.Fold.String(a)
|
||||||
|
b = width.Fold.String(a)
|
||||||
}
|
}
|
||||||
|
|
||||||
return bytes.Compare(akey, bkey) == 0
|
return a == b
|
||||||
}
|
}
|
||||||
|
|
||||||
// Allowed returns a runes.Set containing every rune that is a member of the
|
// Allowed returns a runes.Set containing every rune that is a member of the
|
||||||
|
@ -322,36 +264,34 @@ func (c *checker) span(src []byte, atEOF bool) (n int, err error) {
|
||||||
}
|
}
|
||||||
return n, errDisallowedRune
|
return n, errDisallowedRune
|
||||||
}
|
}
|
||||||
doLookAhead := false
|
|
||||||
if property(e) < c.p.class.validFrom {
|
if property(e) < c.p.class.validFrom {
|
||||||
if d.rule == nil {
|
if d.rule == nil {
|
||||||
return n, errDisallowedRune
|
return n, errDisallowedRune
|
||||||
}
|
}
|
||||||
doLookAhead, err = d.rule(c.beforeBits)
|
doLookAhead, err := d.rule(c.beforeBits)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
if doLookAhead {
|
||||||
|
c.beforeBits &= d.keep
|
||||||
|
c.beforeBits |= d.set
|
||||||
|
// We may still have a lookahead rule which we will require to
|
||||||
|
// complete (by checking termBits == 0) before setting the new
|
||||||
|
// bits.
|
||||||
|
if c.termBits != 0 && (!c.checkLookahead() || c.termBits == 0) {
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
c.termBits = d.term
|
||||||
|
c.acceptBits = d.accept
|
||||||
|
n += sz
|
||||||
|
continue
|
||||||
|
}
|
||||||
}
|
}
|
||||||
c.beforeBits &= d.keep
|
c.beforeBits &= d.keep
|
||||||
c.beforeBits |= d.set
|
c.beforeBits |= d.set
|
||||||
if c.termBits != 0 {
|
if c.termBits != 0 && !c.checkLookahead() {
|
||||||
// We are currently in an unterminated lookahead.
|
|
||||||
if c.beforeBits&c.termBits != 0 {
|
|
||||||
c.termBits = 0
|
|
||||||
c.acceptBits = 0
|
|
||||||
} else if c.beforeBits&c.acceptBits == 0 {
|
|
||||||
// Invalid continuation of the unterminated lookahead sequence.
|
|
||||||
return n, errContext
|
return n, errContext
|
||||||
}
|
}
|
||||||
}
|
|
||||||
if doLookAhead {
|
|
||||||
if c.termBits != 0 {
|
|
||||||
// A previous lookahead run has not been terminated yet.
|
|
||||||
return n, errContext
|
|
||||||
}
|
|
||||||
c.termBits = d.term
|
|
||||||
c.acceptBits = d.accept
|
|
||||||
}
|
|
||||||
n += sz
|
n += sz
|
||||||
}
|
}
|
||||||
if m := c.beforeBits >> finalShift; c.beforeBits&m != m || c.termBits != 0 {
|
if m := c.beforeBits >> finalShift; c.beforeBits&m != m || c.termBits != 0 {
|
||||||
|
@ -360,6 +300,18 @@ func (c *checker) span(src []byte, atEOF bool) (n int, err error) {
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *checker) checkLookahead() bool {
|
||||||
|
switch {
|
||||||
|
case c.beforeBits&c.termBits != 0:
|
||||||
|
c.termBits = 0
|
||||||
|
c.acceptBits = 0
|
||||||
|
case c.beforeBits&c.acceptBits != 0:
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// TODO: we may get rid of this transform if transform.Chain understands
|
// TODO: we may get rid of this transform if transform.Chain understands
|
||||||
// something like a Spanner interface.
|
// something like a Spanner interface.
|
||||||
func (c checker) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (c checker) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
|
|
@ -13,66 +13,44 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Implements the Nickname profile specified in RFC 7700.
|
Nickname *Profile = nickname // Implements the Nickname profile specified in RFC 7700.
|
||||||
// The nickname profile is not idempotent and may need to be applied multiple
|
UsernameCaseMapped *Profile = usernameCaseMap // Implements the UsernameCaseMapped profile specified in RFC 7613.
|
||||||
// times before being used for comparisons.
|
UsernameCasePreserved *Profile = usernameNoCaseMap // Implements the UsernameCasePreserved profile specified in RFC 7613.
|
||||||
Nickname *Profile = nickname
|
OpaqueString *Profile = opaquestring // Implements the OpaqueString profile defined in RFC 7613 for passwords and other secure labels.
|
||||||
|
|
||||||
// Implements the UsernameCaseMapped profile specified in RFC 7613.
|
|
||||||
UsernameCaseMapped *Profile = usernameCaseMap
|
|
||||||
|
|
||||||
// Implements the UsernameCasePreserved profile specified in RFC 7613.
|
|
||||||
UsernameCasePreserved *Profile = usernameNoCaseMap
|
|
||||||
|
|
||||||
// Implements the OpaqueString profile defined in RFC 7613 for passwords and other secure labels.
|
|
||||||
OpaqueString *Profile = opaquestring
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// TODO: mvl: "Ultimately, I would manually define the structs for the internal
|
||||||
|
// profiles. This avoid pulling in unneeded tables when they are not used."
|
||||||
var (
|
var (
|
||||||
nickname = &Profile{
|
nickname = NewFreeform(
|
||||||
options: getOpts(
|
|
||||||
AdditionalMapping(func() transform.Transformer {
|
AdditionalMapping(func() transform.Transformer {
|
||||||
return &nickAdditionalMapping{}
|
return &nickAdditionalMapping{}
|
||||||
}),
|
}),
|
||||||
IgnoreCase,
|
IgnoreCase,
|
||||||
Norm(norm.NFKC),
|
Norm(norm.NFKC),
|
||||||
DisallowEmpty,
|
DisallowEmpty,
|
||||||
),
|
)
|
||||||
class: freeform,
|
usernameCaseMap = NewIdentifier(
|
||||||
}
|
|
||||||
usernameCaseMap = &Profile{
|
|
||||||
options: getOpts(
|
|
||||||
FoldWidth,
|
FoldWidth,
|
||||||
LowerCase(),
|
FoldCase(),
|
||||||
Norm(norm.NFC),
|
Norm(norm.NFC),
|
||||||
BidiRule,
|
BidiRule,
|
||||||
),
|
)
|
||||||
class: identifier,
|
usernameNoCaseMap = NewIdentifier(
|
||||||
}
|
|
||||||
usernameNoCaseMap = &Profile{
|
|
||||||
options: getOpts(
|
|
||||||
FoldWidth,
|
FoldWidth,
|
||||||
Norm(norm.NFC),
|
Norm(norm.NFC),
|
||||||
BidiRule,
|
BidiRule,
|
||||||
),
|
)
|
||||||
class: identifier,
|
opaquestring = NewFreeform(
|
||||||
}
|
|
||||||
opaquestring = &Profile{
|
|
||||||
options: getOpts(
|
|
||||||
AdditionalMapping(func() transform.Transformer {
|
AdditionalMapping(func() transform.Transformer {
|
||||||
return mapSpaces
|
return runes.Map(func(r rune) rune {
|
||||||
}),
|
|
||||||
Norm(norm.NFC),
|
|
||||||
DisallowEmpty,
|
|
||||||
),
|
|
||||||
class: freeform,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// mapSpaces is a shared value of a runes.Map transformer.
|
|
||||||
var mapSpaces transform.Transformer = runes.Map(func(r rune) rune {
|
|
||||||
if unicode.Is(unicode.Zs, r) {
|
if unicode.Is(unicode.Zs, r) {
|
||||||
return ' '
|
return ' '
|
||||||
}
|
}
|
||||||
return r
|
return r
|
||||||
})
|
})
|
||||||
|
}),
|
||||||
|
Norm(norm.NFC),
|
||||||
|
DisallowEmpty,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
// This file was generated by go generate; DO NOT EDIT
|
||||||
|
|
||||||
package precis
|
package precis
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
// This file was generated by go generate; DO NOT EDIT
|
||||||
|
|
||||||
package precis
|
package precis
|
||||||
|
|
||||||
|
|
|
@ -24,10 +24,6 @@ var (
|
||||||
// complete the transformation.
|
// complete the transformation.
|
||||||
ErrShortSrc = errors.New("transform: short source buffer")
|
ErrShortSrc = errors.New("transform: short source buffer")
|
||||||
|
|
||||||
// ErrEndOfSpan means that the input and output (the transformed input)
|
|
||||||
// are not identical.
|
|
||||||
ErrEndOfSpan = errors.New("transform: input and output are not identical")
|
|
||||||
|
|
||||||
// errInconsistentByteCount means that Transform returned success (nil
|
// errInconsistentByteCount means that Transform returned success (nil
|
||||||
// error) but also returned nSrc inconsistent with the src argument.
|
// error) but also returned nSrc inconsistent with the src argument.
|
||||||
errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
|
errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
|
||||||
|
@ -64,41 +60,6 @@ type Transformer interface {
|
||||||
Reset()
|
Reset()
|
||||||
}
|
}
|
||||||
|
|
||||||
// SpanningTransformer extends the Transformer interface with a Span method
|
|
||||||
// that determines how much of the input already conforms to the Transformer.
|
|
||||||
type SpanningTransformer interface {
|
|
||||||
Transformer
|
|
||||||
|
|
||||||
// Span returns a position in src such that transforming src[:n] results in
|
|
||||||
// identical output src[:n] for these bytes. It does not necessarily return
|
|
||||||
// the largest such n. The atEOF argument tells whether src represents the
|
|
||||||
// last bytes of the input.
|
|
||||||
//
|
|
||||||
// Callers should always account for the n bytes consumed before
|
|
||||||
// considering the error err.
|
|
||||||
//
|
|
||||||
// A nil error means that all input bytes are known to be identical to the
|
|
||||||
// output produced by the Transformer. A nil error can be be returned
|
|
||||||
// regardless of whether atEOF is true. If err is nil, then then n must
|
|
||||||
// equal len(src); the converse is not necessarily true.
|
|
||||||
//
|
|
||||||
// ErrEndOfSpan means that the Transformer output may differ from the
|
|
||||||
// input after n bytes. Note that n may be len(src), meaning that the output
|
|
||||||
// would contain additional bytes after otherwise identical output.
|
|
||||||
// ErrShortSrc means that src had insufficient data to determine whether the
|
|
||||||
// remaining bytes would change. Other than the error conditions listed
|
|
||||||
// here, implementations are free to report other errors that arise.
|
|
||||||
//
|
|
||||||
// Calling Span can modify the Transformer state as a side effect. In
|
|
||||||
// effect, it does the transformation just as calling Transform would, only
|
|
||||||
// without copying to a destination buffer and only up to a point it can
|
|
||||||
// determine the input and output bytes are the same. This is obviously more
|
|
||||||
// limited than calling Transform, but can be more efficient in terms of
|
|
||||||
// copying and allocating buffers. Calls to Span and Transform may be
|
|
||||||
// interleaved.
|
|
||||||
Span(src []byte, atEOF bool) (n int, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NopResetter can be embedded by implementations of Transformer to add a nop
|
// NopResetter can be embedded by implementations of Transformer to add a nop
|
||||||
// Reset method.
|
// Reset method.
|
||||||
type NopResetter struct{}
|
type NopResetter struct{}
|
||||||
|
@ -317,10 +278,6 @@ func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
return n, n, err
|
return n, n, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (nop) Span(src []byte, atEOF bool) (n int, err error) {
|
|
||||||
return len(src), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type discard struct{ NopResetter }
|
type discard struct{ NopResetter }
|
||||||
|
|
||||||
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
@ -332,8 +289,8 @@ var (
|
||||||
// by consuming all bytes and writing nothing.
|
// by consuming all bytes and writing nothing.
|
||||||
Discard Transformer = discard{}
|
Discard Transformer = discard{}
|
||||||
|
|
||||||
// Nop is a SpanningTransformer that copies src to dst.
|
// Nop is a Transformer that copies src to dst.
|
||||||
Nop SpanningTransformer = nop{}
|
Nop Transformer = nop{}
|
||||||
)
|
)
|
||||||
|
|
||||||
// chain is a sequence of links. A chain with N Transformers has N+1 links and
|
// chain is a sequence of links. A chain with N Transformers has N+1 links and
|
||||||
|
@ -401,8 +358,6 @@ func (c *chain) Reset() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: make chain use Span (is going to be fun to implement!)
|
|
||||||
|
|
||||||
// Transform applies the transformers of c in sequence.
|
// Transform applies the transformers of c in sequence.
|
||||||
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
// Set up src and dst in the chain.
|
// Set up src and dst in the chain.
|
||||||
|
@ -493,7 +448,8 @@ func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err erro
|
||||||
return dstL.n, srcL.p, err
|
return dstL.n, srcL.p, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: use runes.Remove instead.
|
// RemoveFunc returns a Transformer that removes from the input all runes r for
|
||||||
|
// which f(r) is true. Illegal bytes in the input are replaced by RuneError.
|
||||||
func RemoveFunc(f func(r rune) bool) Transformer {
|
func RemoveFunc(f func(r rune) bool) Transformer {
|
||||||
return removeF(f)
|
return removeF(f)
|
||||||
}
|
}
|
||||||
|
|
|
@ -84,7 +84,7 @@ func resolvePairedBrackets(s *isolatingRunSequence) {
|
||||||
dirEmbed = R
|
dirEmbed = R
|
||||||
}
|
}
|
||||||
p.locateBrackets(s.p.pairTypes, s.p.pairValues)
|
p.locateBrackets(s.p.pairTypes, s.p.pairValues)
|
||||||
p.resolveBrackets(dirEmbed, s.p.initialTypes)
|
p.resolveBrackets(dirEmbed)
|
||||||
}
|
}
|
||||||
|
|
||||||
type bracketPairer struct {
|
type bracketPairer struct {
|
||||||
|
@ -125,8 +125,6 @@ func (p *bracketPairer) matchOpener(pairValues []rune, opener, closer int) bool
|
||||||
return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]]
|
return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]]
|
||||||
}
|
}
|
||||||
|
|
||||||
const maxPairingDepth = 63
|
|
||||||
|
|
||||||
// locateBrackets locates matching bracket pairs according to BD16.
|
// locateBrackets locates matching bracket pairs according to BD16.
|
||||||
//
|
//
|
||||||
// This implementation uses a linked list instead of a stack, because, while
|
// This implementation uses a linked list instead of a stack, because, while
|
||||||
|
@ -138,17 +136,11 @@ func (p *bracketPairer) locateBrackets(pairTypes []bracketType, pairValues []run
|
||||||
for i, index := range p.indexes {
|
for i, index := range p.indexes {
|
||||||
|
|
||||||
// look at the bracket type for each character
|
// look at the bracket type for each character
|
||||||
if pairTypes[index] == bpNone || p.codesIsolatedRun[i] != ON {
|
|
||||||
// continue scanning
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
switch pairTypes[index] {
|
switch pairTypes[index] {
|
||||||
|
case bpNone:
|
||||||
|
// continue scanning
|
||||||
|
|
||||||
case bpOpen:
|
case bpOpen:
|
||||||
// check if maximum pairing depth reached
|
|
||||||
if p.openers.Len() == maxPairingDepth {
|
|
||||||
p.openers.Init()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// remember opener location, most recent first
|
// remember opener location, most recent first
|
||||||
p.openers.PushFront(i)
|
p.openers.PushFront(i)
|
||||||
|
|
||||||
|
@ -278,7 +270,7 @@ func (p *bracketPairer) classBeforePair(loc bracketPair) Class {
|
||||||
}
|
}
|
||||||
|
|
||||||
// assignBracketType implements rule N0 for a single bracket pair.
|
// assignBracketType implements rule N0 for a single bracket pair.
|
||||||
func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) {
|
func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class) {
|
||||||
// rule "N0, a", inspect contents of pair
|
// rule "N0, a", inspect contents of pair
|
||||||
dirPair := p.classifyPairContent(loc, dirEmbed)
|
dirPair := p.classifyPairContent(loc, dirEmbed)
|
||||||
|
|
||||||
|
@ -303,33 +295,13 @@ func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initi
|
||||||
// direction
|
// direction
|
||||||
|
|
||||||
// set the bracket types to the type found
|
// set the bracket types to the type found
|
||||||
p.setBracketsToType(loc, dirPair, initialTypes)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) {
|
|
||||||
p.codesIsolatedRun[loc.opener] = dirPair
|
p.codesIsolatedRun[loc.opener] = dirPair
|
||||||
p.codesIsolatedRun[loc.closer] = dirPair
|
p.codesIsolatedRun[loc.closer] = dirPair
|
||||||
|
|
||||||
for i := loc.opener + 1; i < loc.closer; i++ {
|
|
||||||
index := p.indexes[i]
|
|
||||||
if initialTypes[index] != NSM {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
p.codesIsolatedRun[i] = dirPair
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := loc.closer + 1; i < len(p.indexes); i++ {
|
|
||||||
index := p.indexes[i]
|
|
||||||
if initialTypes[index] != NSM {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
p.codesIsolatedRun[i] = dirPair
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// resolveBrackets implements rule N0 for a list of pairs.
|
// resolveBrackets implements rule N0 for a list of pairs.
|
||||||
func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) {
|
func (p *bracketPairer) resolveBrackets(dirEmbed Class) {
|
||||||
for _, loc := range p.pairPositions {
|
for _, loc := range p.pairPositions {
|
||||||
p.assignBracketType(loc, dirEmbed, initialTypes)
|
p.assignBracketType(loc, dirEmbed)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -309,9 +309,6 @@ func (p *paragraph) determineExplicitEmbeddingLevels() {
|
||||||
}
|
}
|
||||||
if isIsolate {
|
if isIsolate {
|
||||||
p.resultLevels[i] = stack.lastEmbeddingLevel()
|
p.resultLevels[i] = stack.lastEmbeddingLevel()
|
||||||
if stack.lastDirectionalOverrideStatus() != ON {
|
|
||||||
p.resultTypes[i] = stack.lastDirectionalOverrideStatus()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var newLevel level
|
var newLevel level
|
||||||
|
@ -726,7 +723,7 @@ loop:
|
||||||
continue loop
|
continue loop
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
log.Panicf("invalid bidi code %v present in assertOnly at position %d", t, s.indexes[i])
|
log.Panicf("invalid bidi code %s present in assertOnly at position %d", t, s.indexes[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
// This file was generated by go generate; DO NOT EDIT
|
||||||
|
|
||||||
package bidi
|
package bidi
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
// This file was generated by go generate; DO NOT EDIT
|
||||||
|
|
||||||
package bidi
|
package bidi
|
||||||
|
|
||||||
|
|
|
@ -33,9 +33,17 @@ const (
|
||||||
// streamSafe implements the policy of when a CGJ should be inserted.
|
// streamSafe implements the policy of when a CGJ should be inserted.
|
||||||
type streamSafe uint8
|
type streamSafe uint8
|
||||||
|
|
||||||
// first inserts the first rune of a segment. It is a faster version of next if
|
// mkStreamSafe is a shorthand for declaring a streamSafe var and calling
|
||||||
// it is known p represents the first rune in a segment.
|
// first on it.
|
||||||
|
func mkStreamSafe(p Properties) streamSafe {
|
||||||
|
return streamSafe(p.nTrailingNonStarters())
|
||||||
|
}
|
||||||
|
|
||||||
|
// first inserts the first rune of a segment.
|
||||||
func (ss *streamSafe) first(p Properties) {
|
func (ss *streamSafe) first(p Properties) {
|
||||||
|
if *ss != 0 {
|
||||||
|
panic("!= 0")
|
||||||
|
}
|
||||||
*ss = streamSafe(p.nTrailingNonStarters())
|
*ss = streamSafe(p.nTrailingNonStarters())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -58,7 +66,7 @@ func (ss *streamSafe) next(p Properties) ssState {
|
||||||
// be a non-starter. Note that it always hold that if nLead > 0 then
|
// be a non-starter. Note that it always hold that if nLead > 0 then
|
||||||
// nLead == nTrail.
|
// nLead == nTrail.
|
||||||
if n == 0 {
|
if n == 0 {
|
||||||
*ss = streamSafe(p.nTrailingNonStarters())
|
*ss = 0
|
||||||
return ssStarter
|
return ssStarter
|
||||||
}
|
}
|
||||||
return ssSuccess
|
return ssSuccess
|
||||||
|
@ -134,6 +142,7 @@ func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) {
|
||||||
func (rb *reorderBuffer) reset() {
|
func (rb *reorderBuffer) reset() {
|
||||||
rb.nrune = 0
|
rb.nrune = 0
|
||||||
rb.nbyte = 0
|
rb.nbyte = 0
|
||||||
|
rb.ss = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rb *reorderBuffer) doFlush() bool {
|
func (rb *reorderBuffer) doFlush() bool {
|
||||||
|
@ -248,9 +257,6 @@ func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) {
|
||||||
// It flushes the buffer on each new segment start.
|
// It flushes the buffer on each new segment start.
|
||||||
func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr {
|
func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr {
|
||||||
rb.tmpBytes.setBytes(dcomp)
|
rb.tmpBytes.setBytes(dcomp)
|
||||||
// As the streamSafe accounting already handles the counting for modifiers,
|
|
||||||
// we don't have to call next. However, we do need to keep the accounting
|
|
||||||
// intact when flushing the buffer.
|
|
||||||
for i := 0; i < len(dcomp); {
|
for i := 0; i < len(dcomp); {
|
||||||
info := rb.f.info(rb.tmpBytes, i)
|
info := rb.f.info(rb.tmpBytes, i)
|
||||||
if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() {
|
if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() {
|
||||||
|
|
|
@ -10,7 +10,7 @@ package norm
|
||||||
// and its corresponding decomposing form share the same trie. Each trie maps
|
// and its corresponding decomposing form share the same trie. Each trie maps
|
||||||
// a rune to a uint16. The values take two forms. For v >= 0x8000:
|
// a rune to a uint16. The values take two forms. For v >= 0x8000:
|
||||||
// bits
|
// bits
|
||||||
// 15: 1 (inverse of NFD_QC bit of qcInfo)
|
// 15: 1 (inverse of NFD_QD bit of qcInfo)
|
||||||
// 13..7: qcInfo (see below). isYesD is always true (no decompostion).
|
// 13..7: qcInfo (see below). isYesD is always true (no decompostion).
|
||||||
// 6..0: ccc (compressed CCC value).
|
// 6..0: ccc (compressed CCC value).
|
||||||
// For v < 0x8000, the respective rune has a decomposition and v is an index
|
// For v < 0x8000, the respective rune has a decomposition and v is an index
|
||||||
|
@ -56,31 +56,28 @@ type formInfo struct {
|
||||||
nextMain iterFunc
|
nextMain iterFunc
|
||||||
}
|
}
|
||||||
|
|
||||||
var formTable = []*formInfo{{
|
var formTable []*formInfo
|
||||||
form: NFC,
|
|
||||||
composing: true,
|
func init() {
|
||||||
compatibility: false,
|
formTable = make([]*formInfo, 4)
|
||||||
info: lookupInfoNFC,
|
|
||||||
nextMain: nextComposed,
|
for i := range formTable {
|
||||||
}, {
|
f := &formInfo{}
|
||||||
form: NFD,
|
formTable[i] = f
|
||||||
composing: false,
|
f.form = Form(i)
|
||||||
compatibility: false,
|
if Form(i) == NFKD || Form(i) == NFKC {
|
||||||
info: lookupInfoNFC,
|
f.compatibility = true
|
||||||
nextMain: nextDecomposed,
|
f.info = lookupInfoNFKC
|
||||||
}, {
|
} else {
|
||||||
form: NFKC,
|
f.info = lookupInfoNFC
|
||||||
composing: true,
|
}
|
||||||
compatibility: true,
|
f.nextMain = nextDecomposed
|
||||||
info: lookupInfoNFKC,
|
if Form(i) == NFC || Form(i) == NFKC {
|
||||||
nextMain: nextComposed,
|
f.nextMain = nextComposed
|
||||||
}, {
|
f.composing = true
|
||||||
form: NFKD,
|
}
|
||||||
composing: false,
|
}
|
||||||
compatibility: true,
|
}
|
||||||
info: lookupInfoNFKC,
|
|
||||||
nextMain: nextDecomposed,
|
|
||||||
}}
|
|
||||||
|
|
||||||
// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
|
// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
|
||||||
// unexpected behavior for the user. For example, in NFD, there is a boundary
|
// unexpected behavior for the user. For example, in NFD, there is a boundary
|
||||||
|
|
|
@ -90,20 +90,16 @@ func (in *input) charinfoNFKC(p int) (uint16, int) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (in *input) hangul(p int) (r rune) {
|
func (in *input) hangul(p int) (r rune) {
|
||||||
var size int
|
|
||||||
if in.bytes == nil {
|
if in.bytes == nil {
|
||||||
if !isHangulString(in.str[p:]) {
|
if !isHangulString(in.str[p:]) {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
r, size = utf8.DecodeRuneInString(in.str[p:])
|
r, _ = utf8.DecodeRuneInString(in.str[p:])
|
||||||
} else {
|
} else {
|
||||||
if !isHangul(in.bytes[p:]) {
|
if !isHangul(in.bytes[p:]) {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
r, size = utf8.DecodeRune(in.bytes[p:])
|
r, _ = utf8.DecodeRune(in.bytes[p:])
|
||||||
}
|
|
||||||
if size != hangulUTF8Size {
|
|
||||||
return 0
|
|
||||||
}
|
}
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
|
|
|
@ -41,7 +41,6 @@ func (i *Iter) Init(f Form, src []byte) {
|
||||||
i.next = i.rb.f.nextMain
|
i.next = i.rb.f.nextMain
|
||||||
i.asciiF = nextASCIIBytes
|
i.asciiF = nextASCIIBytes
|
||||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
i.rb.ss.first(i.info)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// InitString initializes i to iterate over src after normalizing it to Form f.
|
// InitString initializes i to iterate over src after normalizing it to Form f.
|
||||||
|
@ -57,12 +56,11 @@ func (i *Iter) InitString(f Form, src string) {
|
||||||
i.next = i.rb.f.nextMain
|
i.next = i.rb.f.nextMain
|
||||||
i.asciiF = nextASCIIString
|
i.asciiF = nextASCIIString
|
||||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
i.rb.ss.first(i.info)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Seek sets the segment to be returned by the next call to Next to start
|
// Seek sets the segment to be returned by the next call to Next to start
|
||||||
// at position p. It is the responsibility of the caller to set p to the
|
// at position p. It is the responsibility of the caller to set p to the
|
||||||
// start of a segment.
|
// start of a UTF8 rune.
|
||||||
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
|
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
|
||||||
var abs int64
|
var abs int64
|
||||||
switch whence {
|
switch whence {
|
||||||
|
@ -86,7 +84,6 @@ func (i *Iter) Seek(offset int64, whence int) (int64, error) {
|
||||||
i.multiSeg = nil
|
i.multiSeg = nil
|
||||||
i.next = i.rb.f.nextMain
|
i.next = i.rb.f.nextMain
|
||||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
i.rb.ss.first(i.info)
|
|
||||||
return abs, nil
|
return abs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -164,7 +161,6 @@ func nextHangul(i *Iter) []byte {
|
||||||
if next >= i.rb.nsrc {
|
if next >= i.rb.nsrc {
|
||||||
i.setDone()
|
i.setDone()
|
||||||
} else if i.rb.src.hangul(next) == 0 {
|
} else if i.rb.src.hangul(next) == 0 {
|
||||||
i.rb.ss.next(i.info)
|
|
||||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
i.next = i.rb.f.nextMain
|
i.next = i.rb.f.nextMain
|
||||||
return i.next(i)
|
return i.next(i)
|
||||||
|
@ -208,10 +204,12 @@ func nextMultiNorm(i *Iter) []byte {
|
||||||
if info.BoundaryBefore() {
|
if info.BoundaryBefore() {
|
||||||
i.rb.compose()
|
i.rb.compose()
|
||||||
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
|
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
|
||||||
|
i.rb.ss.first(info)
|
||||||
i.rb.insertUnsafe(input{bytes: d}, j, info)
|
i.rb.insertUnsafe(input{bytes: d}, j, info)
|
||||||
i.multiSeg = d[j+int(info.size):]
|
i.multiSeg = d[j+int(info.size):]
|
||||||
return seg
|
return seg
|
||||||
}
|
}
|
||||||
|
i.rb.ss.next(info)
|
||||||
i.rb.insertUnsafe(input{bytes: d}, j, info)
|
i.rb.insertUnsafe(input{bytes: d}, j, info)
|
||||||
j += int(info.size)
|
j += int(info.size)
|
||||||
}
|
}
|
||||||
|
@ -224,9 +222,9 @@ func nextMultiNorm(i *Iter) []byte {
|
||||||
func nextDecomposed(i *Iter) (next []byte) {
|
func nextDecomposed(i *Iter) (next []byte) {
|
||||||
outp := 0
|
outp := 0
|
||||||
inCopyStart, outCopyStart := i.p, 0
|
inCopyStart, outCopyStart := i.p, 0
|
||||||
|
ss := mkStreamSafe(i.info)
|
||||||
for {
|
for {
|
||||||
if sz := int(i.info.size); sz <= 1 {
|
if sz := int(i.info.size); sz <= 1 {
|
||||||
i.rb.ss = 0
|
|
||||||
p := i.p
|
p := i.p
|
||||||
i.p++ // ASCII or illegal byte. Either way, advance by 1.
|
i.p++ // ASCII or illegal byte. Either way, advance by 1.
|
||||||
if i.p >= i.rb.nsrc {
|
if i.p >= i.rb.nsrc {
|
||||||
|
@ -245,8 +243,6 @@ func nextDecomposed(i *Iter) (next []byte) {
|
||||||
p := outp + len(d)
|
p := outp + len(d)
|
||||||
if outp > 0 {
|
if outp > 0 {
|
||||||
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
|
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
|
||||||
// TODO: this condition should not be possible, but we leave it
|
|
||||||
// in for defensive purposes.
|
|
||||||
if p > len(i.buf) {
|
if p > len(i.buf) {
|
||||||
return i.buf[:outp]
|
return i.buf[:outp]
|
||||||
}
|
}
|
||||||
|
@ -270,7 +266,7 @@ func nextDecomposed(i *Iter) (next []byte) {
|
||||||
} else {
|
} else {
|
||||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
}
|
}
|
||||||
switch i.rb.ss.next(i.info) {
|
switch ss.next(i.info) {
|
||||||
case ssOverflow:
|
case ssOverflow:
|
||||||
i.next = nextCGJDecompose
|
i.next = nextCGJDecompose
|
||||||
fallthrough
|
fallthrough
|
||||||
|
@ -313,7 +309,7 @@ func nextDecomposed(i *Iter) (next []byte) {
|
||||||
}
|
}
|
||||||
prevCC := i.info.tccc
|
prevCC := i.info.tccc
|
||||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
if v := i.rb.ss.next(i.info); v == ssStarter {
|
if v := ss.next(i.info); v == ssStarter {
|
||||||
break
|
break
|
||||||
} else if v == ssOverflow {
|
} else if v == ssOverflow {
|
||||||
i.next = nextCGJDecompose
|
i.next = nextCGJDecompose
|
||||||
|
@ -339,6 +335,10 @@ doNorm:
|
||||||
|
|
||||||
func doNormDecomposed(i *Iter) []byte {
|
func doNormDecomposed(i *Iter) []byte {
|
||||||
for {
|
for {
|
||||||
|
if s := i.rb.ss.next(i.info); s == ssOverflow {
|
||||||
|
i.next = nextCGJDecompose
|
||||||
|
break
|
||||||
|
}
|
||||||
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
|
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
|
||||||
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
|
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
|
||||||
i.setDone()
|
i.setDone()
|
||||||
|
@ -348,10 +348,6 @@ func doNormDecomposed(i *Iter) []byte {
|
||||||
if i.info.ccc == 0 {
|
if i.info.ccc == 0 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if s := i.rb.ss.next(i.info); s == ssOverflow {
|
|
||||||
i.next = nextCGJDecompose
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
// new segment or too many combining characters: exit normalization
|
// new segment or too many combining characters: exit normalization
|
||||||
return i.buf[:i.rb.flushCopy(i.buf[:])]
|
return i.buf[:i.rb.flushCopy(i.buf[:])]
|
||||||
|
@ -361,7 +357,6 @@ func nextCGJDecompose(i *Iter) []byte {
|
||||||
i.rb.ss = 0
|
i.rb.ss = 0
|
||||||
i.rb.insertCGJ()
|
i.rb.insertCGJ()
|
||||||
i.next = nextDecomposed
|
i.next = nextDecomposed
|
||||||
i.rb.ss.first(i.info)
|
|
||||||
buf := doNormDecomposed(i)
|
buf := doNormDecomposed(i)
|
||||||
return buf
|
return buf
|
||||||
}
|
}
|
||||||
|
@ -370,6 +365,7 @@ func nextCGJDecompose(i *Iter) []byte {
|
||||||
func nextComposed(i *Iter) []byte {
|
func nextComposed(i *Iter) []byte {
|
||||||
outp, startp := 0, i.p
|
outp, startp := 0, i.p
|
||||||
var prevCC uint8
|
var prevCC uint8
|
||||||
|
ss := mkStreamSafe(i.info)
|
||||||
for {
|
for {
|
||||||
if !i.info.isYesC() {
|
if !i.info.isYesC() {
|
||||||
goto doNorm
|
goto doNorm
|
||||||
|
@ -389,12 +385,11 @@ func nextComposed(i *Iter) []byte {
|
||||||
i.setDone()
|
i.setDone()
|
||||||
break
|
break
|
||||||
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
|
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
|
||||||
i.rb.ss = 0
|
|
||||||
i.next = i.asciiF
|
i.next = i.asciiF
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
if v := i.rb.ss.next(i.info); v == ssStarter {
|
if v := ss.next(i.info); v == ssStarter {
|
||||||
break
|
break
|
||||||
} else if v == ssOverflow {
|
} else if v == ssOverflow {
|
||||||
i.next = nextCGJCompose
|
i.next = nextCGJCompose
|
||||||
|
@ -406,10 +401,8 @@ func nextComposed(i *Iter) []byte {
|
||||||
}
|
}
|
||||||
return i.returnSlice(startp, i.p)
|
return i.returnSlice(startp, i.p)
|
||||||
doNorm:
|
doNorm:
|
||||||
// reset to start position
|
|
||||||
i.p = startp
|
i.p = startp
|
||||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
i.rb.ss.first(i.info)
|
|
||||||
if i.info.multiSegment() {
|
if i.info.multiSegment() {
|
||||||
d := i.info.Decomposition()
|
d := i.info.Decomposition()
|
||||||
info := i.rb.f.info(input{bytes: d}, 0)
|
info := i.rb.f.info(input{bytes: d}, 0)
|
||||||
|
|
|
@ -35,9 +35,12 @@ func main() {
|
||||||
computeNonStarterCounts()
|
computeNonStarterCounts()
|
||||||
verifyComputed()
|
verifyComputed()
|
||||||
printChars()
|
printChars()
|
||||||
|
if *test {
|
||||||
testDerived()
|
testDerived()
|
||||||
printTestdata()
|
printTestdata()
|
||||||
|
} else {
|
||||||
makeTables()
|
makeTables()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -599,7 +602,6 @@ func printCharInfoTables(w io.Writer) int {
|
||||||
}
|
}
|
||||||
index := normalDecomp
|
index := normalDecomp
|
||||||
nTrail := chars[r].nTrailingNonStarters
|
nTrail := chars[r].nTrailingNonStarters
|
||||||
nLead := chars[r].nLeadingNonStarters
|
|
||||||
if tccc > 0 || lccc > 0 || nTrail > 0 {
|
if tccc > 0 || lccc > 0 || nTrail > 0 {
|
||||||
tccc <<= 2
|
tccc <<= 2
|
||||||
tccc |= nTrail
|
tccc |= nTrail
|
||||||
|
@ -610,7 +612,7 @@ func printCharInfoTables(w io.Writer) int {
|
||||||
index = firstCCC
|
index = firstCCC
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if lccc > 0 || nLead > 0 {
|
if lccc > 0 {
|
||||||
s += string([]byte{lccc})
|
s += string([]byte{lccc})
|
||||||
if index == firstCCC {
|
if index == firstCCC {
|
||||||
log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
|
log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
|
||||||
|
|
|
@ -2,18 +2,13 @@
|
||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
// Note: the file data_test.go that is generated should not be checked in.
|
|
||||||
//go:generate go run maketables.go triegen.go
|
//go:generate go run maketables.go triegen.go
|
||||||
//go:generate go test -tags test
|
//go:generate go run maketables.go triegen.go -test
|
||||||
|
|
||||||
// Package norm contains types and functions for normalizing Unicode strings.
|
// Package norm contains types and functions for normalizing Unicode strings.
|
||||||
package norm
|
package norm
|
||||||
|
|
||||||
import (
|
import "unicode/utf8"
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"golang.org/x/text/transform"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Form denotes a canonical representation of Unicode code points.
|
// A Form denotes a canonical representation of Unicode code points.
|
||||||
// The Unicode-defined normalization and equivalence forms are:
|
// The Unicode-defined normalization and equivalence forms are:
|
||||||
|
@ -268,34 +263,6 @@ func (f Form) QuickSpan(b []byte) int {
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
// Span implements transform.SpanningTransformer. It returns a boundary n such
|
|
||||||
// that b[0:n] == f(b[0:n]). It is not guaranteed to return the largest such n.
|
|
||||||
func (f Form) Span(b []byte, atEOF bool) (n int, err error) {
|
|
||||||
n, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), atEOF)
|
|
||||||
if n < len(b) {
|
|
||||||
if !ok {
|
|
||||||
err = transform.ErrEndOfSpan
|
|
||||||
} else {
|
|
||||||
err = transform.ErrShortSrc
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// SpanString returns a boundary n such that s[0:n] == f(s[0:n]).
|
|
||||||
// It is not guaranteed to return the largest such n.
|
|
||||||
func (f Form) SpanString(s string, atEOF bool) (n int, err error) {
|
|
||||||
n, ok := formTable[f].quickSpan(inputString(s), 0, len(s), atEOF)
|
|
||||||
if n < len(s) {
|
|
||||||
if !ok {
|
|
||||||
err = transform.ErrEndOfSpan
|
|
||||||
} else {
|
|
||||||
err = transform.ErrShortSrc
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and
|
// quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and
|
||||||
// whether any non-normalized parts were found. If atEOF is false, n will
|
// whether any non-normalized parts were found. If atEOF is false, n will
|
||||||
// not point past the last segment if this segment might be become
|
// not point past the last segment if this segment might be become
|
||||||
|
@ -324,6 +291,7 @@ func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool)
|
||||||
// have an overflow for runes that are starters (e.g. with U+FF9E).
|
// have an overflow for runes that are starters (e.g. with U+FF9E).
|
||||||
switch ss.next(info) {
|
switch ss.next(info) {
|
||||||
case ssStarter:
|
case ssStarter:
|
||||||
|
ss.first(info)
|
||||||
lastSegStart = i
|
lastSegStart = i
|
||||||
case ssOverflow:
|
case ssOverflow:
|
||||||
return lastSegStart, false
|
return lastSegStart, false
|
||||||
|
@ -353,7 +321,7 @@ func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool)
|
||||||
return lastSegStart, false
|
return lastSegStart, false
|
||||||
}
|
}
|
||||||
|
|
||||||
// QuickSpanString returns a boundary n such that s[0:n] == f(s[0:n]).
|
// QuickSpanString returns a boundary n such that b[0:n] == f(s[0:n]).
|
||||||
// It is not guaranteed to return the largest such n.
|
// It is not guaranteed to return the largest such n.
|
||||||
func (f Form) QuickSpanString(s string) int {
|
func (f Form) QuickSpanString(s string) int {
|
||||||
n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true)
|
n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true)
|
||||||
|
@ -440,8 +408,6 @@ func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int {
|
||||||
}
|
}
|
||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
// TODO: Using streamSafe to determine the boundary isn't the same as
|
|
||||||
// using BoundaryBefore. Determine which should be used.
|
|
||||||
if s := ss.next(info); s != ssSuccess {
|
if s := ss.next(info); s != ssSuccess {
|
||||||
return i
|
return i
|
||||||
}
|
}
|
||||||
|
@ -506,15 +472,16 @@ func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int {
|
||||||
if info.size == 0 {
|
if info.size == 0 {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
if s := rb.ss.next(info); s == ssStarter {
|
|
||||||
// TODO: this could be removed if we don't support merging.
|
|
||||||
if rb.nrune > 0 {
|
if rb.nrune > 0 {
|
||||||
|
if s := rb.ss.next(info); s == ssStarter {
|
||||||
goto end
|
goto end
|
||||||
}
|
|
||||||
} else if s == ssOverflow {
|
} else if s == ssOverflow {
|
||||||
rb.insertCGJ()
|
rb.insertCGJ()
|
||||||
goto end
|
goto end
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
rb.ss.first(info)
|
||||||
|
}
|
||||||
if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
|
if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
|
||||||
return int(err)
|
return int(err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -112,6 +112,7 @@ func (r *normReader) Read(p []byte) (int, error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
panic("should not reach here")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reader returns a new reader that implements Read
|
// Reader returns a new reader that implements Read
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,4 +1,4 @@
|
||||||
// Code generated by "stringer -type=Kind"; DO NOT EDIT.
|
// Code generated by "stringer -type=Kind"; DO NOT EDIT
|
||||||
|
|
||||||
package width
|
package width
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
// This file was generated by go generate; DO NOT EDIT
|
||||||
|
|
||||||
package width
|
package width
|
||||||
|
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue