Bumped gRPC version to 1.3.0

pull/6/head
Renaud Gaubert 2017-08-23 02:10:53 -07:00
parent b0ad3a1c5d
commit 1daaeb352f
152 changed files with 19742 additions and 6311 deletions
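
Besides moving google.golang.org/grpc from v1.0.4 to v1.3.0, this change vendors several gRPC subpackages that did not exist in the old tree (grpc/keepalive, grpc/status, grpc/stats, grpc/tap, grpc/grpclb/grpc_lb_v1, and google.golang.org/genproto/googleapis/rpc/status), along with newer revisions of golang.org/x/net and golang.org/x/text, as the Godeps.json hunks below show. As background only, here is a minimal Go sketch of how the newly vendored keepalive and status packages are typically used on the client side; the address, timeout values, and error handling are illustrative assumptions and are not part of this commit.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/status"
)

func main() {
	// keepalive.ClientParameters comes from one of the subpackages newly
	// vendored by this bump; the values below are illustrative only.
	kp := keepalive.ClientParameters{
		Time:    30 * time.Second, // ping an idle connection after 30s
		Timeout: 10 * time.Second, // close it if the ping is not acked within 10s
	}

	// "localhost:50051" is a placeholder address, not anything from this repo.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(kp),
	)
	if err != nil {
		// The status package (also newly vendored) decodes structured RPC errors.
		if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
			log.Fatalf("server unavailable: %s", s.Message())
		}
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
	log.Println("connected")
}
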

Godeps/Godeps.json generated

@@ -430,32 +430,32 @@
}, },
{ {
"ImportPath": "github.com/containernetworking/cni/libcni", "ImportPath": "github.com/containernetworking/cni/libcni",
"Comment": "spec-v0.3.1", "Comment": "v0.5.2",
"Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e"
}, },
{ {
"ImportPath": "github.com/containernetworking/cni/pkg/invoke", "ImportPath": "github.com/containernetworking/cni/pkg/invoke",
"Comment": "spec-v0.3.1", "Comment": "v0.5.2",
"Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e"
}, },
{ {
"ImportPath": "github.com/containernetworking/cni/pkg/types", "ImportPath": "github.com/containernetworking/cni/pkg/types",
"Comment": "spec-v0.3.1", "Comment": "v0.5.2",
"Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e"
}, },
{ {
"ImportPath": "github.com/containernetworking/cni/pkg/types/020", "ImportPath": "github.com/containernetworking/cni/pkg/types/020",
"Comment": "spec-v0.3.1", "Comment": "v0.5.2",
"Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e"
}, },
{ {
"ImportPath": "github.com/containernetworking/cni/pkg/types/current", "ImportPath": "github.com/containernetworking/cni/pkg/types/current",
"Comment": "spec-v0.3.1", "Comment": "v0.5.2",
"Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e"
}, },
{ {
"ImportPath": "github.com/containernetworking/cni/pkg/version", "ImportPath": "github.com/containernetworking/cni/pkg/version",
"Comment": "spec-v0.3.1", "Comment": "v0.5.2",
"Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e"
}, },
{ {
@@ -838,12 +838,12 @@
}, },
{ {
"ImportPath": "github.com/docker/distribution/digest", "ImportPath": "github.com/docker/distribution/digest",
"Comment": "v2.4.0-rc.1-38-gcd27f179", "Comment": "v2.4.0-rc.1-38-gcd27f17",
"Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51" "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
}, },
{ {
"ImportPath": "github.com/docker/distribution/reference", "ImportPath": "github.com/docker/distribution/reference",
"Comment": "v2.4.0-rc.1-38-gcd27f179", "Comment": "v2.4.0-rc.1-38-gcd27f17",
"Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51" "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
}, },
{ {
@@ -1171,127 +1171,127 @@
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/gogoproto", "ImportPath": "github.com/gogo/protobuf/gogoproto",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/compare", "ImportPath": "github.com/gogo/protobuf/plugin/compare",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck", "ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/description", "ImportPath": "github.com/gogo/protobuf/plugin/description",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/embedcheck", "ImportPath": "github.com/gogo/protobuf/plugin/embedcheck",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/enumstringer", "ImportPath": "github.com/gogo/protobuf/plugin/enumstringer",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/equal", "ImportPath": "github.com/gogo/protobuf/plugin/equal",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/face", "ImportPath": "github.com/gogo/protobuf/plugin/face",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/gostring", "ImportPath": "github.com/gogo/protobuf/plugin/gostring",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/marshalto", "ImportPath": "github.com/gogo/protobuf/plugin/marshalto",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck", "ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/populate", "ImportPath": "github.com/gogo/protobuf/plugin/populate",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/size", "ImportPath": "github.com/gogo/protobuf/plugin/size",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/stringer", "ImportPath": "github.com/gogo/protobuf/plugin/stringer",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/testgen", "ImportPath": "github.com/gogo/protobuf/plugin/testgen",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/union", "ImportPath": "github.com/gogo/protobuf/plugin/union",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/unmarshal", "ImportPath": "github.com/gogo/protobuf/plugin/unmarshal",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/proto", "ImportPath": "github.com/gogo/protobuf/proto",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator", "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/grpc", "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/grpc",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin", "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/sortkeys", "ImportPath": "github.com/gogo/protobuf/sortkeys",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/vanity", "ImportPath": "github.com/gogo/protobuf/vanity",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/vanity/command", "ImportPath": "github.com/gogo/protobuf/vanity/command",
"Comment": "v0.4-3-gc0656edd", "Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
}, },
{ {
@@ -2152,82 +2152,82 @@
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer", "ImportPath": "github.com/opencontainers/runc/libcontainer",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor", "ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups", "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs", "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd", "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/configs", "ImportPath": "github.com/opencontainers/runc/libcontainer/configs",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate", "ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc", "ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/keys", "ImportPath": "github.com/opencontainers/runc/libcontainer/keys",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/label", "ImportPath": "github.com/opencontainers/runc/libcontainer/label",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp", "ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/selinux", "ImportPath": "github.com/opencontainers/runc/libcontainer/selinux",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace", "ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/system", "ImportPath": "github.com/opencontainers/runc/libcontainer/system",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/user", "ImportPath": "github.com/opencontainers/runc/libcontainer/user",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/utils", "ImportPath": "github.com/opencontainers/runc/libcontainer/utils",
"Comment": "v1.0.0-rc2-49-gd223e2ad", "Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089" "Rev": "d223e2adae83f62d58448a799a5da05730228089"
}, },
{ {
@@ -2298,107 +2298,107 @@
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud", "ImportPath": "github.com/rackspace/gophercloud",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/openstack", "ImportPath": "github.com/rackspace/gophercloud/openstack",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes", "ImportPath": "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume", "ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig", "ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach", "ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/flavors", "ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/flavors",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/images", "ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/images",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/servers", "ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/servers",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tenants", "ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tenants",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tokens", "ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tokens",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v3/tokens", "ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v3/tokens",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/openstack/utils", "ImportPath": "github.com/rackspace/gophercloud/openstack/utils",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/pagination", "ImportPath": "github.com/rackspace/gophercloud/pagination",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/rackspace", "ImportPath": "github.com/rackspace/gophercloud/rackspace",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes", "ImportPath": "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/rackspace/compute/v2/servers", "ImportPath": "github.com/rackspace/gophercloud/rackspace/compute/v2/servers",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach", "ImportPath": "github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/rackspace/identity/v2/tokens", "ImportPath": "github.com/rackspace/gophercloud/rackspace/identity/v2/tokens",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/testhelper", "ImportPath": "github.com/rackspace/gophercloud/testhelper",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud/testhelper/client", "ImportPath": "github.com/rackspace/gophercloud/testhelper/client",
"Comment": "v1.0.0-1012-ge00690e8", "Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
}, },
{ {
@@ -2715,51 +2715,51 @@
}, },
{ {
"ImportPath": "golang.org/x/net/context", "ImportPath": "golang.org/x/net/context",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/context/ctxhttp", "ImportPath": "golang.org/x/net/context/ctxhttp",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/html", "ImportPath": "golang.org/x/net/html",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/html/atom", "ImportPath": "golang.org/x/net/html/atom",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/internal/timeseries", "ImportPath": "golang.org/x/net/internal/timeseries",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/proxy", "ImportPath": "golang.org/x/net/proxy",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/trace", "ImportPath": "golang.org/x/net/trace",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/websocket", "ImportPath": "golang.org/x/net/websocket",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/oauth2", "ImportPath": "golang.org/x/oauth2",
@@ -2791,63 +2791,67 @@
}, },
{ {
"ImportPath": "golang.org/x/text/cases", "ImportPath": "golang.org/x/text/cases",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/encoding", "ImportPath": "golang.org/x/text/encoding",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/encoding/internal", "ImportPath": "golang.org/x/text/encoding/internal",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/encoding/internal/identifier", "ImportPath": "golang.org/x/text/encoding/internal/identifier",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/encoding/unicode", "ImportPath": "golang.org/x/text/encoding/unicode",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/internal",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/internal/tag", "ImportPath": "golang.org/x/text/internal/tag",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/internal/utf8internal", "ImportPath": "golang.org/x/text/internal/utf8internal",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/language", "ImportPath": "golang.org/x/text/language",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/runes", "ImportPath": "golang.org/x/text/runes",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/bidirule", "ImportPath": "golang.org/x/text/secure/bidirule",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/precis", "ImportPath": "golang.org/x/text/secure/precis",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/transform", "ImportPath": "golang.org/x/text/transform",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/bidi", "ImportPath": "golang.org/x/text/unicode/bidi",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/norm", "ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/width", "ImportPath": "golang.org/x/text/width",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/time/rate", "ImportPath": "golang.org/x/time/rate",
@@ -2909,50 +2913,79 @@
"ImportPath": "google.golang.org/api/pubsub/v1", "ImportPath": "google.golang.org/api/pubsub/v1",
"Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24" "Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24"
}, },
{
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
},
{ {
"ImportPath": "google.golang.org/grpc", "ImportPath": "google.golang.org/grpc",
"Comment": "v1.0.4", "Comment": "v1.3.0",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/codes", "ImportPath": "google.golang.org/grpc/codes",
"Comment": "v1.0.4", "Comment": "v1.3.0",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/credentials", "ImportPath": "google.golang.org/grpc/credentials",
"Comment": "v1.0.4", "Comment": "v1.3.0",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
"Comment": "v1.3.0",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/grpclog", "ImportPath": "google.golang.org/grpc/grpclog",
"Comment": "v1.0.4", "Comment": "v1.3.0",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/internal", "ImportPath": "google.golang.org/grpc/internal",
"Comment": "v1.0.4", "Comment": "v1.3.0",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/keepalive",
"Comment": "v1.3.0",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/metadata", "ImportPath": "google.golang.org/grpc/metadata",
"Comment": "v1.0.4", "Comment": "v1.3.0",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/naming", "ImportPath": "google.golang.org/grpc/naming",
"Comment": "v1.0.4", "Comment": "v1.3.0",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/peer", "ImportPath": "google.golang.org/grpc/peer",
"Comment": "v1.0.4", "Comment": "v1.3.0",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/stats",
"Comment": "v1.3.0",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/status",
"Comment": "v1.3.0",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/tap",
"Comment": "v1.3.0",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/transport", "ImportPath": "google.golang.org/grpc/transport",
"Comment": "v1.0.4", "Comment": "v1.3.0",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "gopkg.in/gcfg.v1", "ImportPath": "gopkg.in/gcfg.v1",

Godeps/LICENSES generated

@@ -86430,6 +86430,41 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================ ================================================================================
================================================================================
= vendor/golang.org/x/text/internal licensed under: =
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/golang.org/x/text/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707 -
================================================================================
================================================================================ ================================================================================
= vendor/golang.org/x/text/internal/tag licensed under: = = vendor/golang.org/x/text/internal/tag licensed under: =
@@ -87305,6 +87340,216 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================ ================================================================================
================================================================================
= vendor/google.golang.org/genproto/googleapis/rpc/status licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/google.golang.org/genproto/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -
================================================================================
================================================================================ ================================================================================
= vendor/google.golang.org/grpc licensed under: = = vendor/google.golang.org/grpc licensed under: =
@@ -87413,6 +87658,42 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================ ================================================================================
================================================================================
= vendor/google.golang.org/grpc/grpclb/grpc_lb_v1 licensed under: =
Copyright 2014, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 -
================================================================================
================================================================================ ================================================================================
= vendor/google.golang.org/grpc/grpclog licensed under: = = vendor/google.golang.org/grpc/grpclog licensed under: =
@@ -87485,6 +87766,42 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================ ================================================================================
================================================================================
= vendor/google.golang.org/grpc/keepalive licensed under: =
Copyright 2014, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 -
================================================================================
================================================================================ ================================================================================
= vendor/google.golang.org/grpc/metadata licensed under: = = vendor/google.golang.org/grpc/metadata licensed under: =
@@ -87593,6 +87910,114 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================ ================================================================================
================================================================================
= vendor/google.golang.org/grpc/stats licensed under: =
Copyright 2014, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 -
================================================================================
================================================================================
= vendor/google.golang.org/grpc/status licensed under: =
Copyright 2014, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 -
================================================================================
================================================================================
= vendor/google.golang.org/grpc/tap licensed under: =
Copyright 2014, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 -
================================================================================
================================================================================ ================================================================================
= vendor/google.golang.org/grpc/transport licensed under: = = vendor/google.golang.org/grpc/transport licensed under: =


@@ -69,7 +69,384 @@ filegroup(
"//staging:all-srcs", "//staging:all-srcs",
"//test:all-srcs", "//test:all-srcs",
"//third_party:all-srcs", "//third_party:all-srcs",
"//vendor:all-srcs", "//vendor/bitbucket.org/bertimus9/systemstat:all-srcs",
"//vendor/bitbucket.org/ww/goautoneg:all-srcs",
"//vendor/cloud.google.com/go/compute/metadata:all-srcs",
"//vendor/cloud.google.com/go/internal:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/network:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/storage:all-srcs",
"//vendor/github.com/Azure/go-ansiterm:all-srcs",
"//vendor/github.com/Azure/go-autorest/autorest:all-srcs",
"//vendor/github.com/MakeNowJust/heredoc:all-srcs",
"//vendor/github.com/Microsoft/go-winio:all-srcs",
"//vendor/github.com/NYTimes/gziphandler:all-srcs",
"//vendor/github.com/PuerkitoBio/purell:all-srcs",
"//vendor/github.com/PuerkitoBio/urlesc:all-srcs",
"//vendor/github.com/Sirupsen/logrus:all-srcs",
"//vendor/github.com/abbot/go-http-auth:all-srcs",
"//vendor/github.com/appc/spec/schema:all-srcs",
"//vendor/github.com/armon/circbuf:all-srcs",
"//vendor/github.com/asaskevich/govalidator:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/aws:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/private/protocol:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/private/waiter:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/autoscaling:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/ecr:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/elb:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/route53:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/sts:all-srcs",
"//vendor/github.com/beorn7/perks/quantile:all-srcs",
"//vendor/github.com/blang/semver:all-srcs",
"//vendor/github.com/boltdb/bolt:all-srcs",
"//vendor/github.com/chai2010/gettext-go/gettext:all-srcs",
"//vendor/github.com/cloudflare/cfssl/auth:all-srcs",
"//vendor/github.com/cloudflare/cfssl/certdb:all-srcs",
"//vendor/github.com/cloudflare/cfssl/config:all-srcs",
"//vendor/github.com/cloudflare/cfssl/crypto/pkcs7:all-srcs",
"//vendor/github.com/cloudflare/cfssl/csr:all-srcs",
"//vendor/github.com/cloudflare/cfssl/errors:all-srcs",
"//vendor/github.com/cloudflare/cfssl/helpers:all-srcs",
"//vendor/github.com/cloudflare/cfssl/info:all-srcs",
"//vendor/github.com/cloudflare/cfssl/log:all-srcs",
"//vendor/github.com/cloudflare/cfssl/ocsp/config:all-srcs",
"//vendor/github.com/cloudflare/cfssl/signer:all-srcs",
"//vendor/github.com/clusterhq/flocker-go:all-srcs",
"//vendor/github.com/codedellemc/goscaleio:all-srcs",
"//vendor/github.com/codegangsta/negroni:all-srcs",
"//vendor/github.com/containernetworking/cni/libcni:all-srcs",
"//vendor/github.com/containernetworking/cni/pkg/invoke:all-srcs",
"//vendor/github.com/containernetworking/cni/pkg/types:all-srcs",
"//vendor/github.com/containernetworking/cni/pkg/version:all-srcs",
"//vendor/github.com/coreos/etcd/alarm:all-srcs",
"//vendor/github.com/coreos/etcd/auth:all-srcs",
"//vendor/github.com/coreos/etcd/client:all-srcs",
"//vendor/github.com/coreos/etcd/clientv3:all-srcs",
"//vendor/github.com/coreos/etcd/compactor:all-srcs",
"//vendor/github.com/coreos/etcd/discovery:all-srcs",
"//vendor/github.com/coreos/etcd/error:all-srcs",
"//vendor/github.com/coreos/etcd/etcdserver:all-srcs",
"//vendor/github.com/coreos/etcd/integration:all-srcs",
"//vendor/github.com/coreos/etcd/lease:all-srcs",
"//vendor/github.com/coreos/etcd/mvcc:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/adt:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/contention:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/cpuutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/crc:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/fileutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/httputil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/idutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/ioutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/logutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/monotime:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/netutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/pathutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/pbutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/runtime:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/schedule:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/testutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/tlsutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/transport:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/types:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/wait:all-srcs",
"//vendor/github.com/coreos/etcd/proxy/grpcproxy:all-srcs",
"//vendor/github.com/coreos/etcd/raft:all-srcs",
"//vendor/github.com/coreos/etcd/rafthttp:all-srcs",
"//vendor/github.com/coreos/etcd/snap:all-srcs",
"//vendor/github.com/coreos/etcd/store:all-srcs",
"//vendor/github.com/coreos/etcd/version:all-srcs",
"//vendor/github.com/coreos/etcd/wal:all-srcs",
"//vendor/github.com/coreos/go-oidc/http:all-srcs",
"//vendor/github.com/coreos/go-oidc/jose:all-srcs",
"//vendor/github.com/coreos/go-oidc/key:all-srcs",
"//vendor/github.com/coreos/go-oidc/oauth2:all-srcs",
"//vendor/github.com/coreos/go-oidc/oidc:all-srcs",
"//vendor/github.com/coreos/go-semver/semver:all-srcs",
"//vendor/github.com/coreos/go-systemd/daemon:all-srcs",
"//vendor/github.com/coreos/go-systemd/dbus:all-srcs",
"//vendor/github.com/coreos/go-systemd/journal:all-srcs",
"//vendor/github.com/coreos/go-systemd/unit:all-srcs",
"//vendor/github.com/coreos/go-systemd/util:all-srcs",
"//vendor/github.com/coreos/pkg/capnslog:all-srcs",
"//vendor/github.com/coreos/pkg/dlopen:all-srcs",
"//vendor/github.com/coreos/pkg/health:all-srcs",
"//vendor/github.com/coreos/pkg/httputil:all-srcs",
"//vendor/github.com/coreos/pkg/timeutil:all-srcs",
"//vendor/github.com/coreos/rkt/api/v1alpha:all-srcs",
"//vendor/github.com/cpuguy83/go-md2man/md2man:all-srcs",
"//vendor/github.com/davecgh/go-spew/spew:all-srcs",
"//vendor/github.com/daviddengcn/go-colortext:all-srcs",
"//vendor/github.com/dgrijalva/jwt-go:all-srcs",
"//vendor/github.com/docker/distribution/digest:all-srcs",
"//vendor/github.com/docker/distribution/reference:all-srcs",
"//vendor/github.com/docker/docker/api/types:all-srcs",
"//vendor/github.com/docker/docker/client:all-srcs",
"//vendor/github.com/docker/docker/pkg/jsonlog:all-srcs",
"//vendor/github.com/docker/docker/pkg/jsonmessage:all-srcs",
"//vendor/github.com/docker/docker/pkg/longpath:all-srcs",
"//vendor/github.com/docker/docker/pkg/mount:all-srcs",
"//vendor/github.com/docker/docker/pkg/stdcopy:all-srcs",
"//vendor/github.com/docker/docker/pkg/symlink:all-srcs",
"//vendor/github.com/docker/docker/pkg/system:all-srcs",
"//vendor/github.com/docker/docker/pkg/term:all-srcs",
"//vendor/github.com/docker/docker/pkg/tlsconfig:all-srcs",
"//vendor/github.com/docker/engine-api/client:all-srcs",
"//vendor/github.com/docker/engine-api/types:all-srcs",
"//vendor/github.com/docker/go-connections/nat:all-srcs",
"//vendor/github.com/docker/go-connections/sockets:all-srcs",
"//vendor/github.com/docker/go-connections/tlsconfig:all-srcs",
"//vendor/github.com/docker/go-units:all-srcs",
"//vendor/github.com/docker/spdystream:all-srcs",
"//vendor/github.com/elazarl/go-bindata-assetfs:all-srcs",
"//vendor/github.com/elazarl/goproxy:all-srcs",
"//vendor/github.com/emicklei/go-restful:all-srcs",
"//vendor/github.com/emicklei/go-restful-swagger12:all-srcs",
"//vendor/github.com/evanphx/json-patch:all-srcs",
"//vendor/github.com/exponent-io/jsonpath:all-srcs",
"//vendor/github.com/fatih/camelcase:all-srcs",
"//vendor/github.com/fsnotify/fsnotify:all-srcs",
"//vendor/github.com/garyburd/redigo/internal:all-srcs",
"//vendor/github.com/garyburd/redigo/redis:all-srcs",
"//vendor/github.com/ghodss/yaml:all-srcs",
"//vendor/github.com/go-ini/ini:all-srcs",
"//vendor/github.com/go-openapi/analysis:all-srcs",
"//vendor/github.com/go-openapi/errors:all-srcs",
"//vendor/github.com/go-openapi/jsonpointer:all-srcs",
"//vendor/github.com/go-openapi/jsonreference:all-srcs",
"//vendor/github.com/go-openapi/loads:all-srcs",
"//vendor/github.com/go-openapi/runtime:all-srcs",
"//vendor/github.com/go-openapi/spec:all-srcs",
"//vendor/github.com/go-openapi/strfmt:all-srcs",
"//vendor/github.com/go-openapi/swag:all-srcs",
"//vendor/github.com/go-openapi/validate:all-srcs",
"//vendor/github.com/godbus/dbus:all-srcs",
"//vendor/github.com/gogo/protobuf/gogoproto:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/compare:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/defaultcheck:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/description:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/embedcheck:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/enumstringer:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/equal:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/face:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/gostring:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/marshalto:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/oneofcheck:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/populate:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/size:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/stringer:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/testgen:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/union:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/unmarshal:all-srcs",
"//vendor/github.com/gogo/protobuf/proto:all-srcs",
"//vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor:all-srcs",
"//vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator:all-srcs",
"//vendor/github.com/gogo/protobuf/protoc-gen-gogo/grpc:all-srcs",
"//vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin:all-srcs",
"//vendor/github.com/gogo/protobuf/sortkeys:all-srcs",
"//vendor/github.com/gogo/protobuf/vanity:all-srcs",
"//vendor/github.com/golang/glog:all-srcs",
"//vendor/github.com/golang/groupcache/lru:all-srcs",
"//vendor/github.com/golang/mock/gomock:all-srcs",
"//vendor/github.com/golang/protobuf/jsonpb:all-srcs",
"//vendor/github.com/golang/protobuf/proto:all-srcs",
"//vendor/github.com/golang/protobuf/ptypes:all-srcs",
"//vendor/github.com/google/btree:all-srcs",
"//vendor/github.com/google/cadvisor/api:all-srcs",
"//vendor/github.com/google/cadvisor/cache/memory:all-srcs",
"//vendor/github.com/google/cadvisor/client/v2:all-srcs",
"//vendor/github.com/google/cadvisor/collector:all-srcs",
"//vendor/github.com/google/cadvisor/container:all-srcs",
"//vendor/github.com/google/cadvisor/devicemapper:all-srcs",
"//vendor/github.com/google/cadvisor/events:all-srcs",
"//vendor/github.com/google/cadvisor/fs:all-srcs",
"//vendor/github.com/google/cadvisor/healthz:all-srcs",
"//vendor/github.com/google/cadvisor/http:all-srcs",
"//vendor/github.com/google/cadvisor/info/v1:all-srcs",
"//vendor/github.com/google/cadvisor/info/v2:all-srcs",
"//vendor/github.com/google/cadvisor/machine:all-srcs",
"//vendor/github.com/google/cadvisor/manager:all-srcs",
"//vendor/github.com/google/cadvisor/metrics:all-srcs",
"//vendor/github.com/google/cadvisor/pages:all-srcs",
"//vendor/github.com/google/cadvisor/storage:all-srcs",
"//vendor/github.com/google/cadvisor/summary:all-srcs",
"//vendor/github.com/google/cadvisor/utils:all-srcs",
"//vendor/github.com/google/cadvisor/validate:all-srcs",
"//vendor/github.com/google/cadvisor/version:all-srcs",
"//vendor/github.com/google/cadvisor/zfs:all-srcs",
"//vendor/github.com/google/certificate-transparency/go:all-srcs",
"//vendor/github.com/google/gofuzz:all-srcs",
"//vendor/github.com/googleapis/gnostic/OpenAPIv2:all-srcs",
"//vendor/github.com/googleapis/gnostic/compiler:all-srcs",
"//vendor/github.com/googleapis/gnostic/extensions:all-srcs",
"//vendor/github.com/gophercloud/gophercloud:all-srcs",
"//vendor/github.com/gorilla/context:all-srcs",
"//vendor/github.com/gorilla/mux:all-srcs",
"//vendor/github.com/gorilla/websocket:all-srcs",
"//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:all-srcs",
"//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:all-srcs",
"//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:all-srcs",
"//vendor/github.com/hashicorp/golang-lru:all-srcs",
"//vendor/github.com/hashicorp/hcl:all-srcs",
"//vendor/github.com/hawkular/hawkular-client-go/metrics:all-srcs",
"//vendor/github.com/heketi/heketi/client/api/go-client:all-srcs",
"//vendor/github.com/heketi/heketi/pkg/glusterfs/api:all-srcs",
"//vendor/github.com/heketi/heketi/pkg/utils:all-srcs",
"//vendor/github.com/howeyc/gopass:all-srcs",
"//vendor/github.com/imdario/mergo:all-srcs",
"//vendor/github.com/inconshreveable/mousetrap:all-srcs",
"//vendor/github.com/influxdata/influxdb/client:all-srcs",
"//vendor/github.com/influxdata/influxdb/models:all-srcs",
"//vendor/github.com/influxdata/influxdb/pkg/escape:all-srcs",
"//vendor/github.com/jmespath/go-jmespath:all-srcs",
"//vendor/github.com/jonboulle/clockwork:all-srcs",
"//vendor/github.com/jteeuwen/go-bindata:all-srcs",
"//vendor/github.com/juju/ratelimit:all-srcs",
"//vendor/github.com/kardianos/osext:all-srcs",
"//vendor/github.com/karlseguin/ccache:all-srcs",
"//vendor/github.com/kr/fs:all-srcs",
"//vendor/github.com/kr/pty:all-srcs",
"//vendor/github.com/libopenstorage/openstorage/api:all-srcs",
"//vendor/github.com/libopenstorage/openstorage/pkg/units:all-srcs",
"//vendor/github.com/libopenstorage/openstorage/volume:all-srcs",
"//vendor/github.com/lpabon/godbc:all-srcs",
"//vendor/github.com/magiconair/properties:all-srcs",
"//vendor/github.com/mailru/easyjson/buffer:all-srcs",
"//vendor/github.com/mailru/easyjson/jlexer:all-srcs",
"//vendor/github.com/mailru/easyjson/jwriter:all-srcs",
"//vendor/github.com/matttproud/golang_protobuf_extensions/pbutil:all-srcs",
"//vendor/github.com/miekg/coredns/middleware/etcd/msg:all-srcs",
"//vendor/github.com/miekg/dns:all-srcs",
"//vendor/github.com/mistifyio/go-zfs:all-srcs",
"//vendor/github.com/mitchellh/go-wordwrap:all-srcs",
"//vendor/github.com/mitchellh/mapstructure:all-srcs",
"//vendor/github.com/mreiferson/go-httpclient:all-srcs",
"//vendor/github.com/mvdan/xurls:all-srcs",
"//vendor/github.com/mxk/go-flowrate/flowrate:all-srcs",
"//vendor/github.com/onsi/ginkgo:all-srcs",
"//vendor/github.com/onsi/gomega:all-srcs",
"//vendor/github.com/opencontainers/runc/libcontainer:all-srcs",
"//vendor/github.com/pborman/uuid:all-srcs",
"//vendor/github.com/pelletier/go-buffruneio:all-srcs",
"//vendor/github.com/pelletier/go-toml:all-srcs",
"//vendor/github.com/pkg/errors:all-srcs",
"//vendor/github.com/pkg/sftp:all-srcs",
"//vendor/github.com/pmezard/go-difflib/difflib:all-srcs",
"//vendor/github.com/prometheus/client_golang/prometheus:all-srcs",
"//vendor/github.com/prometheus/client_model/go:all-srcs",
"//vendor/github.com/prometheus/common/expfmt:all-srcs",
"//vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg:all-srcs",
"//vendor/github.com/prometheus/common/model:all-srcs",
"//vendor/github.com/prometheus/procfs:all-srcs",
"//vendor/github.com/quobyte/api:all-srcs",
"//vendor/github.com/rackspace/gophercloud:all-srcs",
"//vendor/github.com/rancher/go-rancher/client:all-srcs",
"//vendor/github.com/renstrom/dedent:all-srcs",
"//vendor/github.com/robfig/cron:all-srcs",
"//vendor/github.com/rubiojr/go-vhd/vhd:all-srcs",
"//vendor/github.com/russross/blackfriday:all-srcs",
"//vendor/github.com/satori/uuid:all-srcs",
"//vendor/github.com/seccomp/libseccomp-golang:all-srcs",
"//vendor/github.com/shurcooL/sanitized_anchor_name:all-srcs",
"//vendor/github.com/spf13/afero:all-srcs",
"//vendor/github.com/spf13/cast:all-srcs",
"//vendor/github.com/spf13/cobra:all-srcs",
"//vendor/github.com/spf13/jwalterweatherman:all-srcs",
"//vendor/github.com/spf13/pflag:all-srcs",
"//vendor/github.com/spf13/viper:all-srcs",
"//vendor/github.com/square/go-jose:all-srcs",
"//vendor/github.com/storageos/go-api:all-srcs",
"//vendor/github.com/stretchr/objx:all-srcs",
"//vendor/github.com/stretchr/testify/assert:all-srcs",
"//vendor/github.com/stretchr/testify/mock:all-srcs",
"//vendor/github.com/stretchr/testify/require:all-srcs",
"//vendor/github.com/syndtr/gocapability/capability:all-srcs",
"//vendor/github.com/ugorji/go/codec:all-srcs",
"//vendor/github.com/vishvananda/netlink:all-srcs",
"//vendor/github.com/vmware/govmomi:all-srcs",
"//vendor/github.com/vmware/photon-controller-go-sdk/SSPI:all-srcs",
"//vendor/github.com/vmware/photon-controller-go-sdk/photon:all-srcs",
"//vendor/github.com/xanzy/go-cloudstack/cloudstack:all-srcs",
"//vendor/github.com/xiang90/probing:all-srcs",
"//vendor/github.com/xyproto/simpleredis:all-srcs",
"//vendor/go.pedge.io/pb/go/google/protobuf:all-srcs",
"//vendor/go4.org/errorutil:all-srcs",
"//vendor/golang.org/x/crypto/bcrypt:all-srcs",
"//vendor/golang.org/x/crypto/blowfish:all-srcs",
"//vendor/golang.org/x/crypto/curve25519:all-srcs",
"//vendor/golang.org/x/crypto/ed25519:all-srcs",
"//vendor/golang.org/x/crypto/nacl/secretbox:all-srcs",
"//vendor/golang.org/x/crypto/pkcs12:all-srcs",
"//vendor/golang.org/x/crypto/poly1305:all-srcs",
"//vendor/golang.org/x/crypto/salsa20/salsa:all-srcs",
"//vendor/golang.org/x/crypto/ssh:all-srcs",
"//vendor/golang.org/x/exp/inotify:all-srcs",
"//vendor/golang.org/x/net/context:all-srcs",
"//vendor/golang.org/x/net/html:all-srcs",
"//vendor/golang.org/x/net/http2:all-srcs",
"//vendor/golang.org/x/net/idna:all-srcs",
"//vendor/golang.org/x/net/internal/timeseries:all-srcs",
"//vendor/golang.org/x/net/lex/httplex:all-srcs",
"//vendor/golang.org/x/net/proxy:all-srcs",
"//vendor/golang.org/x/net/trace:all-srcs",
"//vendor/golang.org/x/net/websocket:all-srcs",
"//vendor/golang.org/x/oauth2:all-srcs",
"//vendor/golang.org/x/sys/unix:all-srcs",
"//vendor/golang.org/x/sys/windows:all-srcs",
"//vendor/golang.org/x/text/cases:all-srcs",
"//vendor/golang.org/x/text/encoding:all-srcs",
"//vendor/golang.org/x/text/internal:all-srcs",
"//vendor/golang.org/x/text/language:all-srcs",
"//vendor/golang.org/x/text/runes:all-srcs",
"//vendor/golang.org/x/text/secure/bidirule:all-srcs",
"//vendor/golang.org/x/text/secure/precis:all-srcs",
"//vendor/golang.org/x/text/transform:all-srcs",
"//vendor/golang.org/x/text/unicode/bidi:all-srcs",
"//vendor/golang.org/x/text/unicode/norm:all-srcs",
"//vendor/golang.org/x/text/width:all-srcs",
"//vendor/golang.org/x/time/rate:all-srcs",
"//vendor/golang.org/x/tools/container/intsets:all-srcs",
"//vendor/google.golang.org/api/cloudkms/v1:all-srcs",
"//vendor/google.golang.org/api/cloudmonitoring/v2beta2:all-srcs",
"//vendor/google.golang.org/api/compute/v0.alpha:all-srcs",
"//vendor/google.golang.org/api/compute/v0.beta:all-srcs",
"//vendor/google.golang.org/api/compute/v1:all-srcs",
"//vendor/google.golang.org/api/container/v1:all-srcs",
"//vendor/google.golang.org/api/dns/v1:all-srcs",
"//vendor/google.golang.org/api/gensupport:all-srcs",
"//vendor/google.golang.org/api/googleapi:all-srcs",
"//vendor/google.golang.org/api/logging/v2beta1:all-srcs",
"//vendor/google.golang.org/api/monitoring/v3:all-srcs",
"//vendor/google.golang.org/api/pubsub/v1:all-srcs",
"//vendor/google.golang.org/genproto/googleapis/rpc/status:all-srcs",
"//vendor/google.golang.org/grpc:all-srcs",
"//vendor/gopkg.in/gcfg.v1:all-srcs",
"//vendor/gopkg.in/inf.v0:all-srcs",
"//vendor/gopkg.in/natefinch/lumberjack.v2:all-srcs",
"//vendor/gopkg.in/warnings.v0:all-srcs",
"//vendor/gopkg.in/yaml.v2:all-srcs",
"//vendor/k8s.io/gengo/args:all-srcs",
"//vendor/k8s.io/gengo/examples/deepcopy-gen/generators:all-srcs",
"//vendor/k8s.io/gengo/examples/defaulter-gen/generators:all-srcs",
"//vendor/k8s.io/gengo/examples/import-boss/generators:all-srcs",
"//vendor/k8s.io/gengo/examples/set-gen/generators:all-srcs",
"//vendor/k8s.io/gengo/examples/set-gen/sets:all-srcs",
"//vendor/k8s.io/gengo/generator:all-srcs",
"//vendor/k8s.io/gengo/namer:all-srcs",
"//vendor/k8s.io/gengo/parser:all-srcs",
"//vendor/k8s.io/gengo/types:all-srcs",
"//vendor/k8s.io/heapster/metrics/api/v1/types:all-srcs",
"//vendor/k8s.io/kube-openapi/pkg/aggregator:all-srcs",
"//vendor/k8s.io/kube-openapi/pkg/builder:all-srcs",
"//vendor/k8s.io/kube-openapi/pkg/common:all-srcs",
"//vendor/k8s.io/kube-openapi/pkg/generators:all-srcs",
"//vendor/k8s.io/kube-openapi/pkg/handler:all-srcs",
"//vendor/k8s.io/kube-openapi/pkg/util:all-srcs",
"//vendor/k8s.io/utils/exec:all-srcs",
"//vendor/vbom.ml/util/sortorder:all-srcs",
], ],
tags = ["automanaged"], tags = ["automanaged"],
) )

@ -84,59 +84,63 @@
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/text/cases", "ImportPath": "golang.org/x/text/cases",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/internal",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/internal/tag", "ImportPath": "golang.org/x/text/internal/tag",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/language", "ImportPath": "golang.org/x/text/language",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/runes", "ImportPath": "golang.org/x/text/runes",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/bidirule", "ImportPath": "golang.org/x/text/secure/bidirule",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/precis", "ImportPath": "golang.org/x/text/secure/precis",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/transform", "ImportPath": "golang.org/x/text/transform",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/bidi", "ImportPath": "golang.org/x/text/unicode/bidi",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/norm", "ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/width", "ImportPath": "golang.org/x/text/width",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "gopkg.in/inf.v0", "ImportPath": "gopkg.in/inf.v0",

@ -304,43 +304,43 @@
}, },
{ {
"ImportPath": "golang.org/x/net/context", "ImportPath": "golang.org/x/net/context",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/html", "ImportPath": "golang.org/x/net/html",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/html/atom", "ImportPath": "golang.org/x/net/html/atom",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/internal/timeseries", "ImportPath": "golang.org/x/net/internal/timeseries",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/trace", "ImportPath": "golang.org/x/net/trace",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/websocket", "ImportPath": "golang.org/x/net/websocket",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/sys/unix", "ImportPath": "golang.org/x/sys/unix",
@ -348,79 +348,107 @@
}, },
{ {
"ImportPath": "golang.org/x/text/cases", "ImportPath": "golang.org/x/text/cases",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/internal",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/internal/tag", "ImportPath": "golang.org/x/text/internal/tag",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/language", "ImportPath": "golang.org/x/text/language",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/runes", "ImportPath": "golang.org/x/text/runes",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/bidirule", "ImportPath": "golang.org/x/text/secure/bidirule",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/precis", "ImportPath": "golang.org/x/text/secure/precis",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/transform", "ImportPath": "golang.org/x/text/transform",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/bidi", "ImportPath": "golang.org/x/text/unicode/bidi",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/norm", "ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/width", "ImportPath": "golang.org/x/text/width",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
}, },
{ {
"ImportPath": "google.golang.org/grpc", "ImportPath": "google.golang.org/grpc",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/codes", "ImportPath": "google.golang.org/grpc/codes",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/credentials", "ImportPath": "google.golang.org/grpc/credentials",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/grpclog", "ImportPath": "google.golang.org/grpc/grpclog",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/internal", "ImportPath": "google.golang.org/grpc/internal",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/keepalive",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/metadata", "ImportPath": "google.golang.org/grpc/metadata",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/naming", "ImportPath": "google.golang.org/grpc/naming",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/peer", "ImportPath": "google.golang.org/grpc/peer",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/stats",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/status",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/tap",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/transport", "ImportPath": "google.golang.org/grpc/transport",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "gopkg.in/inf.v0", "ImportPath": "gopkg.in/inf.v0",

@ -136,71 +136,75 @@
}, },
{ {
"ImportPath": "golang.org/x/net/html", "ImportPath": "golang.org/x/net/html",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/html/atom", "ImportPath": "golang.org/x/net/html/atom",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/websocket", "ImportPath": "golang.org/x/net/websocket",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/text/cases", "ImportPath": "golang.org/x/text/cases",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/internal",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/internal/tag", "ImportPath": "golang.org/x/text/internal/tag",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/language", "ImportPath": "golang.org/x/text/language",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/runes", "ImportPath": "golang.org/x/text/runes",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/bidirule", "ImportPath": "golang.org/x/text/secure/bidirule",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/precis", "ImportPath": "golang.org/x/text/secure/precis",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/transform", "ImportPath": "golang.org/x/text/transform",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/bidi", "ImportPath": "golang.org/x/text/unicode/bidi",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/norm", "ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/width", "ImportPath": "golang.org/x/text/width",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "gopkg.in/inf.v0", "ImportPath": "gopkg.in/inf.v0",

@ -580,43 +580,43 @@
}, },
{ {
"ImportPath": "golang.org/x/net/context", "ImportPath": "golang.org/x/net/context",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/html", "ImportPath": "golang.org/x/net/html",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/html/atom", "ImportPath": "golang.org/x/net/html/atom",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/internal/timeseries", "ImportPath": "golang.org/x/net/internal/timeseries",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/trace", "ImportPath": "golang.org/x/net/trace",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/websocket", "ImportPath": "golang.org/x/net/websocket",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/sys/unix", "ImportPath": "golang.org/x/sys/unix",
@ -624,83 +624,111 @@
}, },
{ {
"ImportPath": "golang.org/x/text/cases", "ImportPath": "golang.org/x/text/cases",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/internal",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/internal/tag", "ImportPath": "golang.org/x/text/internal/tag",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/language", "ImportPath": "golang.org/x/text/language",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/runes", "ImportPath": "golang.org/x/text/runes",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/bidirule", "ImportPath": "golang.org/x/text/secure/bidirule",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/precis", "ImportPath": "golang.org/x/text/secure/precis",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/transform", "ImportPath": "golang.org/x/text/transform",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/bidi", "ImportPath": "golang.org/x/text/unicode/bidi",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/norm", "ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/width", "ImportPath": "golang.org/x/text/width",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/time/rate", "ImportPath": "golang.org/x/time/rate",
"Rev": "f51c12702a4d776e4c1fa9b0fabab841babae631" "Rev": "f51c12702a4d776e4c1fa9b0fabab841babae631"
}, },
{
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
},
{ {
"ImportPath": "google.golang.org/grpc", "ImportPath": "google.golang.org/grpc",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/codes", "ImportPath": "google.golang.org/grpc/codes",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/credentials", "ImportPath": "google.golang.org/grpc/credentials",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/grpclog", "ImportPath": "google.golang.org/grpc/grpclog",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/internal", "ImportPath": "google.golang.org/grpc/internal",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/keepalive",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/metadata", "ImportPath": "google.golang.org/grpc/metadata",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/naming", "ImportPath": "google.golang.org/grpc/naming",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/peer", "ImportPath": "google.golang.org/grpc/peer",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/stats",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/status",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/tap",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/transport", "ImportPath": "google.golang.org/grpc/transport",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "gopkg.in/inf.v0", "ImportPath": "gopkg.in/inf.v0",

@ -256,27 +256,27 @@
}, },
{ {
"ImportPath": "golang.org/x/net/context", "ImportPath": "golang.org/x/net/context",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/context/ctxhttp", "ImportPath": "golang.org/x/net/context/ctxhttp",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/oauth2", "ImportPath": "golang.org/x/oauth2",
@ -304,43 +304,47 @@
}, },
{ {
"ImportPath": "golang.org/x/text/cases", "ImportPath": "golang.org/x/text/cases",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/internal",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/internal/tag", "ImportPath": "golang.org/x/text/internal/tag",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/language", "ImportPath": "golang.org/x/text/language",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/runes", "ImportPath": "golang.org/x/text/runes",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/bidirule", "ImportPath": "golang.org/x/text/secure/bidirule",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/precis", "ImportPath": "golang.org/x/text/secure/precis",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/transform", "ImportPath": "golang.org/x/text/transform",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/bidi", "ImportPath": "golang.org/x/text/unicode/bidi",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/norm", "ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/width", "ImportPath": "golang.org/x/text/width",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "gopkg.in/inf.v0", "ImportPath": "gopkg.in/inf.v0",

@ -224,59 +224,63 @@
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/text/cases", "ImportPath": "golang.org/x/text/cases",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/internal",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/internal/tag", "ImportPath": "golang.org/x/text/internal/tag",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/language", "ImportPath": "golang.org/x/text/language",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/runes", "ImportPath": "golang.org/x/text/runes",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/bidirule", "ImportPath": "golang.org/x/text/secure/bidirule",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/precis", "ImportPath": "golang.org/x/text/secure/precis",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/transform", "ImportPath": "golang.org/x/text/transform",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/bidi", "ImportPath": "golang.org/x/text/unicode/bidi",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/norm", "ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/width", "ImportPath": "golang.org/x/text/width",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "gopkg.in/inf.v0", "ImportPath": "gopkg.in/inf.v0",

@ -300,43 +300,43 @@
}, },
{ {
"ImportPath": "golang.org/x/net/context", "ImportPath": "golang.org/x/net/context",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/html", "ImportPath": "golang.org/x/net/html",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/html/atom", "ImportPath": "golang.org/x/net/html/atom",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/internal/timeseries", "ImportPath": "golang.org/x/net/internal/timeseries",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/trace", "ImportPath": "golang.org/x/net/trace",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/websocket", "ImportPath": "golang.org/x/net/websocket",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/sys/unix", "ImportPath": "golang.org/x/sys/unix",
@ -344,79 +344,107 @@
}, },
{ {
"ImportPath": "golang.org/x/text/cases", "ImportPath": "golang.org/x/text/cases",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/internal",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/internal/tag", "ImportPath": "golang.org/x/text/internal/tag",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/language", "ImportPath": "golang.org/x/text/language",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/runes", "ImportPath": "golang.org/x/text/runes",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/bidirule", "ImportPath": "golang.org/x/text/secure/bidirule",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/precis", "ImportPath": "golang.org/x/text/secure/precis",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/transform", "ImportPath": "golang.org/x/text/transform",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/bidi", "ImportPath": "golang.org/x/text/unicode/bidi",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/norm", "ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/width", "ImportPath": "golang.org/x/text/width",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
}, },
{ {
"ImportPath": "google.golang.org/grpc", "ImportPath": "google.golang.org/grpc",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/codes", "ImportPath": "google.golang.org/grpc/codes",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/credentials", "ImportPath": "google.golang.org/grpc/credentials",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/grpclog", "ImportPath": "google.golang.org/grpc/grpclog",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/internal", "ImportPath": "google.golang.org/grpc/internal",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/keepalive",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/metadata", "ImportPath": "google.golang.org/grpc/metadata",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/naming", "ImportPath": "google.golang.org/grpc/naming",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/peer", "ImportPath": "google.golang.org/grpc/peer",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/stats",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/status",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/tap",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/transport", "ImportPath": "google.golang.org/grpc/transport",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "gopkg.in/inf.v0", "ImportPath": "gopkg.in/inf.v0",

@ -124,59 +124,63 @@
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/text/cases", "ImportPath": "golang.org/x/text/cases",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/internal",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/internal/tag", "ImportPath": "golang.org/x/text/internal/tag",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/language", "ImportPath": "golang.org/x/text/language",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/runes", "ImportPath": "golang.org/x/text/runes",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/bidirule", "ImportPath": "golang.org/x/text/secure/bidirule",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/precis", "ImportPath": "golang.org/x/text/secure/precis",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/transform", "ImportPath": "golang.org/x/text/transform",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/bidi", "ImportPath": "golang.org/x/text/unicode/bidi",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/norm", "ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/width", "ImportPath": "golang.org/x/text/width",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "gopkg.in/inf.v0", "ImportPath": "gopkg.in/inf.v0",

@ -292,43 +292,43 @@
}, },
{ {
"ImportPath": "golang.org/x/net/context", "ImportPath": "golang.org/x/net/context",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/html", "ImportPath": "golang.org/x/net/html",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/html/atom", "ImportPath": "golang.org/x/net/html/atom",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/internal/timeseries", "ImportPath": "golang.org/x/net/internal/timeseries",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/trace", "ImportPath": "golang.org/x/net/trace",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/net/websocket", "ImportPath": "golang.org/x/net/websocket",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
}, },
{ {
"ImportPath": "golang.org/x/sys/unix", "ImportPath": "golang.org/x/sys/unix",
@ -336,79 +336,107 @@
}, },
{ {
"ImportPath": "golang.org/x/text/cases", "ImportPath": "golang.org/x/text/cases",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/internal",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/internal/tag", "ImportPath": "golang.org/x/text/internal/tag",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/language", "ImportPath": "golang.org/x/text/language",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/runes", "ImportPath": "golang.org/x/text/runes",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/bidirule", "ImportPath": "golang.org/x/text/secure/bidirule",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/secure/precis", "ImportPath": "golang.org/x/text/secure/precis",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/transform", "ImportPath": "golang.org/x/text/transform",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/bidi", "ImportPath": "golang.org/x/text/unicode/bidi",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/norm", "ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
}, },
{ {
"ImportPath": "golang.org/x/text/width", "ImportPath": "golang.org/x/text/width",
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
}, },
{ {
"ImportPath": "google.golang.org/grpc", "ImportPath": "google.golang.org/grpc",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/codes", "ImportPath": "google.golang.org/grpc/codes",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/credentials", "ImportPath": "google.golang.org/grpc/credentials",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/grpclog", "ImportPath": "google.golang.org/grpc/grpclog",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/internal", "ImportPath": "google.golang.org/grpc/internal",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/keepalive",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/metadata", "ImportPath": "google.golang.org/grpc/metadata",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/naming", "ImportPath": "google.golang.org/grpc/naming",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/peer", "ImportPath": "google.golang.org/grpc/peer",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/stats",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/status",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/tap",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/transport", "ImportPath": "google.golang.org/grpc/transport",
"Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "gopkg.in/inf.v0", "ImportPath": "gopkg.in/inf.v0",

393
vendor/BUILD vendored

@ -1,393 +0,0 @@
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/bitbucket.org/bertimus9/systemstat:all-srcs",
"//vendor/bitbucket.org/ww/goautoneg:all-srcs",
"//vendor/cloud.google.com/go/compute/metadata:all-srcs",
"//vendor/cloud.google.com/go/internal:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/network:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/storage:all-srcs",
"//vendor/github.com/Azure/go-ansiterm:all-srcs",
"//vendor/github.com/Azure/go-autorest/autorest:all-srcs",
"//vendor/github.com/MakeNowJust/heredoc:all-srcs",
"//vendor/github.com/Microsoft/go-winio:all-srcs",
"//vendor/github.com/NYTimes/gziphandler:all-srcs",
"//vendor/github.com/PuerkitoBio/purell:all-srcs",
"//vendor/github.com/PuerkitoBio/urlesc:all-srcs",
"//vendor/github.com/Sirupsen/logrus:all-srcs",
"//vendor/github.com/abbot/go-http-auth:all-srcs",
"//vendor/github.com/appc/spec/schema:all-srcs",
"//vendor/github.com/armon/circbuf:all-srcs",
"//vendor/github.com/asaskevich/govalidator:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/aws:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/private/protocol:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/private/waiter:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/autoscaling:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/ecr:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/elb:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/route53:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/sts:all-srcs",
"//vendor/github.com/beorn7/perks/quantile:all-srcs",
"//vendor/github.com/blang/semver:all-srcs",
"//vendor/github.com/boltdb/bolt:all-srcs",
"//vendor/github.com/chai2010/gettext-go/gettext:all-srcs",
"//vendor/github.com/cloudflare/cfssl/auth:all-srcs",
"//vendor/github.com/cloudflare/cfssl/certdb:all-srcs",
"//vendor/github.com/cloudflare/cfssl/config:all-srcs",
"//vendor/github.com/cloudflare/cfssl/crypto/pkcs7:all-srcs",
"//vendor/github.com/cloudflare/cfssl/csr:all-srcs",
"//vendor/github.com/cloudflare/cfssl/errors:all-srcs",
"//vendor/github.com/cloudflare/cfssl/helpers:all-srcs",
"//vendor/github.com/cloudflare/cfssl/info:all-srcs",
"//vendor/github.com/cloudflare/cfssl/log:all-srcs",
"//vendor/github.com/cloudflare/cfssl/ocsp/config:all-srcs",
"//vendor/github.com/cloudflare/cfssl/signer:all-srcs",
"//vendor/github.com/clusterhq/flocker-go:all-srcs",
"//vendor/github.com/codedellemc/goscaleio:all-srcs",
"//vendor/github.com/codegangsta/negroni:all-srcs",
"//vendor/github.com/containernetworking/cni/libcni:all-srcs",
"//vendor/github.com/containernetworking/cni/pkg/invoke:all-srcs",
"//vendor/github.com/containernetworking/cni/pkg/types:all-srcs",
"//vendor/github.com/containernetworking/cni/pkg/version:all-srcs",
"//vendor/github.com/coreos/etcd/alarm:all-srcs",
"//vendor/github.com/coreos/etcd/auth:all-srcs",
"//vendor/github.com/coreos/etcd/client:all-srcs",
"//vendor/github.com/coreos/etcd/clientv3:all-srcs",
"//vendor/github.com/coreos/etcd/compactor:all-srcs",
"//vendor/github.com/coreos/etcd/discovery:all-srcs",
"//vendor/github.com/coreos/etcd/error:all-srcs",
"//vendor/github.com/coreos/etcd/etcdserver:all-srcs",
"//vendor/github.com/coreos/etcd/integration:all-srcs",
"//vendor/github.com/coreos/etcd/lease:all-srcs",
"//vendor/github.com/coreos/etcd/mvcc:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/adt:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/contention:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/cpuutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/crc:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/fileutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/httputil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/idutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/ioutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/logutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/monotime:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/netutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/pathutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/pbutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/runtime:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/schedule:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/testutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/tlsutil:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/transport:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/types:all-srcs",
"//vendor/github.com/coreos/etcd/pkg/wait:all-srcs",
"//vendor/github.com/coreos/etcd/proxy/grpcproxy:all-srcs",
"//vendor/github.com/coreos/etcd/raft:all-srcs",
"//vendor/github.com/coreos/etcd/rafthttp:all-srcs",
"//vendor/github.com/coreos/etcd/snap:all-srcs",
"//vendor/github.com/coreos/etcd/store:all-srcs",
"//vendor/github.com/coreos/etcd/version:all-srcs",
"//vendor/github.com/coreos/etcd/wal:all-srcs",
"//vendor/github.com/coreos/go-oidc/http:all-srcs",
"//vendor/github.com/coreos/go-oidc/jose:all-srcs",
"//vendor/github.com/coreos/go-oidc/key:all-srcs",
"//vendor/github.com/coreos/go-oidc/oauth2:all-srcs",
"//vendor/github.com/coreos/go-oidc/oidc:all-srcs",
"//vendor/github.com/coreos/go-semver/semver:all-srcs",
"//vendor/github.com/coreos/go-systemd/daemon:all-srcs",
"//vendor/github.com/coreos/go-systemd/dbus:all-srcs",
"//vendor/github.com/coreos/go-systemd/journal:all-srcs",
"//vendor/github.com/coreos/go-systemd/unit:all-srcs",
"//vendor/github.com/coreos/go-systemd/util:all-srcs",
"//vendor/github.com/coreos/pkg/capnslog:all-srcs",
"//vendor/github.com/coreos/pkg/dlopen:all-srcs",
"//vendor/github.com/coreos/pkg/health:all-srcs",
"//vendor/github.com/coreos/pkg/httputil:all-srcs",
"//vendor/github.com/coreos/pkg/timeutil:all-srcs",
"//vendor/github.com/coreos/rkt/api/v1alpha:all-srcs",
"//vendor/github.com/cpuguy83/go-md2man/md2man:all-srcs",
"//vendor/github.com/davecgh/go-spew/spew:all-srcs",
"//vendor/github.com/daviddengcn/go-colortext:all-srcs",
"//vendor/github.com/dgrijalva/jwt-go:all-srcs",
"//vendor/github.com/docker/distribution/digest:all-srcs",
"//vendor/github.com/docker/distribution/reference:all-srcs",
"//vendor/github.com/docker/docker/api/types:all-srcs",
"//vendor/github.com/docker/docker/client:all-srcs",
"//vendor/github.com/docker/docker/pkg/jsonlog:all-srcs",
"//vendor/github.com/docker/docker/pkg/jsonmessage:all-srcs",
"//vendor/github.com/docker/docker/pkg/longpath:all-srcs",
"//vendor/github.com/docker/docker/pkg/mount:all-srcs",
"//vendor/github.com/docker/docker/pkg/stdcopy:all-srcs",
"//vendor/github.com/docker/docker/pkg/symlink:all-srcs",
"//vendor/github.com/docker/docker/pkg/system:all-srcs",
"//vendor/github.com/docker/docker/pkg/term:all-srcs",
"//vendor/github.com/docker/docker/pkg/tlsconfig:all-srcs",
"//vendor/github.com/docker/engine-api/client:all-srcs",
"//vendor/github.com/docker/engine-api/types:all-srcs",
"//vendor/github.com/docker/go-connections/nat:all-srcs",
"//vendor/github.com/docker/go-connections/sockets:all-srcs",
"//vendor/github.com/docker/go-connections/tlsconfig:all-srcs",
"//vendor/github.com/docker/go-units:all-srcs",
"//vendor/github.com/docker/spdystream:all-srcs",
"//vendor/github.com/elazarl/go-bindata-assetfs:all-srcs",
"//vendor/github.com/elazarl/goproxy:all-srcs",
"//vendor/github.com/emicklei/go-restful:all-srcs",
"//vendor/github.com/emicklei/go-restful-swagger12:all-srcs",
"//vendor/github.com/evanphx/json-patch:all-srcs",
"//vendor/github.com/exponent-io/jsonpath:all-srcs",
"//vendor/github.com/fatih/camelcase:all-srcs",
"//vendor/github.com/fsnotify/fsnotify:all-srcs",
"//vendor/github.com/garyburd/redigo/internal:all-srcs",
"//vendor/github.com/garyburd/redigo/redis:all-srcs",
"//vendor/github.com/ghodss/yaml:all-srcs",
"//vendor/github.com/go-ini/ini:all-srcs",
"//vendor/github.com/go-openapi/analysis:all-srcs",
"//vendor/github.com/go-openapi/errors:all-srcs",
"//vendor/github.com/go-openapi/jsonpointer:all-srcs",
"//vendor/github.com/go-openapi/jsonreference:all-srcs",
"//vendor/github.com/go-openapi/loads:all-srcs",
"//vendor/github.com/go-openapi/runtime:all-srcs",
"//vendor/github.com/go-openapi/spec:all-srcs",
"//vendor/github.com/go-openapi/strfmt:all-srcs",
"//vendor/github.com/go-openapi/swag:all-srcs",
"//vendor/github.com/go-openapi/validate:all-srcs",
"//vendor/github.com/godbus/dbus:all-srcs",
"//vendor/github.com/gogo/protobuf/gogoproto:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/compare:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/defaultcheck:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/description:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/embedcheck:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/enumstringer:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/equal:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/face:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/gostring:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/marshalto:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/oneofcheck:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/populate:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/size:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/stringer:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/testgen:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/union:all-srcs",
"//vendor/github.com/gogo/protobuf/plugin/unmarshal:all-srcs",
"//vendor/github.com/gogo/protobuf/proto:all-srcs",
"//vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor:all-srcs",
"//vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator:all-srcs",
"//vendor/github.com/gogo/protobuf/protoc-gen-gogo/grpc:all-srcs",
"//vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin:all-srcs",
"//vendor/github.com/gogo/protobuf/sortkeys:all-srcs",
"//vendor/github.com/gogo/protobuf/vanity:all-srcs",
"//vendor/github.com/golang/glog:all-srcs",
"//vendor/github.com/golang/groupcache/lru:all-srcs",
"//vendor/github.com/golang/mock/gomock:all-srcs",
"//vendor/github.com/golang/protobuf/jsonpb:all-srcs",
"//vendor/github.com/golang/protobuf/proto:all-srcs",
"//vendor/github.com/golang/protobuf/ptypes:all-srcs",
"//vendor/github.com/google/btree:all-srcs",
"//vendor/github.com/google/cadvisor/api:all-srcs",
"//vendor/github.com/google/cadvisor/cache/memory:all-srcs",
"//vendor/github.com/google/cadvisor/client/v2:all-srcs",
"//vendor/github.com/google/cadvisor/collector:all-srcs",
"//vendor/github.com/google/cadvisor/container:all-srcs",
"//vendor/github.com/google/cadvisor/devicemapper:all-srcs",
"//vendor/github.com/google/cadvisor/events:all-srcs",
"//vendor/github.com/google/cadvisor/fs:all-srcs",
"//vendor/github.com/google/cadvisor/healthz:all-srcs",
"//vendor/github.com/google/cadvisor/http:all-srcs",
"//vendor/github.com/google/cadvisor/info/v1:all-srcs",
"//vendor/github.com/google/cadvisor/info/v2:all-srcs",
"//vendor/github.com/google/cadvisor/machine:all-srcs",
"//vendor/github.com/google/cadvisor/manager:all-srcs",
"//vendor/github.com/google/cadvisor/metrics:all-srcs",
"//vendor/github.com/google/cadvisor/pages:all-srcs",
"//vendor/github.com/google/cadvisor/storage:all-srcs",
"//vendor/github.com/google/cadvisor/summary:all-srcs",
"//vendor/github.com/google/cadvisor/utils:all-srcs",
"//vendor/github.com/google/cadvisor/validate:all-srcs",
"//vendor/github.com/google/cadvisor/version:all-srcs",
"//vendor/github.com/google/cadvisor/zfs:all-srcs",
"//vendor/github.com/google/certificate-transparency/go:all-srcs",
"//vendor/github.com/google/gofuzz:all-srcs",
"//vendor/github.com/googleapis/gnostic/OpenAPIv2:all-srcs",
"//vendor/github.com/googleapis/gnostic/compiler:all-srcs",
"//vendor/github.com/googleapis/gnostic/extensions:all-srcs",
"//vendor/github.com/gophercloud/gophercloud:all-srcs",
"//vendor/github.com/gorilla/context:all-srcs",
"//vendor/github.com/gorilla/mux:all-srcs",
"//vendor/github.com/gorilla/websocket:all-srcs",
"//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:all-srcs",
"//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:all-srcs",
"//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:all-srcs",
"//vendor/github.com/hashicorp/golang-lru:all-srcs",
"//vendor/github.com/hashicorp/hcl:all-srcs",
"//vendor/github.com/hawkular/hawkular-client-go/metrics:all-srcs",
"//vendor/github.com/heketi/heketi/client/api/go-client:all-srcs",
"//vendor/github.com/heketi/heketi/pkg/glusterfs/api:all-srcs",
"//vendor/github.com/heketi/heketi/pkg/utils:all-srcs",
"//vendor/github.com/howeyc/gopass:all-srcs",
"//vendor/github.com/imdario/mergo:all-srcs",
"//vendor/github.com/inconshreveable/mousetrap:all-srcs",
"//vendor/github.com/influxdata/influxdb/client:all-srcs",
"//vendor/github.com/influxdata/influxdb/models:all-srcs",
"//vendor/github.com/influxdata/influxdb/pkg/escape:all-srcs",
"//vendor/github.com/jmespath/go-jmespath:all-srcs",
"//vendor/github.com/jonboulle/clockwork:all-srcs",
"//vendor/github.com/jteeuwen/go-bindata:all-srcs",
"//vendor/github.com/juju/ratelimit:all-srcs",
"//vendor/github.com/kardianos/osext:all-srcs",
"//vendor/github.com/karlseguin/ccache:all-srcs",
"//vendor/github.com/kr/fs:all-srcs",
"//vendor/github.com/kr/pty:all-srcs",
"//vendor/github.com/libopenstorage/openstorage/api:all-srcs",
"//vendor/github.com/libopenstorage/openstorage/pkg/units:all-srcs",
"//vendor/github.com/libopenstorage/openstorage/volume:all-srcs",
"//vendor/github.com/lpabon/godbc:all-srcs",
"//vendor/github.com/magiconair/properties:all-srcs",
"//vendor/github.com/mailru/easyjson/buffer:all-srcs",
"//vendor/github.com/mailru/easyjson/jlexer:all-srcs",
"//vendor/github.com/mailru/easyjson/jwriter:all-srcs",
"//vendor/github.com/matttproud/golang_protobuf_extensions/pbutil:all-srcs",
"//vendor/github.com/miekg/coredns/middleware/etcd/msg:all-srcs",
"//vendor/github.com/miekg/dns:all-srcs",
"//vendor/github.com/mistifyio/go-zfs:all-srcs",
"//vendor/github.com/mitchellh/go-wordwrap:all-srcs",
"//vendor/github.com/mitchellh/mapstructure:all-srcs",
"//vendor/github.com/mreiferson/go-httpclient:all-srcs",
"//vendor/github.com/mvdan/xurls:all-srcs",
"//vendor/github.com/mxk/go-flowrate/flowrate:all-srcs",
"//vendor/github.com/onsi/ginkgo:all-srcs",
"//vendor/github.com/onsi/gomega:all-srcs",
"//vendor/github.com/opencontainers/runc/libcontainer:all-srcs",
"//vendor/github.com/pborman/uuid:all-srcs",
"//vendor/github.com/pelletier/go-buffruneio:all-srcs",
"//vendor/github.com/pelletier/go-toml:all-srcs",
"//vendor/github.com/pkg/errors:all-srcs",
"//vendor/github.com/pkg/sftp:all-srcs",
"//vendor/github.com/pmezard/go-difflib/difflib:all-srcs",
"//vendor/github.com/prometheus/client_golang/prometheus:all-srcs",
"//vendor/github.com/prometheus/client_model/go:all-srcs",
"//vendor/github.com/prometheus/common/expfmt:all-srcs",
"//vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg:all-srcs",
"//vendor/github.com/prometheus/common/model:all-srcs",
"//vendor/github.com/prometheus/procfs:all-srcs",
"//vendor/github.com/quobyte/api:all-srcs",
"//vendor/github.com/rackspace/gophercloud:all-srcs",
"//vendor/github.com/rancher/go-rancher/client:all-srcs",
"//vendor/github.com/renstrom/dedent:all-srcs",
"//vendor/github.com/robfig/cron:all-srcs",
"//vendor/github.com/rubiojr/go-vhd/vhd:all-srcs",
"//vendor/github.com/russross/blackfriday:all-srcs",
"//vendor/github.com/satori/uuid:all-srcs",
"//vendor/github.com/seccomp/libseccomp-golang:all-srcs",
"//vendor/github.com/shurcooL/sanitized_anchor_name:all-srcs",
"//vendor/github.com/spf13/afero:all-srcs",
"//vendor/github.com/spf13/cast:all-srcs",
"//vendor/github.com/spf13/cobra:all-srcs",
"//vendor/github.com/spf13/jwalterweatherman:all-srcs",
"//vendor/github.com/spf13/pflag:all-srcs",
"//vendor/github.com/spf13/viper:all-srcs",
"//vendor/github.com/square/go-jose:all-srcs",
"//vendor/github.com/storageos/go-api:all-srcs",
"//vendor/github.com/stretchr/objx:all-srcs",
"//vendor/github.com/stretchr/testify/assert:all-srcs",
"//vendor/github.com/stretchr/testify/mock:all-srcs",
"//vendor/github.com/stretchr/testify/require:all-srcs",
"//vendor/github.com/syndtr/gocapability/capability:all-srcs",
"//vendor/github.com/ugorji/go/codec:all-srcs",
"//vendor/github.com/vishvananda/netlink:all-srcs",
"//vendor/github.com/vmware/govmomi:all-srcs",
"//vendor/github.com/vmware/photon-controller-go-sdk/SSPI:all-srcs",
"//vendor/github.com/vmware/photon-controller-go-sdk/photon:all-srcs",
"//vendor/github.com/xanzy/go-cloudstack/cloudstack:all-srcs",
"//vendor/github.com/xiang90/probing:all-srcs",
"//vendor/github.com/xyproto/simpleredis:all-srcs",
"//vendor/go.pedge.io/pb/go/google/protobuf:all-srcs",
"//vendor/go4.org/errorutil:all-srcs",
"//vendor/golang.org/x/crypto/bcrypt:all-srcs",
"//vendor/golang.org/x/crypto/blowfish:all-srcs",
"//vendor/golang.org/x/crypto/curve25519:all-srcs",
"//vendor/golang.org/x/crypto/ed25519:all-srcs",
"//vendor/golang.org/x/crypto/nacl/secretbox:all-srcs",
"//vendor/golang.org/x/crypto/pkcs12:all-srcs",
"//vendor/golang.org/x/crypto/poly1305:all-srcs",
"//vendor/golang.org/x/crypto/salsa20/salsa:all-srcs",
"//vendor/golang.org/x/crypto/ssh:all-srcs",
"//vendor/golang.org/x/exp/inotify:all-srcs",
"//vendor/golang.org/x/net/context:all-srcs",
"//vendor/golang.org/x/net/html:all-srcs",
"//vendor/golang.org/x/net/http2:all-srcs",
"//vendor/golang.org/x/net/idna:all-srcs",
"//vendor/golang.org/x/net/internal/timeseries:all-srcs",
"//vendor/golang.org/x/net/lex/httplex:all-srcs",
"//vendor/golang.org/x/net/proxy:all-srcs",
"//vendor/golang.org/x/net/trace:all-srcs",
"//vendor/golang.org/x/net/websocket:all-srcs",
"//vendor/golang.org/x/oauth2:all-srcs",
"//vendor/golang.org/x/sys/unix:all-srcs",
"//vendor/golang.org/x/sys/windows:all-srcs",
"//vendor/golang.org/x/text/cases:all-srcs",
"//vendor/golang.org/x/text/encoding:all-srcs",
"//vendor/golang.org/x/text/internal/tag:all-srcs",
"//vendor/golang.org/x/text/internal/utf8internal:all-srcs",
"//vendor/golang.org/x/text/language:all-srcs",
"//vendor/golang.org/x/text/runes:all-srcs",
"//vendor/golang.org/x/text/secure/bidirule:all-srcs",
"//vendor/golang.org/x/text/secure/precis:all-srcs",
"//vendor/golang.org/x/text/transform:all-srcs",
"//vendor/golang.org/x/text/unicode/bidi:all-srcs",
"//vendor/golang.org/x/text/unicode/norm:all-srcs",
"//vendor/golang.org/x/text/width:all-srcs",
"//vendor/golang.org/x/time/rate:all-srcs",
"//vendor/golang.org/x/tools/container/intsets:all-srcs",
"//vendor/google.golang.org/api/cloudkms/v1:all-srcs",
"//vendor/google.golang.org/api/cloudmonitoring/v2beta2:all-srcs",
"//vendor/google.golang.org/api/compute/v0.alpha:all-srcs",
"//vendor/google.golang.org/api/compute/v0.beta:all-srcs",
"//vendor/google.golang.org/api/compute/v1:all-srcs",
"//vendor/google.golang.org/api/container/v1:all-srcs",
"//vendor/google.golang.org/api/dns/v1:all-srcs",
"//vendor/google.golang.org/api/gensupport:all-srcs",
"//vendor/google.golang.org/api/googleapi:all-srcs",
"//vendor/google.golang.org/api/logging/v2beta1:all-srcs",
"//vendor/google.golang.org/api/monitoring/v3:all-srcs",
"//vendor/google.golang.org/api/pubsub/v1:all-srcs",
"//vendor/google.golang.org/grpc:all-srcs",
"//vendor/gopkg.in/gcfg.v1:all-srcs",
"//vendor/gopkg.in/inf.v0:all-srcs",
"//vendor/gopkg.in/natefinch/lumberjack.v2:all-srcs",
"//vendor/gopkg.in/warnings.v0:all-srcs",
"//vendor/gopkg.in/yaml.v2:all-srcs",
"//vendor/k8s.io/gengo/args:all-srcs",
"//vendor/k8s.io/gengo/examples/deepcopy-gen/generators:all-srcs",
"//vendor/k8s.io/gengo/examples/defaulter-gen/generators:all-srcs",
"//vendor/k8s.io/gengo/examples/import-boss/generators:all-srcs",
"//vendor/k8s.io/gengo/examples/set-gen/generators:all-srcs",
"//vendor/k8s.io/gengo/examples/set-gen/sets:all-srcs",
"//vendor/k8s.io/gengo/generator:all-srcs",
"//vendor/k8s.io/gengo/namer:all-srcs",
"//vendor/k8s.io/gengo/parser:all-srcs",
"//vendor/k8s.io/gengo/types:all-srcs",
"//vendor/k8s.io/heapster/metrics/api/v1/types:all-srcs",
"//vendor/k8s.io/kube-openapi/pkg/aggregator:all-srcs",
"//vendor/k8s.io/kube-openapi/pkg/builder:all-srcs",
"//vendor/k8s.io/kube-openapi/pkg/common:all-srcs",
"//vendor/k8s.io/kube-openapi/pkg/generators:all-srcs",
"//vendor/k8s.io/kube-openapi/pkg/handler:all-srcs",
"//vendor/k8s.io/kube-openapi/pkg/util:all-srcs",
"//vendor/k8s.io/utils/exec:all-srcs",
"//vendor/vbom.ml/util/sortorder:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -5,7 +5,9 @@ go_library(
srcs = [ srcs = [
"context.go", "context.go",
"go17.go", "go17.go",
"go19.go",
"pre_go17.go", "pre_go17.go",
"pre_go19.go",
], ],
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
) )


@ -7,7 +7,7 @@
// and between processes. // and between processes.
// //
// Incoming requests to a server should create a Context, and outgoing calls to // Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must // servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created // propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue. // using WithDeadline, WithTimeout, WithCancel, or WithValue.
// //
@ -16,14 +16,14 @@
// propagation: // propagation:
// //
// Do not store Contexts inside a struct type; instead, pass a Context // Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first // explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx: // parameter, typically named ctx:
// //
// func DoSomething(ctx context.Context, arg Arg) error { // func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ... // // ... use ctx ...
// } // }
// //
// Do not pass a nil Context, even if a function permits it. Pass context.TODO // Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use. // if you are unsure about which Context to use.
// //
// Use context Values only for request-scoped data that transits processes and // Use context Values only for request-scoped data that transits processes and
@ -36,112 +36,15 @@
// Contexts. // Contexts.
package context package context
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stores using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// Background returns a non-nil, empty Context. It is never canceled, has no // Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function, // values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming // initialization, and tests, and as the top-level Context for incoming
// requests. // requests.
func Background() Context { func Background() Context {
return background return background
} }
// TODO returns a non-nil, empty Context. Code should use context.TODO when // TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the // it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context // surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine // parameter). TODO is recognized by static analysis tools that determine
@ -149,8 +52,3 @@ func Background() Context {
func TODO() Context { func TODO() Context {
return todo return todo
} }
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()
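
For readers skimming the vendored doc comment above, a minimal usage sketch of the documented pattern (ctx as the first parameter, cancellation observed via ctx.Done()); the fetch helper is hypothetical and not part of this change:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

// fetch follows the documented convention: Context is the first parameter,
// and the function returns early when ctx is canceled or times out.
func fetch(ctx context.Context, url string) (string, error) {
	result := make(chan string, 1)
	go func() {
		time.Sleep(50 * time.Millisecond) // stand-in for real work
		result <- "body of " + url
	}()
	select {
	case <-ctx.Done():
		return "", ctx.Err() // Canceled or DeadlineExceeded
	case body := <-result:
		return body, nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel() // always release the context's resources
	if _, err := fetch(ctx, "http://example.com"); err == context.DeadlineExceeded {
		fmt.Println("timed out")
	}
}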


@ -35,8 +35,8 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
} }
// WithDeadline returns a copy of the parent context with the deadline adjusted // WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d, // to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned // WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned // context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is // cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first. // closed, whichever happens first.
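
A small sketch of the WithDeadline semantics described above: a child deadline can only tighten, never extend, the parent's. Assumes the same x/net/context package; values are illustrative:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	// Parent expires in 1s.
	parent, cancelParent := context.WithDeadline(context.Background(), time.Now().Add(1*time.Second))
	defer cancelParent()

	// Requesting a 2s deadline cannot extend the parent: the child still
	// reports (and honors) the earlier 1s deadline.
	child, cancelChild := context.WithDeadline(parent, time.Now().Add(2*time.Second))
	defer cancelChild()

	d, ok := child.Deadline()
	fmt.Println(ok, d.Sub(time.Now()) <= time.Second) // true true
}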

20
vendor/golang.org/x/net/context/go19.go generated vendored Normal file

@ -0,0 +1,20 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package context
import "context" // standard library's context, as of Go 1.7
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc
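
Because these are type aliases (Go 1.9+), values from the standard library's context package and from golang.org/x/net/context are the same types and interchangeable; a minimal sketch (hypothetical function name):

// +build go1.9

package main

import (
	stdcontext "context"
	"fmt"

	netcontext "golang.org/x/net/context"
)

// takesNet accepts the x/net/context type; under the aliases above this is
// exactly the standard library's context.Context.
func takesNet(ctx netcontext.Context) { fmt.Println(ctx.Err()) }

func main() {
	ctx := stdcontext.Background() // a standard-library context...
	takesNet(ctx)                  // ...satisfies the x/net signature directly
}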


@ -13,7 +13,7 @@ import (
"time" "time"
) )
// An emptyCtx is never canceled, has no values, and has no deadline. It is not // An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses. // struct{}, since vars of this type must have distinct addresses.
type emptyCtx int type emptyCtx int
@ -104,7 +104,7 @@ func propagateCancel(parent Context, child canceler) {
} }
// parentCancelCtx follows a chain of parent references until it finds a // parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this // *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent. // package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) { func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for { for {
@ -134,14 +134,14 @@ func removeChild(parent Context, child canceler) {
p.mu.Unlock() p.mu.Unlock()
} }
// A canceler is a context type that can be canceled directly. The // A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx. // implementations are *cancelCtx and *timerCtx.
type canceler interface { type canceler interface {
cancel(removeFromParent bool, err error) cancel(removeFromParent bool, err error)
Done() <-chan struct{} Done() <-chan struct{}
} }
// A cancelCtx can be canceled. When canceled, it also cancels any children // A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler. // that implement canceler.
type cancelCtx struct { type cancelCtx struct {
Context Context
@ -193,8 +193,8 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) {
} }
// WithDeadline returns a copy of the parent context with the deadline adjusted // WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d, // to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned // WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned // context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is // cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first. // closed, whichever happens first.
@ -226,8 +226,8 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
return c, func() { c.cancel(true, Canceled) } return c, func() { c.cancel(true, Canceled) }
} }
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to // A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then // implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel. // delegating to cancelCtx.cancel.
type timerCtx struct { type timerCtx struct {
*cancelCtx *cancelCtx
@ -281,7 +281,7 @@ func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val} return &valueCtx{parent, key, val}
} }
// A valueCtx carries a key-value pair. It implements Value for that key and // A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context. // delegates all other calls to the embedded Context.
type valueCtx struct { type valueCtx struct {
Context Context

109
vendor/golang.org/x/net/context/pre_go19.go generated vendored Normal file

@ -0,0 +1,109 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.9
package context
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stores using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()
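
The CancelFunc contract above in practice: cancel releases the context, may be called from any goroutine, and repeated calls are no-ops. A minimal sketch with a hypothetical worker, not part of this change:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func worker(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			fmt.Println("worker stopped:", ctx.Err()) // context canceled
			return
		case <-time.After(10 * time.Millisecond):
			// do one unit of work
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go worker(ctx)
	time.Sleep(50 * time.Millisecond)
	cancel()
	cancel() // subsequent calls do nothing
	time.Sleep(10 * time.Millisecond) // let the worker observe Done
}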


@ -3,22 +3,25 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library( go_library(
name = "go_default_library", name = "go_default_library",
srcs = [ srcs = [
"ciphers.go",
"client_conn_pool.go", "client_conn_pool.go",
"configure_transport.go", "configure_transport.go",
"databuffer.go",
"errors.go", "errors.go",
"fixed_buffer.go",
"flow.go", "flow.go",
"frame.go", "frame.go",
"go16.go", "go16.go",
"go17.go", "go17.go",
"go17_not18.go", "go17_not18.go",
"go18.go", "go18.go",
"go19.go",
"gotrack.go", "gotrack.go",
"headermap.go", "headermap.go",
"http2.go", "http2.go",
"not_go16.go", "not_go16.go",
"not_go17.go", "not_go17.go",
"not_go18.go", "not_go18.go",
"not_go19.go",
"pipe.go", "pipe.go",
"server.go", "server.go",
"transport.go", "transport.go",

641
vendor/golang.org/x/net/http2/ciphers.go generated vendored Normal file

@ -0,0 +1,641 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
// A list of the possible cipher suite ids. Taken from
// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt
const (
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
// Reserved uint16 = 0x001C-1D
cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
// Reserved uint16 = 0x0047-4F
// Reserved uint16 = 0x0050-58
// Reserved uint16 = 0x0059-5C
// Unassigned uint16 = 0x005D-5F
// Reserved uint16 = 0x0060-66
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
// Unassigned uint16 = 0x006E-83
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
// Unassigned uint16 = 0x00C6-FE
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
// Unassigned uint16 = 0x01-55,*
cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
// Unassigned uint16 = 0x5601 - 0xC000
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
// Unassigned uint16 = 0xC0B0-FF
// Unassigned uint16 = 0xC1-CB,*
// Unassigned uint16 = 0xCC00-A7
cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
)
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
// References:
// https://tools.ietf.org/html/rfc7540#appendix-A
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
func isBadCipher(cipher uint16) bool {
switch cipher {
case cipher_TLS_NULL_WITH_NULL_NULL,
cipher_TLS_RSA_WITH_NULL_MD5,
cipher_TLS_RSA_WITH_NULL_SHA,
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_RSA_WITH_RC4_128_MD5,
cipher_TLS_RSA_WITH_RC4_128_SHA,
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_RSA_WITH_DES_CBC_SHA,
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_DH_anon_WITH_RC4_128_MD5,
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_KRB5_WITH_DES_CBC_SHA,
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_KRB5_WITH_RC4_128_SHA,
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
cipher_TLS_KRB5_WITH_DES_CBC_MD5,
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
cipher_TLS_KRB5_WITH_RC4_128_MD5,
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_PSK_WITH_NULL_SHA,
cipher_TLS_DHE_PSK_WITH_NULL_SHA,
cipher_TLS_RSA_PSK_WITH_NULL_SHA,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_NULL_SHA256,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_PSK_WITH_RC4_128_SHA,
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_PSK_WITH_NULL_SHA256,
cipher_TLS_PSK_WITH_NULL_SHA384,
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_NULL_SHA,
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_AES_128_CCM,
cipher_TLS_RSA_WITH_AES_256_CCM,
cipher_TLS_RSA_WITH_AES_128_CCM_8,
cipher_TLS_RSA_WITH_AES_256_CCM_8,
cipher_TLS_PSK_WITH_AES_128_CCM,
cipher_TLS_PSK_WITH_AES_256_CCM,
cipher_TLS_PSK_WITH_AES_128_CCM_8,
cipher_TLS_PSK_WITH_AES_256_CCM_8:
return true
default:
return false
}
}
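
With the full Appendix A list above in one place, the short hand-maintained cipher lists that go17.go and not_go17.go used to carry (removed further down in this change) are no longer needed. As a rough in-package sketch only (firstBadCipher is a hypothetical helper, and the crypto/tls import is assumed), a server's configured suites could be screened like this:

// Hypothetical in-package helper, shown for illustration only.
func firstBadCipher(cfg *tls.Config) (uint16, bool) {
	for _, suite := range cfg.CipherSuites {
		if isBadCipher(suite) { // the Appendix A check defined above
			return suite, true
		}
	}
	return 0, false
}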

View File

@ -247,7 +247,7 @@ func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
}

// noDialClientConnPool is an implementation of http2.ClientConnPool
// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type noDialClientConnPool struct{ *clientConnPool }

View File

@ -56,7 +56,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) {
}

// registerHTTPSProtocol calls Transport.RegisterProtocol but
-// convering panics into errors.
+// converting panics into errors.
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
defer func() {
if e := recover(); e != nil {

146
vendor/golang.org/x/net/http2/databuffer.go generated vendored Normal file
View File

@ -0,0 +1,146 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
"fmt"
"sync"
)
// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
dataChunkSizeClasses = []int{
1 << 10,
2 << 10,
4 << 10,
8 << 10,
16 << 10,
}
dataChunkPools = [...]sync.Pool{
{New: func() interface{} { return make([]byte, 1<<10) }},
{New: func() interface{} { return make([]byte, 2<<10) }},
{New: func() interface{} { return make([]byte, 4<<10) }},
{New: func() interface{} { return make([]byte, 8<<10) }},
{New: func() interface{} { return make([]byte, 16<<10) }},
}
)
func getDataBufferChunk(size int64) []byte {
i := 0
for ; i < len(dataChunkSizeClasses)-1; i++ {
if size <= int64(dataChunkSizeClasses[i]) {
break
}
}
return dataChunkPools[i].Get().([]byte)
}
func putDataBufferChunk(p []byte) {
for i, n := range dataChunkSizeClasses {
if len(p) == n {
dataChunkPools[i].Put(p)
return
}
}
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
chunks [][]byte
r int // next byte to read is chunks[0][r]
w int // next byte to write is chunks[len(chunks)-1][w]
size int // total buffered bytes
expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}
var errReadEmpty = errors.New("read from empty dataBuffer")
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *dataBuffer) Read(p []byte) (int, error) {
if b.size == 0 {
return 0, errReadEmpty
}
var ntotal int
for len(p) > 0 && b.size > 0 {
readFrom := b.bytesFromFirstChunk()
n := copy(p, readFrom)
p = p[n:]
ntotal += n
b.r += n
b.size -= n
// If the first chunk has been consumed, advance to the next chunk.
if b.r == len(b.chunks[0]) {
putDataBufferChunk(b.chunks[0])
end := len(b.chunks) - 1
copy(b.chunks[:end], b.chunks[1:])
b.chunks[end] = nil
b.chunks = b.chunks[:end]
b.r = 0
}
}
return ntotal, nil
}
func (b *dataBuffer) bytesFromFirstChunk() []byte {
if len(b.chunks) == 1 {
return b.chunks[0][b.r:b.w]
}
return b.chunks[0][b.r:]
}
// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
return b.size
}
// Write appends p to the buffer.
func (b *dataBuffer) Write(p []byte) (int, error) {
ntotal := len(p)
for len(p) > 0 {
// If the last chunk is empty, allocate a new chunk. Try to allocate
// enough to fully copy p plus any additional bytes we expect to
// receive. However, this may allocate less than len(p).
want := int64(len(p))
if b.expected > want {
want = b.expected
}
chunk := b.lastChunkOrAlloc(want)
n := copy(chunk[b.w:], p)
p = p[n:]
b.w += n
b.size += n
b.expected -= int64(n)
}
return ntotal, nil
}
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
if len(b.chunks) != 0 {
last := b.chunks[len(b.chunks)-1]
if b.w < len(last) {
return last
}
}
chunk := getDataBufferChunk(want)
b.chunks = append(b.chunks, chunk)
b.w = 0
return chunk
}
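
A hedged illustration of how the buffer above behaves (exampleDataBuffer is a hypothetical name, not part of the vendored file): Write grows the buffer chunk by pooled chunk, sized from the expected hint, and Read frees chunks back to the pool once they are drained.

// Hypothetical example, assuming it compiles inside this package.
func exampleDataBuffer() {
	var b dataBuffer
	b.expected = 32 << 10           // hint: roughly 32KB of writes still expected
	b.Write([]byte("hello, world")) // allocates one pooled chunk, sized from the hint
	p := make([]byte, 5)
	n, _ := b.Read(p) // n == 5, p now holds "hello"
	_ = n
	_ = b.Len() // 7 bytes remain buffered; the chunk is freed once fully read
}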

View File

@ -87,13 +87,16 @@ type goAwayFlowError struct{}
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }

-// connErrorReason wraps a ConnectionError with an informative error about why it occurs.
+// connError represents an HTTP/2 ConnectionError error code, along
+// with a string (for debugging) explaining why.
+//
// Errors of this type are only returned by the frame parser functions
-// and converted into ConnectionError(ErrCodeProtocol).
+// and converted into ConnectionError(Code), after stashing away
+// the Reason into the Framer's errDetail field, accessible via
+// the (*Framer).ErrorDetail method.
type connError struct {
-Code ErrCode
-Reason string
+Code ErrCode // the ConnectionError error code
+Reason string // additional reason
}

func (e connError) Error() string {

View File

@ -1,60 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
)
// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
// It never allocates, but moves old data as new data is written.
type fixedBuffer struct {
buf []byte
r, w int
}
var (
errReadEmpty = errors.New("read from empty fixedBuffer")
errWriteFull = errors.New("write on full fixedBuffer")
)
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *fixedBuffer) Read(p []byte) (n int, err error) {
if b.r == b.w {
return 0, errReadEmpty
}
n = copy(p, b.buf[b.r:b.w])
b.r += n
if b.r == b.w {
b.r = 0
b.w = 0
}
return n, nil
}
// Len returns the number of bytes of the unread portion of the buffer.
func (b *fixedBuffer) Len() int {
return b.w - b.r
}
// Write copies bytes from p into the buffer.
// It is an error to write more data than the buffer can hold.
func (b *fixedBuffer) Write(p []byte) (n int, err error) {
// Slide existing data to beginning.
if b.r > 0 && len(p) > len(b.buf)-b.w {
copy(b.buf, b.buf[b.r:b.w])
b.w -= b.r
b.r = 0
}
// Write new data.
n = copy(b.buf[b.w:], p)
b.w += n
if n < len(p) {
err = errWriteFull
}
return n, err
}

View File

@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{
// a frameParser parses a frame given its FrameHeader and payload
// bytes. The length of payload will always equal fh.Length (which
// might be 0).
-type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
+type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)

var frameParsers = map[FrameType]frameParser{
FrameData: parseDataFrame,
@ -312,7 +312,7 @@ type Framer struct {
MaxHeaderListSize uint32

// TODO: track which type of frame & with which flags was sent
// last. Then return an error (unless AllowIllegalWrites) if
// we're in the middle of a header block and a
// non-Continuation or Continuation on a different stream is
// attempted to be written.
@ -323,6 +323,8 @@ type Framer struct {
debugFramerBuf *bytes.Buffer
debugReadLoggerf func(string, ...interface{})
debugWriteLoggerf func(string, ...interface{})

+frameCache *frameCache // nil if frames aren't reused (default)
}

func (fr *Framer) maxHeaderListSize() uint32 {
@ -398,6 +400,27 @@ const (
maxFrameSize = 1<<24 - 1
)

+// SetReuseFrames allows the Framer to reuse Frames.
+// If called on a Framer, Frames returned by calls to ReadFrame are only
+// valid until the next call to ReadFrame.
+func (fr *Framer) SetReuseFrames() {
+if fr.frameCache != nil {
+return
+}
+fr.frameCache = &frameCache{}
+}
+
+type frameCache struct {
+dataFrame DataFrame
+}
+
+func (fc *frameCache) getDataFrame() *DataFrame {
+if fc == nil {
+return &DataFrame{}
+}
+return &fc.dataFrame
+}
+
// NewFramer returns a Framer that writes frames to w and reads them from r.
func NewFramer(w io.Writer, r io.Reader) *Framer {
fr := &Framer{
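
A hedged caller-side sketch of the new knob, using the exported golang.org/x/net/http2 API (conn is an assumed io.ReadWriter such as a net.Conn): once SetReuseFrames is enabled, each Frame is only valid until the next ReadFrame, so anything retained must be copied first.

fr := http2.NewFramer(conn, conn)
fr.SetReuseFrames() // opt in: frames may now share backing storage
for {
	f, err := fr.ReadFrame()
	if err != nil {
		break
	}
	if df, ok := f.(*http2.DataFrame); ok {
		saved := append([]byte(nil), df.Data()...) // copy before the next ReadFrame reuses the frame
		_ = saved
	}
}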
@ -477,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
if _, err := io.ReadFull(fr.r, payload); err != nil {
return nil, err
}
-f, err := typeFrameParser(fh.Type)(fh, payload)
+f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
if err != nil {
if ce, ok := err.(connError); ok {
return nil, fr.connError(ce.Code, ce.Reason)
@ -565,7 +588,7 @@ func (f *DataFrame) Data() []byte {
return f.data
}

-func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
+func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if fh.StreamID == 0 {
// DATA frames MUST be associated with a stream. If a
// DATA frame is received whose stream identifier
@ -574,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
// PROTOCOL_ERROR.
return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
}
-f := &DataFrame{
-FrameHeader: fh,
-}
+f := fc.getDataFrame()
+f.FrameHeader = fh

var padSize byte
if fh.Flags.Has(FlagDataPadded) {
var err error
@ -600,6 +623,7 @@ var (
errStreamID = errors.New("invalid stream ID")
errDepStreamID = errors.New("invalid dependent stream ID")
errPadLength = errors.New("pad length too large")
+errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
)

func validStreamIDOrZero(streamID uint32) bool {
@ -623,6 +647,7 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
//
// If pad is nil, the padding bit is not sent.
// The length of pad must not exceed 255 bytes.
+// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
@ -631,8 +656,18 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by
if !validStreamID(streamID) && !f.AllowIllegalWrites {
return errStreamID
}
-if len(pad) > 255 {
-return errPadLength
+if len(pad) > 0 {
+if len(pad) > 255 {
+return errPadLength
+}
+if !f.AllowIllegalWrites {
+for _, b := range pad {
+if b != 0 {
+// "Padding octets MUST be set to zero when sending."
+return errPadBytes
+}
+}
+}
}
var flags Flags
if endStream {
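
A hedged sketch of the stricter padding rule through the exported API (w and r are assumed writer/reader values, error handling elided): a pad allocated with make is already zero-filled, which is exactly what the new check requires unless AllowIllegalWrites is set.

fr := http2.NewFramer(w, r)
pad := make([]byte, 8) // zero-filled, so it passes the new errPadBytes check
if err := fr.WriteDataPadded(1, false, []byte("body"), pad); err != nil {
	// handle the error; non-zero pad bytes would surface here as errPadBytes
}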
@ -660,10 +695,10 @@ type SettingsFrame struct {
p []byte
}

-func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
// When this (ACK 0x1) bit is set, the payload of the
// SETTINGS frame MUST be empty. Receipt of a
// SETTINGS frame with the ACK flag set and a length
// field value other than 0 MUST be treated as a
// connection error (Section 5.4.1) of type
@ -672,7 +707,7 @@ func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
}
if fh.StreamID != 0 {
// SETTINGS frames always apply to a connection,
// never a single stream. The stream identifier for a
// SETTINGS frame MUST be zero (0x0). If an endpoint
// receives a SETTINGS frame whose stream identifier
// field is anything other than 0x0, the endpoint MUST
@ -762,7 +797,7 @@ type PingFrame struct {
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }

-func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
+func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if len(payload) != 8 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@ -802,7 +837,7 @@ func (f *GoAwayFrame) DebugData() []byte {
return f.debugData
}

-func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.StreamID != 0 {
return nil, ConnectionError(ErrCodeProtocol)
}
@ -842,7 +877,7 @@ func (f *UnknownFrame) Payload() []byte {
return f.p
}

-func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
return &UnknownFrame{fh, p}, nil
}
@ -853,7 +888,7 @@ type WindowUpdateFrame struct {
Increment uint32 // never read with high bit set
}

-func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if len(p) != 4 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@ -918,12 +953,12 @@ func (f *HeadersFrame) HasPriority() bool {
return f.FrameHeader.Flags.Has(FlagHeadersPriority)
}

-func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
+func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
hf := &HeadersFrame{
FrameHeader: fh,
}
if fh.StreamID == 0 {
// HEADERS frames MUST be associated with a stream. If a HEADERS frame
// is received whose stream identifier field is 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
@ -1045,7 +1080,7 @@ type PriorityParam struct {
Exclusive bool

// Weight is the stream's zero-indexed weight. It should be
// set together with StreamDep, or neither should be set. Per
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
@ -1055,7 +1090,7 @@ func (p PriorityParam) IsZero() bool {
return p == PriorityParam{}
}

-func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
+func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if fh.StreamID == 0 {
return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
}
@ -1102,7 +1137,7 @@ type RSTStreamFrame struct {
ErrCode ErrCode
}

-func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if len(p) != 4 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@ -1132,7 +1167,7 @@ type ContinuationFrame struct {
headerFragBuf []byte
}

-func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.StreamID == 0 {
return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
}
@ -1182,7 +1217,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool {
return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
}

-func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
+func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
pp := &PushPromiseFrame{
FrameHeader: fh,
}

View File

@ -7,7 +7,6 @@
package http2

import (
-"crypto/tls"
"net/http"
"time"
)
@ -15,29 +14,3 @@ import (
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
return t1.ExpectContinueTimeout
}
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
func isBadCipher(cipher uint16) bool {
switch cipher {
case tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
return true
default:
return false
}
}

View File

@ -12,7 +12,11 @@ import (
"net/http" "net/http"
) )
func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() } func cloneTLSConfig(c *tls.Config) *tls.Config {
c2 := c.Clone()
c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
return c2
}
var _ http.Pusher = (*responseWriter)(nil) var _ http.Pusher = (*responseWriter)(nil)
@ -48,3 +52,5 @@ func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
func reqBodyIsNoBody(body io.ReadCloser) bool { func reqBodyIsNoBody(body io.ReadCloser) bool {
return body == http.NoBody return body == http.NoBody
} }
func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only

16
vendor/golang.org/x/net/http2/go19.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package http2
import (
"net/http"
)
func configureServer19(s *http.Server, conf *Server) error {
s.RegisterOnShutdown(conf.state.startGracefulShutdown)
return nil
}
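
What the Go 1.9 hook above enables, as a hedged sketch with imports omitted (handler is assumed): http.Server.Shutdown runs every function registered via RegisterOnShutdown, so after ConfigureServer a plain Shutdown call also starts the HTTP/2 graceful GOAWAY sequence.

srv := &http.Server{Addr: ":8443", Handler: handler}
if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
	log.Fatal(err)
}
// ... serve ...
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
srv.Shutdown(ctx) // on Go 1.9+ this also drains HTTP/2 connections gracefully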

View File

@ -39,13 +39,14 @@ func NewEncoder(w io.Writer) *Encoder {
tableSizeUpdate: false,
w: w,
}
+e.dynTab.table.init()
e.dynTab.setMaxSize(initialHeaderTableSize)
return e
}

// WriteField encodes f into a single Write to e's underlying Writer.
// This function may also produce bytes for "Header Table Size Update"
// if necessary. If produced, it is done before encoding f.
func (e *Encoder) WriteField(f HeaderField) error {
e.buf = e.buf[:0]
@ -88,29 +89,17 @@ func (e *Encoder) WriteField(f HeaderField) error {
// only name matches, i points to that index and nameValueMatch
// becomes false.
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
-for idx, hf := range staticTable {
-if !constantTimeStringCompare(hf.Name, f.Name) {
-continue
-}
-if i == 0 {
-i = uint64(idx + 1)
-}
-if f.Sensitive {
-continue
-}
-if !constantTimeStringCompare(hf.Value, f.Value) {
-continue
-}
-i = uint64(idx + 1)
-nameValueMatch = true
-return
+i, nameValueMatch = staticTable.search(f)
+if nameValueMatch {
+return i, true
}

-j, nameValueMatch := e.dynTab.search(f)
+j, nameValueMatch := e.dynTab.table.search(f)
if nameValueMatch || (i == 0 && j != 0) {
-i = j + uint64(len(staticTable))
+return j + uint64(staticTable.len()), nameValueMatch
}
-return
+return i, false
}

// SetMaxDynamicTableSize changes the dynamic header table size to v.
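
For orientation, a hedged sketch of the exported encoder API whose lookup path changed above (imports of bytes and golang.org/x/net/http2/hpack assumed): the caller-visible behaviour is unchanged, only the table search behind WriteField is different.

var buf bytes.Buffer
enc := hpack.NewEncoder(&buf)
enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})     // static table hit, encoded as a single indexed byte
enc.WriteField(hpack.HeaderField{Name: "x-request-id", Value: "42"}) // literal, then remembered in the dynamic table
_ = buf.Len() // size of the encoded header block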

View File

@ -61,7 +61,7 @@ func (hf HeaderField) String() string {
func (hf HeaderField) Size() uint32 {
// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
// "The size of the dynamic table is the sum of the size of
// its entries. The size of an entry is the sum of its name's
// length in octets (as defined in Section 5.2), its value's
// length in octets (see Section 5.2), plus 32. The size of
// an entry is calculated using the length of the name and
@ -102,6 +102,7 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod
emit: emitFunc,
emitEnabled: true,
}
+d.dynTab.table.init()
d.dynTab.allowedMaxSize = maxDynamicTableSize
d.dynTab.setMaxSize(maxDynamicTableSize)
return d
@ -154,12 +155,9 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
}

type dynamicTable struct {
-// ents is the FIFO described at
// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
-// The newest (low index) is append at the end, and items are
-// evicted from the front.
-ents []HeaderField
-size uint32
+table headerFieldTable
+size uint32 // in bytes
maxSize uint32 // current maxSize
allowedMaxSize uint32 // maxSize may go up to this, inclusive
}
@ -169,95 +167,45 @@ func (dt *dynamicTable) setMaxSize(v uint32) {
dt.evict() dt.evict()
} }
// TODO: change dynamicTable to be a struct with a slice and a size int field,
// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
//
//
// Then make add increment the size. maybe the max size should move from Decoder to
// dynamicTable and add should return an ok bool if there was enough space.
//
// Later we'll need a remove operation on dynamicTable.
func (dt *dynamicTable) add(f HeaderField) { func (dt *dynamicTable) add(f HeaderField) {
dt.ents = append(dt.ents, f) dt.table.addEntry(f)
dt.size += f.Size() dt.size += f.Size()
dt.evict() dt.evict()
} }
// If we're too big, evict old stuff (front of the slice) // If we're too big, evict old stuff.
func (dt *dynamicTable) evict() { func (dt *dynamicTable) evict() {
base := dt.ents // keep base pointer of slice var n int
for dt.size > dt.maxSize { for dt.size > dt.maxSize && n < dt.table.len() {
dt.size -= dt.ents[0].Size() dt.size -= dt.table.ents[n].Size()
dt.ents = dt.ents[1:] n++
} }
dt.table.evictOldest(n)
// Shift slice contents down if we evicted things.
if len(dt.ents) != len(base) {
copy(base, dt.ents)
dt.ents = base[:len(dt.ents)]
}
}
// constantTimeStringCompare compares string a and b in a constant
// time manner.
func constantTimeStringCompare(a, b string) bool {
if len(a) != len(b) {
return false
}
c := byte(0)
for i := 0; i < len(a); i++ {
c |= a[i] ^ b[i]
}
return c == 0
}
// Search searches f in the table. The return value i is 0 if there is
// no name match. If there is name match or name/value match, i is the
// index of that entry (1-based). If both name and value match,
// nameValueMatch becomes true.
func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
l := len(dt.ents)
for j := l - 1; j >= 0; j-- {
ent := dt.ents[j]
if !constantTimeStringCompare(ent.Name, f.Name) {
continue
}
if i == 0 {
i = uint64(l - j)
}
if f.Sensitive {
continue
}
if !constantTimeStringCompare(ent.Value, f.Value) {
continue
}
i = uint64(l - j)
nameValueMatch = true
return
}
return
} }
func (d *Decoder) maxTableIndex() int { func (d *Decoder) maxTableIndex() int {
return len(d.dynTab.ents) + len(staticTable) // This should never overflow. RFC 7540 Section 6.5.2 limits the size of
// the dynamic table to 2^32 bytes, where each entry will occupy more than
// one byte. Further, the staticTable has a fixed, small length.
return d.dynTab.table.len() + staticTable.len()
} }
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
if i < 1 { // See Section 2.3.3.
if i == 0 {
return return
} }
if i <= uint64(staticTable.len()) {
return staticTable.ents[i-1], true
}
if i > uint64(d.maxTableIndex()) { if i > uint64(d.maxTableIndex()) {
return return
} }
if i <= uint64(len(staticTable)) { // In the dynamic table, newer entries have lower indices.
return staticTable[i-1], true // However, dt.ents[0] is the oldest entry. Hence, dt.ents is
} // the reversed dynamic table.
dents := d.dynTab.ents dt := d.dynTab.table
return dents[len(dents)-(int(i)-len(staticTable))], true return dt.ents[dt.len()-(int(i)-staticTable.len())], true
} }
// Decode decodes an entire block. // Decode decodes an entire block.
@ -307,7 +255,7 @@ func (d *Decoder) Write(p []byte) (n int, err error) {
err = d.parseHeaderFieldRepr()
if err == errNeedMore {
// Extra paranoia, making sure saveBuf won't
// get too large. All the varint and string
// reading code earlier should already catch
// overlong things and return ErrStringLength,
// but keep this as a last resort.
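
The decoding counterpart, again as a hedged sketch with the same assumed import (block is an assumed []byte containing an encoded header block):

dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
	fmt.Printf("%s: %s\n", f.Name, f.Value)
})
if _, err := dec.Write(block); err != nil {
	// malformed header block
}
_ = dec.Close() // reports an error if a truncated header block remains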

View File

@ -4,73 +4,200 @@
package hpack package hpack
func pair(name, value string) HeaderField { import (
return HeaderField{Name: name, Value: value} "fmt"
)
// headerFieldTable implements a list of HeaderFields.
// This is used to implement the static and dynamic tables.
type headerFieldTable struct {
// For static tables, entries are never evicted.
//
// For dynamic tables, entries are evicted from ents[0] and added to the end.
// Each entry has a unique id that starts at one and increments for each
// entry that is added. This unique id is stable across evictions, meaning
// it can be used as a pointer to a specific entry. As in hpack, unique ids
// are 1-based. The unique id for ents[k] is k + evictCount + 1.
//
// Zero is not a valid unique id.
//
// evictCount should not overflow in any remotely practical situation. In
// practice, we will have one dynamic table per HTTP/2 connection. If we
// assume a very powerful server that handles 1M QPS per connection and each
// request adds (then evicts) 100 entries from the table, it would still take
// 2M years for evictCount to overflow.
ents []HeaderField
evictCount uint64
// byName maps a HeaderField name to the unique id of the newest entry with
// the same name. See above for a definition of "unique id".
byName map[string]uint64
// byNameValue maps a HeaderField name/value pair to the unique id of the newest
// entry with the same name and value. See above for a definition of "unique id".
byNameValue map[pairNameValue]uint64
}
type pairNameValue struct {
name, value string
}
func (t *headerFieldTable) init() {
t.byName = make(map[string]uint64)
t.byNameValue = make(map[pairNameValue]uint64)
}
// len reports the number of entries in the table.
func (t *headerFieldTable) len() int {
return len(t.ents)
}
// addEntry adds a new entry.
func (t *headerFieldTable) addEntry(f HeaderField) {
id := uint64(t.len()) + t.evictCount + 1
t.byName[f.Name] = id
t.byNameValue[pairNameValue{f.Name, f.Value}] = id
t.ents = append(t.ents, f)
}
// evictOldest evicts the n oldest entries in the table.
func (t *headerFieldTable) evictOldest(n int) {
if n > t.len() {
panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
}
for k := 0; k < n; k++ {
f := t.ents[k]
id := t.evictCount + uint64(k) + 1
if t.byName[f.Name] == id {
delete(t.byName, f.Name)
}
if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
delete(t.byNameValue, p)
}
}
copy(t.ents, t.ents[n:])
for k := t.len() - n; k < t.len(); k++ {
t.ents[k] = HeaderField{} // so strings can be garbage collected
}
t.ents = t.ents[:t.len()-n]
if t.evictCount+uint64(n) < t.evictCount {
panic("evictCount overflow")
}
t.evictCount += uint64(n)
}
// search finds f in the table. If there is no match, i is 0.
// If both name and value match, i is the matched index and nameValueMatch
// becomes true. If only name matches, i points to that index and
// nameValueMatch becomes false.
//
// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
// table, the return value i actually refers to the entry t.ents[t.len()-i].
//
// All tables are assumed to be a dynamic tables except for the global
// staticTable pointer.
//
// See Section 2.3.3.
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
if !f.Sensitive {
if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
return t.idToIndex(id), true
}
}
if id := t.byName[f.Name]; id != 0 {
return t.idToIndex(id), false
}
return 0, false
}
// idToIndex converts a unique id to an HPACK index.
// See Section 2.3.3.
func (t *headerFieldTable) idToIndex(id uint64) uint64 {
if id <= t.evictCount {
panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
}
k := id - t.evictCount - 1 // convert id to an index t.ents[k]
if t != staticTable {
return uint64(t.len()) - k // dynamic table
}
return k + 1
} }
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B // http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
var staticTable = [...]HeaderField{ var staticTable = newStaticTable()
pair(":authority", ""), // index 1 (1-based) var staticTableEntries = [...]HeaderField{
pair(":method", "GET"), {Name: ":authority"},
pair(":method", "POST"), {Name: ":method", Value: "GET"},
pair(":path", "/"), {Name: ":method", Value: "POST"},
pair(":path", "/index.html"), {Name: ":path", Value: "/"},
pair(":scheme", "http"), {Name: ":path", Value: "/index.html"},
pair(":scheme", "https"), {Name: ":scheme", Value: "http"},
pair(":status", "200"), {Name: ":scheme", Value: "https"},
pair(":status", "204"), {Name: ":status", Value: "200"},
pair(":status", "206"), {Name: ":status", Value: "204"},
pair(":status", "304"), {Name: ":status", Value: "206"},
pair(":status", "400"), {Name: ":status", Value: "304"},
pair(":status", "404"), {Name: ":status", Value: "400"},
pair(":status", "500"), {Name: ":status", Value: "404"},
pair("accept-charset", ""), {Name: ":status", Value: "500"},
pair("accept-encoding", "gzip, deflate"), {Name: "accept-charset"},
pair("accept-language", ""), {Name: "accept-encoding", Value: "gzip, deflate"},
pair("accept-ranges", ""), {Name: "accept-language"},
pair("accept", ""), {Name: "accept-ranges"},
pair("access-control-allow-origin", ""), {Name: "accept"},
pair("age", ""), {Name: "access-control-allow-origin"},
pair("allow", ""), {Name: "age"},
pair("authorization", ""), {Name: "allow"},
pair("cache-control", ""), {Name: "authorization"},
pair("content-disposition", ""), {Name: "cache-control"},
pair("content-encoding", ""), {Name: "content-disposition"},
pair("content-language", ""), {Name: "content-encoding"},
pair("content-length", ""), {Name: "content-language"},
pair("content-location", ""), {Name: "content-length"},
pair("content-range", ""), {Name: "content-location"},
pair("content-type", ""), {Name: "content-range"},
pair("cookie", ""), {Name: "content-type"},
pair("date", ""), {Name: "cookie"},
pair("etag", ""), {Name: "date"},
pair("expect", ""), {Name: "etag"},
pair("expires", ""), {Name: "expect"},
pair("from", ""), {Name: "expires"},
pair("host", ""), {Name: "from"},
pair("if-match", ""), {Name: "host"},
pair("if-modified-since", ""), {Name: "if-match"},
pair("if-none-match", ""), {Name: "if-modified-since"},
pair("if-range", ""), {Name: "if-none-match"},
pair("if-unmodified-since", ""), {Name: "if-range"},
pair("last-modified", ""), {Name: "if-unmodified-since"},
pair("link", ""), {Name: "last-modified"},
pair("location", ""), {Name: "link"},
pair("max-forwards", ""), {Name: "location"},
pair("proxy-authenticate", ""), {Name: "max-forwards"},
pair("proxy-authorization", ""), {Name: "proxy-authenticate"},
pair("range", ""), {Name: "proxy-authorization"},
pair("referer", ""), {Name: "range"},
pair("refresh", ""), {Name: "referer"},
pair("retry-after", ""), {Name: "refresh"},
pair("server", ""), {Name: "retry-after"},
pair("set-cookie", ""), {Name: "server"},
pair("strict-transport-security", ""), {Name: "set-cookie"},
pair("transfer-encoding", ""), {Name: "strict-transport-security"},
pair("user-agent", ""), {Name: "transfer-encoding"},
pair("vary", ""), {Name: "user-agent"},
pair("via", ""), {Name: "vary"},
pair("www-authenticate", ""), {Name: "via"},
{Name: "www-authenticate"},
}
func newStaticTable() *headerFieldTable {
t := &headerFieldTable{}
t.init()
for _, e := range staticTableEntries[:] {
t.addEntry(e)
}
return t
} }
var huffmanCodes = [256]uint32{ var huffmanCodes = [256]uint32{

View File

@ -376,12 +376,16 @@ func (s *sorter) SortStrings(ss []string) {
// validPseudoPath reports whether v is a valid :path pseudo-header // validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either: // value. It must be either:
// //
// *) a non-empty string starting with '/', but not with with "//", // *) a non-empty string starting with '/'
// *) the string '*', for OPTIONS requests. // *) the string '*', for OPTIONS requests.
// //
// For now this is only used a quick check for deciding when to clean // For now this is only used a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport. // up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847 // See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool { func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*" return (len(v) > 0 && v[0] == '/') || v == "*"
} }
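A quick illustration of the relaxed rule (a standalone sketch; the function body mirrors the new validPseudoPath above, and the sample inputs are invented):

package main

import "fmt"

// Mirrors the new validPseudoPath body above: any non-empty path that starts
// with '/' is accepted (including "//..."), as is "*" for OPTIONS requests.
func validPseudoPath(v string) bool {
	return (len(v) > 0 && v[0] == '/') || v == "*"
}

func main() {
	for _, v := range []string{"/index.html", "//double-slash", "*", "", "no-slash"} {
		fmt.Printf("%q -> %v\n", v, validPseudoPath(v))
	}
	// "/index.html", "//double-slash" and "*" report true; "" and "no-slash" report false.
}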


@ -7,7 +7,6 @@
package http2 package http2
import ( import (
"crypto/tls"
"net/http" "net/http"
"time" "time"
) )
@ -20,27 +19,3 @@ func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
return 0 return 0
} }
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
func isBadCipher(cipher uint16) bool {
switch cipher {
case tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
return true
default:
return false
}
}


@ -25,3 +25,5 @@ func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
} }
func reqBodyIsNoBody(io.ReadCloser) bool { return false } func reqBodyIsNoBody(io.ReadCloser) bool { return false }
func go18httpNoBody() io.ReadCloser { return nil } // for tests only

16
vendor/golang.org/x/net/http2/not_go19.go generated vendored Normal file

@ -0,0 +1,16 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.9
package http2
import (
"net/http"
)
func configureServer19(s *http.Server, conf *Server) error {
// not supported prior to go1.9
return nil
}


@ -10,13 +10,13 @@ import (
"sync" "sync"
) )
// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like // pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
// io.Pipe except there are no PipeReader/PipeWriter halves, and the // io.Pipe except there are no PipeReader/PipeWriter halves, and the
// underlying buffer is an interface. (io.Pipe is always unbuffered) // underlying buffer is an interface. (io.Pipe is always unbuffered)
type pipe struct { type pipe struct {
mu sync.Mutex mu sync.Mutex
c sync.Cond // c.L lazily initialized to &p.mu c sync.Cond // c.L lazily initialized to &p.mu
b pipeBuffer b pipeBuffer // nil when done reading
err error // read error once empty. non-nil means closed. err error // read error once empty. non-nil means closed.
breakErr error // immediate read error (caller doesn't see rest of b) breakErr error // immediate read error (caller doesn't see rest of b)
donec chan struct{} // closed on error donec chan struct{} // closed on error
@ -32,6 +32,9 @@ type pipeBuffer interface {
func (p *pipe) Len() int { func (p *pipe) Len() int {
p.mu.Lock() p.mu.Lock()
defer p.mu.Unlock() defer p.mu.Unlock()
if p.b == nil {
return 0
}
return p.b.Len() return p.b.Len()
} }
@ -47,7 +50,7 @@ func (p *pipe) Read(d []byte) (n int, err error) {
if p.breakErr != nil { if p.breakErr != nil {
return 0, p.breakErr return 0, p.breakErr
} }
if p.b.Len() > 0 { if p.b != nil && p.b.Len() > 0 {
return p.b.Read(d) return p.b.Read(d)
} }
if p.err != nil { if p.err != nil {
@ -55,6 +58,7 @@ func (p *pipe) Read(d []byte) (n int, err error) {
p.readFn() // e.g. copy trailers p.readFn() // e.g. copy trailers
p.readFn = nil // not sticky like p.err p.readFn = nil // not sticky like p.err
} }
p.b = nil
return 0, p.err return 0, p.err
} }
p.c.Wait() p.c.Wait()
@ -75,6 +79,9 @@ func (p *pipe) Write(d []byte) (n int, err error) {
if p.err != nil { if p.err != nil {
return 0, errClosedPipeWrite return 0, errClosedPipeWrite
} }
if p.breakErr != nil {
return len(d), nil // discard when there is no reader
}
return p.b.Write(d) return p.b.Write(d)
} }
@ -109,6 +116,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) {
return return
} }
p.readFn = fn p.readFn = fn
if dst == &p.breakErr {
p.b = nil
}
*dst = err *dst = err
p.closeDoneLocked() p.closeDoneLocked()
} }
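The nil-ing of p.b above is what lets a broken pipe drop its buffer and silently discard later writes. A minimal standalone sketch of the same mutex-plus-cond buffering pattern (the names, the bytes.Buffer, and the demo values are illustrative, not the vendored implementation):

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

// miniPipe is a toy goroutine-safe reader/writer pair in the spirit of the
// http2 pipe above: one lock, one condition variable, and a buffer that is
// dropped once the pipe is closed or broken.
type miniPipe struct {
	mu  sync.Mutex
	c   sync.Cond
	b   *bytes.Buffer // nil once done reading
	err error         // sticky error returned after the buffer drains
}

func newMiniPipe() *miniPipe {
	p := &miniPipe{b: new(bytes.Buffer)}
	p.c.L = &p.mu
	return p
}

func (p *miniPipe) Write(d []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	defer p.c.Signal()
	if p.b == nil {
		return len(d), nil // no reader left: discard, like the breakErr case above
	}
	return p.b.Write(d)
}

func (p *miniPipe) Read(d []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for {
		if p.b != nil && p.b.Len() > 0 {
			return p.b.Read(d)
		}
		if p.err != nil {
			p.b = nil // buffer will never be read again
			return 0, p.err
		}
		p.c.Wait()
	}
}

func (p *miniPipe) CloseWithError(err error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.err == nil {
		p.err = err
	}
	p.c.Signal()
}

func main() {
	p := newMiniPipe()
	go func() {
		p.Write([]byte("hello"))
		p.CloseWithError(io.EOF)
	}()
	buf := make([]byte, 16)
	for {
		n, err := p.Read(buf)
		fmt.Printf("read %q err=%v\n", buf[:n], err)
		if err != nil {
			return
		}
	}
}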


@ -110,9 +110,41 @@ type Server struct {
// activity for the purposes of IdleTimeout. // activity for the purposes of IdleTimeout.
IdleTimeout time.Duration IdleTimeout time.Duration
// MaxUploadBufferPerConnection is the size of the initial flow
// control window for each connection. The HTTP/2 spec does not
// allow this to be smaller than 65535 or larger than 2^32-1.
// If the value is outside this range, a default value will be
// used instead.
MaxUploadBufferPerConnection int32
// MaxUploadBufferPerStream is the size of the initial flow control
// window for each stream. The HTTP/2 spec does not allow this to
// be larger than 2^32-1. If the value is zero or larger than the
// maximum, a default value will be used instead.
MaxUploadBufferPerStream int32
// NewWriteScheduler constructs a write scheduler for a connection. // NewWriteScheduler constructs a write scheduler for a connection.
// If nil, a default scheduler is chosen. // If nil, a default scheduler is chosen.
NewWriteScheduler func() WriteScheduler NewWriteScheduler func() WriteScheduler
// Internal state. This is a pointer (rather than embedded directly)
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
}
func (s *Server) initialConnRecvWindowSize() int32 {
if s.MaxUploadBufferPerConnection > initialWindowSize {
return s.MaxUploadBufferPerConnection
}
return 1 << 20
}
func (s *Server) initialStreamRecvWindowSize() int32 {
if s.MaxUploadBufferPerStream > 0 {
return s.MaxUploadBufferPerStream
}
return 1 << 20
} }
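A hedged usage sketch for the two new knobs: MaxUploadBufferPerConnection, MaxUploadBufferPerStream, and ConfigureServer come from the code above; the address, handler, and certificate paths are placeholders.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}

	h2 := &http2.Server{
		// ~1 MiB connection-level receive window; out-of-range values
		// fall back to the defaults described in the comments above.
		MaxUploadBufferPerConnection: 1 << 20,
		// 256 KiB initial window per stream.
		MaxUploadBufferPerStream: 256 << 10,
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	// cert.pem / key.pem are placeholder paths.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}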
func (s *Server) maxReadFrameSize() uint32 { func (s *Server) maxReadFrameSize() uint32 {
@ -129,6 +161,40 @@ func (s *Server) maxConcurrentStreams() uint32 {
return defaultMaxStreams return defaultMaxStreams
} }
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
}
func (s *serverInternalState) registerConn(sc *serverConn) {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
s.activeConns[sc] = struct{}{}
s.mu.Unlock()
}
func (s *serverInternalState) unregisterConn(sc *serverConn) {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
delete(s.activeConns, sc)
s.mu.Unlock()
}
func (s *serverInternalState) startGracefulShutdown() {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
for sc := range s.activeConns {
sc.startGracefulShutdown()
}
s.mu.Unlock()
}
// ConfigureServer adds HTTP/2 support to a net/http Server. // ConfigureServer adds HTTP/2 support to a net/http Server.
// //
// The configuration conf may be nil. // The configuration conf may be nil.
@ -141,9 +207,13 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if conf == nil { if conf == nil {
conf = new(Server) conf = new(Server)
} }
conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
if err := configureServer18(s, conf); err != nil { if err := configureServer18(s, conf); err != nil {
return err return err
} }
if err := configureServer19(s, conf); err != nil {
return err
}
if s.TLSConfig == nil { if s.TLSConfig == nil {
s.TLSConfig = new(tls.Config) s.TLSConfig = new(tls.Config)
@ -255,35 +325,37 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
defer cancel() defer cancel()
sc := &serverConn{ sc := &serverConn{
srv: s, srv: s,
hs: opts.baseConfig(), hs: opts.baseConfig(),
conn: c, conn: c,
baseCtx: baseCtx, baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(), remoteAddrStr: c.RemoteAddr().String(),
bw: newBufferedWriter(c), bw: newBufferedWriter(c),
handler: opts.handler(), handler: opts.handler(),
streams: make(map[uint32]*stream), streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult), readFrameCh: make(chan readFrameResult),
wantWriteFrameCh: make(chan FrameWriteRequest, 8), wantWriteFrameCh: make(chan FrameWriteRequest, 8),
wantStartPushCh: make(chan startPushRequest, 8), serveMsgCh: make(chan interface{}, 8),
wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}), doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
advMaxStreams: s.maxConcurrentStreams(), advMaxStreams: s.maxConcurrentStreams(),
initialWindowSize: initialWindowSize, initialStreamSendWindowSize: initialWindowSize,
maxFrameSize: initialMaxFrameSize, maxFrameSize: initialMaxFrameSize,
headerTableSize: initialHeaderTableSize, headerTableSize: initialHeaderTableSize,
serveG: newGoroutineLock(), serveG: newGoroutineLock(),
pushEnabled: true, pushEnabled: true,
} }
s.state.registerConn(sc)
defer s.state.unregisterConn(sc)
// The net/http package sets the write deadline from the // The net/http package sets the write deadline from the
// http.Server.WriteTimeout during the TLS handshake, but then // http.Server.WriteTimeout during the TLS handshake, but then
// passes the connection off to us with the deadline already // passes the connection off to us with the deadline already set.
// set. Disarm it here so that it is not applied to additional // Write deadlines are set per stream in serverConn.newStream.
// streams opened on this connection. // Disarm the net.Conn write deadline here.
// TODO: implement WriteTimeout fully. See Issue 18437.
if sc.hs.WriteTimeout != 0 { if sc.hs.WriteTimeout != 0 {
sc.conn.SetWriteDeadline(time.Time{}) sc.conn.SetWriteDeadline(time.Time{})
} }
@ -294,6 +366,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
sc.writeSched = NewRandomWriteScheduler() sc.writeSched = NewRandomWriteScheduler()
} }
// These start at the RFC-specified defaults. If there is a higher
// configured value for inflow, that will be updated when we send a
// WINDOW_UPDATE shortly after sending SETTINGS.
sc.flow.add(initialWindowSize) sc.flow.add(initialWindowSize)
sc.inflow.add(initialWindowSize) sc.inflow.add(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
@ -376,10 +451,9 @@ type serverConn struct {
doneServing chan struct{} // closed when serverConn.serve ends doneServing chan struct{} // closed when serverConn.serve ends
readFrameCh chan readFrameResult // written by serverConn.readFrames readFrameCh chan readFrameResult // written by serverConn.readFrames
wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
wantStartPushCh chan startPushRequest // from handlers -> serve
wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
bodyReadCh chan bodyReadMsg // from handlers -> serve bodyReadCh chan bodyReadMsg // from handlers -> serve
testHookCh chan func(int) // code to run on the serve loop serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
flow flow // conn-wide (not stream-specific) outbound flow control flow flow // conn-wide (not stream-specific) outbound flow control
inflow flow // conn-wide inbound flow control inflow flow // conn-wide inbound flow control
tlsState *tls.ConnectionState // shared by all handlers, like net/http tlsState *tls.ConnectionState // shared by all handlers, like net/http
@ -387,38 +461,39 @@ type serverConn struct {
writeSched WriteScheduler writeSched WriteScheduler
// Everything following is owned by the serve loop; use serveG.check(): // Everything following is owned by the serve loop; use serveG.check():
serveG goroutineLock // used to verify funcs are on serve() serveG goroutineLock // used to verify funcs are on serve()
pushEnabled bool pushEnabled bool
sawFirstSettings bool // got the initial SETTINGS frame after the preface sawFirstSettings bool // got the initial SETTINGS frame after the preface
needToSendSettingsAck bool needToSendSettingsAck bool
unackedSettings int // how many SETTINGS have we sent without ACKs? unackedSettings int // how many SETTINGS have we sent without ACKs?
clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
curClientStreams uint32 // number of open streams initiated by the client curClientStreams uint32 // number of open streams initiated by the client
curPushedStreams uint32 // number of open streams initiated by server push curPushedStreams uint32 // number of open streams initiated by server push
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
streams map[uint32]*stream streams map[uint32]*stream
initialWindowSize int32 initialStreamSendWindowSize int32
maxFrameSize int32 maxFrameSize int32
headerTableSize uint32 headerTableSize uint32
peerMaxHeaderListSize uint32 // zero means unknown (default) peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
writingFrame bool // started writing a frame (on serve goroutine or separate) writingFrame bool // started writing a frame (on serve goroutine or separate)
writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
needsFrameFlush bool // last frame write wasn't a flush needsFrameFlush bool // last frame write wasn't a flush
inGoAway bool // we've started to or sent GOAWAY inGoAway bool // we've started to or sent GOAWAY
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write needToSendGoAway bool // we need to schedule a GOAWAY frame write
goAwayCode ErrCode goAwayCode ErrCode
shutdownTimerCh <-chan time.Time // nil until used shutdownTimer *time.Timer // nil until used
shutdownTimer *time.Timer // nil until used idleTimer *time.Timer // nil if unused
idleTimer *time.Timer // nil if unused
idleTimerCh <-chan time.Time // nil if unused
// Owned by the writeFrameAsync goroutine: // Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer headerWriteBuf bytes.Buffer
hpackEncoder *hpack.Encoder hpackEncoder *hpack.Encoder
// Used by startGracefulShutdown.
shutdownOnce sync.Once
} }
func (sc *serverConn) maxHeaderListSize() uint32 { func (sc *serverConn) maxHeaderListSize() uint32 {
@ -463,10 +538,10 @@ type stream struct {
numTrailerValues int64 numTrailerValues int64
weight uint8 weight uint8
state streamState state streamState
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
gotTrailerHeader bool // HEADER frame for trailers was seen gotTrailerHeader bool // HEADER frame for trailers was seen
wroteHeaders bool // whether we wrote headers (not status 100) wroteHeaders bool // whether we wrote headers (not status 100)
reqBuf []byte // if non-nil, body pipe buffer to return later at EOF writeDeadline *time.Timer // nil if unused
trailer http.Header // accumulated trailers trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer reqTrailer http.Header // handler's Request.Trailer
@ -696,48 +771,48 @@ func (sc *serverConn) serve() {
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
{SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxConcurrentStreams, sc.advMaxStreams},
{SettingMaxHeaderListSize, sc.maxHeaderListSize()}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
// TODO: more actual settings, notably
// SettingInitialWindowSize, but then we also
// want to bump up the conn window size the
// same amount here right after the settings
}, },
}) })
sc.unackedSettings++ sc.unackedSettings++
// Each connection starts with intialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff))
}
if err := sc.readPreface(); err != nil { if err := sc.readPreface(); err != nil {
sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
return return
} }
// Now that we've got the preface, get us out of the // Now that we've got the preface, get us out of the
// "StateNew" state. We can't go directly to idle, though. // "StateNew" state. We can't go directly to idle, though.
// Active means we read some data and anticipate a request. We'll // Active means we read some data and anticipate a request. We'll
// do another Active when we get a HEADERS frame. // do another Active when we get a HEADERS frame.
sc.setConnState(http.StateActive) sc.setConnState(http.StateActive)
sc.setConnState(http.StateIdle) sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout != 0 { if sc.srv.IdleTimeout != 0 {
sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout) sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop() defer sc.idleTimer.Stop()
sc.idleTimerCh = sc.idleTimer.C
}
var gracefulShutdownCh <-chan struct{}
if sc.hs != nil {
gracefulShutdownCh = h1ServerShutdownChan(sc.hs)
} }
go sc.readFrames() // closed by defer sc.conn.Close above go sc.readFrames() // closed by defer sc.conn.Close above
settingsTimer := time.NewTimer(firstSettingsTimeout) settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
loopNum := 0 loopNum := 0
for { for {
loopNum++ loopNum++
select { select {
case wr := <-sc.wantWriteFrameCh: case wr := <-sc.wantWriteFrameCh:
if se, ok := wr.write.(StreamError); ok {
sc.resetStream(se)
break
}
sc.writeFrame(wr) sc.writeFrame(wr)
case spr := <-sc.wantStartPushCh:
sc.startPush(spr)
case res := <-sc.wroteFrameCh: case res := <-sc.wroteFrameCh:
sc.wroteFrame(res) sc.wroteFrame(res)
case res := <-sc.readFrameCh: case res := <-sc.readFrameCh:
@ -745,26 +820,37 @@ func (sc *serverConn) serve() {
return return
} }
res.readMore() res.readMore()
if settingsTimer.C != nil { if settingsTimer != nil {
settingsTimer.Stop() settingsTimer.Stop()
settingsTimer.C = nil settingsTimer = nil
} }
case m := <-sc.bodyReadCh: case m := <-sc.bodyReadCh:
sc.noteBodyRead(m.st, m.n) sc.noteBodyRead(m.st, m.n)
case <-settingsTimer.C: case msg := <-sc.serveMsgCh:
sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) switch v := msg.(type) {
return case func(int):
case <-gracefulShutdownCh: v(loopNum) // for testing
gracefulShutdownCh = nil case *serverMessage:
sc.startGracefulShutdown() switch v {
case <-sc.shutdownTimerCh: case settingsTimerMsg:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
return return
case <-sc.idleTimerCh: case idleTimerMsg:
sc.vlogf("connection is idle") sc.vlogf("connection is idle")
sc.goAway(ErrCodeNo) sc.goAway(ErrCodeNo)
case fn := <-sc.testHookCh: case shutdownTimerMsg:
fn(loopNum) sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
case gracefulShutdownMsg:
sc.startGracefulShutdownInternal()
default:
panic("unknown timer")
}
case *startPushRequest:
sc.startPush(v)
default:
panic(fmt.Sprintf("unexpected type %T", v))
}
} }
if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame {
@ -773,6 +859,36 @@ func (sc *serverConn) serve() {
} }
} }
func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
select {
case <-sc.doneServing:
case <-sharedCh:
close(privateCh)
}
}
type serverMessage int
// Message values sent to serveMsgCh.
var (
settingsTimerMsg = new(serverMessage)
idleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
)
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
func (sc *serverConn) sendServeMsg(msg interface{}) {
sc.serveG.checkNotOn() // NOT
select {
case sc.serveMsgCh <- msg:
case <-sc.doneServing:
}
}
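All of the timers above funnel into one channel by sending distinct sentinel pointers, and the serve loop switches on pointer identity. A small standalone sketch of that pattern (the message names and callback are invented):

package main

import "fmt"

type loopMessage int

// Distinct pointers act as cheap, comparable sentinels, mirroring
// settingsTimerMsg / idleTimerMsg / shutdownTimerMsg above.
var (
	tickMsg     = new(loopMessage)
	shutdownMsg = new(loopMessage)
)

func main() {
	msgCh := make(chan interface{}, 8)
	msgCh <- tickMsg
	msgCh <- func() { fmt.Println("ad-hoc callback ran on the loop") }
	msgCh <- shutdownMsg

	for m := range msgCh {
		switch v := m.(type) {
		case *loopMessage:
			switch v {
			case tickMsg:
				fmt.Println("tick")
			case shutdownMsg:
				fmt.Println("shutting down")
				return
			}
		case func():
			v() // e.g. test hooks, like the func(int) case in the serve loop
		}
	}
}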
// readPreface reads the ClientPreface greeting from the peer // readPreface reads the ClientPreface greeting from the peer
// or returns an error on timeout or an invalid greeting. // or returns an error on timeout or an invalid greeting.
func (sc *serverConn) readPreface() error { func (sc *serverConn) readPreface() error {
@ -1014,7 +1130,11 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
// stateClosed after the RST_STREAM frame is // stateClosed after the RST_STREAM frame is
// written. // written.
st.state = stateHalfClosedLocal st.state = stateHalfClosedLocal
sc.resetStream(streamError(st.id, ErrCodeCancel)) // Section 8.1: a server MAY request that the client abort
// transmission of a request without error by sending a
// RST_STREAM with an error code of NO_ERROR after sending
// a complete response.
sc.resetStream(streamError(st.id, ErrCodeNo))
case stateHalfClosedRemote: case stateHalfClosedRemote:
sc.closeStream(st, errHandlerComplete) sc.closeStream(st, errHandlerComplete)
} }
@ -1086,10 +1206,19 @@ func (sc *serverConn) scheduleFrameWrite() {
sc.inFrameScheduleLoop = false sc.inFrameScheduleLoop = false
} }
// startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the // startGracefulShutdown gracefully shuts down a connection. This
// client we're gracefully shutting down. The connection isn't closed // sends GOAWAY with ErrCodeNo to tell the client we're gracefully
// until all current streams are done. // shutting down. The connection isn't closed until all current
// streams are done.
//
// startGracefulShutdown returns immediately; it does not wait until
// the connection has shut down.
func (sc *serverConn) startGracefulShutdown() { func (sc *serverConn) startGracefulShutdown() {
sc.serveG.checkNotOn() // NOT
sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
}
func (sc *serverConn) startGracefulShutdownInternal() {
sc.goAwayIn(ErrCodeNo, 0) sc.goAwayIn(ErrCodeNo, 0)
} }
@ -1121,8 +1250,7 @@ func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) {
func (sc *serverConn) shutDownIn(d time.Duration) { func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check() sc.serveG.check()
sc.shutdownTimer = time.NewTimer(d) sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
sc.shutdownTimerCh = sc.shutdownTimer.C
} }
func (sc *serverConn) resetStream(se StreamError) { func (sc *serverConn) resetStream(se StreamError) {
@ -1305,6 +1433,9 @@ func (sc *serverConn) closeStream(st *stream, err error) {
panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
} }
st.state = stateClosed st.state = stateClosed
if st.writeDeadline != nil {
st.writeDeadline.Stop()
}
if st.isPushed() { if st.isPushed() {
sc.curPushedStreams-- sc.curPushedStreams--
} else { } else {
@ -1317,7 +1448,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
sc.idleTimer.Reset(sc.srv.IdleTimeout) sc.idleTimer.Reset(sc.srv.IdleTimeout)
} }
if h1ServerKeepAlivesDisabled(sc.hs) { if h1ServerKeepAlivesDisabled(sc.hs) {
sc.startGracefulShutdown() sc.startGracefulShutdownInternal()
} }
} }
if p := st.body; p != nil { if p := st.body; p != nil {
@ -1395,9 +1526,9 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
// adjust the size of all stream flow control windows that it // adjust the size of all stream flow control windows that it
// maintains by the difference between the new value and the // maintains by the difference between the new value and the
// old value." // old value."
old := sc.initialWindowSize old := sc.initialStreamSendWindowSize
sc.initialWindowSize = int32(val) sc.initialStreamSendWindowSize = int32(val)
growth := sc.initialWindowSize - old // may be negative growth := int32(val) - old // may be negative
for _, st := range sc.streams { for _, st := range sc.streams {
if !st.flow.add(growth) { if !st.flow.add(growth) {
// 6.9.2 Initial Flow Control Window Size // 6.9.2 Initial Flow Control Window Size
@ -1504,7 +1635,7 @@ func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
} else { } else {
sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
} }
sc.startGracefulShutdown() sc.startGracefulShutdownInternal()
// http://tools.ietf.org/html/rfc7540#section-6.8 // http://tools.ietf.org/html/rfc7540#section-6.8
// We should not create any new streams, which means we should disable push. // We should not create any new streams, which means we should disable push.
sc.pushEnabled = false sc.pushEnabled = false
@ -1543,6 +1674,12 @@ func (st *stream) copyTrailersToHandlerRequest() {
} }
} }
// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
// when the stream's WriteTimeout has fired.
func (st *stream) onWriteTimeout() {
st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})
}
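Write deadlines are now armed per stream with time.AfterFunc, and the callback posts a reset from its own goroutine. A tiny standalone illustration of the AfterFunc-deadline pattern (the durations and the simulated work are invented):

package main

import (
	"fmt"
	"time"
)

func main() {
	timedOut := make(chan struct{})

	// Arm a "write deadline": if work is still running when it fires, the
	// callback signals a timeout from its own goroutine, much like
	// stream.onWriteTimeout queueing a RST_STREAM above.
	deadline := time.AfterFunc(50*time.Millisecond, func() { close(timedOut) })
	defer deadline.Stop() // mirrors closeStream stopping st.writeDeadline

	select {
	case <-time.After(200 * time.Millisecond): // simulated slow write
		fmt.Println("write finished")
	case <-timedOut:
		fmt.Println("write deadline fired; would reset the stream")
	}
}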
func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
sc.serveG.check() sc.serveG.check()
id := f.StreamID id := f.StreamID
@ -1719,9 +1856,12 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
} }
st.cw.Init() st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialWindowSize) st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.conn = &sc.inflow // link to conn-level counter st.inflow.conn = &sc.inflow // link to conn-level counter
st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings st.inflow.add(sc.srv.initialStreamRecvWindowSize())
if sc.hs.WriteTimeout != 0 {
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st sc.streams[id] = st
sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
@ -1785,16 +1925,14 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return nil, nil, err return nil, nil, err
} }
if bodyOpen { if bodyOpen {
st.reqBuf = getRequestBodyBuf()
req.Body.(*requestBody).pipe = &pipe{
b: &fixedBuffer{buf: st.reqBuf},
}
if vv, ok := rp.header["Content-Length"]; ok { if vv, ok := rp.header["Content-Length"]; ok {
req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
} else { } else {
req.ContentLength = -1 req.ContentLength = -1
} }
req.Body.(*requestBody).pipe = &pipe{
b: &dataBuffer{expected: req.ContentLength},
}
} }
return rw, req, nil return rw, req, nil
} }
@ -1890,24 +2028,6 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
return rw, req, nil return rw, req, nil
} }
var reqBodyCache = make(chan []byte, 8)
func getRequestBodyBuf() []byte {
select {
case b := <-reqBodyCache:
return b
default:
return make([]byte, initialWindowSize)
}
}
func putRequestBodyBuf(b []byte) {
select {
case reqBodyCache <- b:
default:
}
}
// Run on its own goroutine. // Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
didPanic := true didPanic := true
@ -2003,12 +2123,6 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
case <-sc.doneServing: case <-sc.doneServing:
} }
} }
if err == io.EOF {
if buf := st.reqBuf; buf != nil {
st.reqBuf = nil // shouldn't matter; field unused by other
putRequestBodyBuf(buf)
}
}
} }
func (sc *serverConn) noteBodyRead(st *stream, n int) { func (sc *serverConn) noteBodyRead(st *stream, n int) {
@ -2103,8 +2217,8 @@ func (b *requestBody) Read(p []byte) (n int, err error) {
return return
} }
// responseWriter is the http.ResponseWriter implementation. It's // responseWriter is the http.ResponseWriter implementation. It's
// intentionally small (1 pointer wide) to minimize garbage. The // intentionally small (1 pointer wide) to minimize garbage. The
// responseWriterState pointer inside is zeroed at the end of a // responseWriterState pointer inside is zeroed at the end of a
// request (in handlerDone) and calls on the responseWriter thereafter // request (in handlerDone) and calls on the responseWriter thereafter
// simply crash (caller's mistake), but the much larger responseWriterState // simply crash (caller's mistake), but the much larger responseWriterState
@ -2138,6 +2252,7 @@ type responseWriterState struct {
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
sentHeader bool // have we sent the header frame? sentHeader bool // have we sent the header frame?
handlerDone bool // handler has finished handlerDone bool // handler has finished
dirty bool // a Write failed; don't reuse this responseWriterState
sentContentLen int64 // non-zero if handler set a Content-Length header sentContentLen int64 // non-zero if handler set a Content-Length header
wroteBytes int64 wroteBytes int64
@ -2219,6 +2334,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
date: date, date: date,
}) })
if err != nil { if err != nil {
rws.dirty = true
return 0, err return 0, err
} }
if endStream { if endStream {
@ -2240,6 +2356,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
if len(p) > 0 || endStream { if len(p) > 0 || endStream {
// only send a 0 byte DATA frame if we're ending the stream. // only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
rws.dirty = true
return 0, err return 0, err
} }
} }
@ -2251,6 +2368,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
trailers: rws.trailers, trailers: rws.trailers,
endStream: true, endStream: true,
}) })
if err != nil {
rws.dirty = true
}
return len(p), err return len(p), err
} }
return len(p), nil return len(p), nil
@ -2278,7 +2398,7 @@ const TrailerPrefix = "Trailer:"
// says you SHOULD (but not must) predeclare any trailers in the // says you SHOULD (but not must) predeclare any trailers in the
// header, the official ResponseWriter rules said trailers in Go must // header, the official ResponseWriter rules said trailers in Go must
// be predeclared, and then we reuse the same ResponseWriter.Header() // be predeclared, and then we reuse the same ResponseWriter.Header()
// map to mean both Headers and Trailers. When it's time to write the // map to mean both Headers and Trailers. When it's time to write the
// Trailers, we pick out the fields of Headers that were declared as // Trailers, we pick out the fields of Headers that were declared as
// trailers. That worked for a while, until we found the first major // trailers. That worked for a while, until we found the first major
// user of Trailers in the wild: gRPC (using them only over http2), // user of Trailers in the wild: gRPC (using them only over http2),
@ -2390,7 +2510,7 @@ func cloneHeader(h http.Header) http.Header {
// //
// * Handler calls w.Write or w.WriteString -> // * Handler calls w.Write or w.WriteString ->
// * -> rws.bw (*bufio.Writer) -> // * -> rws.bw (*bufio.Writer) ->
// * (Handler migth call Flush) // * (Handler might call Flush)
// * -> chunkWriter{rws} // * -> chunkWriter{rws}
// * -> responseWriterState.writeChunk(p []byte) // * -> responseWriterState.writeChunk(p []byte)
// * -> responseWriterState.writeChunk (most of the magic; see comment there) // * -> responseWriterState.writeChunk (most of the magic; see comment there)
@ -2429,10 +2549,19 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
func (w *responseWriter) handlerDone() { func (w *responseWriter) handlerDone() {
rws := w.rws rws := w.rws
dirty := rws.dirty
rws.handlerDone = true rws.handlerDone = true
w.Flush() w.Flush()
w.rws = nil w.rws = nil
responseWriterStatePool.Put(rws) if !dirty {
// Only recycle the pool if all prior Write calls to
// the serverConn goroutine completed successfully. If
// they returned earlier due to resets from the peer
// there might still be write goroutines outstanding
// from the serverConn referencing the rws memory. See
// issue 20704.
responseWriterStatePool.Put(rws)
}
} }
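The dirty flag keeps a responseWriterState out of the pool once a Write has failed, because an outstanding frame write may still reference it. A generic sketch of conditional sync.Pool recycling (the state type here is a stand-in, not the real responseWriterState):

package main

import (
	"bytes"
	"fmt"
	"sync"
)

type state struct {
	buf   bytes.Buffer
	dirty bool // a write failed; another goroutine may still reference us
}

var statePool = sync.Pool{New: func() interface{} { return new(state) }}

func handle(fail bool) {
	st := statePool.Get().(*state)
	st.buf.Reset()
	st.dirty = false

	st.buf.WriteString("response body")
	if fail {
		st.dirty = true // pretend the write to the peer failed
	}

	// Only recycle clean states, mirroring handlerDone above.
	if !st.dirty {
		statePool.Put(st)
	}
}

func main() {
	handle(false)
	handle(true)
	fmt.Println("done")
}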
// Push errors. // Push errors.
@ -2514,7 +2643,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error {
return fmt.Errorf("method %q must be GET or HEAD", opts.Method) return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
} }
msg := startPushRequest{ msg := &startPushRequest{
parent: st, parent: st,
method: opts.Method, method: opts.Method,
url: u, url: u,
@ -2527,7 +2656,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error {
return errClientDisconnected return errClientDisconnected
case <-st.cw: case <-st.cw:
return errStreamClosed return errStreamClosed
case sc.wantStartPushCh <- msg: case sc.serveMsgCh <- msg:
} }
select { select {
@ -2549,7 +2678,7 @@ type startPushRequest struct {
done chan error done chan error
} }
func (sc *serverConn) startPush(msg startPushRequest) { func (sc *serverConn) startPush(msg *startPushRequest) {
sc.serveG.check() sc.serveG.check()
// http://tools.ietf.org/html/rfc7540#section-6.6. // http://tools.ietf.org/html/rfc7540#section-6.6.
@ -2588,7 +2717,7 @@ func (sc *serverConn) startPush(msg startPushRequest) {
// A server that is unable to establish a new stream identifier can send a GOAWAY // A server that is unable to establish a new stream identifier can send a GOAWAY
// frame so that the client is forced to open a new connection for new streams. // frame so that the client is forced to open a new connection for new streams.
if sc.maxPushPromiseID+2 >= 1<<31 { if sc.maxPushPromiseID+2 >= 1<<31 {
sc.startGracefulShutdown() sc.startGracefulShutdownInternal()
return 0, ErrPushLimitReached return 0, ErrPushLimitReached
} }
sc.maxPushPromiseID += 2 sc.maxPushPromiseID += 2
@ -2713,31 +2842,6 @@ var badTrailer = map[string]bool{
"Www-Authenticate": true, "Www-Authenticate": true,
} }
// h1ServerShutdownChan returns a channel that will be closed when the
// provided *http.Server wants to shut down.
//
// This is a somewhat hacky way to get at http1 innards. It works
// when the http2 code is bundled into the net/http package in the
// standard library. The alternatives ended up making the cmd/go tool
// depend on http Servers. This is the lightest option for now.
// This is tested via the TestServeShutdown* tests in net/http.
func h1ServerShutdownChan(hs *http.Server) <-chan struct{} {
if fn := testh1ServerShutdownChan; fn != nil {
return fn(hs)
}
var x interface{} = hs
type I interface {
getDoneChan() <-chan struct{}
}
if hs, ok := x.(I); ok {
return hs.getDoneChan()
}
return nil
}
// optional test hook for h1ServerShutdownChan.
var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{}
// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
// disabled. See comments on h1ServerShutdownChan above for why // disabled. See comments on h1ServerShutdownChan above for why
// the code is written this way. // the code is written this way.


@ -18,6 +18,7 @@ import (
"io/ioutil" "io/ioutil"
"log" "log"
"math" "math"
mathrand "math/rand"
"net" "net"
"net/http" "net/http"
"sort" "sort"
@ -164,6 +165,7 @@ type ClientConn struct {
goAwayDebug string // goAway frame's debug data, retained as a string goAwayDebug string // goAway frame's debug data, retained as a string
streams map[uint32]*clientStream // client-initiated streams map[uint32]*clientStream // client-initiated
nextStreamID uint32 nextStreamID uint32
pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
pings map[[8]byte]chan struct{} // in flight ping data to notification channel pings map[[8]byte]chan struct{} // in flight ping data to notification channel
bw *bufio.Writer bw *bufio.Writer
br *bufio.Reader br *bufio.Reader
@ -216,35 +218,45 @@ type clientStream struct {
resTrailer *http.Header // client's Response.Trailer resTrailer *http.Header // client's Response.Trailer
} }
// awaitRequestCancel runs in its own goroutine and waits for the user // awaitRequestCancel waits for the user to cancel a request or for the done
// to cancel a RoundTrip request, its context to expire, or for the // channel to be signaled. A non-nil error is returned only if the request was
// request to be done (any way it might be removed from the cc.streams // canceled.
// map: peer reset, successful completion, TCP connection breakage, func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
// etc)
func (cs *clientStream) awaitRequestCancel(req *http.Request) {
ctx := reqContext(req) ctx := reqContext(req)
if req.Cancel == nil && ctx.Done() == nil { if req.Cancel == nil && ctx.Done() == nil {
return return nil
} }
select { select {
case <-req.Cancel: case <-req.Cancel:
cs.cancelStream() return errRequestCanceled
cs.bufPipe.CloseWithError(errRequestCanceled)
case <-ctx.Done(): case <-ctx.Done():
return ctx.Err()
case <-done:
return nil
}
}
// awaitRequestCancel waits for the user to cancel a request, its context to
// expire, or for the request to be done (any way it might be removed from the
// cc.streams map: peer reset, successful completion, TCP connection breakage,
// etc). If the request is canceled, then cs will be canceled and closed.
func (cs *clientStream) awaitRequestCancel(req *http.Request) {
if err := awaitRequestCancel(req, cs.done); err != nil {
cs.cancelStream() cs.cancelStream()
cs.bufPipe.CloseWithError(ctx.Err()) cs.bufPipe.CloseWithError(err)
case <-cs.done:
} }
} }
func (cs *clientStream) cancelStream() { func (cs *clientStream) cancelStream() {
cs.cc.mu.Lock() cc := cs.cc
cc.mu.Lock()
didReset := cs.didReset didReset := cs.didReset
cs.didReset = true cs.didReset = true
cs.cc.mu.Unlock() cc.mu.Unlock()
if !didReset { if !didReset {
cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
cc.forgetStreamID(cs.ID)
} }
} }
@ -329,7 +341,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
} }
addr := authorityAddr(req.URL.Scheme, req.URL.Host) addr := authorityAddr(req.URL.Scheme, req.URL.Host)
for { for retry := 0; ; retry++ {
cc, err := t.connPool().GetClientConn(req, addr) cc, err := t.connPool().GetClientConn(req, addr)
if err != nil { if err != nil {
t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
@ -337,9 +349,25 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
} }
traceGotConn(req, cc) traceGotConn(req, cc)
res, err := cc.RoundTrip(req) res, err := cc.RoundTrip(req)
if err != nil { if err != nil && retry <= 6 {
if req, err = shouldRetryRequest(req, err); err == nil { afterBodyWrite := false
continue if e, ok := err.(afterReqBodyWriteError); ok {
err = e
afterBodyWrite = true
}
if req, err = shouldRetryRequest(req, err, afterBodyWrite); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
continue
}
backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64())
select {
case <-time.After(time.Second * time.Duration(backoff)):
continue
case <-reqContext(req).Done():
return nil, reqContext(req).Err()
}
} }
} }
if err != nil { if err != nil {
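After the first retry the loop sleeps for 2^(retry-1) seconds plus up to 10% jitter, capped at 6 retries by the condition above. A standalone calculation of that schedule (print-only; no requests are made):

package main

import (
	"fmt"
	mathrand "math/rand"
	"time"
)

func main() {
	for retry := 1; retry <= 6; retry++ {
		// Same arithmetic as the diff: 2^(retry-1) seconds, plus up to
		// 10% random jitter so concurrent clients don't retry in lockstep.
		backoff := float64(uint(1) << (uint(retry) - 1))
		backoff += backoff * (0.1 * mathrand.Float64())
		fmt.Printf("retry %d: sleep ~%v\n", retry, time.Duration(backoff*float64(time.Second)))
	}
}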
@ -360,43 +388,60 @@ func (t *Transport) CloseIdleConnections() {
} }
var ( var (
errClientConnClosed = errors.New("http2: client conn is closed") errClientConnClosed = errors.New("http2: client conn is closed")
errClientConnUnusable = errors.New("http2: client conn not usable") errClientConnUnusable = errors.New("http2: client conn not usable")
errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written")
) )
// afterReqBodyWriteError is a wrapper around errors returned by ClientConn.RoundTrip.
// It is used to signal that err happened after part of Request.Body was sent to the server.
type afterReqBodyWriteError struct {
err error
}
func (e afterReqBodyWriteError) Error() string {
return e.err.Error() + "; some request body already written"
}
// shouldRetryRequest is called by RoundTrip when a request fails to get // shouldRetryRequest is called by RoundTrip when a request fails to get
// response headers. It is always called with a non-nil error. // response headers. It is always called with a non-nil error.
// It returns either a request to retry (either the same request, or a // It returns either a request to retry (either the same request, or a
// modified clone), or an error if the request can't be replayed. // modified clone), or an error if the request can't be replayed.
func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {
switch err { if !canRetryError(err) {
default:
return nil, err return nil, err
case errClientConnUnusable, errClientConnGotGoAway:
return req, nil
case errClientConnGotGoAwayAfterSomeReqBody:
// If the Body is nil (or http.NoBody), it's safe to reuse
// this request and its Body.
if req.Body == nil || reqBodyIsNoBody(req.Body) {
return req, nil
}
// Otherwise we depend on the Request having its GetBody
// func defined.
getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody
if getBody == nil {
return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error")
}
body, err := getBody()
if err != nil {
return nil, err
}
newReq := *req
newReq.Body = body
return &newReq, nil
} }
if !afterBodyWrite {
return req, nil
}
// If the Body is nil (or http.NoBody), it's safe to reuse
// this request and its Body.
if req.Body == nil || reqBodyIsNoBody(req.Body) {
return req, nil
}
// Otherwise we depend on the Request having its GetBody
// func defined.
getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody
if getBody == nil {
return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
}
body, err := getBody()
if err != nil {
return nil, err
}
newReq := *req
newReq.Body = body
return &newReq, nil
}
func canRetryError(err error) bool {
if err == errClientConnUnusable || err == errClientConnGotGoAway {
return true
}
if se, ok := err.(StreamError); ok {
return se.Code == ErrCodeRefusedStream
}
return false
} }
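A retry after part of the body was written only succeeds when the request can rebuild its body. A hedged client-side sketch: Request.GetBody is the standard net/http field (Go 1.8+) that shouldRetryRequest relies on; the URL and payload are placeholders.

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	payload := []byte(`{"hello":"world"}`)

	req, err := http.NewRequest("POST", "https://example.com/upload", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	// http.NewRequest already sets GetBody for *bytes.Reader, but making it
	// explicit shows what the retry path needs in order to replay the body.
	req.GetBody = func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewReader(payload)), nil
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}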
func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
@ -560,6 +605,8 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
} }
} }
// CanTakeNewRequest reports whether the connection can take a new request,
// meaning it has not been closed or received or sent a GOAWAY.
func (cc *ClientConn) CanTakeNewRequest() bool { func (cc *ClientConn) CanTakeNewRequest() bool {
cc.mu.Lock() cc.mu.Lock()
defer cc.mu.Unlock() defer cc.mu.Unlock()
@ -571,11 +618,10 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
return false return false
} }
return cc.goAway == nil && !cc.closed && return cc.goAway == nil && !cc.closed &&
int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
cc.nextStreamID < math.MaxInt32
} }
// onIdleTimeout is called from a time.AfterFunc goroutine. It will // onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new // only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time, // goroutine, there could be a new request coming in at the same time,
// so this simply calls the synchronized closeIfIdle to shut down this // so this simply calls the synchronized closeIfIdle to shut down this
@ -694,7 +740,7 @@ func checkConnHeaders(req *http.Request) error {
// req.ContentLength, where 0 actually means zero (not unknown) and -1 // req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown. // means unknown.
func actualContentLength(req *http.Request) int64 { func actualContentLength(req *http.Request) int64 {
if req.Body == nil { if req.Body == nil || reqBodyIsNoBody(req.Body) {
return 0 return 0
} }
if req.ContentLength != 0 { if req.ContentLength != 0 {
@ -718,15 +764,14 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
hasTrailers := trailers != "" hasTrailers := trailers != ""
cc.mu.Lock() cc.mu.Lock()
cc.lastActive = time.Now() if err := cc.awaitOpenSlotForRequest(req); err != nil {
if cc.closed || !cc.canTakeNewRequestLocked() {
cc.mu.Unlock() cc.mu.Unlock()
return nil, errClientConnUnusable return nil, err
} }
body := req.Body body := req.Body
hasBody := body != nil
contentLen := actualContentLength(req) contentLen := actualContentLength(req)
hasBody := contentLen != 0
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
var requestedGzip bool var requestedGzip bool
@ -809,21 +854,20 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
// 2xx, however, then assume the server DOES potentially // 2xx, however, then assume the server DOES potentially
// want our body (e.g. full-duplex streaming: // want our body (e.g. full-duplex streaming:
// golang.org/issue/13444). If it turns out the server // golang.org/issue/13444). If it turns out the server
// doesn't, they'll RST_STREAM us soon enough. This is a // doesn't, they'll RST_STREAM us soon enough. This is a
// heuristic to avoid adding knobs to Transport. Hopefully // heuristic to avoid adding knobs to Transport. Hopefully
// we can keep it. // we can keep it.
bodyWriter.cancel() bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWrite) cs.abortRequestBodyWrite(errStopReqBodyWrite)
} }
if re.err != nil { if re.err != nil {
if re.err == errClientConnGotGoAway { cc.mu.Lock()
cc.mu.Lock() afterBodyWrite := cs.startedWrite
if cs.startedWrite { cc.mu.Unlock()
re.err = errClientConnGotGoAwayAfterSomeReqBody
}
cc.mu.Unlock()
}
cc.forgetStreamID(cs.ID) cc.forgetStreamID(cs.ID)
if afterBodyWrite {
return nil, afterReqBodyWriteError{re.err}
}
return nil, re.err return nil, re.err
} }
res.Request = req res.Request = req
@ -836,31 +880,31 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
case re := <-readLoopResCh: case re := <-readLoopResCh:
return handleReadLoopResponse(re) return handleReadLoopResponse(re)
case <-respHeaderTimer: case <-respHeaderTimer:
cc.forgetStreamID(cs.ID)
if !hasBody || bodyWritten { if !hasBody || bodyWritten {
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
} else { } else {
bodyWriter.cancel() bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
} }
cc.forgetStreamID(cs.ID)
return nil, errTimeout return nil, errTimeout
case <-ctx.Done(): case <-ctx.Done():
cc.forgetStreamID(cs.ID)
if !hasBody || bodyWritten { if !hasBody || bodyWritten {
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
} else { } else {
bodyWriter.cancel() bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
} }
cc.forgetStreamID(cs.ID)
return nil, ctx.Err() return nil, ctx.Err()
case <-req.Cancel: case <-req.Cancel:
cc.forgetStreamID(cs.ID)
if !hasBody || bodyWritten { if !hasBody || bodyWritten {
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
} else { } else {
bodyWriter.cancel() bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
} }
cc.forgetStreamID(cs.ID)
return nil, errRequestCanceled return nil, errRequestCanceled
case <-cs.peerReset: case <-cs.peerReset:
// processResetStream already removed the // processResetStream already removed the
@ -887,6 +931,45 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
} }
} }
// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams.
// Must hold cc.mu.
func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
var waitingForConn chan struct{}
var waitingForConnErr error // guarded by cc.mu
for {
cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
return errClientConnUnusable
}
if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
if waitingForConn != nil {
close(waitingForConn)
}
return nil
}
// Unfortunately, we cannot wait on a condition variable and channel at
// the same time, so instead, we spin up a goroutine to check if the
// request is canceled while we wait for a slot to open in the connection.
if waitingForConn == nil {
waitingForConn = make(chan struct{})
go func() {
if err := awaitRequestCancel(req, waitingForConn); err != nil {
cc.mu.Lock()
waitingForConnErr = err
cc.cond.Broadcast()
cc.mu.Unlock()
}
}()
}
cc.pendingRequests++
cc.cond.Wait()
cc.pendingRequests--
if waitingForConnErr != nil {
return waitingForConnErr
}
}
}
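A sync.Cond cannot be selected on together with a channel, so the code bridges the two with a helper goroutine that turns cancellation into a Broadcast. A minimal standalone sketch of that bridge (the slot counter, timings, and names are invented):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// slotWaiter hands out a fixed number of slots, waking waiters either when a
// slot frees up or when their context is canceled, in the spirit of
// awaitOpenSlotForRequest above.
type slotWaiter struct {
	mu    sync.Mutex
	cond  *sync.Cond
	inUse int
	max   int
}

func newSlotWaiter(max int) *slotWaiter {
	w := &slotWaiter{max: max}
	w.cond = sync.NewCond(&w.mu)
	return w
}

func (w *slotWaiter) acquire(ctx context.Context) error {
	w.mu.Lock()
	defer w.mu.Unlock()

	var canceled error
	stop := make(chan struct{})
	// Bridge goroutine: turn ctx cancellation into a Broadcast so the
	// cond.Wait below can observe it.
	go func() {
		select {
		case <-ctx.Done():
			w.mu.Lock()
			canceled = ctx.Err()
			w.cond.Broadcast()
			w.mu.Unlock()
		case <-stop:
		}
	}()
	defer close(stop)

	for {
		if canceled != nil {
			return canceled
		}
		if w.inUse < w.max {
			w.inUse++
			return nil
		}
		w.cond.Wait()
	}
}

func (w *slotWaiter) release() {
	w.mu.Lock()
	w.inUse--
	w.cond.Broadcast()
	w.mu.Unlock()
}

func main() {
	w := newSlotWaiter(1)
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	if err := w.acquire(ctx); err != nil {
		fmt.Println("unexpected:", err)
		return
	}
	// The second acquire blocks until the context times out.
	err := w.acquire(ctx)
	fmt.Println("second acquire:", err) // context deadline exceeded
	w.release()
}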
// requires cc.wmu be held // requires cc.wmu be held
func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
first := true // first frame written (HEADERS is first, then CONTINUATION) first := true // first frame written (HEADERS is first, then CONTINUATION)
@ -1246,7 +1329,9 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
cc.idleTimer.Reset(cc.idleTimeout) cc.idleTimer.Reset(cc.idleTimeout)
} }
close(cs.done) close(cs.done)
cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl // Wake up checkResetOrDone via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
cc.cond.Broadcast()
} }
return cs return cs
} }
@ -1345,8 +1430,9 @@ func (rl *clientConnReadLoop) run() error {
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
} }
if se, ok := err.(StreamError); ok { if se, ok := err.(StreamError); ok {
if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { if cs := cc.streamByID(se.StreamID, false); cs != nil {
cs.cc.writeStreamReset(cs.ID, se.Code, err) cs.cc.writeStreamReset(cs.ID, se.Code, err)
cs.cc.forgetStreamID(cs.ID)
if se.Cause == nil { if se.Cause == nil {
se.Cause = cc.fr.errDetail se.Cause = cc.fr.errDetail
} }
@ -1528,8 +1614,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
return res, nil return res, nil
} }
buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
cs.bufPipe = pipe{b: buf}
cs.bytesRemain = res.ContentLength cs.bytesRemain = res.ContentLength
res.Body = transportResponseBody{cs} res.Body = transportResponseBody{cs}
go cs.awaitRequestCancel(cs.req) go cs.awaitRequestCancel(cs.req)
@ -1656,6 +1741,7 @@ func (b transportResponseBody) Close() error {
cc.wmu.Lock() cc.wmu.Lock()
if !serverSentStreamEnd { if !serverSentStreamEnd {
cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
cs.didReset = true
} }
// Return connection-level flow control. // Return connection-level flow control.
if unread > 0 { if unread > 0 {
@ -1668,6 +1754,7 @@ func (b transportResponseBody) Close() error {
} }
cs.bufPipe.BreakWithError(errClosedResponseBody) cs.bufPipe.BreakWithError(errClosedResponseBody)
cc.forgetStreamID(cs.ID)
return nil return nil
} }
@ -1703,12 +1790,6 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
return nil return nil
} }
if f.Length > 0 { if f.Length > 0 {
if len(data) > 0 && cs.bufPipe.b == nil {
// Data frame after it's already closed?
cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
return ConnectionError(ErrCodeProtocol)
}
// Check connection-level flow control. // Check connection-level flow control.
cc.mu.Lock() cc.mu.Lock()
if cs.inflow.available() >= int32(f.Length) { if cs.inflow.available() >= int32(f.Length) {
@ -1719,16 +1800,27 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
} }
// Return any padded flow control now, since we won't // Return any padded flow control now, since we won't
// refund it later on body reads. // refund it later on body reads.
if pad := int32(f.Length) - int32(len(data)); pad > 0 { var refund int
cs.inflow.add(pad) if pad := int(f.Length) - len(data); pad > 0 {
cc.inflow.add(pad) refund += pad
}
// Return len(data) now if the stream is already closed,
// since data will never be read.
didReset := cs.didReset
if didReset {
refund += len(data)
}
if refund > 0 {
cc.inflow.add(int32(refund))
cc.wmu.Lock() cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(pad)) cc.fr.WriteWindowUpdate(0, uint32(refund))
cc.fr.WriteWindowUpdate(cs.ID, uint32(pad)) if !didReset {
cs.inflow.add(int32(refund))
cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
}
cc.bw.Flush() cc.bw.Flush()
cc.wmu.Unlock() cc.wmu.Unlock()
} }
didReset := cs.didReset
cc.mu.Unlock() cc.mu.Unlock()
if len(data) > 0 && !didReset { if len(data) > 0 && !didReset {


@ -53,7 +53,7 @@ type PriorityWriteSchedulerConfig struct {
} }
// NewPriorityWriteScheduler constructs a WriteScheduler that schedules // NewPriorityWriteScheduler constructs a WriteScheduler that schedules
// frames by following HTTP/2 priorities as described in RFC 7340 Section 5.3. // frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
// If cfg is nil, default options are used. // If cfg is nil, default options are used.
func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
if cfg == nil { if cfg == nil {

7
vendor/golang.org/x/net/idna/BUILD generated vendored

@ -5,8 +5,15 @@ go_library(
srcs = [ srcs = [
"idna.go", "idna.go",
"punycode.go", "punycode.go",
"tables.go",
"trie.go",
"trieval.go",
], ],
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [
"//vendor/golang.org/x/text/secure/bidirule:go_default_library",
"//vendor/golang.org/x/text/unicode/norm:go_default_library",
],
) )
filegroup( filegroup(

686
vendor/golang.org/x/net/idna/idna.go generated vendored
View File

@ -1,61 +1,673 @@
// Copyright 2012 The Go Authors. All rights reserved. // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Package idna implements IDNA2008 (Internationalized Domain Names for // Package idna implements IDNA2008 using the compatibility processing
// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and // defined by UTS (Unicode Technical Standard) #46, which defines a standard to
// RFC 5894. // deal with the transition from IDNA2003.
//
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
// UTS #46 is defined in http://www.unicode.org/reports/tr46.
// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the
// differences between these two standards.
package idna package idna
import ( import (
"fmt"
"strings" "strings"
"unicode/utf8" "unicode/utf8"
"golang.org/x/text/secure/bidirule"
"golang.org/x/text/unicode/norm"
) )
// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or // NOTE: Unlike common practice in Go APIs, the functions will return a
// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11 // sanitized domain name in case of errors. Browsers sometimes use a partially
// evaluated string as lookup.
// TODO: the current error handling is, in my opinion, the least opinionated.
// Other strategies are also viable, though:
// Option 1) Return an empty string in case of error, but allow the user to
// specify explicitly which errors to ignore.
// Option 2) Return the partially evaluated string if it is itself a valid
// string, otherwise return the empty string in case of error.
// Option 3) Option 1 and 2.
// Option 4) Always return an empty string for now and implement Option 1 as
// needed, and document that the return string may not be empty in case of
// error in the future.
// I think Option 1 is best, but it is quite opinionated.
// acePrefix is the ASCII Compatible Encoding prefix. // ToASCII is a wrapper for Punycode.ToASCII.
const acePrefix = "xn--" func ToASCII(s string) (string, error) {
return Punycode.process(s, true)
}
// ToUnicode is a wrapper for Punycode.ToUnicode.
func ToUnicode(s string) (string, error) {
return Punycode.process(s, false)
}
// An Option configures a Profile at creation time.
type Option func(*options)
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
// compatibility. It is used by most browsers when resolving domain names. This
// option is only meaningful if combined with MapForLookup.
func Transitional(transitional bool) Option {
return func(o *options) { o.transitional = true }
}
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
// are longer than allowed by the RFC.
func VerifyDNSLength(verify bool) Option {
return func(o *options) { o.verifyDNSLength = verify }
}
// RemoveLeadingDots removes leading label separators. Leading runes that map to
// dots, such as U+3002, are removed as well.
//
// This is the behavior suggested by the UTS #46 and is adopted by some
// browsers.
func RemoveLeadingDots(remove bool) Option {
return func(o *options) { o.removeLeadingDots = remove }
}
// ValidateLabels sets whether to check the mandatory label validation criteria
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
// of hyphens ('-'), normalization, validity of runes, and the context rules.
func ValidateLabels(enable bool) Option {
return func(o *options) {
// Don't override existing mappings, but set one that at least checks
// normalization if it is not set.
if o.mapping == nil && enable {
o.mapping = normalize
}
o.trie = trie
o.validateLabels = enable
o.fromPuny = validateFromPunycode
}
}
// StrictDomainName limits the set of permissible ASCII characters to those
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
//
// This option is useful, for instance, for browsers that allow characters
// outside this range, for example a '_' (U+005F LOW LINE). See
// http://www.rfc-editor.org/std/std3.txt for more details. This option
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
func StrictDomainName(use bool) Option {
return func(o *options) {
o.trie = trie
o.useSTD3Rules = use
o.fromPuny = validateFromPunycode
}
}
// NOTE: the following options pull in tables. The tables should not be linked
// in as long as the options are not used.
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
// that relies on proper validation of labels should include this rule.
func BidiRule() Option {
return func(o *options) { o.bidirule = bidirule.ValidString }
}
// ValidateForRegistration sets validation options to verify that a given IDN is
// properly formatted for registration as defined by Section 4 of RFC 5891.
func ValidateForRegistration() Option {
return func(o *options) {
o.mapping = validateRegistration
StrictDomainName(true)(o)
ValidateLabels(true)(o)
VerifyDNSLength(true)(o)
BidiRule()(o)
}
}
// MapForLookup sets validation and mapping options such that a given IDN is
// transformed for domain name lookup according to the requirements set out in
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
// to add this check.
//
// The mappings include normalization and mapping case, width and other
// compatibility mappings.
func MapForLookup() Option {
return func(o *options) {
o.mapping = validateAndMap
StrictDomainName(true)(o)
ValidateLabels(true)(o)
RemoveLeadingDots(true)(o)
}
}
type options struct {
transitional bool
useSTD3Rules bool
validateLabels bool
verifyDNSLength bool
removeLeadingDots bool
trie *idnaTrie
// fromPuny calls validation rules when converting A-labels to U-labels.
fromPuny func(p *Profile, s string) error
// mapping implements a validation and mapping step as defined in RFC 5895
// or UTS 46, tailored to, for example, domain registration or lookup.
mapping func(p *Profile, s string) (string, error)
// bidirule, if specified, checks whether s conforms to the Bidi Rule
// defined in RFC 5893.
bidirule func(s string) bool
}
// A Profile defines the configuration of an IDNA mapper.
type Profile struct {
options
}
func apply(o *options, opts []Option) {
for _, f := range opts {
f(o)
}
}
// New creates a new Profile.
//
// With no options, the returned Profile is the most permissive and equals the
// Punycode Profile. Options can be passed to further restrict the Profile. The
// MapForLookup and ValidateForRegistration options set a collection of options,
// for lookup and registration purposes respectively, which can be tailored by
// adding more fine-grained options, where later options override earlier
// options.
func New(o ...Option) *Profile {
p := &Profile{}
apply(&p.options, o)
return p
}
// ToASCII converts a domain or domain label to its ASCII form. For example, // ToASCII converts a domain or domain label to its ASCII form. For example,
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and // ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
// ToASCII("golang") is "golang". // ToASCII("golang") is "golang". If an error is encountered it will return
func ToASCII(s string) (string, error) { // an error and a (partially) processed result.
if ascii(s) { func (p *Profile) ToASCII(s string) (string, error) {
return s, nil return p.process(s, true)
}
labels := strings.Split(s, ".")
for i, label := range labels {
if !ascii(label) {
a, err := encode(acePrefix, label)
if err != nil {
return "", err
}
labels[i] = a
}
}
return strings.Join(labels, "."), nil
} }
// ToUnicode converts a domain or domain label to its Unicode form. For example, // ToUnicode converts a domain or domain label to its Unicode form. For example,
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and // ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
// ToUnicode("golang") is "golang". // ToUnicode("golang") is "golang". If an error is encountered it will return
func ToUnicode(s string) (string, error) { // an error and a (partially) processed result.
if !strings.Contains(s, acePrefix) { func (p *Profile) ToUnicode(s string) (string, error) {
return s, nil pp := *p
pp.transitional = false
return pp.process(s, false)
}
// String reports a string with a description of the profile for debugging
// purposes. The string format may change with different versions.
func (p *Profile) String() string {
s := ""
if p.transitional {
s = "Transitional"
} else {
s = "NonTransitional"
} }
labels := strings.Split(s, ".") if p.useSTD3Rules {
for i, label := range labels { s += ":UseSTD3Rules"
if strings.HasPrefix(label, acePrefix) { }
u, err := decode(label[len(acePrefix):]) if p.validateLabels {
if err != nil { s += ":ValidateLabels"
return "", err }
} if p.verifyDNSLength {
labels[i] = u s += ":VerifyDNSLength"
}
return s
}
var (
// Punycode is a Profile that does raw punycode processing with a minimum
// of validation.
Punycode *Profile = punycode
// Lookup is the recommended profile for looking up domain names, according
// to Section 5 of RFC 5891. The exact configuration of this profile may
// change over time.
Lookup *Profile = lookup
// Display is the recommended profile for displaying domain names.
// The configuration of this profile may change over time.
Display *Profile = display
// Registration is the recommended profile for checking whether a given
// IDN is valid for registration, according to Section 4 of RFC 5891.
Registration *Profile = registration
punycode = &Profile{}
lookup = &Profile{options{
transitional: true,
useSTD3Rules: true,
validateLabels: true,
removeLeadingDots: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateAndMap,
bidirule: bidirule.ValidString,
}}
display = &Profile{options{
useSTD3Rules: true,
validateLabels: true,
removeLeadingDots: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateAndMap,
bidirule: bidirule.ValidString,
}}
registration = &Profile{options{
useSTD3Rules: true,
validateLabels: true,
verifyDNSLength: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateRegistration,
bidirule: bidirule.ValidString,
}}
// TODO: profiles
// Register: recommended for approving domain names: don't do any mappings
// but rather reject on invalid input. Bundle or block deviation characters.
)
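The options and predefined profiles above are the public surface of the rewritten idna package. A minimal usage sketch of that API as vendored here (the domain names are placeholders and the output comments are the expected results, not verified against this exact revision):

    package main

    import (
        "fmt"

        "golang.org/x/net/idna"
    )

    func main() {
        // Predefined profile, suitable for resolving names.
        a, err := idna.Lookup.ToASCII("bücher.example.com")
        fmt.Println(a, err) // expected: xn--bcher-kva.example.com <nil>

        // Custom profile: later options override those set by MapForLookup.
        p := idna.New(idna.MapForLookup(), idna.Transitional(true))
        u, err := p.ToUnicode("xn--bcher-kva.example.com")
        fmt.Println(u, err) // expected: bücher.example.com <nil>
    }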
type labelError struct{ label, code_ string }
func (e labelError) code() string { return e.code_ }
func (e labelError) Error() string {
return fmt.Sprintf("idna: invalid label %q", e.label)
}
type runeError rune
func (e runeError) code() string { return "P1" }
func (e runeError) Error() string {
return fmt.Sprintf("idna: disallowed rune %U", e)
}
// process implements the algorithm described in section 4 of UTS #46,
// see http://www.unicode.org/reports/tr46.
func (p *Profile) process(s string, toASCII bool) (string, error) {
var err error
if p.mapping != nil {
s, err = p.mapping(p, s)
}
// Remove leading empty labels.
if p.removeLeadingDots {
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
} }
} }
return strings.Join(labels, "."), nil // It seems like we should only create this error on ToASCII, but the
// UTS 46 conformance tests suggests we should always check this.
if err == nil && p.verifyDNSLength && s == "" {
err = &labelError{s, "A4"}
}
labels := labelIter{orig: s}
for ; !labels.done(); labels.next() {
label := labels.label()
if label == "" {
// Empty labels are not okay. The label iterator skips the last
// label if it is empty.
if err == nil && p.verifyDNSLength {
err = &labelError{s, "A4"}
}
continue
}
if strings.HasPrefix(label, acePrefix) {
u, err2 := decode(label[len(acePrefix):])
if err2 != nil {
if err == nil {
err = err2
}
// Spec says keep the old label.
continue
}
labels.set(u)
if err == nil && p.validateLabels {
err = p.fromPuny(p, u)
}
if err == nil {
// This should be called on NonTransitional, according to the
// spec, but that currently does not have any effect. Use the
// original profile to preserve options.
err = p.validateLabel(u)
}
} else if err == nil {
err = p.validateLabel(label)
}
}
if toASCII {
for labels.reset(); !labels.done(); labels.next() {
label := labels.label()
if !ascii(label) {
a, err2 := encode(acePrefix, label)
if err == nil {
err = err2
}
label = a
labels.set(a)
}
n := len(label)
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
err = &labelError{label, "A4"}
}
}
}
s = labels.result()
if toASCII && p.verifyDNSLength && err == nil {
// Compute the length of the domain name minus the root label and its dot.
n := len(s)
if n > 0 && s[n-1] == '.' {
n--
}
if len(s) < 1 || n > 253 {
err = &labelError{s, "A4"}
}
}
return s, err
}
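As the NOTE near the top of the file says, process returns a sanitized, possibly partially converted name together with the error rather than an empty string. A small sketch of that behavior, continuing the previous example and using the Lookup profile defined above (expected values, not asserted against this revision):

    // '_' violates the STD3 rules enabled for Lookup, but the name is still returned.
    a, err := idna.Lookup.ToASCII("_tcp.example.com")
    fmt.Println(a, err != nil) // expected: _tcp.example.com true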
func normalize(p *Profile, s string) (string, error) {
return norm.NFC.String(s), nil
}
func validateRegistration(p *Profile, s string) (string, error) {
if !norm.NFC.IsNormalString(s) {
return s, &labelError{s, "V1"}
}
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
// Copy bytes not copied so far.
switch p.simplify(info(v).category()) {
// TODO: handle the NV8 defined in the Unicode idna data set to allow
// for strict conformance to IDNA2008.
case valid, deviation:
case disallowed, mapped, unknown, ignored:
r, _ := utf8.DecodeRuneInString(s[i:])
return s, runeError(r)
}
i += sz
}
return s, nil
}
func validateAndMap(p *Profile, s string) (string, error) {
var (
err error
b []byte
k int
)
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
start := i
i += sz
// Copy bytes not copied so far.
switch p.simplify(info(v).category()) {
case valid:
continue
case disallowed:
if err == nil {
r, _ := utf8.DecodeRuneInString(s[start:])
err = runeError(r)
}
continue
case mapped, deviation:
b = append(b, s[k:start]...)
b = info(v).appendMapping(b, s[start:i])
case ignored:
b = append(b, s[k:start]...)
// drop the rune
case unknown:
b = append(b, s[k:start]...)
b = append(b, "\ufffd"...)
}
k = i
}
if k == 0 {
// No changes so far.
s = norm.NFC.String(s)
} else {
b = append(b, s[k:]...)
if norm.NFC.QuickSpan(b) != len(b) {
b = norm.NFC.Bytes(b)
}
// TODO: the punycode converters require strings as input.
s = string(b)
}
return s, err
}
// A labelIter allows iterating over domain name labels.
type labelIter struct {
orig string
slice []string
curStart int
curEnd int
i int
}
func (l *labelIter) reset() {
l.curStart = 0
l.curEnd = 0
l.i = 0
}
func (l *labelIter) done() bool {
return l.curStart >= len(l.orig)
}
func (l *labelIter) result() string {
if l.slice != nil {
return strings.Join(l.slice, ".")
}
return l.orig
}
func (l *labelIter) label() string {
if l.slice != nil {
return l.slice[l.i]
}
p := strings.IndexByte(l.orig[l.curStart:], '.')
l.curEnd = l.curStart + p
if p == -1 {
l.curEnd = len(l.orig)
}
return l.orig[l.curStart:l.curEnd]
}
// next sets the value to the next label. It skips the last label if it is empty.
func (l *labelIter) next() {
l.i++
if l.slice != nil {
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
l.curStart = len(l.orig)
}
} else {
l.curStart = l.curEnd + 1
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
l.curStart = len(l.orig)
}
}
}
func (l *labelIter) set(s string) {
if l.slice == nil {
l.slice = strings.Split(l.orig, ".")
}
l.slice[l.i] = s
}
// acePrefix is the ASCII Compatible Encoding prefix.
const acePrefix = "xn--"
func (p *Profile) simplify(cat category) category {
switch cat {
case disallowedSTD3Mapped:
if p.useSTD3Rules {
cat = disallowed
} else {
cat = mapped
}
case disallowedSTD3Valid:
if p.useSTD3Rules {
cat = disallowed
} else {
cat = valid
}
case deviation:
if !p.transitional {
cat = valid
}
case validNV8, validXV8:
// TODO: handle V2008
cat = valid
}
return cat
}
func validateFromPunycode(p *Profile, s string) error {
if !norm.NFC.IsNormalString(s) {
return &labelError{s, "V1"}
}
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
if c := p.simplify(info(v).category()); c != valid && c != deviation {
return &labelError{s, "V6"}
}
i += sz
}
return nil
}
const (
zwnj = "\u200c"
zwj = "\u200d"
)
type joinState int8
const (
stateStart joinState = iota
stateVirama
stateBefore
stateBeforeVirama
stateAfter
stateFAIL
)
var joinStates = [][numJoinTypes]joinState{
stateStart: {
joiningL: stateBefore,
joiningD: stateBefore,
joinZWNJ: stateFAIL,
joinZWJ: stateFAIL,
joinVirama: stateVirama,
},
stateVirama: {
joiningL: stateBefore,
joiningD: stateBefore,
},
stateBefore: {
joiningL: stateBefore,
joiningD: stateBefore,
joiningT: stateBefore,
joinZWNJ: stateAfter,
joinZWJ: stateFAIL,
joinVirama: stateBeforeVirama,
},
stateBeforeVirama: {
joiningL: stateBefore,
joiningD: stateBefore,
joiningT: stateBefore,
},
stateAfter: {
joiningL: stateFAIL,
joiningD: stateBefore,
joiningT: stateAfter,
joiningR: stateStart,
joinZWNJ: stateFAIL,
joinZWJ: stateFAIL,
joinVirama: stateAfter, // no-op as we can't accept joiners here
},
stateFAIL: {
0: stateFAIL,
joiningL: stateFAIL,
joiningD: stateFAIL,
joiningT: stateFAIL,
joiningR: stateFAIL,
joinZWNJ: stateFAIL,
joinZWJ: stateFAIL,
joinVirama: stateFAIL,
},
}
// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are
// already implicitly satisfied by the overall implementation.
func (p *Profile) validateLabel(s string) error {
if s == "" {
if p.verifyDNSLength {
return &labelError{s, "A4"}
}
return nil
}
if p.bidirule != nil && !p.bidirule(s) {
return &labelError{s, "B"}
}
if !p.validateLabels {
return nil
}
trie := p.trie // p.validateLabels is only set if trie is set.
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
return &labelError{s, "V2"}
}
if s[0] == '-' || s[len(s)-1] == '-' {
return &labelError{s, "V3"}
}
// TODO: merge the use of this in the trie.
v, sz := trie.lookupString(s)
x := info(v)
if x.isModifier() {
return &labelError{s, "V5"}
}
// Quickly return in the absence of zero-width (non) joiners.
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
return nil
}
st := stateStart
for i := 0; ; {
jt := x.joinType()
if s[i:i+sz] == zwj {
jt = joinZWJ
} else if s[i:i+sz] == zwnj {
jt = joinZWNJ
}
st = joinStates[st][jt]
if x.isViramaModifier() {
st = joinStates[st][joinVirama]
}
if i += sz; i == len(s) {
break
}
v, sz = trie.lookupString(s[i:])
x = info(v)
}
if st == stateFAIL || st == stateAfter {
return &labelError{s, "C"}
}
return nil
} }
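validateLabel enforces the RFC 5891 Section 4.1 criteria listed above (hyphen placement, the reserved '--' in positions three and four, modifier runes, and the ZWJ/ZWNJ context rules driven by joinStates). A quick illustration with the Lookup profile, which has label validation enabled; exact error strings may differ, so only the presence of an error is checked:

    _, err := idna.Lookup.ToASCII("-leading-hyphen.example.com")
    fmt.Println(err != nil) // expected: true (labels must not start or end with '-')

    _, err = idna.Lookup.ToASCII("ab--cd.example.com")
    fmt.Println(err != nil) // expected: true ('--' in the third and fourth position is reserved)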
func ascii(s string) bool { func ascii(s string) bool {

View File

@ -1,4 +1,6 @@
// Copyright 2012 The Go Authors. All rights reserved. // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
@ -7,7 +9,6 @@ package idna
// This file implements the Punycode algorithm from RFC 3492. // This file implements the Punycode algorithm from RFC 3492.
import ( import (
"fmt"
"math" "math"
"strings" "strings"
"unicode/utf8" "unicode/utf8"
@ -27,6 +28,8 @@ const (
tmin int32 = 1 tmin int32 = 1
) )
func punyError(s string) error { return &labelError{s, "A3"} }
// decode decodes a string as specified in section 6.2. // decode decodes a string as specified in section 6.2.
func decode(encoded string) (string, error) { func decode(encoded string) (string, error) {
if encoded == "" { if encoded == "" {
@ -34,7 +37,7 @@ func decode(encoded string) (string, error) {
} }
pos := 1 + strings.LastIndex(encoded, "-") pos := 1 + strings.LastIndex(encoded, "-")
if pos == 1 { if pos == 1 {
return "", fmt.Errorf("idna: invalid label %q", encoded) return "", punyError(encoded)
} }
if pos == len(encoded) { if pos == len(encoded) {
return encoded[:len(encoded)-1], nil return encoded[:len(encoded)-1], nil
@ -50,16 +53,16 @@ func decode(encoded string) (string, error) {
oldI, w := i, int32(1) oldI, w := i, int32(1)
for k := base; ; k += base { for k := base; ; k += base {
if pos == len(encoded) { if pos == len(encoded) {
return "", fmt.Errorf("idna: invalid label %q", encoded) return "", punyError(encoded)
} }
digit, ok := decodeDigit(encoded[pos]) digit, ok := decodeDigit(encoded[pos])
if !ok { if !ok {
return "", fmt.Errorf("idna: invalid label %q", encoded) return "", punyError(encoded)
} }
pos++ pos++
i += digit * w i += digit * w
if i < 0 { if i < 0 {
return "", fmt.Errorf("idna: invalid label %q", encoded) return "", punyError(encoded)
} }
t := k - bias t := k - bias
if t < tmin { if t < tmin {
@ -72,7 +75,7 @@ func decode(encoded string) (string, error) {
} }
w *= base - t w *= base - t
if w >= math.MaxInt32/base { if w >= math.MaxInt32/base {
return "", fmt.Errorf("idna: invalid label %q", encoded) return "", punyError(encoded)
} }
} }
x := int32(len(output) + 1) x := int32(len(output) + 1)
@ -80,7 +83,7 @@ func decode(encoded string) (string, error) {
n += i / x n += i / x
i %= x i %= x
if n > utf8.MaxRune || len(output) >= 1024 { if n > utf8.MaxRune || len(output) >= 1024 {
return "", fmt.Errorf("idna: invalid label %q", encoded) return "", punyError(encoded)
} }
output = append(output, 0) output = append(output, 0)
copy(output[i+1:], output[i:]) copy(output[i+1:], output[i:])
@ -121,14 +124,14 @@ func encode(prefix, s string) (string, error) {
} }
delta += (m - n) * (h + 1) delta += (m - n) * (h + 1)
if delta < 0 { if delta < 0 {
return "", fmt.Errorf("idna: invalid label %q", s) return "", punyError(s)
} }
n = m n = m
for _, r := range s { for _, r := range s {
if r < n { if r < n {
delta++ delta++
if delta < 0 { if delta < 0 {
return "", fmt.Errorf("idna: invalid label %q", s) return "", punyError(s)
} }
continue continue
} }

4477
vendor/golang.org/x/net/idna/tables.go generated vendored Normal file

File diff suppressed because it is too large

72
vendor/golang.org/x/net/idna/trie.go generated vendored Normal file
View File

@ -0,0 +1,72 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package idna
// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
index := int(c >> indexShift)
if c&xorBit == 0 {
s := mappings[index:]
return append(b, s[1:s[0]+1]...)
}
b = append(b, s...)
if c&inlineXOR == inlineXOR {
// TODO: support and handle two-byte inline masks
b[len(b)-1] ^= byte(index)
} else {
for p := len(b) - int(xorData[index]); p < len(b); p++ {
index++
b[p] ^= xorData[index]
}
}
return b
}
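appendMapping either copies a replacement string out of the mappings table or patches the UTF-8 bytes of the input rune with an XOR pattern, as described in the comment above; the XOR form is compact because many mappings differ from the input in only a few bits. A standalone illustration of the XOR idea (the 0x20 mask here is invented for the example, not read from the real tables):

    in := []byte("A") // 0x41
    out := append([]byte(nil), in...)
    out[len(out)-1] ^= 0x20 // hypothetical inline XOR mask
    fmt.Printf("%s\n", out) // "a"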
// Sparse block handling code.
type valueRange struct {
value uint16 // header: value:stride
lo, hi byte // header: lo:n
}
type sparseBlocks struct {
values []valueRange
offset []uint16
}
var idnaSparse = sparseBlocks{
values: idnaSparseValues[:],
offset: idnaSparseOffset[:],
}
// Don't use newIdnaTrie to avoid unconditional linking in of the table.
var trie = &idnaTrie{}
// lookup determines the type of block n and looks up the value for b.
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
// is a list of ranges with an accompanying value. Given a matching range r,
// the value for b is given by r.value + (b - r.lo) * stride.
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
offset := t.offset[n]
header := t.values[offset]
lo := offset + 1
hi := lo + uint16(header.lo)
for lo < hi {
m := lo + (hi-lo)/2
r := t.values[m]
if r.lo <= b && b <= r.hi {
return r.value + uint16(b-r.lo)*header.value
}
if b < r.lo {
hi = m
} else {
lo = m + 1
}
}
return 0
}
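lookup does a binary search over the ranges of a sparse block and then applies r.value + (b - r.lo) * stride, where the stride is stored in the header entry. A hand-built sketch of that arithmetic, placed in the same package since the types are unexported and with invented table data (fmt import assumed):

    func exampleSparseLookup() {
        sb := sparseBlocks{
            values: []valueRange{
                {value: 1, lo: 2},             // header: stride 1, two ranges follow
                {value: 10, lo: 'a', hi: 'f'}, // 'a'..'f' -> 10..15
                {value: 20, lo: 'x', hi: 'z'}, // 'x'..'z' -> 20..22
            },
            offset: []uint16{0}, // block 0 starts at values[0]
        }
        fmt.Println(sb.lookup(0, 'c')) // 12 = 10 + ('c'-'a')*1
        fmt.Println(sb.lookup(0, 'q')) // 0: byte not covered by any range
    }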

114
vendor/golang.org/x/net/idna/trieval.go generated vendored Normal file
View File

@ -0,0 +1,114 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package idna
// This file contains definitions for interpreting the trie value of the idna
// trie generated by "go run gen*.go". It is shared by both the generator
// program and the resultant package. Sharing is achieved by the generator
// copying gen_trieval.go to trieval.go and changing what's above this comment.
// info holds information from the IDNA mapping table for a single rune. It is
// the value returned by a trie lookup. In most cases, all information fits in
// a 16-bit value. For mappings, this value may contain an index into a slice
// with the mapped string. Such mappings can consist of the actual mapped value
// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
// input rune. This technique is used by the cases packages and reduces the
// table size significantly.
//
// The per-rune values have the following format:
//
// if mapped {
// if inlinedXOR {
// 15..13 inline XOR marker
// 12..11 unused
// 10..3 inline XOR mask
// } else {
// 15..3 index into xor or mapping table
// }
// } else {
// 15..13 unused
// 12 modifier (including virama)
// 11 virama modifier
// 10..8 joining type
// 7..3 category type
// }
// 2 use xor pattern
// 1..0 mapped category
//
// See the definitions below for a more detailed description of the various
// bits.
type info uint16
const (
catSmallMask = 0x3
catBigMask = 0xF8
indexShift = 3
xorBit = 0x4 // interpret the index as an xor pattern
inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined.
joinShift = 8
joinMask = 0x07
viramaModifier = 0x0800
modifier = 0x1000
)
// A category corresponds to a category defined in the IDNA mapping table.
type category uint16
const (
unknown category = 0 // not defined currently in unicode.
mapped category = 1
disallowedSTD3Mapped category = 2
deviation category = 3
)
const (
valid category = 0x08
validNV8 category = 0x18
validXV8 category = 0x28
disallowed category = 0x40
disallowedSTD3Valid category = 0x80
ignored category = 0xC0
)
// join types and additional rune information
const (
joiningL = (iota + 1)
joiningD
joiningT
joiningR
// the following types are derived during processing
joinZWJ
joinZWNJ
joinVirama
numJoinTypes
)
func (c info) isMapped() bool {
return c&0x3 != 0
}
func (c info) category() category {
small := c & catSmallMask
if small != 0 {
return category(small)
}
return category(c & catBigMask)
}
func (c info) joinType() info {
if c.isMapped() {
return 0
}
return (c >> joinShift) & joinMask
}
func (c info) isModifier() bool {
return c&(modifier|catSmallMask) == modifier
}
func (c info) isViramaModifier() bool {
return c&(viramaModifier|catSmallMask) == viramaModifier
}
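The helpers above unpack the per-rune layout documented at the top of the file. A hand-assembled value makes the bit positions concrete; this is an in-package sketch with an invented value, not one taken from tables.go (fmt import assumed):

    func exampleInfoBits() {
        // Not mapped (low two bits zero), category "valid" (0x08),
        // joining type D in bits 10..8 (2 << 8 = 0x200).
        v := info(0x208)
        fmt.Println(v.isMapped())             // false
        fmt.Println(v.category() == valid)    // true
        fmt.Println(v.joinType() == joiningD) // true
        fmt.Println(v.isViramaModifier())     // false
    }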

View File

@ -371,7 +371,7 @@ func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observabl
} }
} }
// Failed to find a level that covers the desired range. So just // Failed to find a level that covers the desired range. So just
// extract from the last level, even if it doesn't cover the entire // extract from the last level, even if it doesn't cover the entire
// desired range. // desired range.
ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)

View File

@ -9,7 +9,7 @@ import (
"strings" "strings"
) )
// A PerHost directs connections to a default Dialer unless the hostname // A PerHost directs connections to a default Dialer unless the host name
// requested matches one of a number of exceptions. // requested matches one of a number of exceptions.
type PerHost struct { type PerHost struct {
def, bypass Dialer def, bypass Dialer
@ -76,7 +76,7 @@ func (p *PerHost) dialerForRequest(host string) Dialer {
// AddFromString parses a string that contains comma-separated values // AddFromString parses a string that contains comma-separated values
// specifying hosts that should use the bypass proxy. Each value is either an // specifying hosts that should use the bypass proxy. Each value is either an
// IP address, a CIDR range, a zone (*.example.com) or a hostname // IP address, a CIDR range, a zone (*.example.com) or a host name
// (localhost). A best effort is made to parse the string and errors are // (localhost). A best effort is made to parse the string and errors are
// ignored. // ignored.
func (p *PerHost) AddFromString(s string) { func (p *PerHost) AddFromString(s string) {
@ -131,7 +131,7 @@ func (p *PerHost) AddZone(zone string) {
p.bypassZones = append(p.bypassZones, zone) p.bypassZones = append(p.bypassZones, zone)
} }
// AddHost specifies a hostname that will use the bypass proxy. // AddHost specifies a host name that will use the bypass proxy.
func (p *PerHost) AddHost(host string) { func (p *PerHost) AddHost(host string) {
if strings.HasSuffix(host, ".") { if strings.HasSuffix(host, ".") {
host = host[:len(host)-1] host = host[:len(host)-1]

View File

@ -11,6 +11,7 @@ import (
"net" "net"
"net/url" "net/url"
"os" "os"
"sync"
) )
// A Dialer is a means to establish a connection. // A Dialer is a means to establish a connection.
@ -27,7 +28,7 @@ type Auth struct {
// FromEnvironment returns the dialer specified by the proxy related variables in // FromEnvironment returns the dialer specified by the proxy related variables in
// the environment. // the environment.
func FromEnvironment() Dialer { func FromEnvironment() Dialer {
allProxy := os.Getenv("all_proxy") allProxy := allProxyEnv.Get()
if len(allProxy) == 0 { if len(allProxy) == 0 {
return Direct return Direct
} }
@ -41,7 +42,7 @@ func FromEnvironment() Dialer {
return Direct return Direct
} }
noProxy := os.Getenv("no_proxy") noProxy := noProxyEnv.Get()
if len(noProxy) == 0 { if len(noProxy) == 0 {
return proxy return proxy
} }
@ -92,3 +93,42 @@ func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
return nil, errors.New("proxy: unknown scheme: " + u.Scheme) return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
} }
var (
allProxyEnv = &envOnce{
names: []string{"ALL_PROXY", "all_proxy"},
}
noProxyEnv = &envOnce{
names: []string{"NO_PROXY", "no_proxy"},
}
)
// envOnce looks up an environment variable (optionally by multiple
// names) once. It mitigates expensive lookups on some platforms
// (e.g. Windows).
// (Borrowed from net/http/transport.go)
type envOnce struct {
names []string
once sync.Once
val string
}
func (e *envOnce) Get() string {
e.once.Do(e.init)
return e.val
}
func (e *envOnce) init() {
for _, n := range e.names {
e.val = os.Getenv(n)
if e.val != "" {
return
}
}
}
// reset is used by tests
func (e *envOnce) reset() {
e.once = sync.Once{}
e.val = ""
}
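With envOnce, FromEnvironment now honors both the upper- and lower-case forms of ALL_PROXY/NO_PROXY and caches the lookup after the first call, so the variables must be set before the dialer is first requested. A minimal sketch (the proxy address is a placeholder):

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/net/proxy"
    )

    func main() {
        // Set before the first FromEnvironment call; envOnce caches the result.
        os.Setenv("ALL_PROXY", "socks5://127.0.0.1:1080") // upper case is honored now
        os.Setenv("NO_PROXY", "localhost,127.0.0.1")

        d := proxy.FromEnvironment()
        fmt.Printf("%T\n", d) // expected: a *proxy.PerHost wrapping the SOCKS5 dialer
    }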

View File

@ -72,24 +72,28 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
closeConn := &conn if err := s.connect(conn, addr); err != nil {
defer func() { conn.Close()
if closeConn != nil {
(*closeConn).Close()
}
}()
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
return nil, err return nil, err
} }
return conn, nil
}
// connect takes an existing connection to a socks5 proxy server,
// and commands the server to extend that connection to target,
// which must be a canonical address with a host and port.
func (s *socks5) connect(conn net.Conn, target string) error {
host, portStr, err := net.SplitHostPort(target)
if err != nil {
return err
}
port, err := strconv.Atoi(portStr) port, err := strconv.Atoi(portStr)
if err != nil { if err != nil {
return nil, errors.New("proxy: failed to parse port number: " + portStr) return errors.New("proxy: failed to parse port number: " + portStr)
} }
if port < 1 || port > 0xffff { if port < 1 || port > 0xffff {
return nil, errors.New("proxy: port number out of range: " + portStr) return errors.New("proxy: port number out of range: " + portStr)
} }
// the size here is just an estimate // the size here is just an estimate
@ -103,17 +107,17 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) {
} }
if _, err := conn.Write(buf); err != nil { if _, err := conn.Write(buf); err != nil {
return nil, errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
} }
if _, err := io.ReadFull(conn, buf[:2]); err != nil { if _, err := io.ReadFull(conn, buf[:2]); err != nil {
return nil, errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
} }
if buf[0] != 5 { if buf[0] != 5 {
return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
} }
if buf[1] == 0xff { if buf[1] == 0xff {
return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
} }
if buf[1] == socks5AuthPassword { if buf[1] == socks5AuthPassword {
@ -125,15 +129,15 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) {
buf = append(buf, s.password...) buf = append(buf, s.password...)
if _, err := conn.Write(buf); err != nil { if _, err := conn.Write(buf); err != nil {
return nil, errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
} }
if _, err := io.ReadFull(conn, buf[:2]); err != nil { if _, err := io.ReadFull(conn, buf[:2]); err != nil {
return nil, errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
} }
if buf[1] != 0 { if buf[1] != 0 {
return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
} }
} }
@ -150,7 +154,7 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) {
buf = append(buf, ip...) buf = append(buf, ip...)
} else { } else {
if len(host) > 255 { if len(host) > 255 {
return nil, errors.New("proxy: destination hostname too long: " + host) return errors.New("proxy: destination host name too long: " + host)
} }
buf = append(buf, socks5Domain) buf = append(buf, socks5Domain)
buf = append(buf, byte(len(host))) buf = append(buf, byte(len(host)))
@ -159,11 +163,11 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) {
buf = append(buf, byte(port>>8), byte(port)) buf = append(buf, byte(port>>8), byte(port))
if _, err := conn.Write(buf); err != nil { if _, err := conn.Write(buf); err != nil {
return nil, errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
} }
if _, err := io.ReadFull(conn, buf[:4]); err != nil { if _, err := io.ReadFull(conn, buf[:4]); err != nil {
return nil, errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
} }
failure := "unknown error" failure := "unknown error"
@ -172,7 +176,7 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) {
} }
if len(failure) > 0 { if len(failure) > 0 {
return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
} }
bytesToDiscard := 0 bytesToDiscard := 0
@ -184,11 +188,11 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) {
case socks5Domain: case socks5Domain:
_, err := io.ReadFull(conn, buf[:1]) _, err := io.ReadFull(conn, buf[:1])
if err != nil { if err != nil {
return nil, errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
} }
bytesToDiscard = int(buf[0]) bytesToDiscard = int(buf[0])
default: default:
return nil, errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
} }
if cap(buf) < bytesToDiscard { if cap(buf) < bytesToDiscard {
@ -197,14 +201,13 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) {
buf = buf[:bytesToDiscard] buf = buf[:bytesToDiscard]
} }
if _, err := io.ReadFull(conn, buf); err != nil { if _, err := io.ReadFull(conn, buf); err != nil {
return nil, errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
} }
// Also need to discard the port number // Also need to discard the port number
if _, err := io.ReadFull(conn, buf[:2]); err != nil { if _, err := io.ReadFull(conn, buf[:2]); err != nil {
return nil, errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
} }
closeConn = nil return nil
return conn, nil
} }
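Dial is now a thin wrapper that opens the TCP connection and delegates the SOCKS5 greeting, optional username/password authentication, and the CONNECT request to connect. Typical use through the public API looks like this (the proxy address and credentials are placeholders):

    package main

    import (
        "log"

        "golang.org/x/net/proxy"
    )

    func main() {
        dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080",
            &proxy.Auth{User: "user", Password: "secret"}, proxy.Direct)
        if err != nil {
            log.Fatal(err)
        }
        conn, err := dialer.Dial("tcp", "example.com:80")
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
        log.Println("connected via SOCKS5 to", conn.RemoteAddr())
    }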

View File

@ -6,6 +6,8 @@ go_library(
"events.go", "events.go",
"histogram.go", "histogram.go",
"trace.go", "trace.go",
"trace_go16.go",
"trace_go17.go",
], ],
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [

View File

@ -39,9 +39,9 @@ var buckets = []bucket{
} }
// RenderEvents renders the HTML page typically served at /debug/events. // RenderEvents renders the HTML page typically served at /debug/events.
// It does not do any auth checking; see AuthRequest for the default auth check // It does not do any auth checking. The request may be nil.
// used by the handler registered on http.DefaultServeMux. //
// req may be nil. // Most users will use the Events handler.
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
now := time.Now() now := time.Now()
data := &struct { data := &struct {

View File

@ -77,7 +77,6 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"golang.org/x/net/context"
"golang.org/x/net/internal/timeseries" "golang.org/x/net/internal/timeseries"
) )
@ -111,30 +110,46 @@ var AuthRequest = func(req *http.Request) (any, sensitive bool) {
} }
func init() { func init() {
http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) { // TODO(jbd): Serve Traces from /debug/traces in the future?
any, sensitive := AuthRequest(req) // There is no requirement for a request to be present to have traces.
if !any { http.HandleFunc("/debug/requests", Traces)
http.Error(w, "not allowed", http.StatusUnauthorized) http.HandleFunc("/debug/events", Events)
return }
}
w.Header().Set("Content-Type", "text/html; charset=utf-8") // Traces responds with traces from the program.
Render(w, req, sensitive) // The package initialization registers it in http.DefaultServeMux
}) // at /debug/requests.
http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { //
any, sensitive := AuthRequest(req) // It performs authorization by running AuthRequest.
if !any { func Traces(w http.ResponseWriter, req *http.Request) {
http.Error(w, "not allowed", http.StatusUnauthorized) any, sensitive := AuthRequest(req)
return if !any {
} http.Error(w, "not allowed", http.StatusUnauthorized)
w.Header().Set("Content-Type", "text/html; charset=utf-8") return
RenderEvents(w, req, sensitive) }
}) w.Header().Set("Content-Type", "text/html; charset=utf-8")
Render(w, req, sensitive)
}
// Events responds with a page of events collected by EventLogs.
// The package initialization registers it in http.DefaultServeMux
// at /debug/events.
//
// It performs authorization by running AuthRequest.
func Events(w http.ResponseWriter, req *http.Request) {
any, sensitive := AuthRequest(req)
if !any {
http.Error(w, "not allowed", http.StatusUnauthorized)
return
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
RenderEvents(w, req, sensitive)
} }
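Because the handlers are now exported, they can be mounted on a mux other than http.DefaultServeMux, and the auth policy can still be customized through AuthRequest. A sketch, assuming a trusted local debug port:

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/trace"
    )

    func main() {
        // Allow full trace visibility; only do this on a trusted debug port.
        trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
            return true, true
        }

        mux := http.NewServeMux()
        mux.HandleFunc("/debug/requests", trace.Traces) // exported by the change above
        mux.HandleFunc("/debug/events", trace.Events)
        log.Fatal(http.ListenAndServe("127.0.0.1:6060", mux))
    }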
// Render renders the HTML page typically served at /debug/requests. // Render renders the HTML page typically served at /debug/requests.
// It does not do any auth checking; see AuthRequest for the default auth check // It does not do any auth checking. The request may be nil.
// used by the handler registered on http.DefaultServeMux. //
// req may be nil. // Most users will use the Traces handler.
func Render(w io.Writer, req *http.Request, sensitive bool) { func Render(w io.Writer, req *http.Request, sensitive bool) {
data := &struct { data := &struct {
Families []string Families []string
@ -271,18 +286,6 @@ type contextKeyT string
var contextKey = contextKeyT("golang.org/x/net/trace.Trace") var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
// NewContext returns a copy of the parent context
// and associates it with a Trace.
func NewContext(ctx context.Context, tr Trace) context.Context {
return context.WithValue(ctx, contextKey, tr)
}
// FromContext returns the Trace bound to the context, if any.
func FromContext(ctx context.Context) (tr Trace, ok bool) {
tr, ok = ctx.Value(contextKey).(Trace)
return
}
// Trace represents an active request. // Trace represents an active request.
type Trace interface { type Trace interface {
// LazyLog adds x to the event log. It will be evaluated each time the // LazyLog adds x to the event log. It will be evaluated each time the

21
vendor/golang.org/x/net/trace/trace_go16.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package trace
import "golang.org/x/net/context"
// NewContext returns a copy of the parent context
// and associates it with a Trace.
func NewContext(ctx context.Context, tr Trace) context.Context {
return context.WithValue(ctx, contextKey, tr)
}
// FromContext returns the Trace bound to the context, if any.
func FromContext(ctx context.Context) (tr Trace, ok bool) {
tr, ok = ctx.Value(contextKey).(Trace)
return
}

21
vendor/golang.org/x/net/trace/trace_go17.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package trace
import "context"
// NewContext returns a copy of the parent context
// and associates it with a Trace.
func NewContext(ctx context.Context, tr Trace) context.Context {
return context.WithValue(ctx, contextKey, tr)
}
// FromContext returns the Trace bound to the context, if any.
func FromContext(ctx context.Context) (tr Trace, ok bool) {
tr, ok = ctx.Value(contextKey).(Trace)
return
}
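NewContext and FromContext are unchanged in behavior but now live in build-tagged files, so the standard context package is used on Go 1.7+ and golang.org/x/net/context on older releases. Typical request-scoped usage, as a sketch with made-up family and title strings:

    package main

    import (
        "context"
        "log"

        "golang.org/x/net/trace"
    )

    func main() {
        tr := trace.New("example.Service", "DoSomething")
        defer tr.Finish()

        ctx := trace.NewContext(context.Background(), tr)

        if tr2, ok := trace.FromContext(ctx); ok {
            tr2.LazyPrintf("doing work")
        }
        log.Println("done")
    }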

View File

@ -11,8 +11,10 @@ go_library(
"tables.go", "tables.go",
"trieval.go", "trieval.go",
], ],
cgo = True,
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
"//vendor/golang.org/x/text/internal:go_default_library",
"//vendor/golang.org/x/text/language:go_default_library", "//vendor/golang.org/x/text/language:go_default_library",
"//vendor/golang.org/x/text/transform:go_default_library", "//vendor/golang.org/x/text/transform:go_default_library",
"//vendor/golang.org/x/text/unicode/norm:go_default_library", "//vendor/golang.org/x/text/unicode/norm:go_default_library",

View File

@ -35,7 +35,7 @@ import (
// A Caser may be stateful and should therefore not be shared between // A Caser may be stateful and should therefore not be shared between
// goroutines. // goroutines.
type Caser struct { type Caser struct {
t transform.Transformer t transform.SpanningTransformer
} }
// Bytes returns a new byte slice with the result of converting b to the case // Bytes returns a new byte slice with the result of converting b to the case
@ -56,12 +56,17 @@ func (c Caser) String(s string) string {
// Transform. // Transform.
func (c Caser) Reset() { c.t.Reset() } func (c Caser) Reset() { c.t.Reset() }
// Transform implements the Transformer interface and transforms the given input // Transform implements the transform.Transformer interface and transforms the
// to the case form implemented by c. // given input to the case form implemented by c.
func (c Caser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { func (c Caser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
return c.t.Transform(dst, src, atEOF) return c.t.Transform(dst, src, atEOF)
} }
// Span implements the transform.SpanningTransformer interface.
func (c Caser) Span(src []byte, atEOF bool) (n int, err error) {
return c.t.Span(src, atEOF)
}
// Upper returns a Caser for language-specific uppercasing. // Upper returns a Caser for language-specific uppercasing.
func Upper(t language.Tag, opts ...Option) Caser { func Upper(t language.Tag, opts ...Option) Caser {
return Caser{makeUpper(t, getOpts(opts...))} return Caser{makeUpper(t, getOpts(opts...))}
@ -83,14 +88,20 @@ func Title(t language.Tag, opts ...Option) Caser {
// //
// Case folding does not normalize the input and may not preserve a normal form. // Case folding does not normalize the input and may not preserve a normal form.
// Use the collate or search package for more convenient and linguistically // Use the collate or search package for more convenient and linguistically
// sound comparisons. Use unicode/precis for string comparisons where security // sound comparisons. Use golang.org/x/text/secure/precis for string comparisons
// aspects are a concern. // where security aspects are a concern.
func Fold(opts ...Option) Caser { func Fold(opts ...Option) Caser {
return Caser{makeFold(getOpts(opts...))} return Caser{makeFold(getOpts(opts...))}
} }
// An Option is used to modify the behavior of a Caser. // An Option is used to modify the behavior of a Caser.
type Option func(o *options) type Option func(o options) options
// TODO: consider these options to take a boolean as well, like FinalSigma.
// The advantage of using this approach is that other providers of a lower-case
// algorithm could set different defaults by prefixing a user-provided slice
// of options with their own. This is handy, for instance, for the precis
// package which would override the default to not handle the Greek final sigma.
var ( var (
// NoLower disables the lowercasing of non-leading letters for a title // NoLower disables the lowercasing of non-leading letters for a title
@ -110,20 +121,42 @@ type options struct {
// TODO: segmenter, max ignorable, alternative versions, etc. // TODO: segmenter, max ignorable, alternative versions, etc.
noFinalSigma bool // Only used for testing. ignoreFinalSigma bool
} }
func getOpts(o ...Option) (res options) { func getOpts(o ...Option) (res options) {
for _, f := range o { for _, f := range o {
f(&res) res = f(res)
} }
return return
} }
func noLower(o *options) { func noLower(o options) options {
o.noLower = true o.noLower = true
return o
} }
func compact(o *options) { func compact(o options) options {
o.simple = true o.simple = true
return o
}
// HandleFinalSigma specifies whether the special handling of Greek final sigma
// should be enabled. Unicode prescribes handling the Greek final sigma for all
// locales, but standards like IDNA and PRECIS override this default.
func HandleFinalSigma(enable bool) Option {
if enable {
return handleFinalSigma
}
return ignoreFinalSigma
}
func ignoreFinalSigma(o options) options {
o.ignoreFinalSigma = true
return o
}
func handleFinalSigma(o options) options {
o.ignoreFinalSigma = false
return o
} }
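Options are now value transformers (func(options) options) and Caser satisfies transform.SpanningTransformer, so callers can test how much of the input is already in the desired case form without allocating. A sketch of the new surface (output comments are expectations, not asserted against this revision):

    package main

    import (
        "fmt"

        "golang.org/x/text/cases"
        "golang.org/x/text/language"
    )

    func main() {
        fold := cases.Fold()
        fmt.Println(fold.String("Straße")) // expected: strasse

        // Span reports how much of the input is already case-folded.
        n, err := fold.Span([]byte("already folded"), true)
        fmt.Println(n, err)

        lower := cases.Lower(language.Und, cases.HandleFinalSigma(false))
        fmt.Println(lower.String("ΣΟΦΟΣ")) // final sigma left as 'σ' rather than 'ς'
    }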

View File

@ -4,9 +4,7 @@
package cases package cases
import ( import "golang.org/x/text/transform"
"golang.org/x/text/transform"
)
// A context is used for iterating over source bytes, fetching case info and // A context is used for iterating over source bytes, fetching case info and
// writing to a destination buffer. // writing to a destination buffer.
@ -56,6 +54,14 @@ func (c *context) ret() (nDst, nSrc int, err error) {
return c.nDst, c.nSrc, transform.ErrShortSrc return c.nDst, c.nSrc, transform.ErrShortSrc
} }
// retSpan returns the return values for the Span method. It checks whether
// there were insufficient bytes in src to complete and introduces an error
// accordingly, if necessary.
func (c *context) retSpan() (n int, err error) {
_, nSrc, err := c.ret()
return nSrc, err
}
// checkpoint sets the return value buffer points for Transform to the current // checkpoint sets the return value buffer points for Transform to the current
// positions. // positions.
func (c *context) checkpoint() { func (c *context) checkpoint() {
@ -200,6 +206,23 @@ func lower(c *context) bool {
return c.copy() return c.copy()
} }
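// isLower reports whether the current rune is in lower case.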
func isLower(c *context) bool {
ct := c.caseType()
if c.info&hasMappingMask == 0 || ct == cLower {
return true
}
if c.info&exceptionBit == 0 {
c.err = transform.ErrEndOfSpan
return false
}
e := exceptions[c.info>>exceptionShift:]
if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange {
c.err = transform.ErrEndOfSpan
return false
}
return true
}
// upper writes the uppercase version of the current rune to dst. // upper writes the uppercase version of the current rune to dst.
func upper(c *context) bool { func upper(c *context) bool {
ct := c.caseType() ct := c.caseType()
@ -226,6 +249,29 @@ func upper(c *context) bool {
return c.copy() return c.copy()
} }
// isUpper reports whether the current rune is in upper case.
func isUpper(c *context) bool {
ct := c.caseType()
if c.info&hasMappingMask == 0 || ct == cUpper {
return true
}
if c.info&exceptionBit == 0 {
c.err = transform.ErrEndOfSpan
return false
}
e := exceptions[c.info>>exceptionShift:]
// Get length of first special case mapping.
n := (e[1] >> lengthBits) & lengthMask
if ct == cTitle {
n = e[1] & lengthMask
}
if n != noChange {
c.err = transform.ErrEndOfSpan
return false
}
return true
}
// title writes the title case version of the current rune to dst. // title writes the title case version of the current rune to dst.
func title(c *context) bool { func title(c *context) bool {
ct := c.caseType() ct := c.caseType()
@ -257,6 +303,33 @@ func title(c *context) bool {
return c.copy() return c.copy()
} }
// isTitle reports whether the current rune is in title case.
func isTitle(c *context) bool {
ct := c.caseType()
if c.info&hasMappingMask == 0 || ct == cTitle {
return true
}
if c.info&exceptionBit == 0 {
if ct == cLower {
c.err = transform.ErrEndOfSpan
return false
}
return true
}
// Get the exception data.
e := exceptions[c.info>>exceptionShift:]
if nTitle := e[1] & lengthMask; nTitle != noChange {
c.err = transform.ErrEndOfSpan
return false
}
nFirst := (e[1] >> lengthBits) & lengthMask
if ct == cLower && nFirst != noChange {
c.err = transform.ErrEndOfSpan
return false
}
return true
}
// foldFull writes the foldFull version of the current rune to dst. // foldFull writes the foldFull version of the current rune to dst.
func foldFull(c *context) bool { func foldFull(c *context) bool {
if c.info&hasMappingMask == 0 { if c.info&hasMappingMask == 0 {
@ -279,3 +352,25 @@ func foldFull(c *context) bool {
} }
return c.writeString(e[2 : 2+n]) return c.writeString(e[2 : 2+n])
} }
// isFoldFull reports whether the current rune is left unchanged by foldFull.
func isFoldFull(c *context) bool {
if c.info&hasMappingMask == 0 {
return true
}
ct := c.caseType()
if c.info&exceptionBit == 0 {
if ct != cLower || c.info&inverseFoldBit != 0 {
c.err = transform.ErrEndOfSpan
return false
}
return true
}
e := exceptions[c.info>>exceptionShift:]
n := e[0] & lengthMask
if n == 0 && ct == cLower {
return true
}
c.err = transform.ErrEndOfSpan
return false
}

View File

@ -18,7 +18,15 @@ func (t *caseFolder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
return c.ret() return c.ret()
} }
func makeFold(o options) transform.Transformer { func (t *caseFolder) Span(src []byte, atEOF bool) (n int, err error) {
c := context{src: src, atEOF: atEOF}
for c.next() && isFoldFull(&c) {
c.checkpoint()
}
return c.retSpan()
}
func makeFold(o options) transform.SpanningTransformer {
// TODO: Special case folding, through option Language, Special/Turkic, or // TODO: Special case folding, through option Language, Special/Turkic, or
// both. // both.
// TODO: Implement Compact options. // TODO: Implement Compact options.

View File

@ -76,7 +76,7 @@ type breakCategory int
const ( const (
breakBreak breakCategory = iota breakBreak breakCategory = iota
breakLetter breakLetter
breakIgnored breakMid
) )
// mapping returns the case mapping for the given case type. // mapping returns the case mapping for the given case type.
@ -162,9 +162,14 @@ func parseUCD() []runeInfo {
// We collapse the word breaking properties onto the categories we need. // We collapse the word breaking properties onto the categories we need.
switch p.String(1) { // TODO: officially we need to canonicalize. switch p.String(1) { // TODO: officially we need to canonicalize.
case "Format", "MidLetter", "MidNumLet", "Single_Quote": case "MidLetter", "MidNumLet", "Single_Quote":
ri.BreakCat = breakIgnored ri.BreakCat = breakMid
case "ALetter", "Hebrew_Letter", "Numeric", "Extend", "ExtendNumLet": if !ri.CaseIgnorable {
// finalSigma relies on the fact that all breakMid runes are
// also a Case_Ignorable. Revisit this code when this changes.
log.Fatalf("Rune %U, which has a break category mid, is not a case ignorable", ri)
}
case "ALetter", "Hebrew_Letter", "Numeric", "Extend", "ExtendNumLet", "Format", "ZWJ":
ri.BreakCat = breakLetter ri.BreakCat = breakLetter
} }
}) })
@ -240,8 +245,11 @@ func makeEntry(ri *runeInfo) {
case above: // Above case above: // Above
ccc = cccAbove ccc = cccAbove
} }
if ri.BreakCat == breakBreak { switch ri.BreakCat {
case breakBreak:
ccc = cccBreak ccc = cccBreak
case breakMid:
ri.entry |= isMidBit
} }
ri.entry |= ccc ri.entry |= ccc
@ -588,7 +596,7 @@ func verifyProperties(chars []runeInfo) {
// decomposition is greater than U+00FF, the rune is always // decomposition is greater than U+00FF, the rune is always
// great and not a modifier. // great and not a modifier.
if f := runes[0]; unicode.IsMark(f) || f > 0xFF && !unicode.Is(unicode.Greek, f) { if f := runes[0]; unicode.IsMark(f) || f > 0xFF && !unicode.Is(unicode.Greek, f) {
log.Fatalf("%U: expeced first rune of Greek decomposition to be letter, found %U", r, f) log.Fatalf("%U: expected first rune of Greek decomposition to be letter, found %U", r, f)
} }
// A.6.2: Any follow-up rune in a Greek decomposition is a // A.6.2: Any follow-up rune in a Greek decomposition is a
// modifier of which the first should be gobbled in // modifier of which the first should be gobbled in
@ -597,7 +605,7 @@ func verifyProperties(chars []runeInfo) {
switch m { switch m {
case 0x0313, 0x0314, 0x0301, 0x0300, 0x0306, 0x0342, 0x0308, 0x0304, 0x345: case 0x0313, 0x0314, 0x0301, 0x0300, 0x0306, 0x0342, 0x0308, 0x0304, 0x345:
default: default:
log.Fatalf("%U: modifier %U is outside of expeced Greek modifier set", r, m) log.Fatalf("%U: modifier %U is outside of expected Greek modifier set", r, m)
} }
} }
} }
@ -690,7 +698,7 @@ func genTablesTest() {
parse("auxiliary/WordBreakProperty.txt", func(p *ucd.Parser) { parse("auxiliary/WordBreakProperty.txt", func(p *ucd.Parser) {
switch p.String(1) { switch p.String(1) {
case "Extend", "Format", "MidLetter", "MidNumLet", "Single_Quote", case "Extend", "Format", "MidLetter", "MidNumLet", "Single_Quote",
"ALetter", "Hebrew_Letter", "Numeric", "ExtendNumLet": "ALetter", "Hebrew_Letter", "Numeric", "ExtendNumLet", "ZWJ":
notBreak[p.Rune(0)] = true notBreak[p.Rune(0)] = true
} }
}) })


@ -26,6 +26,7 @@ package main
// Only 13..8 are used for XOR patterns. // Only 13..8 are used for XOR patterns.
// 7 inverseFold (fold to upper, not to lower) // 7 inverseFold (fold to upper, not to lower)
// 6 index: interpret the XOR pattern as an index // 6 index: interpret the XOR pattern as an index
// or isMid if case mode is cIgnorableUncased.
// 5..4 CCC: zero (normal or break), above or other // 5..4 CCC: zero (normal or break), above or other
// } // }
// 3 exception: interpret this value as an exception index // 3 exception: interpret this value as an exception index
@ -48,6 +49,7 @@ const (
ignorableValue = 0x0004 ignorableValue = 0x0004
inverseFoldBit = 1 << 7 inverseFoldBit = 1 << 7
isMidBit = 1 << 6
exceptionBit = 1 << 3 exceptionBit = 1 << 3
exceptionShift = 5 exceptionShift = 5
@ -57,7 +59,7 @@ const (
xorShift = 8 xorShift = 8
// There is no mapping if all xor bits and the exception bit are zero. // There is no mapping if all xor bits and the exception bit are zero.
hasMappingMask = 0xffc0 | exceptionBit hasMappingMask = 0xff80 | exceptionBit
) )
// The case mode bits encodes the case type of a rune. This includes uncased, // The case mode bits encodes the case type of a rune. This includes uncased,
@ -95,10 +97,6 @@ func (c info) isCaseIgnorable() bool {
return c&ignorableMask == ignorableValue return c&ignorableMask == ignorableValue
} }
func (c info) isCaseIgnorableAndNonBreakStarter() bool {
return c&(fullCasedMask|cccMask) == (ignorableValue | cccZero)
}
func (c info) isNotCasedAndNotCaseIgnorable() bool { func (c info) isNotCasedAndNotCaseIgnorable() bool {
return c&fullCasedMask == 0 return c&fullCasedMask == 0
} }
@ -107,6 +105,10 @@ func (c info) isCaseIgnorableAndNotCased() bool {
return c&fullCasedMask == cIgnorableUncased return c&fullCasedMask == cIgnorableUncased
} }
func (c info) isMid() bool {
return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased
}
// The case mapping implementation will need to know about various Canonical // The case mapping implementation will need to know about various Canonical
// Combining Class (CCC) values. We encode two of these in the trie value: // Combining Class (CCC) values. We encode two of these in the trie value:
// cccZero (0) and cccAbove (230). If the value is cccOther, it means that // cccZero (0) and cccAbove (230). If the value is cccOther, it means that

61
vendor/golang.org/x/text/cases/icu.go generated vendored Normal file

@ -0,0 +1,61 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build icu
package cases
// Ideally these functions would be defined in a test file, but go test doesn't
// allow CGO in tests. The build tag should ensure either way that these
// functions will not end up in the package.
// TODO: Ensure that the correct ICU version is set.
/*
#cgo LDFLAGS: -licui18n.57 -licuuc.57
#include <stdlib.h>
#include <unicode/ustring.h>
#include <unicode/utypes.h>
#include <unicode/localpointer.h>
#include <unicode/ucasemap.h>
*/
import "C"
import "unsafe"
func doICU(tag, caser, input string) string {
err := C.UErrorCode(0)
loc := C.CString(tag)
cm := C.ucasemap_open(loc, C.uint32_t(0), &err)
buf := make([]byte, len(input)*4)
dst := (*C.char)(unsafe.Pointer(&buf[0]))
src := C.CString(input)
cn := C.int32_t(0)
switch caser {
case "fold":
cn = C.ucasemap_utf8FoldCase(cm,
dst, C.int32_t(len(buf)),
src, C.int32_t(len(input)),
&err)
case "lower":
cn = C.ucasemap_utf8ToLower(cm,
dst, C.int32_t(len(buf)),
src, C.int32_t(len(input)),
&err)
case "upper":
cn = C.ucasemap_utf8ToUpper(cm,
dst, C.int32_t(len(buf)),
src, C.int32_t(len(input)),
&err)
case "title":
cn = C.ucasemap_utf8ToTitle(cm,
dst, C.int32_t(len(buf)),
src, C.int32_t(len(input)),
&err)
}
return string(buf[:cn])
}
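Because this file is guarded by the icu build tag, it is only compiled when that tag is passed explicitly. A plausible way to exercise these helpers against a local ICU installation would be something like the following (the package path is the upstream one; the -licui18n.57/-licuuc.57 flags above assume ICU 57 shared libraries are installed):

go test -tags icu golang.org/x/text/cases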


@ -28,9 +28,6 @@ func (c info) cccType() info {
// only makes sense, though, if the performance and/or space penalty of using // only makes sense, though, if the performance and/or space penalty of using
// the generic breaker is big. Extra data will only be needed for non-cased // the generic breaker is big. Extra data will only be needed for non-cased
// runes, which means there are sufficient bits left in the caseType. // runes, which means there are sufficient bits left in the caseType.
// Also note that the standard breaking algorithm doesn't always make sense
// for title casing. For example, a4a -> A4a, but a"4a -> A"4A (where " stands
// for modifier \u0308).
// ICU prohibits breaking in such cases as well. // ICU prohibits breaking in such cases as well.
// For the purpose of title casing we use an approximation of the Unicode Word // For the purpose of title casing we use an approximation of the Unicode Word
@ -41,17 +38,19 @@ func (c info) cccType() info {
// categories, with associated rules: // categories, with associated rules:
// //
// 1) Letter: // 1) Letter:
// ALetter, Hebrew_Letter, Numeric, ExtendNumLet, Extend. // ALetter, Hebrew_Letter, Numeric, ExtendNumLet, Extend, Format_FE, ZWJ.
// Rule: Never break between consecutive runes of this category. // Rule: Never break between consecutive runes of this category.
// //
// 2) Mid: // 2) Mid:
// Format, MidLetter, MidNumLet, Single_Quote. // MidLetter, MidNumLet, Single_Quote.
// (Cf. case-ignorable: MidLetter, MidNumLet or cat is Mn, Me, Cf, Lm or Sk). // (Cf. case-ignorable: MidLetter, MidNumLet, Single_Quote or cat is Mn,
// Me, Cf, Lm or Sk).
// Rule: Don't break between Letter and Mid, but break between two Mids. // Rule: Don't break between Letter and Mid, but break between two Mids.
// //
// 3) Break: // 3) Break:
// Any other category, including NewLine, CR, LF and Double_Quote. These // Any other category: NewLine, MidNum, CR, LF, Double_Quote, Katakana, and
// categories should always result in a break between two cased letters. // Other.
// These categories should always result in a break between two cased letters.
// Rule: Always break. // Rule: Always break.
// //
// Note 1: the Katakana and MidNum categories can, in esoteric cases, result in // Note 1: the Katakana and MidNum categories can, in esoteric cases, result in

389
vendor/golang.org/x/text/cases/map.go generated vendored

@ -13,6 +13,7 @@ import (
"unicode" "unicode"
"unicode/utf8" "unicode/utf8"
"golang.org/x/text/internal"
"golang.org/x/text/language" "golang.org/x/text/language"
"golang.org/x/text/transform" "golang.org/x/text/transform"
"golang.org/x/text/unicode/norm" "golang.org/x/text/unicode/norm"
@ -24,6 +25,11 @@ import (
// dst so far won't need changing as we see more source bytes. // dst so far won't need changing as we see more source bytes.
type mapFunc func(*context) bool type mapFunc func(*context) bool
// A spanFunc takes a context set to the current rune and returns whether this
// rune would be altered when written to the output. It may advance the context
// to the next rune. It returns whether a checkpoint is possible.
type spanFunc func(*context) bool
// maxIgnorable defines the maximum number of ignorables to consider for // maxIgnorable defines the maximum number of ignorables to consider for
// lookahead operations. // lookahead operations.
const maxIgnorable = 30 const maxIgnorable = 30
@ -36,12 +42,12 @@ func init() {
for _, s := range strings.Split(supported, " ") { for _, s := range strings.Split(supported, " ") {
tags = append(tags, language.MustParse(s)) tags = append(tags, language.MustParse(s))
} }
matcher = language.NewMatcher(tags) matcher = internal.NewInheritanceMatcher(tags)
Supported = language.NewCoverage(tags) Supported = language.NewCoverage(tags)
} }
var ( var (
matcher language.Matcher matcher *internal.InheritanceMatcher
Supported language.Coverage Supported language.Coverage
@ -50,56 +56,69 @@ var (
// Some uppercase mappers are stateless, so we can precompute the // Some uppercase mappers are stateless, so we can precompute the
// Transformers and save a bit on runtime allocations. // Transformers and save a bit on runtime allocations.
upperFunc = []mapFunc{ upperFunc = []struct {
nil, // und upper mapFunc
nil, // af span spanFunc
aztrUpper(upper), // az }{
elUpper, // el {nil, nil}, // und
ltUpper(upper), // lt {nil, nil}, // af
nil, // nl {aztrUpper(upper), isUpper}, // az
aztrUpper(upper), // tr {elUpper, noSpan}, // el
{ltUpper(upper), noSpan}, // lt
{nil, nil}, // nl
{aztrUpper(upper), isUpper}, // tr
} }
undUpper transform.Transformer = &undUpperCaser{} undUpper transform.SpanningTransformer = &undUpperCaser{}
undLower transform.SpanningTransformer = &undLowerCaser{}
undLowerIgnoreSigma transform.SpanningTransformer = &undLowerIgnoreSigmaCaser{}
lowerFunc = []mapFunc{ lowerFunc = []mapFunc{
lower, // und nil, // und
lower, // af nil, // af
aztrLower, // az aztrLower, // az
lower, // el nil, // el
ltLower, // lt ltLower, // lt
lower, // nl nil, // nl
aztrLower, // tr aztrLower, // tr
} }
titleInfos = []struct { titleInfos = []struct {
title, lower mapFunc title mapFunc
rewrite func(*context) lower mapFunc
titleSpan spanFunc
rewrite func(*context)
}{ }{
{title, lower, nil}, // und {title, lower, isTitle, nil}, // und
{title, lower, afnlRewrite}, // af {title, lower, isTitle, afnlRewrite}, // af
{aztrUpper(title), aztrLower, nil}, // az {aztrUpper(title), aztrLower, isTitle, nil}, // az
{title, lower, nil}, // el {title, lower, isTitle, nil}, // el
{ltUpper(title), ltLower, nil}, // lt {ltUpper(title), ltLower, noSpan, nil}, // lt
{nlTitle, lower, afnlRewrite}, // nl {nlTitle, lower, nlTitleSpan, afnlRewrite}, // nl
{aztrUpper(title), aztrLower, nil}, // tr {aztrUpper(title), aztrLower, isTitle, nil}, // tr
} }
) )
func makeUpper(t language.Tag, o options) transform.Transformer { func makeUpper(t language.Tag, o options) transform.SpanningTransformer {
_, i, _ := matcher.Match(t) _, i, _ := matcher.Match(t)
f := upperFunc[i] f := upperFunc[i].upper
if f == nil { if f == nil {
return undUpper return undUpper
} }
return &simpleCaser{f: f} return &simpleCaser{f: f, span: upperFunc[i].span}
} }
func makeLower(t language.Tag, o options) transform.Transformer { func makeLower(t language.Tag, o options) transform.SpanningTransformer {
_, i, _ := matcher.Match(t) _, i, _ := matcher.Match(t)
f := lowerFunc[i] f := lowerFunc[i]
if o.noFinalSigma { if f == nil {
return &simpleCaser{f: f} if o.ignoreFinalSigma {
return undLowerIgnoreSigma
}
return undLower
}
if o.ignoreFinalSigma {
return &simpleCaser{f: f, span: isLower}
} }
return &lowerCaser{ return &lowerCaser{
first: f, first: f,
@ -107,22 +126,28 @@ func makeLower(t language.Tag, o options) transform.Transformer {
} }
} }
func makeTitle(t language.Tag, o options) transform.Transformer { func makeTitle(t language.Tag, o options) transform.SpanningTransformer {
_, i, _ := matcher.Match(t) _, i, _ := matcher.Match(t)
x := &titleInfos[i] x := &titleInfos[i]
lower := x.lower lower := x.lower
if o.noLower { if o.noLower {
lower = (*context).copy lower = (*context).copy
} else if !o.noFinalSigma { } else if !o.ignoreFinalSigma {
lower = finalSigma(lower) lower = finalSigma(lower)
} }
return &titleCaser{ return &titleCaser{
title: x.title, title: x.title,
lower: lower, lower: lower,
rewrite: x.rewrite, titleSpan: x.titleSpan,
rewrite: x.rewrite,
} }
} }
func noSpan(c *context) bool {
c.err = transform.ErrEndOfSpan
return false
}
// TODO: consider a similar special case for the fast majority lower case. This // TODO: consider a similar special case for the fast majority lower case. This
// is a bit more involved so will require some more precise benchmarking to // is a bit more involved so will require some more precise benchmarking to
// justify it. // justify it.
@ -132,7 +157,7 @@ type undUpperCaser struct{ transform.NopResetter }
// undUpperCaser implements the Transformer interface for doing an upper case // undUpperCaser implements the Transformer interface for doing an upper case
// mapping for the root locale (und). It eliminates the need for an allocation // mapping for the root locale (und). It eliminates the need for an allocation
// as it prevents escaping by not using function pointers. // as it prevents escaping by not using function pointers.
func (t *undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { func (t undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
c := context{dst: dst, src: src, atEOF: atEOF} c := context{dst: dst, src: src, atEOF: atEOF}
for c.next() { for c.next() {
upper(&c) upper(&c)
@ -141,26 +166,117 @@ func (t *undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int,
return c.ret() return c.ret()
} }
func (t undUpperCaser) Span(src []byte, atEOF bool) (n int, err error) {
c := context{src: src, atEOF: atEOF}
for c.next() && isUpper(&c) {
c.checkpoint()
}
return c.retSpan()
}
// undLowerIgnoreSigmaCaser implements the Transformer interface for doing
// a lower case mapping for the root locale (und) ignoring final sigma
// handling. This casing algorithm is used in some performance-critical packages
// like secure/precis and x/net/http/idna, which warrants its special-casing.
type undLowerIgnoreSigmaCaser struct{ transform.NopResetter }
func (t undLowerIgnoreSigmaCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
c := context{dst: dst, src: src, atEOF: atEOF}
for c.next() && lower(&c) {
c.checkpoint()
}
return c.ret()
}
// Span implements a generic lower-casing. This is possible as isLower works
// for all lowercasing variants. All lowercase variants only vary in how they
// transform a non-lowercase letter. They will never change an already lowercase
// letter. In addition, there is no state.
func (t undLowerIgnoreSigmaCaser) Span(src []byte, atEOF bool) (n int, err error) {
c := context{src: src, atEOF: atEOF}
for c.next() && isLower(&c) {
c.checkpoint()
}
return c.retSpan()
}
type simpleCaser struct { type simpleCaser struct {
context context
f mapFunc f mapFunc
span spanFunc
} }
// simpleCaser implements the Transformer interface for doing a case operation // simpleCaser implements the Transformer interface for doing a case operation
// on a rune-by-rune basis. // on a rune-by-rune basis.
func (t *simpleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { func (t *simpleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
t.context = context{dst: dst, src: src, atEOF: atEOF} c := context{dst: dst, src: src, atEOF: atEOF}
c := &t.context for c.next() && t.f(&c) {
for c.next() && t.f(c) {
c.checkpoint() c.checkpoint()
} }
return c.ret() return c.ret()
} }
func (t *simpleCaser) Span(src []byte, atEOF bool) (n int, err error) {
c := context{src: src, atEOF: atEOF}
for c.next() && t.span(&c) {
c.checkpoint()
}
return c.retSpan()
}
// undLowerCaser implements the Transformer interface for doing a lower case
// mapping for the root locale (und), including final sigma handling: it tracks
// word state so that a capital sigma at the end of a word is lowered to ς
// rather than σ.
type undLowerCaser struct{ transform.NopResetter }
func (t undLowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
c := context{dst: dst, src: src, atEOF: atEOF}
for isInterWord := true; c.next(); {
if isInterWord {
if c.info.isCased() {
if !lower(&c) {
break
}
isInterWord = false
} else if !c.copy() {
break
}
} else {
if c.info.isNotCasedAndNotCaseIgnorable() {
if !c.copy() {
break
}
isInterWord = true
} else if !c.hasPrefix("Σ") {
if !lower(&c) {
break
}
} else if !finalSigmaBody(&c) {
break
}
}
c.checkpoint()
}
return c.ret()
}
func (t undLowerCaser) Span(src []byte, atEOF bool) (n int, err error) {
c := context{src: src, atEOF: atEOF}
for c.next() && isLower(&c) {
c.checkpoint()
}
return c.retSpan()
}
// lowerCaser implements the Transformer interface. The default Unicode lower // lowerCaser implements the Transformer interface. The default Unicode lower
// casing requires different treatment for the first and subsequent characters // casing requires different treatment for the first and subsequent characters
// of a word, most notably to handle the Greek final Sigma. // of a word, most notably to handle the Greek final Sigma.
type lowerCaser struct { type lowerCaser struct {
undLowerIgnoreSigmaCaser
context context
first, midWord mapFunc first, midWord mapFunc
@ -202,7 +318,9 @@ type titleCaser struct {
context context
// rune mappings used by the actual casing algorithms. // rune mappings used by the actual casing algorithms.
title, lower mapFunc title mapFunc
lower mapFunc
titleSpan spanFunc
rewrite func(*context) rewrite func(*context)
} }
@ -228,10 +346,10 @@ func (t *titleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
t.rewrite(c) t.rewrite(c)
} }
wasMid := p.isCaseIgnorableAndNonBreakStarter() wasMid := p.isMid()
// Break out of this loop on failure to ensure we do not modify the // Break out of this loop on failure to ensure we do not modify the
// state incorrectly. // state incorrectly.
if p.isCased() && !p.isCaseIgnorableAndNotCased() { if p.isCased() {
if !c.isMidWord { if !c.isMidWord {
if !t.title(c) { if !t.title(c) {
break break
@ -242,71 +360,139 @@ func (t *titleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
} }
} else if !c.copy() { } else if !c.copy() {
break break
} } else if p.isBreak() {
// TODO: make this an "else if" if we can prove that no rune that does
// not match the first condition of the if statement can be a break.
if p.isBreak() {
c.isMidWord = false c.isMidWord = false
} }
// As we save the state of the transformer, it is safe to call // As we save the state of the transformer, it is safe to call
// checkpoint after any successful write. // checkpoint after any successful write.
c.checkpoint() if !(c.isMidWord && wasMid) {
c.checkpoint()
}
if !c.next() { if !c.next() {
break break
} }
if wasMid && c.info.isCaseIgnorableAndNonBreakStarter() { if wasMid && c.info.isMid() {
c.isMidWord = false c.isMidWord = false
} }
} }
return c.ret() return c.ret()
} }
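To make the effect of the Mid category concrete, here is a minimal usage sketch of the public API (assuming golang.org/x/text/cases behaves as the rules above describe; the apostrophe is a Single_Quote, i.e. a Mid rune, so it does not start a new word):

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	t := cases.Title(language.Und)
	// The apostrophe does not break the word, so the trailing "s" stays lower case.
	fmt.Println(t.String("it's a test")) // It's A Test
}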
func (t *titleCaser) Span(src []byte, atEOF bool) (n int, err error) {
t.context = context{src: src, atEOF: atEOF, isMidWord: t.isMidWord}
c := &t.context
if !c.next() {
return c.retSpan()
}
for {
p := c.info
if t.rewrite != nil {
t.rewrite(c)
}
wasMid := p.isMid()
// Break out of this loop on failure to ensure we do not modify the
// state incorrectly.
if p.isCased() {
if !c.isMidWord {
if !t.titleSpan(c) {
break
}
c.isMidWord = true
} else if !isLower(c) {
break
}
} else if p.isBreak() {
c.isMidWord = false
}
// As we save the state of the transformer, it is safe to call
// checkpoint after any successful write.
if !(c.isMidWord && wasMid) {
c.checkpoint()
}
if !c.next() {
break
}
if wasMid && c.info.isMid() {
c.isMidWord = false
}
}
return c.retSpan()
}
// finalSigma adds Greek final Sigma handling to another casing function. It // finalSigma adds Greek final Sigma handling to another casing function. It
// determines whether a lowercased sigma should be σ or ς, by looking ahead for // determines whether a lowercased sigma should be σ or ς, by looking ahead for
// case-ignorables and a cased letter. // case-ignorables and a cased letter.
func finalSigma(f mapFunc) mapFunc { func finalSigma(f mapFunc) mapFunc {
return func(c *context) bool { return func(c *context) bool {
// ::NFD();
// # 03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA
// Σ } [:case-ignorable:]* [:cased:] → σ;
// [:cased:] [:case-ignorable:]* { Σ → ς;
// ::Any-Lower;
// ::NFC();
if !c.hasPrefix("Σ") { if !c.hasPrefix("Σ") {
return f(c) return f(c)
} }
return finalSigmaBody(c)
p := c.pDst
c.writeString("ς")
// We need to do one more iteration after maxIgnorable, as a cased
// letter is not an ignorable and may modify the result.
for i := 0; i < maxIgnorable+1; i++ {
if !c.next() {
return false
}
if !c.info.isCaseIgnorable() {
if c.info.isCased() {
// p+1 is guaranteed to be in bounds: if writing ς was
// successful, p+1 will contain the second byte of ς. If not,
// this function will have returned after c.next returned false.
c.dst[p+1]++ // ς → σ
}
c.unreadRune()
return true
}
// A case ignorable may also introduce a word break, so we may need
// to continue searching even after detecting a break.
c.isMidWord = c.isMidWord && !c.info.isBreak()
c.copy()
}
return true
} }
} }
func finalSigmaBody(c *context) bool {
// Current rune must be Σ.
// ::NFD();
// # 03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA
// Σ } [:case-ignorable:]* [:cased:] → σ;
// [:cased:] [:case-ignorable:]* { Σ → ς;
// ::Any-Lower;
// ::NFC();
p := c.pDst
c.writeString("ς")
// TODO: we should do this here, but right now this will never have an
// effect as this is called when the prefix is Sigma, whereas Dutch and
// Afrikaans only test for an apostrophe.
//
// if t.rewrite != nil {
// t.rewrite(c)
// }
// We need to do one more iteration after maxIgnorable, as a cased
// letter is not an ignorable and may modify the result.
wasMid := false
for i := 0; i < maxIgnorable+1; i++ {
if !c.next() {
return false
}
if !c.info.isCaseIgnorable() {
// All Midword runes are also case ignorable, so we are
// guaranteed to have a letter or word break here. As we are
// unreading the rune, there is no need to unset c.isMidWord;
// the title caser will handle this.
if c.info.isCased() {
// p+1 is guaranteed to be in bounds: if writing ς was
// successful, p+1 will contain the second byte of ς. If not,
// this function will have returned after c.next returned false.
c.dst[p+1]++ // ς → σ
}
c.unreadRune()
return true
}
// A case ignorable may also introduce a word break, so we may need
// to continue searching even after detecting a break.
isMid := c.info.isMid()
if (wasMid && isMid) || c.info.isBreak() {
c.isMidWord = false
}
wasMid = isMid
c.copy()
}
return true
}
// finalSigmaSpan would be the same as isLower.
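As a quick illustration of what the final-sigma rule produces, here is a short sketch reusing the imports from the earlier example (the expected output assumes the transform rules quoted above):

c := cases.Lower(language.Und)
// Word-initial and medial Σ lowercase to σ; the word-final Σ becomes ς.
fmt.Println(c.String("ΣΙΣΥΦΟΣ")) // σισυφος (note the final ς)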
// elUpper implements Greek upper casing, which entails removing a predefined // elUpper implements Greek upper casing, which entails removing a predefined
// set of non-blocked modifiers. Note that these accents should not be removed // set of non-blocked modifiers. Note that these accents should not be removed
// for title casing! // for title casing!
@ -376,6 +562,8 @@ func elUpper(c *context) bool {
return i == maxIgnorable return i == maxIgnorable
} }
// TODO: implement elUpperSpan (low-priority: complex and infrequent).
func ltLower(c *context) bool { func ltLower(c *context) bool {
// From CLDR: // From CLDR:
// # Introduce an explicit dot above when lowercasing capital I's and J's // # Introduce an explicit dot above when lowercasing capital I's and J's
@ -390,10 +578,10 @@ func ltLower(c *context) bool {
// ::NFD(); // ::NFD();
// I } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0307; // I } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0307;
// J } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → j \u0307; // J } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → j \u0307;
// Į } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → į \u0307; // I \u0328 (Į) } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0328 \u0307;
// Ì → i \u0307 \u0300; // I \u0300 (Ì) → i \u0307 \u0300;
// Í → i \u0307 \u0301; // I \u0301 (Í) → i \u0307 \u0301;
// Ĩ → i \u0307 \u0303; // I \u0303 (Ĩ) → i \u0307 \u0303;
// ::Any-Lower(); // ::Any-Lower();
// ::NFC(); // ::NFC();
@ -445,9 +633,16 @@ func ltLower(c *context) bool {
return i == maxIgnorable return i == maxIgnorable
} }
// ltLowerSpan would be the same as isLower.
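In terms of the public API, the rule above means Lithuanian lowercasing inserts an explicit combining dot above before any remaining accent on i/j. A sketch, using the same imports as the earlier example (the expected bytes assume the CLDR rules quoted above):

c := cases.Lower(language.Lithuanian)
// "Ì" (U+00CC) becomes i + U+0307 (combining dot above) + U+0300 (grave).
fmt.Printf("%+q\n", c.String("Ì")) // "i\u0307\u0300"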
func ltUpper(f mapFunc) mapFunc { func ltUpper(f mapFunc) mapFunc {
return func(c *context) bool { return func(c *context) bool {
// Unicode:
// 0307; 0307; ; ; lt After_Soft_Dotted; # COMBINING DOT ABOVE
//
// From CLDR: // From CLDR:
// # Remove \u0307 following soft-dotteds (i, j, and the like), with possible
// # intervening non-230 marks.
// ::NFD(); // ::NFD();
// [:Soft_Dotted:] [^[:ccc=Not_Reordered:][:ccc=Above:]]* { \u0307 → ; // [:Soft_Dotted:] [^[:ccc=Not_Reordered:][:ccc=Above:]]* { \u0307 → ;
// ::Any-Upper(); // ::Any-Upper();
@ -511,6 +706,8 @@ func ltUpper(f mapFunc) mapFunc {
} }
} }
// TODO: implement ltUpperSpan (low priority: complex and infrequent).
func aztrUpper(f mapFunc) mapFunc { func aztrUpper(f mapFunc) mapFunc {
return func(c *context) bool { return func(c *context) bool {
// i→İ; // i→İ;
@ -571,6 +768,8 @@ Loop:
return c.writeString("ı") && c.writeBytes(c.src[start:c.pSrc+c.sz]) && done return c.writeString("ı") && c.writeBytes(c.src[start:c.pSrc+c.sz]) && done
} }
// aztrLowerSpan would be the same as isLower.
func nlTitle(c *context) bool { func nlTitle(c *context) bool {
// From CLDR: // From CLDR:
// # Special titlecasing for Dutch initial "ij". // # Special titlecasing for Dutch initial "ij".
@ -591,6 +790,24 @@ func nlTitle(c *context) bool {
return true return true
} }
func nlTitleSpan(c *context) bool {
// From CLDR:
// # Special titlecasing for Dutch initial "ij".
// ::Any-Title();
// # Fix up Ij at the beginning of a "word" (per Any-Title, not UAX #29)
// [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? { Ij } → IJ ;
if c.src[c.pSrc] != 'I' {
return isTitle(c)
}
if !c.next() || c.src[c.pSrc] == 'j' {
return false
}
if c.src[c.pSrc] != 'J' {
c.unreadRune()
}
return true
}
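The Dutch special case is easiest to see through the public Title caser (same imports as the earlier example; the output assumes the CLDR rule above):

c := cases.Title(language.Dutch)
// Both i and j are uppercased at the start of a word.
fmt.Println(c.String("ijsselmeer")) // IJsselmeer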
// Not part of CLDR, but see http://unicode.org/cldr/trac/ticket/7078. // Not part of CLDR, but see http://unicode.org/cldr/trac/ticket/7078.
func afnlRewrite(c *context) { func afnlRewrite(c *context) {
if c.hasPrefix("'") || c.hasPrefix("’") { if c.hasPrefix("'") || c.hasPrefix("’") {

File diff suppressed because it is too large.


@ -1,4 +1,4 @@
// This file was generated by go generate; DO NOT EDIT // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package cases package cases
@ -22,6 +22,7 @@ package cases
// Only 13..8 are used for XOR patterns. // Only 13..8 are used for XOR patterns.
// 7 inverseFold (fold to upper, not to lower) // 7 inverseFold (fold to upper, not to lower)
// 6 index: interpret the XOR pattern as an index // 6 index: interpret the XOR pattern as an index
// or isMid if case mode is cIgnorableUncased.
// 5..4 CCC: zero (normal or break), above or other // 5..4 CCC: zero (normal or break), above or other
// } // }
// 3 exception: interpret this value as an exception index // 3 exception: interpret this value as an exception index
@ -44,6 +45,7 @@ const (
ignorableValue = 0x0004 ignorableValue = 0x0004
inverseFoldBit = 1 << 7 inverseFoldBit = 1 << 7
isMidBit = 1 << 6
exceptionBit = 1 << 3 exceptionBit = 1 << 3
exceptionShift = 5 exceptionShift = 5
@ -53,7 +55,7 @@ const (
xorShift = 8 xorShift = 8
// There is no mapping if all xor bits and the exception bit are zero. // There is no mapping if all xor bits and the exception bit are zero.
hasMappingMask = 0xffc0 | exceptionBit hasMappingMask = 0xff80 | exceptionBit
) )
// The case mode bits encodes the case type of a rune. This includes uncased, // The case mode bits encodes the case type of a rune. This includes uncased,
@ -91,10 +93,6 @@ func (c info) isCaseIgnorable() bool {
return c&ignorableMask == ignorableValue return c&ignorableMask == ignorableValue
} }
func (c info) isCaseIgnorableAndNonBreakStarter() bool {
return c&(fullCasedMask|cccMask) == (ignorableValue | cccZero)
}
func (c info) isNotCasedAndNotCaseIgnorable() bool { func (c info) isNotCasedAndNotCaseIgnorable() bool {
return c&fullCasedMask == 0 return c&fullCasedMask == 0
} }
@ -103,6 +101,10 @@ func (c info) isCaseIgnorableAndNotCased() bool {
return c&fullCasedMask == cIgnorableUncased return c&fullCasedMask == cIgnorableUncased
} }
func (c info) isMid() bool {
return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased
}
// The case mapping implementation will need to know about various Canonical // The case mapping implementation will need to know about various Canonical
// Combining Class (CCC) values. We encode two of these in the trie value: // Combining Class (CCC) values. We encode two of these in the trie value:
// cccZero (0) and cccAbove (230). If the value is cccOther, it means that // cccZero (0) and cccAbove (230). If the value is cccOther, it means that


@ -52,7 +52,7 @@ type Decoder struct {
} }
// Bytes converts the given encoded bytes to UTF-8. It returns the converted // Bytes converts the given encoded bytes to UTF-8. It returns the converted
// bytes or 0, err if any error occurred. // bytes or nil, err if any error occurred.
func (d *Decoder) Bytes(b []byte) ([]byte, error) { func (d *Decoder) Bytes(b []byte) ([]byte, error) {
b, _, err := transform.Bytes(d, b) b, _, err := transform.Bytes(d, b)
if err != nil { if err != nil {
@ -62,7 +62,7 @@ func (d *Decoder) Bytes(b []byte) ([]byte, error) {
} }
// String converts the given encoded string to UTF-8. It returns the converted // String converts the given encoded string to UTF-8. It returns the converted
// string or 0, err if any error occurred. // string or "", err if any error occurred.
func (d *Decoder) String(s string) (string, error) { func (d *Decoder) String(s string) (string, error) {
s, _, err := transform.String(d, s) s, _, err := transform.String(d, s)
if err != nil { if err != nil {
@ -95,7 +95,7 @@ type Encoder struct {
_ struct{} _ struct{}
} }
// Bytes converts bytes from UTF-8. It returns the converted bytes or 0, err if // Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if
// any error occurred. // any error occurred.
func (e *Encoder) Bytes(b []byte) ([]byte, error) { func (e *Encoder) Bytes(b []byte) ([]byte, error) {
b, _, err := transform.Bytes(e, b) b, _, err := transform.Bytes(e, b)
@ -106,7 +106,7 @@ func (e *Encoder) Bytes(b []byte) ([]byte, error) {
} }
// String converts a string from UTF-8. It returns the converted string or // String converts a string from UTF-8. It returns the converted string or
// 0, err if any error occurred. // "", err if any error occurred.
func (e *Encoder) String(s string) (string, error) { func (e *Encoder) String(s string) (string, error) {
s, _, err := transform.String(e, s) s, _, err := transform.String(e, s)
if err != nil { if err != nil {
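As a usage sketch of the Decoder/Encoder helpers documented above (charmap.Windows1252 is just an example; any encoding.Encoding exposes the same methods):

package main

import (
	"fmt"
	"log"

	"golang.org/x/text/encoding/charmap"
)

func main() {
	// Decode Windows-1252 bytes to UTF-8; 0xE9 is é in Windows-1252.
	d := charmap.Windows1252.NewDecoder()
	s, err := d.String("caf\xe9")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s) // café

	// Encode UTF-8 back to Windows-1252 bytes.
	e := charmap.Windows1252.NewEncoder()
	b, err := e.Bytes([]byte("café"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% x\n", b) // 63 61 66 e9
}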


@ -36,8 +36,8 @@ package identifier
// - http://www.ietf.org/rfc/rfc2978.txt // - http://www.ietf.org/rfc/rfc2978.txt
// - http://www.unicode.org/reports/tr22/ // - http://www.unicode.org/reports/tr22/
// - http://www.w3.org/TR/encoding/ // - http://www.w3.org/TR/encoding/
// - http://www.w3.org/TR/encoding/indexes/encodings.json
// - https://encoding.spec.whatwg.org/ // - https://encoding.spec.whatwg.org/
// - https://encoding.spec.whatwg.org/encodings.json
// - https://tools.ietf.org/html/rfc6657#section-5 // - https://tools.ietf.org/html/rfc6657#section-5
// Interface can be implemented by Encodings to define the CCS or CES for which // Interface can be implemented by Encodings to define the CCS or CES for which


@ -1,4 +1,4 @@
// This file was generated by go generate; DO NOT EDIT // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package identifier package identifier

30
vendor/golang.org/x/text/internal/BUILD generated vendored Normal file

@ -0,0 +1,30 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"internal.go",
"match.go",
"tables.go",
],
visibility = ["//visibility:public"],
deps = ["//vendor/golang.org/x/text/language:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/golang.org/x/text/internal/tag:all-srcs",
"//vendor/golang.org/x/text/internal/utf8internal:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

52
vendor/golang.org/x/text/internal/gen.go generated vendored Normal file

@ -0,0 +1,52 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"log"
"golang.org/x/text/internal/gen"
"golang.org/x/text/language"
"golang.org/x/text/unicode/cldr"
)
func main() {
r := gen.OpenCLDRCoreZip()
defer r.Close()
d := &cldr.Decoder{}
data, err := d.DecodeZip(r)
if err != nil {
log.Fatalf("DecodeZip: %v", err)
}
w := gen.NewCodeWriter()
defer w.WriteGoFile("tables.go", "internal")
// Create parents table.
parents := make([]uint16, language.NumCompactTags)
for _, loc := range data.Locales() {
tag := language.MustParse(loc)
index, ok := language.CompactIndex(tag)
if !ok {
continue
}
parentIndex := 0 // und
for p := tag.Parent(); p != language.Und; p = p.Parent() {
if x, ok := language.CompactIndex(p); ok {
parentIndex = x
break
}
}
parents[index] = uint16(parentIndex)
}
w.WriteComment(`
Parent maps a compact index of a tag to the compact index of the parent of
this tag.`)
w.WriteVar("Parent", parents)
}

51
vendor/golang.org/x/text/internal/internal.go generated vendored Normal file

@ -0,0 +1,51 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go
// Package internal contains non-exported functionality that is used by
// packages in the text repository.
package internal
import (
"sort"
"golang.org/x/text/language"
)
// SortTags sorts tags in place.
func SortTags(tags []language.Tag) {
sort.Sort(sorter(tags))
}
type sorter []language.Tag
func (s sorter) Len() int {
return len(s)
}
func (s sorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s sorter) Less(i, j int) bool {
return s[i].String() < s[j].String()
}
// UniqueTags sorts and filters duplicate tags in place and returns a slice with
// only unique tags.
func UniqueTags(tags []language.Tag) []language.Tag {
if len(tags) <= 1 {
return tags
}
SortTags(tags)
k := 0
for i := 1; i < len(tags); i++ {
if tags[k].String() < tags[i].String() {
k++
tags[k] = tags[i]
}
}
return tags[:k+1]
}

67
vendor/golang.org/x/text/internal/match.go generated vendored Normal file

@ -0,0 +1,67 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package internal
// This file contains matchers that implement CLDR inheritance.
//
// See http://unicode.org/reports/tr35/#Locale_Inheritance.
//
// Some of the inheritance described in this document is already handled by
// the cldr package.
import (
"golang.org/x/text/language"
)
// TODO: consider if (some of the) matching algorithm needs to be public after
// getting some feel about what is generic and what is specific.
// NewInheritanceMatcher returns a matcher that matches based on the inheritance
// chain.
//
// The matcher uses canonicalization and the parent relationship to find a
// match. The resulting match will always be either Und or a language with the
// same language and script as the requested language. It will not match
// languages for which there is understood to be mutual or one-directional
// intelligibility.
//
// A Match will indicate an Exact match if the language matches after
// canonicalization and High if the matched tag is a parent.
func NewInheritanceMatcher(t []language.Tag) *InheritanceMatcher {
tags := &InheritanceMatcher{make(map[language.Tag]int)}
for i, tag := range t {
ct, err := language.All.Canonicalize(tag)
if err != nil {
ct = tag
}
tags.index[ct] = i
}
return tags
}
type InheritanceMatcher struct {
index map[language.Tag]int
}
func (m InheritanceMatcher) Match(want ...language.Tag) (language.Tag, int, language.Confidence) {
for _, t := range want {
ct, err := language.All.Canonicalize(t)
if err != nil {
ct = t
}
conf := language.Exact
for {
if index, ok := m.index[ct]; ok {
return ct, index, conf
}
if ct == language.Und {
break
}
ct = ct.Parent()
conf = language.High
}
}
return language.Und, 0, language.No
}
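A sketch of how the inheritance matcher behaves (illustrative only; the package is internal to x/text, so this assumes code living inside the repository):

m := internal.NewInheritanceMatcher([]language.Tag{
	language.English,    // index 0
	language.Portuguese, // index 1
})
// pt-BR is not supported directly, but its parent pt is, so the match walks
// the parent chain and reports High confidence.
tag, index, conf := m.Match(language.MustParse("pt-BR"))
fmt.Println(tag, index, conf) // pt 1 High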

117
vendor/golang.org/x/text/internal/tables.go generated vendored Normal file

@ -0,0 +1,117 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package internal
// Parent maps a compact index of a tag to the compact index of the parent of
// this tag.
var Parent = []uint16{ // 754 elements
// Entry 0 - 3F
0x0000, 0x0053, 0x00e5, 0x0000, 0x0003, 0x0003, 0x0000, 0x0006,
0x0000, 0x0008, 0x0000, 0x000a, 0x0000, 0x000c, 0x000c, 0x000c,
0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
0x000c, 0x0000, 0x0000, 0x002a, 0x0000, 0x002c, 0x0000, 0x002e,
0x0000, 0x0000, 0x0031, 0x0030, 0x0030, 0x0000, 0x0035, 0x0000,
0x0037, 0x0000, 0x0039, 0x0000, 0x003b, 0x0000, 0x003d, 0x0000,
// Entry 40 - 7F
0x0000, 0x0040, 0x0000, 0x0042, 0x0042, 0x0000, 0x0045, 0x0045,
0x0000, 0x0048, 0x0000, 0x004a, 0x0000, 0x0000, 0x004d, 0x004c,
0x004c, 0x0000, 0x0051, 0x0051, 0x0051, 0x0051, 0x0000, 0x0056,
0x0000, 0x0058, 0x0000, 0x005a, 0x0000, 0x005c, 0x005c, 0x0000,
0x005f, 0x0000, 0x0061, 0x0000, 0x0063, 0x0000, 0x0065, 0x0065,
0x0000, 0x0068, 0x0000, 0x006a, 0x006a, 0x006a, 0x006a, 0x006a,
0x006a, 0x006a, 0x0000, 0x0072, 0x0000, 0x0074, 0x0000, 0x0076,
0x0000, 0x0000, 0x0079, 0x0000, 0x007b, 0x0000, 0x007d, 0x0000,
// Entry 80 - BF
0x007f, 0x007f, 0x0000, 0x0082, 0x0082, 0x0000, 0x0085, 0x0086,
0x0086, 0x0086, 0x0085, 0x0087, 0x0086, 0x0086, 0x0086, 0x0085,
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, 0x0086,
0x0086, 0x0086, 0x0086, 0x0087, 0x0086, 0x0087, 0x0086, 0x0086,
0x0087, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
0x0086, 0x0086, 0x0085, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
0x0086, 0x0086, 0x0086, 0x0086, 0x0085, 0x0086, 0x0085, 0x0086,
// Entry C0 - FF
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087,
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0085,
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, 0x0086, 0x0086,
0x0087, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
0x0086, 0x0086, 0x0086, 0x0086, 0x0085, 0x0085, 0x0086, 0x0086,
0x0085, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0000, 0x00ee,
0x0000, 0x00f0, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1,
0x00f1, 0x00f1, 0x00f1, 0x00f0, 0x00f1, 0x00f0, 0x00f0, 0x00f1,
// Entry 100 - 13F
0x00f1, 0x00f0, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f0, 0x00f1,
0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x0000, 0x010d, 0x0000,
0x010f, 0x0000, 0x0111, 0x0000, 0x0113, 0x0113, 0x0000, 0x0116,
0x0116, 0x0116, 0x0116, 0x0000, 0x011b, 0x0000, 0x011d, 0x0000,
0x011f, 0x011f, 0x0000, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122,
0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122,
0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122,
0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122,
// Entry 140 - 17F
0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122,
0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122, 0x0122,
0x0122, 0x0000, 0x0151, 0x0000, 0x0153, 0x0000, 0x0155, 0x0000,
0x0157, 0x0000, 0x0159, 0x0000, 0x015b, 0x015b, 0x015b, 0x0000,
0x015f, 0x0000, 0x0000, 0x0162, 0x0000, 0x0164, 0x0000, 0x0166,
0x0166, 0x0166, 0x0000, 0x016a, 0x0000, 0x016c, 0x0000, 0x016e,
0x0000, 0x0170, 0x0170, 0x0000, 0x0173, 0x0000, 0x0175, 0x0000,
0x0177, 0x0000, 0x0179, 0x0000, 0x017b, 0x0000, 0x017d, 0x0000,
// Entry 180 - 1BF
0x017f, 0x0000, 0x0181, 0x0181, 0x0181, 0x0181, 0x0000, 0x0000,
0x0187, 0x0000, 0x0000, 0x018a, 0x0000, 0x018c, 0x0000, 0x0000,
0x018f, 0x0000, 0x0191, 0x0000, 0x0000, 0x0194, 0x0000, 0x0000,
0x0197, 0x0000, 0x0199, 0x0000, 0x019b, 0x0000, 0x019d, 0x0000,
0x019f, 0x0000, 0x01a1, 0x0000, 0x01a3, 0x0000, 0x01a5, 0x0000,
0x01a7, 0x0000, 0x01a9, 0x0000, 0x01ab, 0x01ab, 0x0000, 0x01ae,
0x0000, 0x01b0, 0x0000, 0x01b2, 0x0000, 0x01b4, 0x0000, 0x01b6,
0x0000, 0x0000, 0x01b9, 0x0000, 0x01bb, 0x0000, 0x01bd, 0x0000,
// Entry 1C0 - 1FF
0x01bf, 0x0000, 0x01c1, 0x0000, 0x01c3, 0x0000, 0x01c5, 0x01c5,
0x01c5, 0x01c5, 0x0000, 0x01ca, 0x0000, 0x01cc, 0x01cc, 0x0000,
0x01cf, 0x0000, 0x01d1, 0x0000, 0x01d3, 0x0000, 0x01d5, 0x0000,
0x01d7, 0x0000, 0x01d9, 0x01d9, 0x0000, 0x01dc, 0x0000, 0x01de,
0x0000, 0x01e0, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6,
0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x0000, 0x01ee,
0x01ee, 0x01ee, 0x0000, 0x01f2, 0x0000, 0x01f4, 0x0000, 0x01f6,
0x0000, 0x01f8, 0x0000, 0x0000, 0x01fb, 0x0000, 0x01fd, 0x01fd,
// Entry 200 - 23F
0x0000, 0x0200, 0x0000, 0x0202, 0x0202, 0x0000, 0x0205, 0x0205,
0x0000, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208,
0x0000, 0x0210, 0x0000, 0x0212, 0x0000, 0x0214, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x021a, 0x0000, 0x0000, 0x021d, 0x0000,
0x021f, 0x021f, 0x0000, 0x0222, 0x0000, 0x0224, 0x0224, 0x0000,
0x0000, 0x0228, 0x0227, 0x0227, 0x0000, 0x0000, 0x022d, 0x0000,
0x022f, 0x0000, 0x0231, 0x0000, 0x023d, 0x0233, 0x023d, 0x023d,
0x023d, 0x023d, 0x023d, 0x023d, 0x023d, 0x0233, 0x023d, 0x023d,
// Entry 240 - 27F
0x0000, 0x0240, 0x0240, 0x0240, 0x0000, 0x0244, 0x0000, 0x0246,
0x0000, 0x0248, 0x0248, 0x0000, 0x024b, 0x0000, 0x024d, 0x024d,
0x024d, 0x024d, 0x024d, 0x024d, 0x0000, 0x0254, 0x0000, 0x0256,
0x0000, 0x0258, 0x0000, 0x025a, 0x0000, 0x025c, 0x0000, 0x0000,
0x025f, 0x025f, 0x025f, 0x0000, 0x0263, 0x0000, 0x0265, 0x0000,
0x0267, 0x0000, 0x0000, 0x026a, 0x0269, 0x0269, 0x0000, 0x026e,
0x0000, 0x0270, 0x0000, 0x0272, 0x0000, 0x0000, 0x0000, 0x0000,
0x0277, 0x0000, 0x0000, 0x027a, 0x0000, 0x027c, 0x027c, 0x027c,
// Entry 280 - 2BF
0x027c, 0x0000, 0x0281, 0x0281, 0x0281, 0x0000, 0x0285, 0x0285,
0x0285, 0x0285, 0x0285, 0x0000, 0x028b, 0x028b, 0x028b, 0x028b,
0x0000, 0x0000, 0x0000, 0x0000, 0x0293, 0x0293, 0x0293, 0x0000,
0x0297, 0x0297, 0x0297, 0x0297, 0x0000, 0x0000, 0x029d, 0x029d,
0x029d, 0x029d, 0x0000, 0x02a2, 0x0000, 0x02a4, 0x02a4, 0x0000,
0x02a7, 0x0000, 0x02a9, 0x02a9, 0x0000, 0x0000, 0x02ad, 0x0000,
0x0000, 0x02b0, 0x0000, 0x02b2, 0x02b2, 0x0000, 0x0000, 0x02b6,
0x0000, 0x02b8, 0x0000, 0x02ba, 0x0000, 0x02bc, 0x0000, 0x02be,
// Entry 2C0 - 2FF
0x02be, 0x0000, 0x0000, 0x02c2, 0x0000, 0x02c4, 0x02c1, 0x02c1,
0x0000, 0x0000, 0x02c9, 0x02c8, 0x02c8, 0x0000, 0x0000, 0x02ce,
0x0000, 0x02d0, 0x0000, 0x02d2, 0x0000, 0x0000, 0x02d5, 0x0000,
0x0000, 0x0000, 0x02d9, 0x0000, 0x02db, 0x0000, 0x02dd, 0x0000,
0x02df, 0x02df, 0x0000, 0x02e2, 0x0000, 0x02e4, 0x0000, 0x02e6,
0x02e6, 0x02e6, 0x02e6, 0x02e6, 0x0000, 0x02ec, 0x02ed, 0x02ec,
0x0000, 0x02f0,
} // Size: 1532 bytes
// Total table size 1532 bytes (1KiB); checksum: 90718A2
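The table is indexed by the compact index used throughout x/text. A sketch of how code inside the repository could walk it (illustrative; the concrete chain depends on the CLDR version behind the table):

if idx, ok := language.CompactIndex(language.MustParse("en-GB")); ok {
	// Walk compact indices up to the root; index 0 is und and maps to itself.
	for ; idx != 0; idx = int(internal.Parent[idx]) {
		fmt.Println(idx)
	}
}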


@ -1,4 +1,4 @@
// This file was generated by go generate; DO NOT EDIT // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package language package language


@ -678,6 +678,8 @@ func (b *builder) parseIndices() {
b.locale.parse(meta.DefaultContent.Locales) b.locale.parse(meta.DefaultContent.Locales)
} }
// TODO: region inclusion data will probably not be used in future matchers.
func (b *builder) computeRegionGroups() { func (b *builder) computeRegionGroups() {
b.groups = make(map[int]index) b.groups = make(map[int]index)
@ -686,6 +688,11 @@ func (b *builder) computeRegionGroups() {
b.groups[i] = index(len(b.groups)) b.groups[i] = index(len(b.groups))
} }
for _, g := range b.supp.TerritoryContainment.Group { for _, g := range b.supp.TerritoryContainment.Group {
// Skip UN and EURO zone as they are flattening the containment
// relationship.
if g.Type == "EZ" || g.Type == "UN" {
continue
}
group := b.region.index(g.Type) group := b.region.index(g.Type)
if _, ok := b.groups[group]; !ok { if _, ok := b.groups[group]; !ok {
b.groups[group] = index(len(b.groups)) b.groups[group] = index(len(b.groups))
@ -782,6 +789,7 @@ func (b *builder) writeLanguage() {
lang.updateLater("tw", "twi") lang.updateLater("tw", "twi")
lang.updateLater("nb", "nob") lang.updateLater("nb", "nob")
lang.updateLater("ak", "aka") lang.updateLater("ak", "aka")
lang.updateLater("bh", "bih")
// Ensure that each 2-letter code is matched with a 3-letter code. // Ensure that each 2-letter code is matched with a 3-letter code.
for _, v := range lang.s[1:] { for _, v := range lang.s[1:] {
@ -798,10 +806,10 @@ func (b *builder) writeLanguage() {
} }
} }
// Complete canonialized language tags. // Complete canonicalized language tags.
lang.freeze() lang.freeze()
for i, v := range lang.s { for i, v := range lang.s {
// We can avoid these manual entries by using the IANI registry directly. // We can avoid these manual entries by using the IANA registry directly.
// Seems easier to update the list manually, as changes are rare. // Seems easier to update the list manually, as changes are rare.
// The panic in this loop will trigger if we miss an entry. // The panic in this loop will trigger if we miss an entry.
add := "" add := ""
@ -908,7 +916,7 @@ func (b *builder) writeRegion() {
i := b.region.index(s) i := b.region.index(s)
for _, d := range e.description { for _, d := range e.description {
if strings.Contains(d, "Private use") { if strings.Contains(d, "Private use") {
regionTypes[i] = iso3166UserAssgined regionTypes[i] = iso3166UserAssigned
} }
} }
regionTypes[i] |= bcp47Region regionTypes[i] |= bcp47Region
@ -1065,7 +1073,7 @@ const (
) )
const ( const (
iso3166UserAssgined = 1 << iota iso3166UserAssigned = 1 << iota
ccTLD ccTLD
bcp47Region bcp47Region
) )
@ -1355,42 +1363,23 @@ func (b *builder) writeLikelyData() {
type mutualIntelligibility struct { type mutualIntelligibility struct {
want, have uint16 want, have uint16
conf uint8 distance uint8
oneway bool oneway bool
} }
type scriptIntelligibility struct { type scriptIntelligibility struct {
lang uint16 // langID or 0 if * wantLang, haveLang uint16
want, have uint8 wantScript, haveScript uint8
conf uint8 distance uint8
// Always oneway
} }
type sortByConf []mutualIntelligibility type regionIntelligibility struct {
lang uint16 // compact language id
func (l sortByConf) Less(a, b int) bool { script uint8 // 0 means any
return l[a].conf > l[b].conf group uint8 // 0 means any; if bit 7 is set it means inverse
} distance uint8
// Always twoway.
func (l sortByConf) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}
func (l sortByConf) Len() int {
return len(l)
}
// toConf converts a percentage value [0, 100] to a confidence class.
func toConf(pct uint8) uint8 {
switch {
case pct == 100:
return 3 // Exact
case pct >= 90:
return 2 // High
case pct > 50:
return 1 // Low
default:
return 0 // No
}
} }
// writeMatchData writes tables with languages and scripts for which there is // writeMatchData writes tables with languages and scripts for which there is
@ -1400,13 +1389,50 @@ func toConf(pct uint8) uint8 {
// We also drop all region-related data as we use a different algorithm to // We also drop all region-related data as we use a different algorithm to
// determine region equivalence. // determine region equivalence.
func (b *builder) writeMatchData() { func (b *builder) writeMatchData() {
lm := b.supp.LanguageMatching.LanguageMatches
cldr.MakeSlice(&lm).SelectAnyOf("type", "written_new")
regionHierarchy := map[string][]string{}
for _, g := range b.supp.TerritoryContainment.Group {
regions := strings.Split(g.Contains, " ")
regionHierarchy[g.Type] = append(regionHierarchy[g.Type], regions...)
}
regionToGroups := make([]uint8, len(b.region.s))
idToIndex := map[string]uint8{}
for i, mv := range lm[0].MatchVariable {
if i > 6 {
log.Fatalf("Too many groups: %d", i)
}
idToIndex[mv.Id] = uint8(i + 1)
// TODO: also handle '-'
for _, r := range strings.Split(mv.Value, "+") {
todo := []string{r}
for k := 0; k < len(todo); k++ {
r := todo[k]
regionToGroups[b.region.index(r)] |= 1 << uint8(i)
todo = append(todo, regionHierarchy[r]...)
}
}
}
b.writeSlice("regionToGroups", regionToGroups)
b.writeType(mutualIntelligibility{}) b.writeType(mutualIntelligibility{})
b.writeType(scriptIntelligibility{}) b.writeType(scriptIntelligibility{})
lm := b.supp.LanguageMatching.LanguageMatches b.writeType(regionIntelligibility{})
cldr.MakeSlice(&lm).SelectAnyOf("type", "written")
matchLang := []mutualIntelligibility{} matchLang := []mutualIntelligibility{{
// TODO: remove once CLDR is fixed.
want: uint16(b.langIndex("sr")),
have: uint16(b.langIndex("hr")),
distance: uint8(5),
}, {
want: uint16(b.langIndex("sr")),
have: uint16(b.langIndex("bs")),
distance: uint8(5),
}}
matchScript := []scriptIntelligibility{} matchScript := []scriptIntelligibility{}
matchRegion := []regionIntelligibility{}
// Convert the languageMatch entries in lists keyed by desired language. // Convert the languageMatch entries in lists keyed by desired language.
for _, m := range lm[0].LanguageMatch { for _, m := range lm[0].LanguageMatch {
// Different versions of CLDR use different separators. // Different versions of CLDR use different separators.
@ -1414,33 +1440,38 @@ func (b *builder) writeMatchData() {
supported := strings.Replace(m.Supported, "-", "_", -1) supported := strings.Replace(m.Supported, "-", "_", -1)
d := strings.Split(desired, "_") d := strings.Split(desired, "_")
s := strings.Split(supported, "_") s := strings.Split(supported, "_")
if len(d) != len(s) || len(d) > 2 { if len(d) != len(s) {
// Skip all entries with regions and work around CLDR bug. log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
continue continue
} }
pct, _ := strconv.ParseInt(m.Percent, 10, 8) distance, _ := strconv.ParseInt(m.Distance, 10, 8)
if len(d) == 2 && d[0] == s[0] && len(d[1]) == 4 { switch len(d) {
// language-script pair. case 2:
lang := uint16(0) if desired == supported && desired == "*_*" {
if d[0] != "*" { continue
lang = uint16(b.langIndex(d[0]))
} }
// language-script pair.
matchScript = append(matchScript, scriptIntelligibility{ matchScript = append(matchScript, scriptIntelligibility{
lang: lang, wantLang: uint16(b.langIndex(d[0])),
want: uint8(b.script.index(d[1])), haveLang: uint16(b.langIndex(s[0])),
have: uint8(b.script.index(s[1])), wantScript: uint8(b.script.index(d[1])),
conf: toConf(uint8(pct)), haveScript: uint8(b.script.index(s[1])),
distance: uint8(distance),
}) })
if m.Oneway != "true" { if m.Oneway != "true" {
matchScript = append(matchScript, scriptIntelligibility{ matchScript = append(matchScript, scriptIntelligibility{
lang: lang, wantLang: uint16(b.langIndex(s[0])),
want: uint8(b.script.index(s[1])), haveLang: uint16(b.langIndex(d[0])),
have: uint8(b.script.index(d[1])), wantScript: uint8(b.script.index(s[1])),
conf: toConf(uint8(pct)), haveScript: uint8(b.script.index(d[1])),
distance: uint8(distance),
}) })
} }
} else if len(d) == 1 && d[0] != "*" { case 1:
if pct == 100 { if desired == supported && desired == "*" {
continue
}
if distance == 1 {
// nb == no is already handled by macro mapping. Check there // nb == no is already handled by macro mapping. Check there
// really is only this case. // really is only this case.
if d[0] != "no" || s[0] != "nb" { if d[0] != "no" || s[0] != "nb" {
@ -1448,28 +1479,57 @@ func (b *builder) writeMatchData() {
} }
continue continue
} }
// TODO: consider dropping oneway field and just doubling the entry.
matchLang = append(matchLang, mutualIntelligibility{ matchLang = append(matchLang, mutualIntelligibility{
want: uint16(b.langIndex(d[0])), want: uint16(b.langIndex(d[0])),
have: uint16(b.langIndex(s[0])), have: uint16(b.langIndex(s[0])),
conf: uint8(pct), distance: uint8(distance),
oneway: m.Oneway == "true", oneway: m.Oneway == "true",
}) })
} else { case 3:
// TODO: Handle other mappings. if desired == supported && desired == "*_*_*" {
a := []string{"*;*", "*_*;*_*", "es_MX;es_419"} continue
s := strings.Join([]string{desired, supported}, ";")
if i := sort.SearchStrings(a, s); i == len(a) || a[i] != s {
log.Printf("%q not handled", s)
} }
if desired != supported { // (Weird but correct.)
log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
continue
}
ri := regionIntelligibility{
lang: b.langIndex(d[0]),
distance: uint8(distance),
}
if d[1] != "*" {
ri.script = uint8(b.script.index(d[1]))
}
switch {
case d[2] == "*":
ri.group = 0x80 // not contained in anything
case strings.HasPrefix(d[2], "$!"):
ri.group = 0x80
d[2] = "$" + d[2][len("$!"):]
fallthrough
case strings.HasPrefix(d[2], "$"):
ri.group |= idToIndex[d[2]]
}
matchRegion = append(matchRegion, ri)
default:
log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
} }
} }
sort.Stable(sortByConf(matchLang)) sort.SliceStable(matchLang, func(i, j int) bool {
// collapse percentage into confidence classes return matchLang[i].distance < matchLang[j].distance
for i, m := range matchLang { })
matchLang[i].conf = toConf(m.conf)
}
b.writeSlice("matchLang", matchLang) b.writeSlice("matchLang", matchLang)
sort.SliceStable(matchScript, func(i, j int) bool {
return matchScript[i].distance < matchScript[j].distance
})
b.writeSlice("matchScript", matchScript) b.writeSlice("matchScript", matchScript)
sort.SliceStable(matchRegion, func(i, j int) bool {
return matchRegion[i].distance < matchRegion[j].distance
})
b.writeSlice("matchRegion", matchRegion)
} }
func (b *builder) writeRegionInclusionData() { func (b *builder) writeRegionInclusionData() {
@ -1482,6 +1542,11 @@ func (b *builder) writeRegionInclusionData() {
containment = make(map[index][]index) containment = make(map[index][]index)
) )
for _, g := range b.supp.TerritoryContainment.Group { for _, g := range b.supp.TerritoryContainment.Group {
// Skip UN and EURO zone as they are flattening the containment
// relationship.
if g.Type == "EZ" || g.Type == "UN" {
continue
}
group := b.region.index(g.Type) group := b.region.index(g.Type)
groupIdx := b.groups[group] groupIdx := b.groups[group]
for _, mem := range strings.Split(g.Contains, " ") { for _, mem := range strings.Split(g.Contains, " ") {
@ -1508,7 +1573,6 @@ func (b *builder) writeRegionInclusionData() {
for _, v := range l { for _, v := range l {
regionContainment[g] |= 1 << v regionContainment[g] |= 1 << v
} }
// log.Printf("%d: %X", g, regionContainment[g])
} }
b.writeSlice("regionContainment", regionContainment) b.writeSlice("regionContainment", regionContainment)

File diff suppressed because it is too large.


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:generate go run maketables.go gen_common.go -output tables.go //go:generate go run gen.go gen_common.go -output tables.go
//go:generate go run gen_index.go //go:generate go run gen_index.go
// Package language implements BCP 47 language tags and related functionality. // Package language implements BCP 47 language tags and related functionality.
@ -129,8 +129,15 @@ const (
// specific language or locale. All language tag values are guaranteed to be // specific language or locale. All language tag values are guaranteed to be
// well-formed. // well-formed.
type Tag struct { type Tag struct {
lang langID lang langID
region regionID region regionID
// TODO: we will soon run out of positions for script. Idea: instead of
// storing lang, region, and script codes, store only the compact index and
// have a lookup table from this code to its expansion. This greatly speeds
// up table lookup and speeds up common variant cases.
// This will also immediately free up 3 extra bytes. Also, the pVariant
// field can now be moved to the lookup table, as the compact index uniquely
// determines the offset of a possible variant.
script scriptID script scriptID
pVariant byte // offset in str, includes preceding '-' pVariant byte // offset in str, includes preceding '-'
pExt uint16 // offset of first extension, includes preceding '-' pExt uint16 // offset of first extension, includes preceding '-'
@ -593,7 +600,7 @@ func (t Tag) Extension(x byte) (ext Extension, ok bool) {
return Extension{ext}, true return Extension{ext}, true
} }
} }
return Extension{string(x)}, false return Extension{}, false
} }
// Extensions returns all extensions of t. // Extensions returns all extensions of t.


@ -6,6 +6,16 @@ package language
import "errors" import "errors"
// A MatchOption configures a Matcher.
type MatchOption func(*matcher)
// PreferSameScript will, in the absence of a match, result in the first
// preferred tag with the same script as a supported tag to match this supported
// tag. The default is currently true, but this may change in the future.
func PreferSameScript(preferSame bool) MatchOption {
return func(m *matcher) { m.preferSameScript = preferSame }
}
// Matcher is the interface that wraps the Match method. // Matcher is the interface that wraps the Match method.
// //
// Match returns the best match for any of the given tags, along with // Match returns the best match for any of the given tags, along with
@ -36,23 +46,44 @@ func Comprehends(speaker, alternative Tag) Confidence {
// matched tag in t, but is augmented with the Unicode extension ('u')of the // matched tag in t, but is augmented with the Unicode extension ('u')of the
// corresponding preferred tag. This allows user locale options to be passed // corresponding preferred tag. This allows user locale options to be passed
// transparently. // transparently.
func NewMatcher(t []Tag) Matcher { func NewMatcher(t []Tag, options ...MatchOption) Matcher {
return newMatcher(t) return newMatcher(t, options)
} }
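As a usage sketch, assuming the exported golang.org/x/text/language API at this revision (the supported tag list is illustrative), the new MatchOption hook can be driven like this:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Supported tags in order of preference; the first one doubles as the default.
	supported := []language.Tag{
		language.MustParse("en-US"),
		language.MustParse("sr-Latn"),
		language.MustParse("fr"),
	}
	// PreferSameScript(true) is already the default; passing it explicitly
	// documents the intent and exercises the new variadic options parameter.
	m := language.NewMatcher(supported, language.PreferSameScript(true))

	// Croatian is not in the list; with same-script preference the matcher may
	// fall back to a supported tag in Latin script rather than the default.
	tag, index, conf := m.Match(language.MustParse("hr"))
	fmt.Println(tag, index, conf)
}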
func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) { func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
match, w, c := m.getBest(want...) match, w, c := m.getBest(want...)
if match == nil { if match != nil {
t = m.default_.tag
} else {
t, index = match.tag, match.index t, index = match.tag, match.index
} else {
// TODO: this should be an option
t = m.default_.tag
if m.preferSameScript {
outer:
for _, w := range want {
script, _ := w.Script()
if script.scriptID == 0 {
// Don't do anything if there is no script, such as with
// private subtags.
continue
}
for i, h := range m.supported {
if script.scriptID == h.maxScript {
t, index = h.tag, i
break outer
}
}
}
}
// TODO: select first language tag based on script.
}
if w.region != 0 && t.region != 0 && t.region.contains(w.region) {
t, _ = Raw.Compose(t, Region{w.region})
} }
// Copy options from the user-provided tag into the result tag. This is hard // Copy options from the user-provided tag into the result tag. This is hard
// to do after the fact, so we do it here. // to do after the fact, so we do it here.
// TODO: consider also adding in variants that are compatible with the // TODO: add in alternative variants to -u-va-.
// matched language. // TODO: add preferred region to -u-rg-.
// TODO: Add back region if it is non-ambiguous? Or create another tag to // TODO: add other extensions. Merge with existing extensions.
// preserve the region?
if u, ok := w.Extension('u'); ok { if u, ok := w.Extension('u'); ok {
t, _ = Raw.Compose(t, u) t, _ = Raw.Compose(t, u)
} }
@ -388,16 +419,18 @@ func minimizeTags(t Tag) (Tag, error) {
// matcher keeps a set of supported language tags, indexed by language. // matcher keeps a set of supported language tags, indexed by language.
type matcher struct { type matcher struct {
default_ *haveTag default_ *haveTag
index map[langID]*matchHeader supported []*haveTag
passSettings bool index map[langID]*matchHeader
passSettings bool
preferSameScript bool
} }
// matchHeader has the lists of tags for exact matches and matches based on // matchHeader has the lists of tags for exact matches and matches based on
// maximized and canonicalized tags for a given language. // maximized and canonicalized tags for a given language.
type matchHeader struct { type matchHeader struct {
exact []haveTag exact []*haveTag
max []haveTag max []*haveTag
} }
// haveTag holds a supported Tag and its maximized script and region. The maximized // haveTag holds a supported Tag and its maximized script and region. The maximized
@ -440,8 +473,10 @@ func makeHaveTag(tag Tag, index int) (haveTag, langID) {
// script to map to another and we rely on this to keep the code simple. // script to map to another and we rely on this to keep the code simple.
func altScript(l langID, s scriptID) scriptID { func altScript(l langID, s scriptID) scriptID {
for _, alt := range matchScript { for _, alt := range matchScript {
if (alt.lang == 0 || langID(alt.lang) == l) && scriptID(alt.have) == s { // TODO: also match cases where language is not the same.
return scriptID(alt.want) if (langID(alt.wantLang) == l || langID(alt.haveLang) == l) &&
scriptID(alt.haveScript) == s {
return scriptID(alt.wantScript)
} }
} }
return 0 return 0
@ -457,7 +492,7 @@ func (h *matchHeader) addIfNew(n haveTag, exact bool) {
} }
} }
if exact { if exact {
h.exact = append(h.exact, n) h.exact = append(h.exact, &n)
} }
// Allow duplicate maximized tags, but create a linked list to allow quickly // Allow duplicate maximized tags, but create a linked list to allow quickly
// comparing the equivalents and bail out. // comparing the equivalents and bail out.
@ -472,7 +507,7 @@ func (h *matchHeader) addIfNew(n haveTag, exact bool) {
break break
} }
} }
h.max = append(h.max, n) h.max = append(h.max, &n)
} }
// header returns the matchHeader for the given language. It creates one if // header returns the matchHeader for the given language. It creates one if
@ -486,12 +521,26 @@ func (m *matcher) header(l langID) *matchHeader {
return h return h
} }
func toConf(d uint8) Confidence {
if d <= 10 {
return High
}
if d < 30 {
return Low
}
return No
}
// newMatcher builds an index for the given supported tags and returns it as // newMatcher builds an index for the given supported tags and returns it as
// a matcher. It also expands the index by considering various equivalence classes // a matcher. It also expands the index by considering various equivalence classes
// for a given tag. // for a given tag.
func newMatcher(supported []Tag) *matcher { func newMatcher(supported []Tag, options []MatchOption) *matcher {
m := &matcher{ m := &matcher{
index: make(map[langID]*matchHeader), index: make(map[langID]*matchHeader),
preferSameScript: true,
}
for _, o := range options {
o(m)
} }
if len(supported) == 0 { if len(supported) == 0 {
m.default_ = &haveTag{} m.default_ = &haveTag{}
@ -502,8 +551,9 @@ func newMatcher(supported []Tag) *matcher {
for i, tag := range supported { for i, tag := range supported {
pair, _ := makeHaveTag(tag, i) pair, _ := makeHaveTag(tag, i)
m.header(tag.lang).addIfNew(pair, true) m.header(tag.lang).addIfNew(pair, true)
m.supported = append(m.supported, &pair)
} }
m.default_ = &m.header(supported[0].lang).exact[0] m.default_ = m.header(supported[0].lang).exact[0]
for i, tag := range supported { for i, tag := range supported {
pair, max := makeHaveTag(tag, i) pair, max := makeHaveTag(tag, i)
if max != tag.lang { if max != tag.lang {
@ -511,6 +561,9 @@ func newMatcher(supported []Tag) *matcher {
} }
} }
// TODO: include alt script.
// - don't replace regions, but allow regions to be made more specific.
// update is used to add indexes in the map for equivalent languages. // update is used to add indexes in the map for equivalent languages.
// If force is true, the update will also apply to derived entries. To // If force is true, the update will also apply to derived entries. To
// avoid applying a "transitive closure", use false. // avoid applying a "transitive closure", use false.
@ -520,7 +573,8 @@ func newMatcher(supported []Tag) *matcher {
return return
} }
hw := m.header(langID(want)) hw := m.header(langID(want))
for _, v := range hh.max { for _, ht := range hh.max {
v := *ht
if conf < v.conf { if conf < v.conf {
v.conf = conf v.conf = conf
} }
@ -536,9 +590,9 @@ func newMatcher(supported []Tag) *matcher {
// Add entries for languages with mutual intelligibility as defined by CLDR's // Add entries for languages with mutual intelligibility as defined by CLDR's
// languageMatch data. // languageMatch data.
for _, ml := range matchLang { for _, ml := range matchLang {
update(ml.want, ml.have, Confidence(ml.conf), false) update(ml.want, ml.have, toConf(ml.distance), false)
if !ml.oneway { if !ml.oneway {
update(ml.have, ml.want, Confidence(ml.conf), false) update(ml.have, ml.want, toConf(ml.distance), false)
} }
} }
@ -580,7 +634,7 @@ func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
continue continue
} }
for i := range h.exact { for i := range h.exact {
have := &h.exact[i] have := h.exact[i]
if have.tag.equalsRest(w) { if have.tag.equalsRest(w) {
return have, w, Exact return have, w, Exact
} }
@ -591,7 +645,7 @@ func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
// Base language is not defined. // Base language is not defined.
if h != nil { if h != nil {
for i := range h.exact { for i := range h.exact {
have := &h.exact[i] have := h.exact[i]
if have.tag.equalsRest(w) { if have.tag.equalsRest(w) {
return have, w, Exact return have, w, Exact
} }
@ -609,11 +663,11 @@ func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
} }
// Check for match based on maximized tag. // Check for match based on maximized tag.
for i := range h.max { for i := range h.max {
have := &h.max[i] have := h.max[i]
best.update(have, w, max.script, max.region) best.update(have, w, max.script, max.region)
if best.conf == Exact { if best.conf == Exact {
for have.nextMax != 0 { for have.nextMax != 0 {
have = &h.max[have.nextMax] have = h.max[have.nextMax]
best.update(have, w, max.script, max.region) best.update(have, w, max.script, max.region)
} }
return best.have, best.want, High return best.have, best.want, High
@ -635,11 +689,12 @@ type bestMatch struct {
want Tag want Tag
conf Confidence conf Confidence
// Cached results from applying tie-breaking rules. // Cached results from applying tie-breaking rules.
origLang bool origLang bool
origReg bool origReg bool
regDist uint8 regGroupDist uint8
origScript bool regDist uint8
parentDist uint8 // 255 if have is not an ancestor of want tag. origScript bool
parentDist uint8 // 255 if have is not an ancestor of want tag.
} }
// update updates the existing best match if the new pair is considered to be a // update updates the existing best match if the new pair is considered to be a
@ -693,6 +748,14 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
beaten = true beaten = true
} }
regGroupDist := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.lang)
if !beaten && m.regGroupDist != regGroupDist {
if regGroupDist > m.regGroupDist {
return
}
beaten = true
}
// We prefer if the pre-maximized region was specified and identical. // We prefer if the pre-maximized region was specified and identical.
origReg := have.tag.region == tag.region && tag.region != 0 origReg := have.tag.region == tag.region && tag.region != 0
if !beaten && m.origReg != origReg { if !beaten && m.origReg != origReg {
@ -702,8 +765,22 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
beaten = true beaten = true
} }
// Next we prefer smaller distances between regions, as defined by regionDist. // TODO: remove the region distance rule. Region distance has been replaced
regDist := regionDist(have.maxRegion, maxRegion, tag.lang) // by the region grouping rule. For now we leave it as it still seems to
// have a net positive effect when applied after the grouping rule.
// Possible solutions:
// - apply the primary locale rule first to effectively disable region
// distance if groups are defined.
// - express the following errors in terms of grouping (if possible)
// - find another method of handling the following cases.
// maximization of legacy: find mo in
// "sr-Cyrl, sr-Latn, ro, ro-MD": have ro; want ro-MD (High)
// region distance French: find fr-US in
// "en, fr, fr-CA, fr-CH": have fr; want fr-CA (High)
// Next we prefer smaller distances between regions, as defined by
// regionDist.
regDist := uint8(regionDistance(have.maxRegion, maxRegion))
if !beaten && m.regDist != regDist { if !beaten && m.regDist != regDist {
if regDist > m.regDist { if regDist > m.regDist {
return return
@ -721,6 +798,9 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
} }
// Finally we prefer tags which have a closer parent relationship. // Finally we prefer tags which have a closer parent relationship.
// TODO: the parent relationship no longer seems necessary. It doesn't hurt
// to leave it in as the final tie-breaker, though, especially until the
// grouping data has further matured.
parentDist := parentDistance(have.tag.region, tag) parentDist := parentDistance(have.tag.region, tag)
if !beaten && m.parentDist != parentDist { if !beaten && m.parentDist != parentDist {
if parentDist > m.parentDist { if parentDist > m.parentDist {
@ -737,6 +817,7 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
m.origLang = origLang m.origLang = origLang
m.origReg = origReg m.origReg = origReg
m.origScript = origScript m.origScript = origScript
m.regGroupDist = regGroupDist
m.regDist = regDist m.regDist = regDist
m.parentDist = parentDist m.parentDist = parentDist
} }
@ -759,15 +840,27 @@ func parentDistance(haveRegion regionID, tag Tag) uint8 {
return d return d
} }
// regionDist wraps regionDistance with some exceptions to the algorithmic distance. // regionGroupDist computes the distance between two regions based on their
func regionDist(a, b regionID, lang langID) uint8 { // CLDR grouping.
if lang == _en { func regionGroupDist(a, b regionID, script scriptID, lang langID) uint8 {
// Two variants of non-US English are close to each other, regardless of distance. aGroup := uint(regionToGroups[a]) << 1
if a != _US && b != _US { bGroup := uint(regionToGroups[b]) << 1
return 2 for _, ri := range matchRegion {
if langID(ri.lang) == lang && (ri.script == 0 || scriptID(ri.script) == script) {
group := uint(1 << (ri.group &^ 0x80))
if 0x80&ri.group == 0 {
if aGroup&bGroup&group != 0 { // Both regions are in the group.
return ri.distance
}
} else {
if (aGroup|bGroup)&group == 0 { // Both regions are not in the group.
return ri.distance
}
}
} }
} }
return uint8(regionDistance(a, b)) const defaultDistance = 4
return defaultDistance
} }
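To make the bitmask test easier to follow, here is a small self-contained sketch with made-up group masks and distances (the real values come from the generated CLDR tables, not from this example):

package main

import "fmt"

// groupDist mirrors the shape of the rule check in regionGroupDist: each rule
// carries a group index whose high bit (0x80) flips the test from "both
// regions are in the group" to "neither region is in the group". The masks
// and distances below are hypothetical.
func groupDist(aGroups, bGroups uint, ruleGroup uint8, distance uint8) (uint8, bool) {
	group := uint(1) << (ruleGroup &^ 0x80)
	if ruleGroup&0x80 == 0 {
		if aGroups&bGroups&group != 0 { // both regions are in the group
			return distance, true
		}
	} else if (aGroups|bGroups)&group == 0 { // neither region is in the group
		return distance, true
	}
	return 4, false // falls through to defaultDistance
}

func main() {
	const gWestEurope = uint(1 << 1) // hypothetical group bit
	a, b := gWestEurope, gWestEurope
	fmt.Println(groupDist(a, b, 1, 2))      // rule "both in group 1" applies: 2 true
	fmt.Println(groupDist(a, b, 3|0x80, 3)) // rule "neither in group 3" applies: 3 true
	fmt.Println(groupDist(a, b, 2, 2))      // rule does not apply: 4 false
}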
// regionDistance computes the distance between two regions based on the // regionDistance computes the distance between two regions based on the

File diff suppressed because it is too large.


@ -41,20 +41,35 @@ func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
if tNotIn == nil { if tNotIn == nil {
tNotIn = transform.Nop tNotIn = transform.Nop
} }
sIn, ok := tIn.(transform.SpanningTransformer)
if !ok {
sIn = dummySpan{tIn}
}
sNotIn, ok := tNotIn.(transform.SpanningTransformer)
if !ok {
sNotIn = dummySpan{tNotIn}
}
a := &cond{ a := &cond{
tIn: tIn, tIn: sIn,
tNotIn: tNotIn, tNotIn: sNotIn,
f: s.Contains, f: s.Contains,
} }
a.Reset() a.Reset()
return Transformer{a} return Transformer{a}
} }
type dummySpan struct{ transform.Transformer }
func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
return 0, transform.ErrEndOfSpan
}
type cond struct { type cond struct {
tIn, tNotIn transform.Transformer tIn, tNotIn transform.SpanningTransformer
f func(rune) bool f func(rune) bool
check func(rune) bool // current check to perform check func(rune) bool // current check to perform
t transform.Transformer // current transformer to use t transform.SpanningTransformer // current transformer to use
} }
// Reset implements transform.Transformer. // Reset implements transform.Transformer.
@ -84,6 +99,51 @@ func (t *cond) isNot(r rune) bool {
return false return false
} }
// This implementation of Span doesn't help all that much, but it needs to be
// there to satisfy this package's Transformer interface.
// TODO: there is certainly room for improvement, though. For example, if
// t.t == transform.Nop (which will be a common occurrence) it will save a bundle
// to special-case that loop.
func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
p := 0
for n < len(src) && err == nil {
// Don't process too much at a time as the Spanner that will be
// called on this block may terminate early.
const maxChunk = 4096
max := len(src)
if v := n + maxChunk; v < max {
max = v
}
atEnd := false
size := 0
current := t.t
for ; p < max; p += size {
r := rune(src[p])
if r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
if !atEOF && !utf8.FullRune(src[p:]) {
err = transform.ErrShortSrc
break
}
}
if !t.check(r) {
// The next rune will be the start of a new run.
atEnd = true
break
}
}
n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
n += n2
if err2 != nil {
return n, err2
}
// At this point either err != nil or t.check will pass for the rune at p.
p = n + size
}
return n, err
}
func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
p := 0 p := 0
for nSrc < len(src) && err == nil { for nSrc < len(src) && err == nil {
@ -99,9 +159,10 @@ func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error
size := 0 size := 0
current := t.t current := t.t
for ; p < max; p += size { for ; p < max; p += size {
var r rune r := rune(src[p])
r, size = utf8.DecodeRune(src[p:]) if r < utf8.RuneSelf {
if r == utf8.RuneError && size == 1 { size = 1
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
if !atEOF && !utf8.FullRune(src[p:]) { if !atEOF && !utf8.FullRune(src[p:]) {
err = transform.ErrShortSrc err = transform.ErrShortSrc
break break
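A minimal usage sketch for If, assuming the exported golang.org/x/text APIs touched in this change; plain Transformers are accepted and wrapped via dummySpan when they lack a Span method:

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)

func main() {
	// Decompose (NFD) only the Latin runes and leave everything else untouched.
	// norm.NFD implements Span at this revision; transform.Nop now also
	// reports a full span.
	t := runes.If(runes.In(unicode.Latin), norm.NFD, transform.Nop)

	out, _, err := transform.String(t, "résumé 漢字")
	fmt.Println(out, err)
}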


@ -46,9 +46,19 @@ func Predicate(f func(rune) bool) Set {
// Transformer implements the transform.Transformer interface. // Transformer implements the transform.Transformer interface.
type Transformer struct { type Transformer struct {
transform.Transformer t transform.SpanningTransformer
} }
func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
return t.t.Transform(dst, src, atEOF)
}
func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
return t.t.Span(b, atEOF)
}
func (t Transformer) Reset() { t.t.Reset() }
// Bytes returns a new byte slice with the result of converting b using t. It // Bytes returns a new byte slice with the result of converting b using t. It
// calls Reset on t. It returns nil if any error was found. This can only happen // calls Reset on t. It returns nil if any error was found. This can only happen
// if an error-producing Transformer is passed to If. // if an error-producing Transformer is passed to If.
@ -96,39 +106,57 @@ type remove func(r rune) bool
func (remove) Reset() {} func (remove) Reset() {}
// Span implements transform.Spanner.
func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
for r, size := rune(0), 0; n < len(src); {
if r = rune(src[n]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
} else {
err = transform.ErrEndOfSpan
}
break
}
if t(r) {
err = transform.ErrEndOfSpan
break
}
n += size
}
return
}
// Transform implements transform.Transformer. // Transform implements transform.Transformer.
func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for r, size := rune(0), 0; nSrc < len(src); { for r, size := rune(0), 0; nSrc < len(src); {
if r = rune(src[nSrc]); r < utf8.RuneSelf { if r = rune(src[nSrc]); r < utf8.RuneSelf {
size = 1 size = 1
} else { } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
r, size = utf8.DecodeRune(src[nSrc:]) // Invalid rune.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
if size == 1 { err = transform.ErrShortSrc
// Invalid rune. break
if !atEOF && !utf8.FullRune(src[nSrc:]) { }
err = transform.ErrShortSrc // We replace illegal bytes with RuneError. Not doing so might
// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
// The resulting byte sequence may subsequently contain runes
// for which t(r) is true that were passed unnoticed.
if !t(utf8.RuneError) {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break break
} }
// We replace illegal bytes with RuneError. Not doing so might dst[nDst+0] = runeErrorString[0]
// otherwise turn a sequence of invalid UTF-8 into valid UTF-8. dst[nDst+1] = runeErrorString[1]
// The resulting byte sequence may subsequently contain runes dst[nDst+2] = runeErrorString[2]
// for which t(r) is true that were passed unnoticed. nDst += 3
if !t(utf8.RuneError) {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
}
nSrc++
continue
} }
nSrc++
continue
} }
if t(r) { if t(r) {
nSrc += size nSrc += size
continue continue
@ -157,6 +185,28 @@ type mapper func(rune) rune
func (mapper) Reset() {} func (mapper) Reset() {}
// Span implements transform.Spanner.
func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
for r, size := rune(0), 0; n < len(src); n += size {
if r = rune(src[n]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
} else {
err = transform.ErrEndOfSpan
}
break
}
if t(r) != r {
err = transform.ErrEndOfSpan
break
}
}
return n, err
}
// Transform implements transform.Transformer. // Transform implements transform.Transformer.
func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
var replacement rune var replacement rune
@ -230,24 +280,51 @@ func ReplaceIllFormed() Transformer {
type replaceIllFormed struct{ transform.NopResetter } type replaceIllFormed struct{ transform.NopResetter }
func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
for n < len(src) {
// ASCII fast path.
if src[n] < utf8.RuneSelf {
n++
continue
}
r, size := utf8.DecodeRune(src[n:])
// Look for a valid non-ASCII rune.
if r != utf8.RuneError || size != 1 {
n += size
continue
}
// Look for short source data.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
break
}
// We have an invalid rune.
err = transform.ErrEndOfSpan
break
}
return n, err
}
func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for nSrc < len(src) { for nSrc < len(src) {
r, size := utf8.DecodeRune(src[nSrc:]) // ASCII fast path.
if r := src[nSrc]; r < utf8.RuneSelf {
// Look for an ASCII rune.
if r < utf8.RuneSelf {
if nDst == len(dst) { if nDst == len(dst) {
err = transform.ErrShortDst err = transform.ErrShortDst
break break
} }
dst[nDst] = byte(r) dst[nDst] = r
nDst++ nDst++
nSrc++ nSrc++
continue continue
} }
// Look for a valid non-ASCII rune. // Look for a valid non-ASCII rune.
if r != utf8.RuneError || size != 1 { if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
err = transform.ErrShortDst err = transform.ErrShortDst
break break
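A short usage sketch for Remove, Map, and ReplaceIllFormed, which now implement Span in addition to Transform; the input and the behavior noted in the comments are illustrative:

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
)

func main() {
	fix := runes.ReplaceIllFormed()             // ill-formed UTF-8 becomes U+FFFD
	strip := runes.Remove(runes.In(unicode.Mn)) // drop nonspacing marks
	upperA := runes.Map(func(r rune) rune {     // rewrite selected runes
		if r == 'a' {
			return 'A'
		}
		return r
	})

	out, _, err := transform.String(transform.Chain(fix, strip, upperA), "a\u0301bc\xff")
	fmt.Printf("%q %v\n", out, err) // combining mark removed, ill-formed byte replaced, 'a' mapped to 'A'
}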


@ -123,34 +123,64 @@ var transitions = [...][2]ruleTransition{
// vice versa. // vice versa.
const exclusiveRTL = uint16(1<<bidi.EN | 1<<bidi.AN) const exclusiveRTL = uint16(1<<bidi.EN | 1<<bidi.AN)
// Direction reports the direction of the given label as defined by RFC 5893 or // From RFC 5893
// an error if b is not a valid label according to the Bidi Rule. // An RTL label is a label that contains at least one character of type
func Direction(b []byte) (bidi.Direction, error) { // R, AL, or AN.
t := Transformer{} //
if n, ok := t.advance(b); ok && n == len(b) { // An LTR label is any label that is not an RTL label.
switch t.state {
case ruleLTRFinal, ruleInitial: // Direction reports the direction of the given label as defined by RFC 5893.
return bidi.LeftToRight, nil // The Bidi Rule does not have to be applied to labels of the category
case ruleRTLFinal: // LeftToRight.
return bidi.RightToLeft, nil func Direction(b []byte) bidi.Direction {
for i := 0; i < len(b); {
e, sz := bidi.Lookup(b[i:])
if sz == 0 {
i++
} }
c := e.Class()
if c == bidi.R || c == bidi.AL || c == bidi.AN {
return bidi.RightToLeft
}
i += sz
} }
return bidi.Neutral, ErrInvalid return bidi.LeftToRight
} }
// DirectionString reports the direction of the given label as defined by RFC // DirectionString reports the direction of the given label as defined by RFC
// 5893 or an error if s is not a valid label according to the Bidi Rule. // 5893. The Bidi Rule does not have to be applied to labels of the category
func DirectionString(s string) (bidi.Direction, error) { // LeftToRight.
t := Transformer{} func DirectionString(s string) bidi.Direction {
if n, ok := t.advanceString(s); ok && n == len(s) { for i := 0; i < len(s); {
switch t.state { e, sz := bidi.LookupString(s[i:])
case ruleLTRFinal, ruleInitial: if sz == 0 {
return bidi.LeftToRight, nil i++
case ruleRTLFinal:
return bidi.RightToLeft, nil
} }
c := e.Class()
if c == bidi.R || c == bidi.AL || c == bidi.AN {
return bidi.RightToLeft
}
i += sz
} }
return bidi.Neutral, ErrInvalid return bidi.LeftToRight
}
// Valid reports whether b conforms to the BiDi rule.
func Valid(b []byte) bool {
var t Transformer
if n, ok := t.advance(b); !ok || n < len(b) {
return false
}
return t.isFinal()
}
// ValidString reports whether s conforms to the BiDi rule.
func ValidString(s string) bool {
var t Transformer
if n, ok := t.advanceString(s); !ok || n < len(s) {
return false
}
return t.isFinal()
} }
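A usage sketch for the reworked API: Valid and ValidString check conformance to the Bidi Rule, while Direction and DirectionString now only classify a label as LTR or RTL:

package main

import (
	"fmt"

	"golang.org/x/text/secure/bidirule"
	"golang.org/x/text/unicode/bidi"
)

func main() {
	fmt.Println(bidirule.ValidString("hello")) // LTR-only labels satisfy the rule
	fmt.Println(bidirule.ValidString("שלום"))  // a well-formed RTL label

	d := bidirule.DirectionString("שלום")
	fmt.Println(d == bidi.RightToLeft) // the label contains R-class characters
}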
// New returns a Transformer that verifies that input adheres to the Bidi Rule. // New returns a Transformer that verifies that input adheres to the Bidi Rule.
@ -160,8 +190,23 @@ func New() *Transformer {
// Transformer implements transform.Transform. // Transformer implements transform.Transform.
type Transformer struct { type Transformer struct {
state ruleState state ruleState
seen uint16 hasRTL bool
seen uint16
}
// A rule can only be violated for "Bidi Domain names", meaning if one of the
// following categories has been observed.
func (t *Transformer) isRTL() bool {
const isRTL = 1<<bidi.R | 1<<bidi.AL | 1<<bidi.AN
return t.seen&isRTL != 0
}
func (t *Transformer) isFinal() bool {
if !t.isRTL() {
return true
}
return t.state == ruleLTRFinal || t.state == ruleRTLFinal || t.state == ruleInitial
} }
// Reset implements transform.Transformer. // Reset implements transform.Transformer.
@ -185,7 +230,7 @@ func (t *Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, er
// Span returns the first n bytes of src that conform to the Bidi rule. // Span returns the first n bytes of src that conform to the Bidi rule.
func (t *Transformer) Span(src []byte, atEOF bool) (n int, err error) { func (t *Transformer) Span(src []byte, atEOF bool) (n int, err error) {
if t.state == ruleInvalid { if t.state == ruleInvalid && t.isRTL() {
return 0, ErrInvalid return 0, ErrInvalid
} }
n, ok := t.advance(src) n, ok := t.advance(src)
@ -198,7 +243,7 @@ func (t *Transformer) Span(src []byte, atEOF bool) (n int, err error) {
break break
} }
err = ErrInvalid err = ErrInvalid
case t.state != ruleLTRFinal && t.state != ruleRTLFinal && t.state != ruleInitial: case !t.isFinal():
err = ErrInvalid err = ErrInvalid
} }
return n, err return n, err
@ -225,12 +270,15 @@ func (t *Transformer) advance(s []byte) (n int, ok bool) {
e, sz = bidi.Lookup(s[n:]) e, sz = bidi.Lookup(s[n:])
if sz <= 1 { if sz <= 1 {
if sz == 1 { if sz == 1 {
return n, false // invalid UTF-8 // We always consider invalid UTF-8 to be invalid, even if
// the string has not yet been determined to be RTL.
// TODO: is this correct?
return n, false
} }
return n, true // incomplete UTF-8 encoding return n, true // incomplete UTF-8 encoding
} }
} }
// TODO: using CompactClass results in noticeable speedup. // TODO: using CompactClass would result in noticeable speedup.
// See unicode/bidi/prop.go:Properties.CompactClass. // See unicode/bidi/prop.go:Properties.CompactClass.
c := uint16(1 << e.Class()) c := uint16(1 << e.Class())
t.seen |= c t.seen |= c
@ -245,7 +293,9 @@ func (t *Transformer) advance(s []byte) (n int, ok bool) {
t.state = tr[1].next t.state = tr[1].next
default: default:
t.state = ruleInvalid t.state = ruleInvalid
return n, false if t.isRTL() {
return n, false
}
} }
n += sz n += sz
} }
@ -282,7 +332,9 @@ func (t *Transformer) advanceString(s string) (n int, ok bool) {
t.state = tr[1].next t.state = tr[1].next
default: default:
t.state = ruleInvalid t.state = ruleInvalid
return n, false if t.isRTL() {
return n, false
}
} }
n += sz n += sz
} }


@ -17,6 +17,7 @@ go_library(
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
"//vendor/golang.org/x/text/cases:go_default_library", "//vendor/golang.org/x/text/cases:go_default_library",
"//vendor/golang.org/x/text/language:go_default_library",
"//vendor/golang.org/x/text/runes:go_default_library", "//vendor/golang.org/x/text/runes:go_default_library",
"//vendor/golang.org/x/text/secure/bidirule:go_default_library", "//vendor/golang.org/x/text/secure/bidirule:go_default_library",
"//vendor/golang.org/x/text/transform:go_default_library", "//vendor/golang.org/x/text/transform:go_default_library",


@ -6,10 +6,10 @@ package precis
import ( import (
"golang.org/x/text/cases" "golang.org/x/text/cases"
"golang.org/x/text/language"
"golang.org/x/text/runes" "golang.org/x/text/runes"
"golang.org/x/text/transform" "golang.org/x/text/transform"
"golang.org/x/text/unicode/norm" "golang.org/x/text/unicode/norm"
"golang.org/x/text/width"
) )
// An Option is used to define the behavior and rules of a Profile. // An Option is used to define the behavior and rules of a Profile.
@ -20,11 +20,12 @@ type options struct {
foldWidth bool foldWidth bool
// Enforcement options // Enforcement options
cases transform.Transformer asciiLower bool
cases transform.SpanningTransformer
disallow runes.Set disallow runes.Set
norm norm.Form norm transform.SpanningTransformer
additional []func() transform.Transformer additional []func() transform.SpanningTransformer
width *width.Transformer width transform.SpanningTransformer
disallowEmpty bool disallowEmpty bool
bidiRule bool bidiRule bool
@ -36,6 +37,11 @@ func getOpts(o ...Option) (res options) {
for _, f := range o { for _, f := range o {
f(&res) f(&res)
} }
// Using a SpanningTransformer, instead of norm.Form prevents an allocation
// down the road.
if res.norm == nil {
res.norm = norm.NFC
}
return return
} }
@ -74,11 +80,36 @@ var (
} }
) )
// TODO: move this logic to package transform
type spanWrap struct{ transform.Transformer }
func (s spanWrap) Span(src []byte, atEOF bool) (n int, err error) {
return 0, transform.ErrEndOfSpan
}
// TODO: allow different types? For instance:
// func() transform.Transformer
// func() transform.SpanningTransformer
// func([]byte) bool // validation only
//
// Also, would be great if we could detect if a transformer is reentrant.
// The AdditionalMapping option defines the additional mapping rule for the // The AdditionalMapping option defines the additional mapping rule for the
// Profile by applying Transformer's in sequence. // Profile by applying Transformer's in sequence.
func AdditionalMapping(t ...func() transform.Transformer) Option { func AdditionalMapping(t ...func() transform.Transformer) Option {
return func(o *options) { return func(o *options) {
o.additional = t for _, f := range t {
sf := func() transform.SpanningTransformer {
return f().(transform.SpanningTransformer)
}
if _, ok := f().(transform.SpanningTransformer); !ok {
sf = func() transform.SpanningTransformer {
return spanWrap{f()}
}
}
o.additional = append(o.additional, sf)
}
} }
} }
@ -93,10 +124,26 @@ func Norm(f norm.Form) Option {
// provided to determine the type of case folding used. // provided to determine the type of case folding used.
func FoldCase(opts ...cases.Option) Option { func FoldCase(opts ...cases.Option) Option {
return func(o *options) { return func(o *options) {
o.asciiLower = true
o.cases = cases.Fold(opts...) o.cases = cases.Fold(opts...)
} }
} }
// The LowerCase option defines a Profile's case mapping rule. Options can be
// provided to determine the type of case folding used.
func LowerCase(opts ...cases.Option) Option {
return func(o *options) {
o.asciiLower = true
if len(opts) == 0 {
o.cases = cases.Lower(language.Und, cases.HandleFinalSigma(false))
return
}
opts = append([]cases.Option{cases.HandleFinalSigma(false)}, opts...)
o.cases = cases.Lower(language.Und, opts...)
}
}
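A sketch of a custom profile built with the new LowerCase option, mirroring how UsernameCaseMapped is assembled later in this change; this assumes the package's exported NewIdentifier constructor and option values:

package main

import (
	"fmt"

	"golang.org/x/text/secure/precis"
	"golang.org/x/text/unicode/norm"
)

func main() {
	p := precis.NewIdentifier(
		precis.FoldWidth,
		precis.LowerCase(), // ASCII fast path plus cases.Lower for the rest
		precis.Norm(norm.NFC),
		precis.BidiRule,
	)
	s, err := p.String("ＡliceSmith") // the fullwidth 'Ａ' is width-folded, then lowercased
	fmt.Println(s, err)              // e.g. "alicesmith"
}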
// The Disallow option further restricts a Profile's allowed characters beyond // The Disallow option further restricts a Profile's allowed characters beyond
// what is disallowed by the underlying string class. // what is disallowed by the underlying string class.
func Disallow(set runes.Set) Option { func Disallow(set runes.Set) Option {


@ -5,9 +5,12 @@
package precis package precis
import ( import (
"bytes"
"errors" "errors"
"unicode/utf8" "unicode/utf8"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"golang.org/x/text/runes" "golang.org/x/text/runes"
"golang.org/x/text/secure/bidirule" "golang.org/x/text/secure/bidirule"
"golang.org/x/text/transform" "golang.org/x/text/transform"
@ -90,32 +93,80 @@ type buffers struct {
next int next int
} }
func (b *buffers) init(n int) { func (b *buffers) apply(t transform.SpanningTransformer) (err error) {
b.buf[0] = make([]byte, 0, n) n, err := t.Span(b.src, true)
b.buf[1] = make([]byte, 0, n) if err != transform.ErrEndOfSpan {
} return err
}
func (b *buffers) apply(t transform.Transformer) (err error) {
// TODO: use Span, once available.
x := b.next & 1 x := b.next & 1
b.src, _, err = transform.Append(t, b.buf[x][:0], b.src) if b.buf[x] == nil {
b.buf[x] = make([]byte, 0, 8+len(b.src)+len(b.src)>>2)
}
span := append(b.buf[x][:0], b.src[:n]...)
b.src, _, err = transform.Append(t, span, b.src[n:])
b.buf[x] = b.src b.buf[x] = b.src
b.next++ b.next++
return err return err
} }
func (b *buffers) enforce(p *Profile, src []byte) (str []byte, err error) { // Pre-allocate transformers when possible. In some cases this avoids allocation.
var (
foldWidthT transform.SpanningTransformer = width.Fold
lowerCaseT transform.SpanningTransformer = cases.Lower(language.Und, cases.HandleFinalSigma(false))
)
// TODO: make this a method on profile.
func (b *buffers) enforce(p *Profile, src []byte, comparing bool) (str []byte, err error) {
b.src = src b.src = src
ascii := true
for _, c := range src {
if c >= utf8.RuneSelf {
ascii = false
break
}
}
// ASCII fast path.
if ascii {
for _, f := range p.options.additional {
if err = b.apply(f()); err != nil {
return nil, err
}
}
switch {
case p.options.asciiLower || (comparing && p.options.ignorecase):
for i, c := range b.src {
if 'A' <= c && c <= 'Z' {
b.src[i] = c ^ 1<<5
}
}
case p.options.cases != nil:
b.apply(p.options.cases)
}
c := checker{p: p}
if _, err := c.span(b.src, true); err != nil {
return nil, err
}
if p.disallow != nil {
for _, c := range b.src {
if p.disallow.Contains(rune(c)) {
return nil, errDisallowedRune
}
}
}
if p.options.disallowEmpty && len(b.src) == 0 {
return nil, errEmptyString
}
return b.src, nil
}
// These transforms are applied in the order defined in // These transforms are applied in the order defined in
// https://tools.ietf.org/html/rfc7564#section-7 // https://tools.ietf.org/html/rfc7564#section-7
// TODO: allow different width transforms options. // TODO: allow different width transforms options.
if p.options.foldWidth { if p.options.foldWidth || (p.options.ignorecase && comparing) {
// TODO: use Span, once available. b.apply(foldWidthT)
if err = b.apply(width.Fold); err != nil {
return nil, err
}
} }
for _, f := range p.options.additional { for _, f := range p.options.additional {
if err = b.apply(f()); err != nil { if err = b.apply(f()); err != nil {
@ -123,24 +174,14 @@ func (b *buffers) enforce(p *Profile, src []byte) (str []byte, err error) {
} }
} }
if p.options.cases != nil { if p.options.cases != nil {
if err = b.apply(p.options.cases); err != nil { b.apply(p.options.cases)
return nil, err
}
} }
if n := p.norm.QuickSpan(b.src); n < len(b.src) { if comparing && p.options.ignorecase {
x := b.next & 1 b.apply(lowerCaseT)
n = copy(b.buf[x], b.src[:n])
b.src, _, err = transform.Append(p.norm, b.buf[x][:n], b.src[n:])
b.buf[x] = b.src
b.next++
if err != nil {
return nil, err
}
} }
if p.options.bidiRule { b.apply(p.norm)
if err := b.apply(bidirule.New()); err != nil { if p.options.bidiRule && !bidirule.Valid(b.src) {
return nil, err return nil, bidirule.ErrInvalid
}
} }
c := checker{p: p} c := checker{p: p}
if _, err := c.span(b.src, true); err != nil { if _, err := c.span(b.src, true); err != nil {
@ -155,9 +196,6 @@ func (b *buffers) enforce(p *Profile, src []byte) (str []byte, err error) {
i += size i += size
} }
} }
// TODO: Add the disallow empty rule with a dummy transformer?
if p.options.disallowEmpty && len(b.src) == 0 { if p.options.disallowEmpty && len(b.src) == 0 {
return nil, errEmptyString return nil, errEmptyString
} }
@ -168,19 +206,16 @@ func (b *buffers) enforce(p *Profile, src []byte) (str []byte, err error) {
// It returns an error if the input string is invalid. // It returns an error if the input string is invalid.
func (p *Profile) Append(dst, src []byte) ([]byte, error) { func (p *Profile) Append(dst, src []byte) ([]byte, error) {
var buf buffers var buf buffers
buf.init(8 + len(src) + len(src)>>2) b, err := buf.enforce(p, src, false)
b, err := buf.enforce(p, src)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return append(dst, b...), nil return append(dst, b...), nil
} }
// Bytes returns a new byte slice with the result of applying the profile to b. func processBytes(p *Profile, b []byte, key bool) ([]byte, error) {
func (p *Profile) Bytes(b []byte) ([]byte, error) {
var buf buffers var buf buffers
buf.init(8 + len(b) + len(b)>>2) b, err := buf.enforce(p, b, key)
b, err := buf.enforce(p, b)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -192,39 +227,62 @@ func (p *Profile) Bytes(b []byte) ([]byte, error) {
return b, nil return b, nil
} }
// String returns a string with the result of applying the profile to s. // Bytes returns a new byte slice with the result of applying the profile to b.
func (p *Profile) String(s string) (string, error) { func (p *Profile) Bytes(b []byte) ([]byte, error) {
return processBytes(p, b, false)
}
// AppendCompareKey appends the result of applying p to src (including any
// optional rules to make strings comparable or useful in a map key such as
// applying lowercasing) writing the result to dst. It returns an error if the
// input string is invalid.
func (p *Profile) AppendCompareKey(dst, src []byte) ([]byte, error) {
var buf buffers var buf buffers
buf.init(8 + len(s) + len(s)>>2) b, err := buf.enforce(p, src, true)
b, err := buf.enforce(p, []byte(s)) if err != nil {
return nil, err
}
return append(dst, b...), nil
}
func processString(p *Profile, s string, key bool) (string, error) {
var buf buffers
b, err := buf.enforce(p, []byte(s), key)
if err != nil { if err != nil {
return "", err return "", err
} }
return string(b), nil return string(b), nil
} }
// String returns a string with the result of applying the profile to s.
func (p *Profile) String(s string) (string, error) {
return processString(p, s, false)
}
// CompareKey returns a string that can be used for comparison, hashing, or
// collation.
func (p *Profile) CompareKey(s string) (string, error) {
return processString(p, s, true)
}
// Compare enforces both strings, and then compares them for bit-string identity // Compare enforces both strings, and then compares them for bit-string identity
// (byte-for-byte equality). If either string cannot be enforced, the comparison // (byte-for-byte equality). If either string cannot be enforced, the comparison
// is false. // is false.
func (p *Profile) Compare(a, b string) bool { func (p *Profile) Compare(a, b string) bool {
a, err := p.String(a) var buf buffers
if err != nil {
return false akey, err := buf.enforce(p, []byte(a), true)
}
b, err = p.String(b)
if err != nil { if err != nil {
return false return false
} }
// TODO: This is out of order. Need to extract the transformation logic and buf = buffers{}
// put this in where the normal case folding would go (but only for bkey, err := buf.enforce(p, []byte(b), true)
// comparison). if err != nil {
if p.options.ignorecase { return false
a = width.Fold.String(a)
b = width.Fold.String(a)
} }
return a == b return bytes.Compare(akey, bkey) == 0
} }
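A usage sketch for the new comparison entry points; the expected outputs follow from the profile's options rather than being guaranteed here:

package main

import (
	"fmt"

	"golang.org/x/text/secure/precis"
)

func main() {
	p := precis.UsernameCaseMapped

	// Compare enforces both arguments with the comparison rules applied and
	// then checks byte-for-byte equality.
	fmt.Println(p.Compare("Alice", "alice")) // likely true: both enforce to "alice"

	// CompareKey yields a string suitable for hashing or use as a map key.
	k, err := p.CompareKey("Alice")
	fmt.Println(k, err) // likely "alice" with a nil error
}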
// Allowed returns a runes.Set containing every rune that is a member of the // Allowed returns a runes.Set containing every rune that is a member of the
@ -264,33 +322,35 @@ func (c *checker) span(src []byte, atEOF bool) (n int, err error) {
} }
return n, errDisallowedRune return n, errDisallowedRune
} }
doLookAhead := false
if property(e) < c.p.class.validFrom { if property(e) < c.p.class.validFrom {
if d.rule == nil { if d.rule == nil {
return n, errDisallowedRune return n, errDisallowedRune
} }
doLookAhead, err := d.rule(c.beforeBits) doLookAhead, err = d.rule(c.beforeBits)
if err != nil { if err != nil {
return n, err return n, err
} }
if doLookAhead {
c.beforeBits &= d.keep
c.beforeBits |= d.set
// We may still have a lookahead rule which we will require to
// complete (by checking termBits == 0) before setting the new
// bits.
if c.termBits != 0 && (!c.checkLookahead() || c.termBits == 0) {
return n, err
}
c.termBits = d.term
c.acceptBits = d.accept
n += sz
continue
}
} }
c.beforeBits &= d.keep c.beforeBits &= d.keep
c.beforeBits |= d.set c.beforeBits |= d.set
if c.termBits != 0 && !c.checkLookahead() { if c.termBits != 0 {
return n, errContext // We are currently in an unterminated lookahead.
if c.beforeBits&c.termBits != 0 {
c.termBits = 0
c.acceptBits = 0
} else if c.beforeBits&c.acceptBits == 0 {
// Invalid continuation of the unterminated lookahead sequence.
return n, errContext
}
}
if doLookAhead {
if c.termBits != 0 {
// A previous lookahead run has not been terminated yet.
return n, errContext
}
c.termBits = d.term
c.acceptBits = d.accept
} }
n += sz n += sz
} }
@ -300,18 +360,6 @@ func (c *checker) span(src []byte, atEOF bool) (n int, err error) {
return n, err return n, err
} }
func (c *checker) checkLookahead() bool {
switch {
case c.beforeBits&c.termBits != 0:
c.termBits = 0
c.acceptBits = 0
case c.beforeBits&c.acceptBits != 0:
default:
return false
}
return true
}
// TODO: we may get rid of this transform if transform.Chain understands // TODO: we may get rid of this transform if transform.Chain understands
// something like a Spanner interface. // something like a Spanner interface.
func (c checker) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { func (c checker) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {


@ -13,44 +13,66 @@ import (
) )
var ( var (
Nickname *Profile = nickname // Implements the Nickname profile specified in RFC 7700. // Implements the Nickname profile specified in RFC 7700.
UsernameCaseMapped *Profile = usernameCaseMap // Implements the UsernameCaseMapped profile specified in RFC 7613. // The nickname profile is not idempotent and may need to be applied multiple
UsernameCasePreserved *Profile = usernameNoCaseMap // Implements the UsernameCasePreserved profile specified in RFC 7613. // times before being used for comparisons.
OpaqueString *Profile = opaquestring // Implements the OpaqueString profile defined in RFC 7613 for passwords and other secure labels. Nickname *Profile = nickname
// Implements the UsernameCaseMapped profile specified in RFC 7613.
UsernameCaseMapped *Profile = usernameCaseMap
// Implements the UsernameCasePreserved profile specified in RFC 7613.
UsernameCasePreserved *Profile = usernameNoCaseMap
// Implements the OpaqueString profile defined in RFC 7613 for passwords and other secure labels.
OpaqueString *Profile = opaquestring
) )
// TODO: mvl: "Ultimately, I would manually define the structs for the internal
// profiles. This avoid pulling in unneeded tables when they are not used."
var ( var (
nickname = NewFreeform( nickname = &Profile{
AdditionalMapping(func() transform.Transformer { options: getOpts(
return &nickAdditionalMapping{} AdditionalMapping(func() transform.Transformer {
}), return &nickAdditionalMapping{}
IgnoreCase, }),
Norm(norm.NFKC), IgnoreCase,
DisallowEmpty, Norm(norm.NFKC),
) DisallowEmpty,
usernameCaseMap = NewIdentifier( ),
FoldWidth, class: freeform,
FoldCase(), }
Norm(norm.NFC), usernameCaseMap = &Profile{
BidiRule, options: getOpts(
) FoldWidth,
usernameNoCaseMap = NewIdentifier( LowerCase(),
FoldWidth, Norm(norm.NFC),
Norm(norm.NFC), BidiRule,
BidiRule, ),
) class: identifier,
opaquestring = NewFreeform( }
AdditionalMapping(func() transform.Transformer { usernameNoCaseMap = &Profile{
return runes.Map(func(r rune) rune { options: getOpts(
if unicode.Is(unicode.Zs, r) { FoldWidth,
return ' ' Norm(norm.NFC),
} BidiRule,
return r ),
}) class: identifier,
}), }
Norm(norm.NFC), opaquestring = &Profile{
DisallowEmpty, options: getOpts(
) AdditionalMapping(func() transform.Transformer {
return mapSpaces
}),
Norm(norm.NFC),
DisallowEmpty,
),
class: freeform,
}
) )
// mapSpaces is a shared value of a runes.Map transformer.
var mapSpaces transform.Transformer = runes.Map(func(r rune) rune {
if unicode.Is(unicode.Zs, r) {
return ' '
}
return r
})
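For reference, the predefined profiles above are used the same way as before; the outputs noted in the comments are what the listed options suggest:

package main

import (
	"fmt"

	"golang.org/x/text/secure/precis"
)

func main() {
	nick, err := precis.Nickname.String("  Foo     Bar  ")
	fmt.Println(nick, err) // typically "Foo Bar": the nickname mapping collapses and trims spaces

	pw, err := precis.OpaqueString.String("correct horse battery staple")
	fmt.Println(pw, err) // Zs runes map to ASCII space; the string is otherwise preserved
}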


@ -1,4 +1,4 @@
// This file was generated by go generate; DO NOT EDIT // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package precis package precis


@ -1,4 +1,4 @@
// This file was generated by go generate; DO NOT EDIT // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package precis package precis


@ -24,6 +24,10 @@ var (
// complete the transformation. // complete the transformation.
ErrShortSrc = errors.New("transform: short source buffer") ErrShortSrc = errors.New("transform: short source buffer")
// ErrEndOfSpan means that the input and output (the transformed input)
// are not identical.
ErrEndOfSpan = errors.New("transform: input and output are not identical")
// errInconsistentByteCount means that Transform returned success (nil // errInconsistentByteCount means that Transform returned success (nil
// error) but also returned nSrc inconsistent with the src argument. // error) but also returned nSrc inconsistent with the src argument.
errInconsistentByteCount = errors.New("transform: inconsistent byte count returned") errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
@ -60,6 +64,41 @@ type Transformer interface {
Reset() Reset()
} }
// SpanningTransformer extends the Transformer interface with a Span method
// that determines how much of the input already conforms to the Transformer.
type SpanningTransformer interface {
Transformer
// Span returns a position in src such that transforming src[:n] results in
// identical output src[:n] for these bytes. It does not necessarily return
// the largest such n. The atEOF argument tells whether src represents the
// last bytes of the input.
//
// Callers should always account for the n bytes consumed before
// considering the error err.
//
// A nil error means that all input bytes are known to be identical to the
// output produced by the Transformer. A nil error can be returned
// regardless of whether atEOF is true. If err is nil, then n must
// equal len(src); the converse is not necessarily true.
//
// ErrEndOfSpan means that the Transformer output may differ from the
// input after n bytes. Note that n may be len(src), meaning that the output
// would contain additional bytes after otherwise identical output.
// ErrShortSrc means that src had insufficient data to determine whether the
// remaining bytes would change. Other than the error conditions listed
// here, implementations are free to report other errors that arise.
//
// Calling Span can modify the Transformer state as a side effect. In
// effect, it does the transformation just as calling Transform would, only
// without copying to a destination buffer and only up to a point it can
// determine the input and output bytes are the same. This is obviously more
// limited than calling Transform, but can be more efficient in terms of
// copying and allocating buffers. Calls to Span and Transform may be
// interleaved.
Span(src []byte, atEOF bool) (n int, err error)
}
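A small sketch of the Span contract, using runes.Remove, which gains a Span method in this change:

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
)

func main() {
	var t transform.SpanningTransformer = runes.Remove(runes.In(unicode.Mn))

	// The whole input is already in the transformed form: n == len(src), err == nil.
	n, err := t.Span([]byte("abc"), true)
	fmt.Println(n, err)

	// Output diverges after the first byte: ErrEndOfSpan with n == 1.
	t.Reset()
	n, err = t.Span([]byte("e\u0301"), true)
	fmt.Println(n, err)
}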
// NopResetter can be embedded by implementations of Transformer to add a nop // NopResetter can be embedded by implementations of Transformer to add a nop
// Reset method. // Reset method.
type NopResetter struct{} type NopResetter struct{}
@ -278,6 +317,10 @@ func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
return n, n, err return n, n, err
} }
func (nop) Span(src []byte, atEOF bool) (n int, err error) {
return len(src), nil
}
type discard struct{ NopResetter } type discard struct{ NopResetter }
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
@ -289,8 +332,8 @@ var (
// by consuming all bytes and writing nothing. // by consuming all bytes and writing nothing.
Discard Transformer = discard{} Discard Transformer = discard{}
// Nop is a Transformer that copies src to dst. // Nop is a SpanningTransformer that copies src to dst.
Nop Transformer = nop{} Nop SpanningTransformer = nop{}
) )
// chain is a sequence of links. A chain with N Transformers has N+1 links and // chain is a sequence of links. A chain with N Transformers has N+1 links and
@ -358,6 +401,8 @@ func (c *chain) Reset() {
} }
} }
// TODO: make chain use Span (is going to be fun to implement!)
// Transform applies the transformers of c in sequence. // Transform applies the transformers of c in sequence.
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
// Set up src and dst in the chain. // Set up src and dst in the chain.
@ -448,8 +493,7 @@ func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err erro
return dstL.n, srcL.p, err return dstL.n, srcL.p, err
} }
// RemoveFunc returns a Transformer that removes from the input all runes r for // Deprecated: use runes.Remove instead.
// which f(r) is true. Illegal bytes in the input are replaced by RuneError.
func RemoveFunc(f func(r rune) bool) Transformer { func RemoveFunc(f func(r rune) bool) Transformer {
return removeF(f) return removeF(f)
} }


@ -84,7 +84,7 @@ func resolvePairedBrackets(s *isolatingRunSequence) {
dirEmbed = R dirEmbed = R
} }
p.locateBrackets(s.p.pairTypes, s.p.pairValues) p.locateBrackets(s.p.pairTypes, s.p.pairValues)
p.resolveBrackets(dirEmbed) p.resolveBrackets(dirEmbed, s.p.initialTypes)
} }
type bracketPairer struct { type bracketPairer struct {
@ -125,6 +125,8 @@ func (p *bracketPairer) matchOpener(pairValues []rune, opener, closer int) bool
return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]] return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]]
} }
const maxPairingDepth = 63
// locateBrackets locates matching bracket pairs according to BD16. // locateBrackets locates matching bracket pairs according to BD16.
// //
// This implementation uses a linked list instead of a stack, because, while // This implementation uses a linked list instead of a stack, because, while
@ -136,11 +138,17 @@ func (p *bracketPairer) locateBrackets(pairTypes []bracketType, pairValues []run
for i, index := range p.indexes { for i, index := range p.indexes {
// look at the bracket type for each character // look at the bracket type for each character
switch pairTypes[index] { if pairTypes[index] == bpNone || p.codesIsolatedRun[i] != ON {
case bpNone:
// continue scanning // continue scanning
continue
}
switch pairTypes[index] {
case bpOpen: case bpOpen:
// check if maximum pairing depth reached
if p.openers.Len() == maxPairingDepth {
p.openers.Init()
return
}
// remember opener location, most recent first // remember opener location, most recent first
p.openers.PushFront(i) p.openers.PushFront(i)
@ -270,7 +278,7 @@ func (p *bracketPairer) classBeforePair(loc bracketPair) Class {
} }
// assignBracketType implements rule N0 for a single bracket pair. // assignBracketType implements rule N0 for a single bracket pair.
func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class) { func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) {
// rule "N0, a", inspect contents of pair // rule "N0, a", inspect contents of pair
dirPair := p.classifyPairContent(loc, dirEmbed) dirPair := p.classifyPairContent(loc, dirEmbed)
@ -295,13 +303,33 @@ func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class) {
// direction // direction
// set the bracket types to the type found // set the bracket types to the type found
p.setBracketsToType(loc, dirPair, initialTypes)
}
func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) {
p.codesIsolatedRun[loc.opener] = dirPair p.codesIsolatedRun[loc.opener] = dirPair
p.codesIsolatedRun[loc.closer] = dirPair p.codesIsolatedRun[loc.closer] = dirPair
for i := loc.opener + 1; i < loc.closer; i++ {
index := p.indexes[i]
if initialTypes[index] != NSM {
break
}
p.codesIsolatedRun[i] = dirPair
}
for i := loc.closer + 1; i < len(p.indexes); i++ {
index := p.indexes[i]
if initialTypes[index] != NSM {
break
}
p.codesIsolatedRun[i] = dirPair
}
} }
// resolveBrackets implements rule N0 for a list of pairs. // resolveBrackets implements rule N0 for a list of pairs.
func (p *bracketPairer) resolveBrackets(dirEmbed Class) { func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) {
for _, loc := range p.pairPositions { for _, loc := range p.pairPositions {
p.assignBracketType(loc, dirEmbed) p.assignBracketType(loc, dirEmbed, initialTypes)
} }
} }


@ -309,6 +309,9 @@ func (p *paragraph) determineExplicitEmbeddingLevels() {
} }
if isIsolate { if isIsolate {
p.resultLevels[i] = stack.lastEmbeddingLevel() p.resultLevels[i] = stack.lastEmbeddingLevel()
if stack.lastDirectionalOverrideStatus() != ON {
p.resultTypes[i] = stack.lastDirectionalOverrideStatus()
}
} }
var newLevel level var newLevel level
@ -723,7 +726,7 @@ loop:
continue loop continue loop
} }
} }
log.Panicf("invalid bidi code %s present in assertOnly at position %d", t, s.indexes[i]) log.Panicf("invalid bidi code %v present in assertOnly at position %d", t, s.indexes[i])
} }
} }


@ -1,4 +1,4 @@
// This file was generated by go generate; DO NOT EDIT // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package bidi package bidi


@ -1,4 +1,4 @@
// This file was generated by go generate; DO NOT EDIT // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package bidi package bidi


@ -33,17 +33,9 @@ const (
// streamSafe implements the policy of when a CGJ should be inserted. // streamSafe implements the policy of when a CGJ should be inserted.
type streamSafe uint8 type streamSafe uint8
// mkStreamSafe is a shorthand for declaring a streamSafe var and calling // first inserts the first rune of a segment. It is a faster version of next if
// first on it. // it is known p represents the first rune in a segment.
func mkStreamSafe(p Properties) streamSafe {
return streamSafe(p.nTrailingNonStarters())
}
// first inserts the first rune of a segment.
func (ss *streamSafe) first(p Properties) { func (ss *streamSafe) first(p Properties) {
if *ss != 0 {
panic("!= 0")
}
*ss = streamSafe(p.nTrailingNonStarters()) *ss = streamSafe(p.nTrailingNonStarters())
} }
@ -66,7 +58,7 @@ func (ss *streamSafe) next(p Properties) ssState {
// be a non-starter. Note that it always hold that if nLead > 0 then // be a non-starter. Note that it always hold that if nLead > 0 then
// nLead == nTrail. // nLead == nTrail.
if n == 0 { if n == 0 {
*ss = 0 *ss = streamSafe(p.nTrailingNonStarters())
return ssStarter return ssStarter
} }
return ssSuccess return ssSuccess
@ -142,7 +134,6 @@ func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) {
func (rb *reorderBuffer) reset() { func (rb *reorderBuffer) reset() {
rb.nrune = 0 rb.nrune = 0
rb.nbyte = 0 rb.nbyte = 0
rb.ss = 0
} }
func (rb *reorderBuffer) doFlush() bool { func (rb *reorderBuffer) doFlush() bool {
@@ -257,6 +248,9 @@ func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) {
 // It flushes the buffer on each new segment start.
 func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr {
 	rb.tmpBytes.setBytes(dcomp)
+	// As the streamSafe accounting already handles the counting for modifiers,
+	// we don't have to call next. However, we do need to keep the accounting
+	// intact when flushing the buffer.
 	for i := 0; i < len(dcomp); {
 		info := rb.f.info(rb.tmpBytes, i)
 		if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() {
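Note: these composition.go changes keep the stream-safe counter on the reorder buffer (rb.ss) instead of resetting it or tracking it in locals. The observable behaviour is the Stream-Safe Text Format: over-long runs of combining marks are broken up with a COMBINING GRAPHEME JOINER (U+034F). A quick check through the public API; the count in the comment assumes the package's usual 30-non-starter limit:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// One base letter followed by 40 combining acute accents (U+0301).
	s := "a" + strings.Repeat("\u0301", 40)
	out := norm.NFC.String(s)
	// The normalizer breaks the over-long run by inserting U+034F (CGJ).
	fmt.Println(strings.Count(out, "\u034f")) // 1, assuming the 30-rune stream-safe limit
}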

View File

@@ -10,7 +10,7 @@ package norm
 // and its corresponding decomposing form share the same trie. Each trie maps
 // a rune to a uint16. The values take two forms. For v >= 0x8000:
 //   bits
-//   15: 1 (inverse of NFD_QD bit of qcInfo)
+//   15: 1 (inverse of NFD_QC bit of qcInfo)
 //   13..7: qcInfo (see below). isYesD is always true (no decompostion).
 //   6..0: ccc (compressed CCC value).
 // For v < 0x8000, the respective rune has a decomposition and v is an index
@@ -56,28 +56,31 @@ type formInfo struct {
 	nextMain iterFunc
 }
 
-var formTable []*formInfo
-
-func init() {
-	formTable = make([]*formInfo, 4)
-
-	for i := range formTable {
-		f := &formInfo{}
-		formTable[i] = f
-		f.form = Form(i)
-		if Form(i) == NFKD || Form(i) == NFKC {
-			f.compatibility = true
-			f.info = lookupInfoNFKC
-		} else {
-			f.info = lookupInfoNFC
-		}
-		f.nextMain = nextDecomposed
-		if Form(i) == NFC || Form(i) == NFKC {
-			f.nextMain = nextComposed
-			f.composing = true
-		}
-	}
-}
+var formTable = []*formInfo{{
+	form:          NFC,
+	composing:     true,
+	compatibility: false,
+	info:          lookupInfoNFC,
+	nextMain:      nextComposed,
+}, {
+	form:          NFD,
+	composing:     false,
+	compatibility: false,
+	info:          lookupInfoNFC,
+	nextMain:      nextDecomposed,
+}, {
+	form:          NFKC,
+	composing:     true,
+	compatibility: true,
+	info:          lookupInfoNFKC,
+	nextMain:      nextComposed,
+}, {
+	form:          NFKD,
+	composing:     false,
+	compatibility: true,
+	info:          lookupInfoNFKC,
+	nextMain:      nextDecomposed,
+}}
 
 // We do not distinguish between boundaries for NFC, NFD, etc. to avoid
 // unexpected behavior for the user. For example, in NFD, there is a boundary
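Note: the init() loop above is replaced by a static formTable literal with one entry per normalization form; the public NFC/NFD/NFKC/NFKD behaviour is unchanged. For reference, standard usage of the four forms (the expected output is ordinary Unicode normalization behaviour, not specific to this revision):

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	s := "ﬁ"                         // U+FB01 LATIN SMALL LIGATURE FI
	fmt.Println(norm.NFC.String(s))  // "ﬁ" – canonical forms keep the ligature
	fmt.Println(norm.NFD.String(s))  // "ﬁ"
	fmt.Println(norm.NFKC.String(s)) // "fi" – compatibility forms decompose it
	fmt.Println(norm.NFKD.String(s)) // "fi"
}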

View File

@@ -90,16 +90,20 @@ func (in *input) charinfoNFKC(p int) (uint16, int) {
 }
 
 func (in *input) hangul(p int) (r rune) {
+	var size int
 	if in.bytes == nil {
 		if !isHangulString(in.str[p:]) {
 			return 0
 		}
-		r, _ = utf8.DecodeRuneInString(in.str[p:])
+		r, size = utf8.DecodeRuneInString(in.str[p:])
 	} else {
 		if !isHangul(in.bytes[p:]) {
 			return 0
 		}
-		r, _ = utf8.DecodeRune(in.bytes[p:])
+		r, size = utf8.DecodeRune(in.bytes[p:])
+	}
+	if size != hangulUTF8Size {
+		return 0
 	}
 	return r
 }
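Note: hangul() now also records the decoded rune's byte length and rejects anything that is not exactly hangulUTF8Size bytes, so truncated or otherwise malformed input returns 0. The same guard in isolation, using only the standard library; the three-byte constant and the syllable range are assumptions mirroring the package's definitions:

package main

import (
	"fmt"
	"unicode/utf8"
)

const hangulUTF8Size = 3 // precomposed Hangul syllables always encode to 3 bytes in UTF-8

// decodeHangul returns the first rune of s if it is a complete, precomposed
// Hangul syllable (U+AC00..U+D7A3), and false otherwise.
func decodeHangul(s string) (rune, bool) {
	r, size := utf8.DecodeRuneInString(s)
	if size != hangulUTF8Size || r < 0xAC00 || r > 0xD7A3 {
		return 0, false // not a (complete) Hangul syllable
	}
	return r, true
}

func main() {
	fmt.Println(decodeHangul("가나"))     // 44032 true
	fmt.Println(decodeHangul("a"))        // 0 false
	fmt.Println(decodeHangul("\xea\xb0")) // 0 false – truncated UTF-8 sequence
}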

View File

@@ -41,6 +41,7 @@ func (i *Iter) Init(f Form, src []byte) {
 	i.next = i.rb.f.nextMain
 	i.asciiF = nextASCIIBytes
 	i.info = i.rb.f.info(i.rb.src, i.p)
+	i.rb.ss.first(i.info)
 }
 
 // InitString initializes i to iterate over src after normalizing it to Form f.
@@ -56,11 +57,12 @@ func (i *Iter) InitString(f Form, src string) {
 	i.next = i.rb.f.nextMain
 	i.asciiF = nextASCIIString
 	i.info = i.rb.f.info(i.rb.src, i.p)
+	i.rb.ss.first(i.info)
 }
 
 // Seek sets the segment to be returned by the next call to Next to start
 // at position p. It is the responsibility of the caller to set p to the
-// start of a UTF8 rune.
+// start of a segment.
 func (i *Iter) Seek(offset int64, whence int) (int64, error) {
 	var abs int64
 	switch whence {
@@ -84,6 +86,7 @@ func (i *Iter) Seek(offset int64, whence int) (int64, error) {
 	i.multiSeg = nil
 	i.next = i.rb.f.nextMain
 	i.info = i.rb.f.info(i.rb.src, i.p)
+	i.rb.ss.first(i.info)
 	return abs, nil
 }
@@ -161,6 +164,7 @@ func nextHangul(i *Iter) []byte {
 	if next >= i.rb.nsrc {
 		i.setDone()
 	} else if i.rb.src.hangul(next) == 0 {
+		i.rb.ss.next(i.info)
 		i.info = i.rb.f.info(i.rb.src, i.p)
 		i.next = i.rb.f.nextMain
 		return i.next(i)
@@ -204,12 +208,10 @@ func nextMultiNorm(i *Iter) []byte {
 		if info.BoundaryBefore() {
 			i.rb.compose()
 			seg := i.buf[:i.rb.flushCopy(i.buf[:])]
-			i.rb.ss.first(info)
 			i.rb.insertUnsafe(input{bytes: d}, j, info)
 			i.multiSeg = d[j+int(info.size):]
 			return seg
 		}
-		i.rb.ss.next(info)
 		i.rb.insertUnsafe(input{bytes: d}, j, info)
 		j += int(info.size)
 	}
@@ -222,9 +224,9 @@
 func nextDecomposed(i *Iter) (next []byte) {
 	outp := 0
 	inCopyStart, outCopyStart := i.p, 0
-	ss := mkStreamSafe(i.info)
 	for {
 		if sz := int(i.info.size); sz <= 1 {
+			i.rb.ss = 0
 			p := i.p
 			i.p++ // ASCII or illegal byte. Either way, advance by 1.
 			if i.p >= i.rb.nsrc {
@@ -243,6 +245,8 @@ func nextDecomposed(i *Iter) (next []byte) {
 			p := outp + len(d)
 			if outp > 0 {
 				i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
+				// TODO: this condition should not be possible, but we leave it
+				// in for defensive purposes.
 				if p > len(i.buf) {
 					return i.buf[:outp]
 				}
@@ -266,7 +270,7 @@
 			} else {
 				i.info = i.rb.f.info(i.rb.src, i.p)
 			}
-			switch ss.next(i.info) {
+			switch i.rb.ss.next(i.info) {
 			case ssOverflow:
 				i.next = nextCGJDecompose
 				fallthrough
@@ -309,7 +313,7 @@
 		}
 		prevCC := i.info.tccc
 		i.info = i.rb.f.info(i.rb.src, i.p)
-		if v := ss.next(i.info); v == ssStarter {
+		if v := i.rb.ss.next(i.info); v == ssStarter {
 			break
 		} else if v == ssOverflow {
 			i.next = nextCGJDecompose
@@ -335,10 +339,6 @@ doNorm:
 
 func doNormDecomposed(i *Iter) []byte {
 	for {
-		if s := i.rb.ss.next(i.info); s == ssOverflow {
-			i.next = nextCGJDecompose
-			break
-		}
 		i.rb.insertUnsafe(i.rb.src, i.p, i.info)
 		if i.p += int(i.info.size); i.p >= i.rb.nsrc {
 			i.setDone()
@@ -348,6 +348,10 @@ func doNormDecomposed(i *Iter) []byte {
 		if i.info.ccc == 0 {
 			break
 		}
+		if s := i.rb.ss.next(i.info); s == ssOverflow {
+			i.next = nextCGJDecompose
+			break
+		}
 	}
 	// new segment or too many combining characters: exit normalization
 	return i.buf[:i.rb.flushCopy(i.buf[:])]
@@ -357,6 +361,7 @@ func nextCGJDecompose(i *Iter) []byte {
 	i.rb.ss = 0
 	i.rb.insertCGJ()
 	i.next = nextDecomposed
+	i.rb.ss.first(i.info)
 	buf := doNormDecomposed(i)
 	return buf
 }
@@ -365,7 +370,6 @@
 func nextComposed(i *Iter) []byte {
 	outp, startp := 0, i.p
 	var prevCC uint8
-	ss := mkStreamSafe(i.info)
 	for {
 		if !i.info.isYesC() {
 			goto doNorm
@@ -385,11 +389,12 @@
 			i.setDone()
 			break
 		} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
+			i.rb.ss = 0
 			i.next = i.asciiF
 			break
 		}
 		i.info = i.rb.f.info(i.rb.src, i.p)
-		if v := ss.next(i.info); v == ssStarter {
+		if v := i.rb.ss.next(i.info); v == ssStarter {
 			break
 		} else if v == ssOverflow {
 			i.next = nextCGJCompose
@@ -401,8 +406,10 @@
 		}
 		return i.returnSlice(startp, i.p)
 	doNorm:
+		// reset to start position
 		i.p = startp
 		i.info = i.rb.f.info(i.rb.src, i.p)
+		i.rb.ss.first(i.info)
 		if i.info.multiSegment() {
 			d := i.info.Decomposition()
 			info := i.rb.f.info(input{bytes: d}, 0)
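Note: the iter.go changes above move the stream-safe state onto i.rb.ss so that Init, InitString, and Seek can prime it with first(); the exported Iter API itself is unchanged. For reference, a typical segment-by-segment loop with the public API (standard golang.org/x/text/unicode/norm usage, not introduced by this commit):

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	var it norm.Iter
	it.InitString(norm.NFD, "élan") // iterate over the NFD form of the input
	for !it.Done() {
		seg := it.Next() // one normalized chunk at a time
		fmt.Printf("%q\n", seg)
	}
}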

View File

@@ -35,12 +35,9 @@ func main() {
 	computeNonStarterCounts()
 	verifyComputed()
 	printChars()
-	if *test {
-		testDerived()
-		printTestdata()
-	} else {
-		makeTables()
-	}
+	testDerived()
+	printTestdata()
+	makeTables()
 }
 
 var (
@@ -602,6 +599,7 @@ func printCharInfoTables(w io.Writer) int {
 		}
 		index := normalDecomp
 		nTrail := chars[r].nTrailingNonStarters
+		nLead := chars[r].nLeadingNonStarters
 		if tccc > 0 || lccc > 0 || nTrail > 0 {
 			tccc <<= 2
 			tccc |= nTrail
@@ -612,7 +610,7 @@ func printCharInfoTables(w io.Writer) int {
 				index = firstCCC
 			}
 		}
-		if lccc > 0 {
+		if lccc > 0 || nLead > 0 {
 			s += string([]byte{lccc})
 			if index == firstCCC {
 				log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
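Note: the generator packs the trailing-non-starter count into the two low bits of the compressed CCC byte (tccc <<= 2; tccc |= nTrail), and with nLead now tracked it also emits the leading-CCC byte whenever a rune has leading non-starters, even if lccc is zero. A tiny round-trip of that two-bit packing, as an illustration of the layout rather than of the generator itself:

package main

import "fmt"

// pack stores a compressed CCC value in the upper six bits and a
// trailing-non-starter count (0–3) in the lower two bits, mirroring
// the tccc<<2 | nTrail packing used above.
func pack(ccc, nTrail uint8) uint8 { return ccc<<2 | nTrail&0x3 }

// unpack reverses pack.
func unpack(v uint8) (ccc, nTrail uint8) { return v >> 2, v & 0x3 }

func main() {
	v := pack(33, 2)
	ccc, nTrail := unpack(v)
	fmt.Println(v, ccc, nTrail) // 134 33 2
}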

Some files were not shown because too many files have changed in this diff.