mirror of https://github.com/k3s-io/k3s
Merge pull request #68147 from timothysc/etcd-3.2-latest
Automatic merge from submit-queue (batch tested with PRs 67691, 68147). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md.

Update etcd client to 3.2.24 for latest release

**What this PR does / why we need it**: Updates the etcd client to 3.2.24, the latest release in the 3.2 series. See https://github.com/etcd-io/etcd/blob/master/CHANGELOG-3.2.md for details.

**Special notes for your reviewer**: This changes only the client; updating the server components requires a Googler to push the 3.2.24 image.

**Release note**:
```
Update etcd client interface to 3.2.24
```

/assign @jpbetz @wojtek-t
/cc @liggitt @kubernetes/sig-cluster-lifecycle @kubernetes/sig-scalability-pr-reviews
commit 2811228dcf
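The change below only touches the vendored client packages. As a quick sanity check after vendoring, the release string exported by the etcd version package should report 3.2.24; a minimal sketch (assuming the vendored import path github.com/coreos/etcd/version and its exported Version variable):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/version" // one of the vendored packages bumped below
)

func main() {
	// With the dependency pins updated, the vendored client identifies itself
	// as the 3.2.24 release.
	fmt.Println("vendored etcd client version:", version.Version)
}
```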
@@ -48,295 +48,295 @@ (Godeps.json-style dependency manifest; file names are suppressed in this view)
Every listed github.com/coreos/etcd/... entry keeps its ImportPath and changes its pinned revision:
-      "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada"
+      "Rev": "420a452267a7ce45b3fcbed04d54030d69964fc1"
Affected etcd packages: alarm, auth, auth/authpb, client, clientv3, clientv3/concurrency, clientv3/namespace, clientv3/naming, compactor, discovery, embed, error, etcdserver, etcdserver/api, etcdserver/api/etcdhttp, etcdserver/api/v2http, etcdserver/api/v2http/httptypes, etcdserver/api/v3client, etcdserver/api/v3election, etcdserver/api/v3election/v3electionpb, etcdserver/api/v3election/v3electionpb/gw, etcdserver/api/v3lock, etcdserver/api/v3lock/v3lockpb, etcdserver/api/v3lock/v3lockpb/gw, etcdserver/api/v3rpc, etcdserver/api/v3rpc/rpctypes, etcdserver/auth, etcdserver/etcdserverpb, etcdserver/etcdserverpb/gw, etcdserver/membership, etcdserver/stats, integration, lease, lease/leasehttp, lease/leasepb, mvcc, mvcc/backend, mvcc/mvccpb, pkg/adt, pkg/contention, pkg/cors, pkg/cpuutil, pkg/crc, pkg/debugutil, pkg/fileutil, pkg/httputil, pkg/idutil, pkg/ioutil, pkg/logutil, pkg/monotime, pkg/netutil, pkg/pathutil, pkg/pbutil, pkg/runtime, pkg/schedule, pkg/srv, pkg/testutil, pkg/tlsutil, pkg/transport, pkg/types, pkg/wait, proxy/grpcproxy, proxy/grpcproxy/adapter, proxy/grpcproxy/cache, raft, raft/raftpb, rafthttp, snap, snap/snappb, store, version, wal, wal/walpb. The hunk ends with the unchanged entry for github.com/coreos/go-semver/semver as context.
@@ -44,295 +44,295 @@ (second Godeps.json-style manifest with the identical pin change)
The same set of github.com/coreos/etcd/... packages listed above moves from Rev 95a726a27e09030f9ccbd9982a1508f5a6d25ada to Rev 420a452267a7ce45b3fcbed04d54030d69964fc1. The hunk ends with the unchanged entry for github.com/coreos/go-oidc as context.
@@ -36,51 +36,51 @@ (smaller Godeps.json-style manifest covering the client-only dependency set)
The entries for github.com/coreos/etcd/{auth/authpb, client, clientv3, etcdserver/api/v3rpc/rpctypes, etcdserver/etcdserverpb, mvcc/mvccpb, pkg/pathutil, pkg/srv, pkg/tlsutil, pkg/transport, pkg/types, version} all move from Rev 95a726a27e09030f9ccbd9982a1508f5a6d25ada to Rev 420a452267a7ce45b3fcbed04d54030d69964fc1. The hunk ends with the unchanged entry for github.com/coreos/go-semver/semver as context.
@@ -36,51 +36,51 @@ (another client-only Godeps.json-style manifest, same change as the previous hunk)
The entries for github.com/coreos/etcd/{auth/authpb, client, clientv3, etcdserver/api/v3rpc/rpctypes, etcdserver/etcdserverpb, mvcc/mvccpb, pkg/pathutil, pkg/srv, pkg/tlsutil, pkg/transport, pkg/types, version} move from Rev 95a726a27e09030f9ccbd9982a1508f5a6d25ada to Rev 420a452267a7ce45b3fcbed04d54030d69964fc1. The hunk ends with the unchanged entry for github.com/coreos/go-semver/semver as context.
vendor/github.com/coreos/etcd/auth/simple_token.go (file inferred from function context):
@@ -118,6 +118,11 @@ func (t *tokenSimple) genTokenPrefix() (string, error) {
 
 func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
 	t.simpleTokensMu.Lock()
+	defer t.simpleTokensMu.Unlock()
+	if t.simpleTokenKeeper == nil {
+		return
+	}
+
 	_, ok := t.simpleTokens[token]
 	if ok {
 		plog.Panicf("token %s is alredy used", token)
@@ -125,7 +130,6 @@ func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
 
 	t.simpleTokens[token] = username
 	t.simpleTokenKeeper.addSimpleToken(token)
-	t.simpleTokensMu.Unlock()
 }
 
 func (t *tokenSimple) invalidateUser(username string) {
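The hunk above adds an early return (when simpleTokenKeeper is nil) and switches the function to a deferred unlock so every exit path releases the mutex. A generic standalone sketch of that pattern, not etcd code:

```go
package main

import (
	"fmt"
	"sync"
)

type tokenRegistry struct {
	mu     sync.Mutex
	tokens map[string]string
	ready  bool
}

// assign releases the mutex on every return path because the unlock is
// deferred; this stays correct even as early returns are added later.
func (r *tokenRegistry) assign(user, token string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if !r.ready { // early return, analogous to the nil simpleTokenKeeper check
		return
	}
	r.tokens[token] = user
}

func main() {
	r := &tokenRegistry{tokens: map[string]string{}, ready: true}
	r.assign("alice", "token-1")
	fmt.Println(r.tokens)
}
```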
vendor/github.com/coreos/etcd/clientv3/BUILD (file inferred from context):
@@ -10,13 +10,13 @@ go_library(
         "compare.go",
         "config.go",
         "doc.go",
-        "grpc_options.go",
         "health_balancer.go",
         "kv.go",
         "lease.go",
         "logger.go",
         "maintenance.go",
         "op.go",
+        "options.go",
         "ready_wait.go",
         "retry.go",
         "sort.go",
@@ -31,6 +31,7 @@ go_library(
         "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library",
         "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
         "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
+        "//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
         "//vendor/golang.org/x/net/context:go_default_library",
         "//vendor/google.golang.org/grpc:go_default_library",
         "//vendor/google.golang.org/grpc/codes:go_default_library",
vendor/github.com/coreos/etcd/clientv3/client.go (file inferred from context):
@@ -529,6 +529,20 @@ func isHaltErr(ctx context.Context, err error) bool {
 	return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
 }
 
+// isUnavailableErr returns true if the given error is an unavailable error
+func isUnavailableErr(ctx context.Context, err error) bool {
+	if ctx != nil && ctx.Err() != nil {
+		return false
+	}
+	if err == nil {
+		return false
+	}
+	ev, _ := status.FromError(err)
+	// Unavailable codes mean the system will be right back.
+	// (e.g., can't connect, lost leader)
+	return ev.Code() == codes.Unavailable
+}
+
 func toErr(ctx context.Context, err error) error {
 	if err == nil {
 		return nil
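The new isUnavailableErr helper classifies errors by their gRPC status code so callers can tell transient outages from hard failures. A standalone sketch of the same idea using only the grpc status/codes packages (names here are illustrative, not etcd's API):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// retriable reports whether err looks like a transient "server unavailable"
// condition (for example a lost leader or refused connection) rather than a
// permanent failure.
func retriable(err error) bool {
	if err == nil {
		return false
	}
	s, _ := status.FromError(err) // non-gRPC errors map to codes.Unknown
	return s.Code() == codes.Unavailable
}

func main() {
	err := status.Error(codes.Unavailable, "etcdserver: leader changed")
	fmt.Println(retriable(err)) // true: worth retrying with backoff
}
```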
vendor/github.com/coreos/etcd/clientv3/cluster.go (file inferred from context):
@@ -16,6 +16,7 @@ package clientv3
 
 import (
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/pkg/types"
 
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
@@ -65,6 +66,11 @@ func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
 }
 
 func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+	// fail-fast before panic in rafthttp
+	if _, err := types.NewURLs(peerAddrs); err != nil {
+		return nil, err
+	}
+
 	r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
 	resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
 	if err != nil {
@@ -83,6 +89,11 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes
 }
 
 func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
+	// fail-fast before panic in rafthttp
+	if _, err := types.NewURLs(peerAddrs); err != nil {
+		return nil, err
+	}
+
 	// it is safe to retry on update.
 	r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
 	resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
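With the change above, MemberAdd and MemberUpdate reject malformed peer URLs up front instead of panicking later inside rafthttp. The same validation can be reproduced from the caller's side with etcd's pkg/types (a small sketch; the sample address is illustrative):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	peerURLs := []string{"10.0.0.1:2380"} // missing scheme, so not a valid peer URL
	if _, err := types.NewURLs(peerURLs); err != nil {
		// This is the error MemberAdd/MemberUpdate now return before any RPC is made.
		fmt.Println("rejected before any RPC:", err)
		return
	}
	fmt.Println("peer URLs are well-formed")
}
```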
vendor/github.com/coreos/etcd/clientv3/lease.go (file inferred from context):
@@ -71,8 +71,6 @@ const (
 	// defaultTTL is the assumed lease TTL used for the first keepalive
 	// deadline before the actual TTL is known to the client.
 	defaultTTL = 5 * time.Second
-	// a small buffer to store unsent lease responses.
-	leaseResponseChSize = 16
 	// NoLease is a lease ID for the absence of a lease.
 	NoLease LeaseID = 0
 
@@ -80,6 +78,11 @@ const (
 	retryConnWait = 500 * time.Millisecond
 )
 
+// LeaseResponseChSize is the size of buffer to store unsent lease responses.
+// WARNING: DO NOT UPDATE.
+// Only for testing purposes.
+var LeaseResponseChSize = 16
+
 // ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
 //
 // This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
@@ -219,7 +222,7 @@ func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption
 }
 
 func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
-	ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
+	ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
 
 	l.mu.Lock()
 	// ensure that recvKeepAliveLoop is still running
@@ -475,9 +478,10 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
 	for _, ch := range ka.chs {
 		select {
 		case ch <- karesp:
-			ka.nextKeepAlive = nextKeepAlive
 		default:
 		}
+		// still advance in order to rate-limit keep-alive sends
+		ka.nextKeepAlive = nextKeepAlive
 	}
 }
vendor/github.com/coreos/etcd/clientv3/options.go (file inferred from context):
@@ -44,3 +44,6 @@ var (
 // Some options are exposed to "clientv3.Config".
 // Defaults will be overridden by the settings in "clientv3.Config".
 var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
+
+// MaxLeaseTTL is the maximum lease TTL value
+const MaxLeaseTTL = 9000000000
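For a sense of the new cap (back-of-the-envelope, not part of the diff): 9,000,000,000 seconds is roughly 285 years, and it sits just below the largest TTL that still fits in an int64 once converted to nanoseconds, which is a plausible reason for the specific value:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	const maxLeaseTTL = 9000000000 // seconds, as in the new clientv3 constant
	// Largest whole-second TTL representable as an int64 number of nanoseconds.
	overflowLimit := math.MaxInt64 / int64(time.Second) // 9223372036
	years := float64(maxLeaseTTL) / (365.25 * 24 * 3600)
	fmt.Printf("cap ≈ %.0f years; int64-nanosecond limit is %d s\n", years, overflowLimit)
}
```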
@@ -769,10 +769,13 @@ func (w *watchGrpcStream) joinSubstreams() {
     }
 }
 
+var maxBackoff = 100 * time.Millisecond
+
 // openWatchClient retries opening a watch client until success or halt.
 // manually retry in case "ws==nil && err==nil"
 // TODO: remove FailFast=false
 func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
+    backoff := time.Millisecond
     for {
         select {
         case <-w.ctx.Done():
@@ -788,6 +791,17 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
         if isHaltErr(w.ctx, err) {
             return nil, v3rpc.Error(err)
         }
+        if isUnavailableErr(w.ctx, err) {
+            // retry, but backoff
+            if backoff < maxBackoff {
+                // 25% backoff factor
+                backoff = backoff + backoff/4
+                if backoff > maxBackoff {
+                    backoff = maxBackoff
+                }
+            }
+            time.Sleep(backoff)
+        }
     }
     return ws, nil
 }
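openWatchClient now sleeps between reconnect attempts with a delay that grows 25% per failure, capped at maxBackoff. A self-contained sketch of the same capped-growth retry loop; retryWithBackoff and dial are placeholder names, not part of the etcd client.

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff keeps calling dial until it succeeds, sleeping between
// attempts with a delay that grows by 25% per failure, capped at maxBackoff.
func retryWithBackoff(dial func() error, maxBackoff time.Duration) {
	backoff := time.Millisecond
	for {
		if err := dial(); err == nil {
			return
		}
		if backoff < maxBackoff {
			backoff += backoff / 4 // 25% growth factor
			if backoff > maxBackoff {
				backoff = maxBackoff
			}
		}
		time.Sleep(backoff)
	}
}

func main() {
	attempts := 0
	dial := func() error {
		attempts++
		if attempts < 5 {
			return errors.New("unavailable")
		}
		return nil
	}
	retryWithBackoff(dial, 100*time.Millisecond)
	fmt.Println("connected after", attempts, "attempts")
}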
@@ -31,6 +31,7 @@ go_library(
         "//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/runtime:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/srv:go_default_library",
+        "//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
         "//vendor/github.com/coreos/etcd/rafthttp:go_default_library",
@@ -20,6 +20,7 @@ import (
     "net"
     "net/http"
     "net/url"
+    "path/filepath"
     "strings"
     "time"
 
@@ -27,6 +28,7 @@ import (
     "github.com/coreos/etcd/pkg/cors"
     "github.com/coreos/etcd/pkg/netutil"
     "github.com/coreos/etcd/pkg/srv"
+    "github.com/coreos/etcd/pkg/tlsutil"
     "github.com/coreos/etcd/pkg/transport"
     "github.com/coreos/etcd/pkg/types"
 
@@ -87,8 +89,38 @@ type Config struct {
     // TickMs is the number of milliseconds between heartbeat ticks.
     // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1).
     // make ticks a cluster wide configuration.
     TickMs     uint `json:"heartbeat-interval"`
     ElectionMs uint `json:"election-timeout"`
 
+    // InitialElectionTickAdvance is true, then local member fast-forwards
+    // election ticks to speed up "initial" leader election trigger. This
+    // benefits the case of larger election ticks. For instance, cross
+    // datacenter deployment may require longer election timeout of 10-second.
+    // If true, local node does not need wait up to 10-second. Instead,
+    // forwards its election ticks to 8-second, and have only 2-second left
+    // before leader election.
+    //
+    // Major assumptions are that:
+    //  - cluster has no active leader thus advancing ticks enables faster
+    //    leader election, or
+    //  - cluster already has an established leader, and rejoining follower
+    //    is likely to receive heartbeats from the leader after tick advance
+    //    and before election timeout.
+    //
+    // However, when network from leader to rejoining follower is congested,
+    // and the follower does not receive leader heartbeat within left election
+    // ticks, disruptive election has to happen thus affecting cluster
+    // availabilities.
+    //
+    // Disabling this would slow down initial bootstrap process for cross
+    // datacenter deployments. Make your own tradeoffs by configuring
+    // --initial-election-tick-advance at the cost of slow initial bootstrap.
+    //
+    // If single-node, it advances ticks regardless.
+    //
+    // See https://github.com/coreos/etcd/issues/9333 for more detail.
+    InitialElectionTickAdvance bool `json:"initial-election-tick-advance"`
+
     QuotaBackendBytes int64 `json:"quota-backend-bytes"`
     MaxRequestBytes   uint  `json:"max-request-bytes"`
 
@@ -128,6 +160,11 @@ type Config struct {
     PeerTLSInfo transport.TLSInfo
     PeerAutoTLS bool
 
+    // CipherSuites is a list of supported TLS cipher suites between
+    // client/server and peers. If empty, Go auto-populates the list.
+    // Note that cipher suites are prioritized in the given order.
+    CipherSuites []string `json:"cipher-suites"`
+
     // debug
 
     Debug bool `json:"debug"`
@@ -190,27 +227,28 @@ func NewConfig() *Config {
     lcurl, _ := url.Parse(DefaultListenClientURLs)
     acurl, _ := url.Parse(DefaultAdvertiseClientURLs)
     cfg := &Config{
         CorsInfo:              &cors.CORSInfo{},
         MaxSnapFiles:          DefaultMaxSnapshots,
         MaxWalFiles:           DefaultMaxWALs,
         Name:                  DefaultName,
         SnapCount:             etcdserver.DefaultSnapCount,
         MaxRequestBytes:       DefaultMaxRequestBytes,
         GRPCKeepAliveMinTime:  DefaultGRPCKeepAliveMinTime,
         GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval,
         GRPCKeepAliveTimeout:  DefaultGRPCKeepAliveTimeout,
         TickMs:                100,
         ElectionMs:            1000,
+        InitialElectionTickAdvance: true,
         LPUrls:              []url.URL{*lpurl},
         LCUrls:              []url.URL{*lcurl},
         APUrls:              []url.URL{*apurl},
         ACUrls:              []url.URL{*acurl},
         ClusterState:        ClusterStateFlagNew,
         InitialClusterToken: "etcd-cluster",
         StrictReconfigCheck: true,
         Metrics:             "basic",
         EnableV2:            true,
         AuthToken:           "simple",
     }
     cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
     return cfg
@@ -298,6 +336,25 @@ func (cfg *configYAML) configFromFile(path string) error {
     return cfg.Validate()
 }
 
+func updateCipherSuites(tls *transport.TLSInfo, ss []string) error {
+    if len(tls.CipherSuites) > 0 && len(ss) > 0 {
+        return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss)
+    }
+    if len(ss) > 0 {
+        cs := make([]uint16, len(ss))
+        for i, s := range ss {
+            var ok bool
+            cs[i], ok = tlsutil.GetCipherSuite(s)
+            if !ok {
+                return fmt.Errorf("unexpected TLS cipher suite %q", s)
+            }
+        }
+        tls.CipherSuites = cs
+    }
+    return nil
+}
+
+// Validate ensures that '*embed.Config' fields are properly configured.
 func (cfg *Config) Validate() error {
     if err := checkBindURLs(cfg.LPUrls); err != nil {
         return err
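updateCipherSuites resolves configured cipher-suite names into the numeric IDs crypto/tls expects, rejecting unknown names. A rough standalone equivalent is sketched below; the lookup table and the names toCipherSuites/cipherSuiteIDs are illustrative (etcd's tlsutil.GetCipherSuite is essentially such a table), and only a few suites are listed.

package main

import (
	"crypto/tls"
	"fmt"
)

// cipherSuiteIDs maps a few well-known suite names to their crypto/tls IDs.
var cipherSuiteIDs = map[string]uint16{
	"TLS_RSA_WITH_AES_128_GCM_SHA256":         tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256":   tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
	"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
}

// toCipherSuites resolves names to IDs, failing on the first unknown name.
func toCipherSuites(names []string) ([]uint16, error) {
	ids := make([]uint16, 0, len(names))
	for _, name := range names {
		id, ok := cipherSuiteIDs[name]
		if !ok {
			return nil, fmt.Errorf("unexpected TLS cipher suite %q", name)
		}
		ids = append(ids, id)
	}
	return ids, nil
}

func main() {
	ids, err := toCipherSuites([]string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"})
	if err != nil {
		panic(err)
	}
	cfg := &tls.Config{CipherSuites: ids} // suites are honored in the given order
	fmt.Printf("configured %d suite(s)\n", len(cfg.CipherSuites))
}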
@@ -400,6 +457,44 @@ func (cfg Config) defaultClientHost() bool {
     return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs
 }
 
+func (cfg *Config) ClientSelfCert() (err error) {
+    if !cfg.ClientAutoTLS {
+        return nil
+    }
+    if !cfg.ClientTLSInfo.Empty() {
+        plog.Warningf("ignoring client auto TLS since certs given")
+        return nil
+    }
+    chosts := make([]string, len(cfg.LCUrls))
+    for i, u := range cfg.LCUrls {
+        chosts[i] = u.Host
+    }
+    cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts)
+    if err != nil {
+        return err
+    }
+    return updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites)
+}
+
+func (cfg *Config) PeerSelfCert() (err error) {
+    if !cfg.PeerAutoTLS {
+        return nil
+    }
+    if !cfg.PeerTLSInfo.Empty() {
+        plog.Warningf("ignoring peer auto TLS since certs given")
+        return nil
+    }
+    phosts := make([]string, len(cfg.LPUrls))
+    for i, u := range cfg.LPUrls {
+        phosts[i] = u.Host
+    }
+    cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts)
+    if err != nil {
+        return err
+    }
+    return updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites)
+}
+
 // UpdateDefaultClusterFromName updates cluster advertise URLs with, if available, default host,
 // if advertise URLs are default values(localhost:2379,2380) AND if listen URL is 0.0.0.0.
 // e.g. advertise peer URL localhost:2380 or listen peer URL 0.0.0.0:2380
@@ -22,7 +22,6 @@ import (
     defaultLog "log"
     "net"
     "net/http"
-    "path/filepath"
     "sync"
     "time"
 
@@ -128,30 +127,31 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
     }
 
     srvcfg := &etcdserver.ServerConfig{
         Name:                    cfg.Name,
         ClientURLs:              cfg.ACUrls,
         PeerURLs:                cfg.APUrls,
         DataDir:                 cfg.Dir,
         DedicatedWALDir:         cfg.WalDir,
         SnapCount:               cfg.SnapCount,
         MaxSnapFiles:            cfg.MaxSnapFiles,
         MaxWALFiles:             cfg.MaxWalFiles,
         InitialPeerURLsMap:      urlsmap,
         InitialClusterToken:     token,
         DiscoveryURL:            cfg.Durl,
         DiscoveryProxy:          cfg.Dproxy,
         NewCluster:              cfg.IsNewCluster(),
         ForceNewCluster:         cfg.ForceNewCluster,
         PeerTLSInfo:             cfg.PeerTLSInfo,
         TickMs:                  cfg.TickMs,
         ElectionTicks:           cfg.ElectionTicks(),
+        InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
         AutoCompactionRetention: cfg.AutoCompactionRetention,
         QuotaBackendBytes:       cfg.QuotaBackendBytes,
         MaxRequestBytes:         cfg.MaxRequestBytes,
         StrictReconfigCheck:     cfg.StrictReconfigCheck,
         ClientCertAuthEnabled:   cfg.ClientTLSInfo.ClientCertAuth,
         AuthToken:               cfg.AuthToken,
         Debug:                   cfg.Debug,
     }
 
     if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
@@ -263,17 +263,11 @@ func stopServers(ctx context.Context, ss *servers) {
 func (e *Etcd) Err() <-chan error { return e.errc }
 
 func startPeerListeners(cfg *Config) (peers []*peerListener, err error) {
-    if cfg.PeerAutoTLS && cfg.PeerTLSInfo.Empty() {
-        phosts := make([]string, len(cfg.LPUrls))
-        for i, u := range cfg.LPUrls {
-            phosts[i] = u.Host
-        }
-        cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts)
-        if err != nil {
-            plog.Fatalf("could not get certs (%v)", err)
-        }
-    } else if cfg.PeerAutoTLS {
-        plog.Warningf("ignoring peer auto TLS since certs given")
+    if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil {
+        return nil, err
+    }
+    if err = cfg.PeerSelfCert(); err != nil {
+        plog.Fatalf("could not get certs (%v)", err)
     }
 
     if !cfg.PeerTLSInfo.Empty() {
@@ -358,17 +352,11 @@ func (e *Etcd) servePeers() (err error) {
 }
 
 func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
-    if cfg.ClientAutoTLS && cfg.ClientTLSInfo.Empty() {
-        chosts := make([]string, len(cfg.LCUrls))
-        for i, u := range cfg.LCUrls {
-            chosts[i] = u.Host
-        }
-        cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts)
-        if err != nil {
-            plog.Fatalf("could not get certs (%v)", err)
-        }
-    } else if cfg.ClientAutoTLS {
-        plog.Warningf("ignoring client auto TLS since certs given")
+    if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil {
+        return nil, err
+    }
+    if err = cfg.ClientSelfCert(); err != nil {
+        plog.Fatalf("could not get certs (%v)", err)
     }
 
     if cfg.EnablePprof {
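The listener setup now delegates auto-TLS to ClientSelfCert/PeerSelfCert, which call transport.SelfCert to mint a self-signed certificate for the listen hosts. Below is a minimal, in-memory sketch of what such a helper does; selfSignedCert is a hypothetical name, and etcd's real implementation additionally persists the cert and key under dir/fixtures.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"net"
	"time"
)

// selfSignedCert builds an in-memory self-signed certificate valid for the
// given hosts (DNS names or IP addresses).
func selfSignedCert(hosts []string) (tls.Certificate, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return tls.Certificate{}, err
	}
	tmpl := x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{Organization: []string{"example-auto-tls"}},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(365 * 24 * time.Hour),
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
		IsCA:                  true,
		BasicConstraintsValid: true,
	}
	for _, h := range hosts {
		if ip := net.ParseIP(h); ip != nil {
			tmpl.IPAddresses = append(tmpl.IPAddresses, ip)
		} else {
			tmpl.DNSNames = append(tmpl.DNSNames, h)
		}
	}
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		return tls.Certificate{}, err
	}
	return tls.Certificate{Certificate: [][]byte{der}, PrivateKey: key}, nil
}

func main() {
	cert, err := selfSignedCert([]string{"localhost", "127.0.0.1"})
	if err != nil {
		panic(err)
	}
	fmt.Println("generated certificate chains:", len(cert.Certificate))
}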
@@ -61,6 +61,7 @@ go_library(
         "//vendor/github.com/coreos/go-semver/semver:go_default_library",
         "//vendor/github.com/coreos/pkg/capnslog:go_default_library",
         "//vendor/github.com/gogo/protobuf/proto:go_default_library",
+        "//vendor/github.com/golang/protobuf/proto:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
         "//vendor/golang.org/x/net/context:go_default_library",
     ],
@@ -15,6 +15,8 @@
 package v3election
 
 import (
+    "errors"
+
     "golang.org/x/net/context"
 
     "github.com/coreos/etcd/clientv3"
@@ -22,6 +24,10 @@ import (
     epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
 )
 
+// ErrMissingLeaderKey is returned when election API request
+// is missing the "leader" field.
+var ErrMissingLeaderKey = errors.New(`"leader" field must be provided`)
+
 type electionServer struct {
     c *clientv3.Client
 }
@@ -51,6 +57,9 @@ func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest
 }
 
 func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) {
+    if req.Leader == nil {
+        return nil, ErrMissingLeaderKey
+    }
     s, err := es.session(ctx, req.Leader.Lease)
     if err != nil {
         return nil, err
@@ -98,6 +107,9 @@ func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) (*
 }
 
 func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) {
+    if req.Leader == nil {
+        return nil, ErrMissingLeaderKey
+    }
     s, err := es.session(ctx, req.Leader.Lease)
     if err != nil {
         return nil, err
@@ -44,6 +44,7 @@ go_library(
         "//vendor/google.golang.org/grpc/credentials:go_default_library",
         "//vendor/google.golang.org/grpc/grpclog:go_default_library",
         "//vendor/google.golang.org/grpc/metadata:go_default_library",
+        "//vendor/google.golang.org/grpc/status:go_default_library",
     ],
 )
 
@@ -92,7 +92,11 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
             return nil
         }
         if err != nil {
-            plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
+            if isClientCtxErr(stream.Context().Err(), err) {
+                plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
+            } else {
+                plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
+            }
             return err
         }
 
@@ -118,7 +122,11 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
         resp.TTL = ttl
         err = stream.Send(resp)
         if err != nil {
-            plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
+            if isClientCtxErr(stream.Context().Err(), err) {
+                plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
+            } else {
+                plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
+            }
             return err
         }
     }
@@ -32,8 +32,9 @@ var (
     ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
     ErrGRPCNoSpace   = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
 
     ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
     ErrGRPCLeaseExist    = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
+    ErrGRPCLeaseTTLTooLarge = grpc.Errorf(codes.OutOfRange, "etcdserver: too large lease TTL")
 
     ErrGRPCMemberExist  = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
     ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
@@ -79,8 +80,9 @@ var (
     grpc.ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
     grpc.ErrorDesc(ErrGRPCNoSpace):   ErrGRPCNoSpace,
 
     grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
     grpc.ErrorDesc(ErrGRPCLeaseExist):    ErrGRPCLeaseExist,
+    grpc.ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
 
     grpc.ErrorDesc(ErrGRPCMemberExist):  ErrGRPCMemberExist,
     grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
@@ -126,8 +128,9 @@ var (
     ErrFutureRev = Error(ErrGRPCFutureRev)
     ErrNoSpace   = Error(ErrGRPCNoSpace)
 
     ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
     ErrLeaseExist    = Error(ErrGRPCLeaseExist)
+    ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
 
     ErrMemberExist  = Error(ErrGRPCMemberExist)
     ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
@@ -15,14 +15,18 @@
 package v3rpc
 
 import (
+    "strings"
+
     "github.com/coreos/etcd/auth"
     "github.com/coreos/etcd/etcdserver"
     "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
     "github.com/coreos/etcd/etcdserver/membership"
     "github.com/coreos/etcd/lease"
     "github.com/coreos/etcd/mvcc"
 
     "google.golang.org/grpc"
     "google.golang.org/grpc/codes"
+    "google.golang.org/grpc/status"
 )
 
 func togRPCError(err error) error {
@@ -68,6 +72,8 @@ func togRPCError(err error) error {
         return rpctypes.ErrGRPCLeaseNotFound
     case lease.ErrLeaseExists:
         return rpctypes.ErrGRPCLeaseExist
+    case lease.ErrLeaseTTLTooLarge:
+        return rpctypes.ErrGRPCLeaseTTLTooLarge
 
     case auth.ErrRootUserNotExist:
         return rpctypes.ErrGRPCRootUserNotExist
@@ -101,3 +107,35 @@ func togRPCError(err error) error {
         return grpc.Errorf(codes.Unknown, err.Error())
     }
 }
+
+func isClientCtxErr(ctxErr error, err error) bool {
+    if ctxErr != nil {
+        return true
+    }
+
+    ev, ok := status.FromError(err)
+    if !ok {
+        return false
+    }
+
+    switch ev.Code() {
+    case codes.Canceled, codes.DeadlineExceeded:
+        // client-side context cancel or deadline exceeded
+        // "rpc error: code = Canceled desc = context canceled"
+        // "rpc error: code = DeadlineExceeded desc = context deadline exceeded"
+        return true
+    case codes.Unavailable:
+        msg := ev.Message()
+        // client-side context cancel or deadline exceeded with TLS ("http2.errClientDisconnected")
+        // "rpc error: code = Unavailable desc = client disconnected"
+        if msg == "client disconnected" {
+            return true
+        }
+        // "grpc/transport.ClientTransport.CloseStream" on canceled streams
+        // "rpc error: code = Unavailable desc = stream error: stream ID 21; CANCEL")
+        if strings.HasPrefix(msg, "stream error: ") && strings.HasSuffix(msg, "; CANCEL") {
+            return true
+        }
+    }
+    return false
+}
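isClientCtxErr lets the server log client-triggered stream terminations (context cancel, deadline, disconnect) at debug instead of warning level. A hedged sketch of the same classification with google.golang.org/grpc/status; logLevelFor is a hypothetical helper, not part of etcd.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// logLevelFor picks a log level for a stream error: client-caused
// terminations are expected and only worth a debug line.
func logLevelFor(ctxErr, err error) string {
	if ctxErr != nil {
		return "debug" // the stream's own context was canceled or timed out
	}
	if s, ok := status.FromError(err); ok {
		switch s.Code() {
		case codes.Canceled, codes.DeadlineExceeded:
			return "debug"
		}
	}
	return "warning"
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(logLevelFor(ctx.Err(), status.Error(codes.Canceled, "context canceled")))
	fmt.Println(logLevelFor(nil, status.Error(codes.Internal, "backend failure")))
}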
@@ -141,7 +141,11 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
     // deadlock when calling sws.close().
     go func() {
         if rerr := sws.recvLoop(); rerr != nil {
-            plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
+            if isClientCtxErr(stream.Context().Err(), rerr) {
+                plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
+            } else {
+                plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
+            }
             errc <- rerr
         }
     }()
@@ -338,7 +342,11 @@ func (sws *serverWatchStream) sendLoop() {
 
             mvcc.ReportEventReceived(len(evs))
             if err := sws.gRPCStream.Send(wr); err != nil {
-                plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error())
+                if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+                    plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error())
+                } else {
+                    plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error())
+                }
                 return
             }
 
@@ -355,7 +363,11 @@ func (sws *serverWatchStream) sendLoop() {
             }
 
             if err := sws.gRPCStream.Send(c); err != nil {
-                plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error())
+                if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+                    plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error())
+                } else {
+                    plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error())
+                }
                 return
             }
 
@@ -371,7 +383,11 @@ func (sws *serverWatchStream) sendLoop() {
             for _, v := range pending[wid] {
                 mvcc.ReportEventReceived(len(v.Events))
                 if err := sws.gRPCStream.Send(v); err != nil {
-                    plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error())
+                    if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+                        plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error())
+                    } else {
+                        plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error())
+                    }
                     return
                 }
             }
@@ -89,6 +89,9 @@ func (s *EtcdServer) newApplierV3() applierV3 {
 
 func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
     ar := &applyResult{}
+    defer func(start time.Time) {
+        warnOfExpensiveRequest(start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
+    }(time.Now())
 
     // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
     switch {
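The Apply path now records its start time and defers the expensive-request check, so every return path is measured. A generic sketch of that defer-with-start-time pattern; warnIfSlow and the threshold are illustrative stand-ins for etcd's warnOfExpensiveRequest.

package main

import (
	"fmt"
	"time"
)

// warnIfSlow logs when an operation exceeds the threshold; it runs at
// return time because the caller defers it with the captured start time.
func warnIfSlow(start time.Time, threshold time.Duration, op string) {
	if d := time.Since(start); d > threshold {
		fmt.Printf("%s took too long (%v > %v)\n", op, d, threshold)
	}
}

func apply() {
	defer warnIfSlow(time.Now(), 50*time.Millisecond, "apply") // time.Now() is evaluated immediately
	time.Sleep(80 * time.Millisecond)                          // simulated work
}

func main() { apply() }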
@@ -105,10 +105,12 @@ func (a *applierV2store) Sync(r *pb.Request) Response {
     return Response{}
 }
 
-// applyV2Request interprets r as a call to store.X and returns a Response interpreted
-// from store.Event
+// applyV2Request interprets r as a call to v2store.X
+// and returns a Response interpreted from v2store.Event
 func (s *EtcdServer) applyV2Request(r *pb.Request) Response {
+    defer warnOfExpensiveRequest(time.Now(), r, nil, nil)
     toTTLOptions(r)
 
     switch r.Method {
     case "POST":
         return s.applyV2.Post(r)
@@ -48,8 +48,38 @@ type ServerConfig struct {
     ForceNewCluster bool
     PeerTLSInfo     transport.TLSInfo
 
     TickMs        uint
     ElectionTicks int
 
+    // InitialElectionTickAdvance is true, then local member fast-forwards
+    // election ticks to speed up "initial" leader election trigger. This
+    // benefits the case of larger election ticks. For instance, cross
+    // datacenter deployment may require longer election timeout of 10-second.
+    // If true, local node does not need wait up to 10-second. Instead,
+    // forwards its election ticks to 8-second, and have only 2-second left
+    // before leader election.
+    //
+    // Major assumptions are that:
+    //  - cluster has no active leader thus advancing ticks enables faster
+    //    leader election, or
+    //  - cluster already has an established leader, and rejoining follower
+    //    is likely to receive heartbeats from the leader after tick advance
+    //    and before election timeout.
+    //
+    // However, when network from leader to rejoining follower is congested,
+    // and the follower does not receive leader heartbeat within left election
+    // ticks, disruptive election has to happen thus affecting cluster
+    // availabilities.
+    //
+    // Disabling this would slow down initial bootstrap process for cross
+    // datacenter deployments. Make your own tradeoffs by configuring
+    // --initial-election-tick-advance at the cost of slow initial bootstrap.
+    //
+    // If single-node, it advances ticks regardless.
+    //
+    // See https://github.com/coreos/etcd/issues/9333 for more detail.
+    InitialElectionTickAdvance bool
+
     BootstrapTimeout time.Duration
 
     AutoCompactionRetention int
@@ -5,6 +5,7 @@ go_library(
     srcs = [
         "etcdserver.pb.go",
         "raft_internal.pb.go",
+        "raft_internal_stringer.go",
         "rpc.pb.go",
     ],
     importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb",
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go (new file, 179 lines, generated, vendored)
@@ -0,0 +1,179 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserverpb
+
+import (
+    "fmt"
+    "strings"
+
+    proto "github.com/golang/protobuf/proto"
+)
+
+// InternalRaftStringer implements custom proto Stringer:
+// redact password, replace value fields with value_size fields.
+type InternalRaftStringer struct {
+    Request *InternalRaftRequest
+}
+
+func (as *InternalRaftStringer) String() string {
+    switch {
+    case as.Request.LeaseGrant != nil:
+        return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
+            as.Request.Header.String(),
+            as.Request.LeaseGrant.TTL,
+            as.Request.LeaseGrant.ID,
+        )
+    case as.Request.LeaseRevoke != nil:
+        return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
+            as.Request.Header.String(),
+            as.Request.LeaseRevoke.ID,
+        )
+    case as.Request.Authenticate != nil:
+        return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
+            as.Request.Header.String(),
+            as.Request.Authenticate.Name,
+            as.Request.Authenticate.SimpleToken,
+        )
+    case as.Request.AuthUserAdd != nil:
+        return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
+            as.Request.Header.String(),
+            as.Request.AuthUserAdd.Name,
+        )
+    case as.Request.AuthUserChangePassword != nil:
+        return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
+            as.Request.Header.String(),
+            as.Request.AuthUserChangePassword.Name,
+        )
+    case as.Request.Put != nil:
+        return fmt.Sprintf("header:<%s> put:<%s>",
+            as.Request.Header.String(),
+            newLoggablePutRequest(as.Request.Put).String(),
+        )
+    case as.Request.Txn != nil:
+        return fmt.Sprintf("header:<%s> txn:<%s>",
+            as.Request.Header.String(),
+            NewLoggableTxnRequest(as.Request.Txn).String(),
+        )
+    default:
+        // nothing to redact
+    }
+    return as.Request.String()
+}
+
+// txnRequestStringer implements a custom proto String to replace value bytes fields with value size
+// fields in any nested txn and put operations.
+type txnRequestStringer struct {
+    Request *TxnRequest
+}
+
+func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
+    return &txnRequestStringer{request}
+}
+
+func (as *txnRequestStringer) String() string {
+    var compare []string
+    for _, c := range as.Request.Compare {
+        switch cv := c.TargetUnion.(type) {
+        case *Compare_Value:
+            compare = append(compare, newLoggableValueCompare(c, cv).String())
+        default:
+            // nothing to redact
+            compare = append(compare, c.String())
+        }
+    }
+    var success []string
+    for _, s := range as.Request.Success {
+        success = append(success, newLoggableRequestOp(s).String())
+    }
+    var failure []string
+    for _, f := range as.Request.Failure {
+        failure = append(failure, newLoggableRequestOp(f).String())
+    }
+    return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
+        strings.Join(compare, " "),
+        strings.Join(success, " "),
+        strings.Join(failure, " "),
+    )
+}
+
+// requestOpStringer implements a custom proto String to replace value bytes fields with value
+// size fields in any nested txn and put operations.
+type requestOpStringer struct {
+    Op *RequestOp
+}
+
+func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
+    return &requestOpStringer{op}
+}
+
+func (as *requestOpStringer) String() string {
+    switch op := as.Op.Request.(type) {
+    case *RequestOp_RequestPut:
+        return fmt.Sprintf("request_put:<%s>", newLoggablePutRequest(op.RequestPut).String())
+    default:
+        // nothing to redact
+    }
+    return as.Op.String()
+}
+
+// loggableValueCompare implements a custom proto String for Compare.Value union member types to
+// replace the value bytes field with a value size field.
+// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
+type loggableValueCompare struct {
+    Result    Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
+    Target    Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
+    Key       []byte                `protobuf:"bytes,3,opt,name=key,proto3"`
+    ValueSize int                   `protobuf:"bytes,7,opt,name=value_size,proto3"`
+}
+
+func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare {
+    return &loggableValueCompare{
+        c.Result,
+        c.Target,
+        c.Key,
+        len(cv.Value),
+    }
+}
+
+func (m *loggableValueCompare) Reset()         { *m = loggableValueCompare{} }
+func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) }
+func (*loggableValueCompare) ProtoMessage()    {}
+
+// loggablePutRequest implements a custom proto String to replace value bytes field with a value
+// size field.
+// To preserve proto encoding of the key bytes, a faked out proto type is used here.
+type loggablePutRequest struct {
+    Key         []byte `protobuf:"bytes,1,opt,name=key,proto3"`
+    ValueSize   int    `protobuf:"varint,2,opt,name=value_size,proto3"`
+    Lease       int64  `protobuf:"varint,3,opt,name=lease,proto3"`
+    PrevKv      bool   `protobuf:"varint,4,opt,name=prev_kv,proto3"`
+    IgnoreValue bool   `protobuf:"varint,5,opt,name=ignore_value,proto3"`
+    IgnoreLease bool   `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
+}
+
+func newLoggablePutRequest(request *PutRequest) *loggablePutRequest {
+    return &loggablePutRequest{
+        request.Key,
+        len(request.Value),
+        request.Lease,
+        request.PrevKv,
+        request.IgnoreValue,
+        request.IgnoreLease,
+    }
+}
+
+func (m *loggablePutRequest) Reset()         { *m = loggablePutRequest{} }
+func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) }
+func (*loggablePutRequest) ProtoMessage()    {}
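raft_internal_stringer.go swaps bulky or sensitive request fields (values, passwords) for their sizes before a request is logged. The same idea in a tiny, protobuf-free form: implement fmt.Stringer on a wrapper that prints value_size instead of the value. The types below are illustrative stand-ins, not etcd's.

package main

import "fmt"

// PutRequest stands in for a request type whose Value may be large
// or sensitive and should not be logged verbatim.
type PutRequest struct {
	Key   []byte
	Value []byte
}

// loggablePut wraps a PutRequest and replaces Value with its size when printed.
type loggablePut struct{ r *PutRequest }

func (l loggablePut) String() string {
	return fmt.Sprintf("key:%q value_size:%d", l.r.Key, len(l.r.Value))
}

func main() {
	req := &PutRequest{Key: []byte("foo"), Value: make([]byte, 1<<20)}
	fmt.Println(loggablePut{req}) // key:"foo" value_size:1048576
}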
@@ -15,9 +15,11 @@
 package etcdserver
 
 import (
+    goruntime "runtime"
     "time"
 
     "github.com/coreos/etcd/pkg/runtime"
+    "github.com/coreos/etcd/version"
     "github.com/prometheus/client_golang/prometheus"
 )
 
@@ -28,12 +30,30 @@ var (
         Name:      "has_leader",
         Help:      "Whether or not a leader exists. 1 is existence, 0 is not.",
     })
+    isLeader = prometheus.NewGauge(prometheus.GaugeOpts{
+        Namespace: "etcd",
+        Subsystem: "server",
+        Name:      "is_leader",
+        Help:      "Whether or not this member is a leader. 1 if is, 0 otherwise.",
+    })
     leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{
         Namespace: "etcd",
         Subsystem: "server",
         Name:      "leader_changes_seen_total",
         Help:      "The number of leader changes seen.",
     })
+    heartbeatSendFailures = prometheus.NewCounter(prometheus.CounterOpts{
+        Namespace: "etcd",
+        Subsystem: "server",
+        Name:      "heartbeat_send_failures_total",
+        Help:      "The total number of leader heartbeat send failures (likely overloaded from slow disk).",
+    })
+    slowApplies = prometheus.NewCounter(prometheus.CounterOpts{
+        Namespace: "etcd",
+        Subsystem: "server",
+        Name:      "slow_apply_total",
+        Help:      "The total number of slow apply requests (likely overloaded from slow disk).",
+    })
     proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{
         Namespace: "etcd",
         Subsystem: "server",
@@ -64,16 +84,56 @@ var (
         Name:      "lease_expired_total",
         Help:      "The total number of expired leases.",
     })
+    slowReadIndex = prometheus.NewCounter(prometheus.CounterOpts{
+        Namespace: "etcd",
+        Subsystem: "server",
+        Name:      "slow_read_indexes_total",
+        Help:      "The total number of pending read indexes not in sync with leader's or timed out read index requests.",
+    })
+    quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{
+        Namespace: "etcd",
+        Subsystem: "server",
+        Name:      "quota_backend_bytes",
+        Help:      "Current backend storage quota size in bytes.",
+    })
+    currentVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+        Namespace: "etcd",
+        Subsystem: "server",
+        Name:      "version",
+        Help:      "Which version is running. 1 for 'server_version' label with current version.",
+    },
+        []string{"server_version"})
+    currentGoVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+        Namespace: "etcd",
+        Subsystem: "server",
+        Name:      "go_version",
+        Help:      "Which Go version server is running with. 1 for 'server_go_version' label with current version.",
+    },
+        []string{"server_go_version"})
 )
 
 func init() {
     prometheus.MustRegister(hasLeader)
+    prometheus.MustRegister(isLeader)
     prometheus.MustRegister(leaderChanges)
+    prometheus.MustRegister(heartbeatSendFailures)
+    prometheus.MustRegister(slowApplies)
     prometheus.MustRegister(proposalsCommitted)
     prometheus.MustRegister(proposalsApplied)
     prometheus.MustRegister(proposalsPending)
     prometheus.MustRegister(proposalsFailed)
     prometheus.MustRegister(leaseExpired)
+    prometheus.MustRegister(slowReadIndex)
+    prometheus.MustRegister(quotaBackendBytes)
+    prometheus.MustRegister(currentVersion)
+    prometheus.MustRegister(currentGoVersion)
+
+    currentVersion.With(prometheus.Labels{
+        "server_version": version.Version,
+    }).Set(1)
+    currentGoVersion.With(prometheus.Labels{
+        "server_go_version": goruntime.Version(),
+    }).Set(1)
 }
 
 func monitorFileDescriptor(done <-chan struct{}) {
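The new metrics (is_leader, heartbeat_send_failures_total, version, go_version, ...) follow the usual client_golang pattern: declare package-level collectors, register them in init, and set "info"-style gauges once with the value carried in a label. A minimal standalone sketch, using a made-up "demo" namespace rather than etcd's:

package main

import (
	"fmt"
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	isLeader = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "demo",
		Subsystem: "server",
		Name:      "is_leader",
		Help:      "Whether or not this member is a leader. 1 if is, 0 otherwise.",
	})
	currentGoVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "demo",
		Subsystem: "server",
		Name:      "go_version",
		Help:      "Which Go version the server is running with.",
	}, []string{"server_go_version"})
)

func init() {
	prometheus.MustRegister(isLeader)
	prometheus.MustRegister(currentGoVersion)
	// constant "info" gauge: the label carries the value, the sample is always 1
	currentGoVersion.With(prometheus.Labels{"server_go_version": runtime.Version()}).Set(1)
}

func main() {
	isLeader.Set(1) // flip to 0 when leadership is lost
	fmt.Println("metrics registered; expose them via promhttp.Handler() in a real server")
}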
@@ -14,9 +14,7 @@
 
 package etcdserver
 
-import (
-    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
+import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 
 const (
     // DefaultQuotaBytes is the number of bytes the backend Size may
@@ -58,15 +56,20 @@ const (
 )
 
 func NewBackendQuota(s *EtcdServer) Quota {
+    quotaBackendBytes.Set(float64(s.Cfg.QuotaBackendBytes))
+
     if s.Cfg.QuotaBackendBytes < 0 {
         // disable quotas if negative
         plog.Warningf("disabling backend quota")
         return &passthroughQuota{}
     }
 
     if s.Cfg.QuotaBackendBytes == 0 {
         // use default size if no quota size given
+        quotaBackendBytes.Set(float64(DefaultQuotaBytes))
         return &backendQuota{s, DefaultQuotaBytes}
     }
 
     if s.Cfg.QuotaBackendBytes > MaxQuotaBytes {
         plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes)
     }
@@ -95,6 +95,7 @@ type raftNode struct {
     term uint64
     lead uint64
 
+    tickMu *sync.Mutex
     raftNodeConfig
 
     // a chan to send/receive snapshot
@@ -131,6 +132,7 @@ type raftNodeConfig struct {
 
 func newRaftNode(cfg raftNodeConfig) *raftNode {
     r := &raftNode{
+        tickMu:         new(sync.Mutex),
         raftNodeConfig: cfg,
         // set up contention detectors for raft heartbeat message.
         // expect to send a heartbeat within 2 heartbeat intervals.
@@ -149,6 +151,13 @@ func newRaftNode(cfg raftNodeConfig) *raftNode {
     return r
 }
 
+// raft.Node does not have locks in Raft package
+func (r *raftNode) tick() {
+    r.tickMu.Lock()
+    r.Tick()
+    r.tickMu.Unlock()
+}
+
 // start prepares and starts raftNode in a new goroutine. It is no longer safe
 // to modify the fields after it has been started.
 func (r *raftNode) start(rh *raftReadyHandler) {
@@ -161,7 +170,7 @@ func (r *raftNode) start(rh *raftReadyHandler) {
         for {
             select {
             case <-r.ticker.C:
-                r.Tick()
+                r.tick()
             case rd := <-r.Ready():
                 if rd.SoftState != nil {
                     newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead
@@ -177,6 +186,11 @@ func (r *raftNode) start(rh *raftReadyHandler) {
 
                     atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
                     islead = rd.RaftState == raft.StateLeader
+                    if islead {
+                        isLeader.Set(1)
+                    } else {
+                        isLeader.Set(0)
+                    }
                     rh.updateLeadership(newLeader)
                     r.td.Reset()
                 }
@@ -332,6 +346,7 @@ func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
                 // TODO: limit request rate.
                 plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
                 plog.Warningf("server is likely overloaded")
+                heartbeatSendFailures.Inc()
             }
         }
     }
@@ -368,13 +383,13 @@ func (r *raftNode) resumeSending() {
     p.Resume()
 }
 
-// advanceTicksForElection advances ticks to the node for fast election.
-// This reduces the time to wait for first leader election if bootstrapping the whole
-// cluster, while leaving at least 1 heartbeat for possible existing leader
-// to contact it.
-func advanceTicksForElection(n raft.Node, electionTicks int) {
-    for i := 0; i < electionTicks-1; i++ {
-        n.Tick()
+// advanceTicks advances ticks of Raft node.
+// This can be used for fast-forwarding election
+// ticks in multi data-center deployments, thus
+// speeding up election process.
+func (r *raftNode) advanceTicks(ticks int) {
+    for i := 0; i < ticks; i++ {
+        r.tick()
     }
 }
 
@@ -415,8 +430,8 @@ func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (i
     raftStatusMu.Lock()
     raftStatus = n.Status
     raftStatusMu.Unlock()
-    advanceTicksForElection(n, c.ElectionTick)
-    return
+    return id, n, s, w
 }
 
 func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
@@ -449,7 +464,6 @@ func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membe
     raftStatusMu.Lock()
     raftStatus = n.Status
     raftStatusMu.Unlock()
-    advanceTicksForElection(n, c.ElectionTick)
     return id, cl, n, s, w
 }
 
@@ -498,6 +512,7 @@ func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (type
         Storage:         s,
         MaxSizePerMsg:   maxSizePerMsg,
         MaxInflightMsgs: maxInflightMsgs,
+        CheckQuorum:     true,
     }
     n := raft.RestartNode(c)
     raftStatus = n.Status
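raft.Node.Tick is not safe for concurrent callers, so the server now funnels every tick through a small mutex-guarded wrapper and reuses it when fast-forwarding (advanceTicks). A standalone sketch of that wrapper pattern with hypothetical types (node, safeNode):

package main

import (
	"fmt"
	"sync"
)

// node stands in for a library type whose Tick method is not safe for
// concurrent callers.
type node struct{ ticks int }

func (n *node) Tick() { n.ticks++ }

// safeNode serializes access to the embedded node's Tick.
type safeNode struct {
	mu sync.Mutex
	n  *node
}

func (s *safeNode) tick() {
	s.mu.Lock()
	s.n.Tick()
	s.mu.Unlock()
}

// advanceTicks fast-forwards the node by calling the guarded tick repeatedly.
func (s *safeNode) advanceTicks(ticks int) {
	for i := 0; i < ticks; i++ {
		s.tick()
	}
}

func main() {
	s := &safeNode{n: &node{}}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); s.advanceTicks(10) }()
	}
	wg.Wait()
	fmt.Println("ticks:", s.n.ticks) // always 40 thanks to the mutex
}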
@@ -513,11 +513,56 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
     return srv, nil
 }
 
-// Start prepares and starts server in a new goroutine. It is no longer safe to
-// modify a server's fields after it has been sent to Start.
-// It also starts a goroutine to publish its server information.
+func (s *EtcdServer) adjustTicks() {
+    clusterN := len(s.cluster.Members())
+
+    // single-node fresh start, or single-node recovers from snapshot
+    if clusterN == 1 {
+        ticks := s.Cfg.ElectionTicks - 1
+        plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks)
+        s.r.advanceTicks(ticks)
+        return
+    }
+
+    if !s.Cfg.InitialElectionTickAdvance {
+        plog.Infof("skipping initial election tick advance (election tick %d)", s.Cfg.ElectionTicks)
+        return
+    }
+
+    // retry up to "rafthttp.ConnReadTimeout", which is 5-sec
+    // until peer connection reports; otherwise:
+    // 1. all connections failed, or
+    // 2. no active peers, or
+    // 3. restarted single-node with no snapshot
+    // then, do nothing, because advancing ticks would have no effect
+    waitTime := rafthttp.ConnReadTimeout
+    itv := 50 * time.Millisecond
+    for i := int64(0); i < int64(waitTime/itv); i++ {
+        select {
+        case <-time.After(itv):
+        case <-s.stopping:
+            return
+        }
+
+        peerN := s.r.transport.ActivePeers()
+        if peerN > 1 {
+            // multi-node received peer connection reports
+            // adjust ticks, in case slow leader message receive
+            ticks := s.Cfg.ElectionTicks - 2
+            plog.Infof("%s initialzed peer connection; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN)
+            s.r.advanceTicks(ticks)
+            return
+        }
+    }
+}
+
+// Start performs any initialization of the Server necessary for it to
+// begin serving requests. It must be called before Do or Process.
+// Start must be non-blocking; any long-running server functionality
+// should be implemented in goroutines.
 func (s *EtcdServer) Start() {
     s.start()
+    s.goAttach(func() { s.adjustTicks() })
     s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
     s.goAttach(s.purgeFile)
     s.goAttach(func() { monitorFileDescriptor(s.stopping) })
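adjustTicks waits up to rafthttp.ConnReadTimeout, polling every 50ms for active peer connections before deciding whether to fast-forward election ticks, and bails out early if the server is stopping. The same wait-for-condition loop in isolation; waitFor and its arguments are placeholder names.

package main

import (
	"fmt"
	"time"
)

// waitFor polls cond every interval until it returns true, the deadline
// passes, or stop is closed. It reports whether cond became true.
func waitFor(cond func() bool, interval, timeout time.Duration, stop <-chan struct{}) bool {
	for i := int64(0); i < int64(timeout/interval); i++ {
		select {
		case <-time.After(interval):
		case <-stop:
			return false
		}
		if cond() {
			return true
		}
	}
	return false
}

func main() {
	start := time.Now()
	ok := waitFor(func() bool { return time.Since(start) > 120*time.Millisecond },
		50*time.Millisecond, 500*time.Millisecond, make(chan struct{}))
	fmt.Println("condition met:", ok)
}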
@@ -552,18 +597,21 @@ func (s *EtcdServer) start() {
 }
 
 func (s *EtcdServer) purgeFile() {
-	var serrc, werrc <-chan error
+	var dberrc, serrc, werrc <-chan error
 	if s.Cfg.MaxSnapFiles > 0 {
+		dberrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
 		serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
 	}
 	if s.Cfg.MaxWALFiles > 0 {
 		werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done)
 	}
 	select {
-	case e := <-werrc:
-		plog.Fatalf("failed to purge wal file %v", e)
+	case e := <-dberrc:
+		plog.Fatalf("failed to purge snap db file %v", e)
 	case e := <-serrc:
 		plog.Fatalf("failed to purge snap file %v", e)
+	case e := <-werrc:
+		plog.Fatalf("failed to purge wal file %v", e)
 	case <-s.stopping:
 		return
 	}
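purgeFile now fans three purge loops (snap.db, snap, wal) into a single select; whichever loop reports an error first takes the server down. A small self-contained sketch of that fan-in pattern, with a toy purge function standing in for fileutil.PurgeFile (the simulated errors and timings are made up for illustration):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// purge simulates a background purge loop that reports its first error on the
// returned channel and otherwise runs forever, mirroring the contract assumed
// above for fileutil.PurgeFile.
func purge(name string) <-chan error {
	errc := make(chan error, 1)
	go func() {
		time.Sleep(10 * time.Millisecond)
		errc <- errors.New(name + " purge failed")
	}()
	return errc
}

func main() {
	dberrc := purge("snap.db")
	serrc := purge("snap")
	werrc := purge("wal")

	// First error from any purge loop wins; the real server calls plog.Fatalf here.
	select {
	case e := <-dberrc:
		fmt.Println("snap db:", e)
	case e := <-serrc:
		fmt.Println("snap:", e)
	case e := <-werrc:
		fmt.Println("wal:", e)
	}
}
```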
@@ -743,8 +791,13 @@ func (s *EtcdServer) run() {
 			}
 			lid := lease.ID
 			s.goAttach(func() {
-				s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
-				leaseExpired.Inc()
+				_, lerr := s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
+				if lerr == nil {
+					leaseExpired.Inc()
+				} else {
+					plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error())
+				}
+
 				<-c
 			})
 		}
@@ -765,14 +818,8 @@ func (s *EtcdServer) run() {
 
 func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
 	s.applySnapshot(ep, apply)
-	st := time.Now()
 	s.applyEntries(ep, apply)
-	d := time.Since(st)
-	entriesNum := len(apply.entries)
-	if entriesNum != 0 && d > time.Duration(entriesNum)*warnApplyDuration {
-		plog.Warningf("apply entries took too long [%v for %d entries]", d, len(apply.entries))
-		plog.Warningf("avoid queries with large range/delete range!")
-	}
 	proposalsApplied.Set(float64(ep.appliedi))
 	s.applyWait.Trigger(ep.appliedi)
 	// wait for the raft routine to finish the disk writes before triggering a
@@ -74,10 +74,10 @@ type serverStats struct {
 func (ss *ServerStats) JSON() []byte {
 	ss.Lock()
 	stats := ss.serverStats
-	ss.Unlock()
-	stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String()
 	stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate()
 	stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate()
+	stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String()
+	ss.Unlock()
 	b, err := json.Marshal(stats)
 	// TODO(jonboulle): appropriate error handling?
 	if err != nil {
@@ -15,11 +15,16 @@
 package etcdserver
 
 import (
+	"fmt"
+	"reflect"
+	"strings"
 	"time"
 
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/etcdserver/membership"
 	"github.com/coreos/etcd/pkg/types"
 	"github.com/coreos/etcd/rafthttp"
+	"github.com/golang/protobuf/proto"
 )
 
 // isConnectedToQuorumSince checks whether the local member is connected to the
@@ -95,3 +100,56 @@ func (nc *notifier) notify(err error) {
 	nc.err = err
 	close(nc.c)
 }
+
+func warnOfExpensiveRequest(now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
+	var resp string
+	if !isNil(respMsg) {
+		resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
+	}
+	warnOfExpensiveGenericRequest(now, reqStringer, "", resp, err)
+}
+
+func warnOfExpensiveReadOnlyTxnRequest(now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) {
+	reqStringer := pb.NewLoggableTxnRequest(r)
+	var resp string
+	if !isNil(txnResponse) {
+		var resps []string
+		for _, r := range txnResponse.Responses {
+			switch op := r.Response.(type) {
+			case *pb.ResponseOp_ResponseRange:
+				resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.ResponseRange.Kvs)))
+			default:
+				// only range responses should be in a read only txn request
+			}
+		}
+		resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), proto.Size(txnResponse))
+	}
+	warnOfExpensiveGenericRequest(now, reqStringer, "read-only range ", resp, err)
+}
+
+func warnOfExpensiveReadOnlyRangeRequest(now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) {
+	var resp string
+	if !isNil(rangeResponse) {
+		resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), proto.Size(rangeResponse))
+	}
+	warnOfExpensiveGenericRequest(now, reqStringer, "read-only range ", resp, err)
+}
+
+func warnOfExpensiveGenericRequest(now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) {
+	// TODO: add metrics
+	d := time.Since(now)
+	if d > warnApplyDuration {
+		var result string
+		if err != nil {
+			result = fmt.Sprintf("error:%v", err)
+		} else {
+			result = resp
+		}
+		plog.Warningf("%srequest %q with result %q took too long (%v) to execute", prefix, reqStringer.String(), result, d)
+		slowApplies.Inc()
+	}
+}
+
+func isNil(msg proto.Message) bool {
+	return msg == nil || reflect.ValueOf(msg).IsNil()
+}
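The helpers added above implement a single pattern: capture time.Now() when the request starts, and let a deferred call compare the elapsed time against warnApplyDuration once the response (or error) is known. A stripped-down sketch of that defer pattern, with a hypothetical warnIfExpensive helper and threshold standing in for the etcd versions:

```go
package main

import (
	"fmt"
	"time"
)

// warnApplyDuration mirrors etcd's slow-request threshold (100ms in the 3.2
// series); the exact value here is an assumption for the sketch.
const warnApplyDuration = 100 * time.Millisecond

// warnIfExpensive is a simplified stand-in for warnOfExpensiveGenericRequest:
// it only logs when the elapsed time crosses the threshold.
func warnIfExpensive(start time.Time, req, result string) {
	if d := time.Since(start); d > warnApplyDuration {
		fmt.Printf("request %q with result %q took too long (%v) to execute\n", req, result, d)
	}
}

func slowRange() (result string, err error) {
	// The deferred closure is handed time.Now() at call time, so it measures
	// the whole request, exactly like the deferred warn calls in Range/Txn.
	defer func(start time.Time) { warnIfExpensive(start, `range key="foo"`, result) }(time.Now())

	time.Sleep(150 * time.Millisecond) // simulate an expensive read
	return "range_response_count:1", nil
}

func main() { slowRange() }
```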
@@ -19,8 +19,6 @@ import (
 	"encoding/binary"
 	"time"
 
-	"github.com/gogo/protobuf/proto"
-
 	"github.com/coreos/etcd/auth"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/etcdserver/membership"
@@ -28,7 +26,7 @@ import (
 	"github.com/coreos/etcd/lease/leasehttp"
 	"github.com/coreos/etcd/mvcc"
 	"github.com/coreos/etcd/raft"
-
+	"github.com/gogo/protobuf/proto"
 	"golang.org/x/net/context"
 )
@@ -82,20 +80,26 @@ type Authenticator interface {
 }
 
 func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+	var resp *pb.RangeResponse
+	var err error
+	defer func(start time.Time) {
+		warnOfExpensiveReadOnlyRangeRequest(start, r, resp, err)
+	}(time.Now())
+
 	if !r.Serializable {
-		err := s.linearizableReadNotify(ctx)
+		err = s.linearizableReadNotify(ctx)
 		if err != nil {
 			return nil, err
 		}
 	}
-	var resp *pb.RangeResponse
-	var err error
 	chk := func(ai *auth.AuthInfo) error {
 		return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
 	}
 
 	get := func() { resp, err = s.applyV3Base.Range(nil, r) }
 	if serr := s.doSerialize(ctx, chk, get); serr != nil {
-		return nil, serr
+		err = serr
+		return nil, err
 	}
 	return resp, err
 }
@@ -129,12 +133,18 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse
 		chk := func(ai *auth.AuthInfo) error {
 			return checkTxnAuth(s.authStore, ai, r)
 		}
+
+		defer func(start time.Time) {
+			warnOfExpensiveReadOnlyTxnRequest(start, r, resp, err)
+		}(time.Now())
+
 		get := func() { resp, err = s.applyV3Base.Txn(r) }
 		if serr := s.doSerialize(ctx, chk, get); serr != nil {
 			return nil, serr
 		}
 		return resp, err
 	}
 
 	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r})
 	if err != nil {
 		return nil, err
@@ -587,8 +597,9 @@ func (s *EtcdServer) linearizableReadLoop() {
 	var rs raft.ReadState
 
 	for {
-		ctx := make([]byte, 8)
-		binary.BigEndian.PutUint64(ctx, s.reqIDGen.Next())
+		ctxToSend := make([]byte, 8)
+		id1 := s.reqIDGen.Next()
+		binary.BigEndian.PutUint64(ctxToSend, id1)
 
 		select {
 		case <-s.readwaitc:
@@ -604,7 +615,7 @@ func (s *EtcdServer) linearizableReadLoop() {
 		s.readMu.Unlock()
 
 		cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
-		if err := s.r.ReadIndex(cctx, ctx); err != nil {
+		if err := s.r.ReadIndex(cctx, ctxToSend); err != nil {
 			cancel()
 			if err == raft.ErrStopped {
 				return
@@ -622,16 +633,24 @@ func (s *EtcdServer) linearizableReadLoop() {
 		for !timeout && !done {
 			select {
 			case rs = <-s.r.readStateC:
-				done = bytes.Equal(rs.RequestCtx, ctx)
+				done = bytes.Equal(rs.RequestCtx, ctxToSend)
 				if !done {
 					// a previous request might time out. now we should ignore the response of it and
 					// continue waiting for the response of the current requests.
-					plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx)
+					id2 := uint64(0)
+					if len(rs.RequestCtx) == 8 {
+						id2 = binary.BigEndian.Uint64(rs.RequestCtx)
+					}
+					plog.Warningf("ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader (request ID want %d, got %d)", id1, id2)
+					slowReadIndex.Inc()
 				}
 
 			case <-time.After(s.Cfg.ReqTimeout()):
 				plog.Warningf("timed out waiting for read index response")
 				nr.notify(ErrTimeout)
 				timeout = true
+				slowReadIndex.Inc()
 
 			case <-s.stopping:
 				return
 			}
@@ -681,6 +700,5 @@ func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error
 			return authInfo, nil
 		}
 	}
-
 	return s.AuthStore().AuthInfoFromCtx(ctx)
 }
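The rename to ctxToSend makes explicit that the value handed to ReadIndex is nothing more than an 8-byte big-endian request ID, which is what lets the loop match responses by byte equality and decode the ID for the out-of-date warning. A tiny self-contained sketch of that encode/match/decode round trip (the ID value and the fake response bytes are made up):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// The read-index request context is an 8-byte big-endian request ID.
	id1 := uint64(12345)
	ctxToSend := make([]byte, 8)
	binary.BigEndian.PutUint64(ctxToSend, id1)

	// Pretend this arrived on readStateC; 12345 == 0x3039 == {..., 48, 57}.
	got := []byte{0, 0, 0, 0, 0, 0, 48, 57}

	fmt.Println("matches:", bytes.Equal(got, ctxToSend))            // true
	fmt.Println("decoded request ID:", binary.BigEndian.Uint64(got)) // 12345
}
```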
@@ -26,6 +26,7 @@ go_library(
         "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library",
         "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/testutil:go_default_library",
+        "//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
         "//vendor/github.com/coreos/etcd/rafthttp:go_default_library",
@@ -45,6 +45,7 @@ import (
 	"github.com/coreos/etcd/etcdserver/api/v3rpc"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/pkg/testutil"
+	"github.com/coreos/etcd/pkg/tlsutil"
 	"github.com/coreos/etcd/pkg/transport"
 	"github.com/coreos/etcd/pkg/types"
 	"github.com/coreos/etcd/rafthttp"
@@ -562,6 +563,7 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member {
 		m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo
 	}
 	m.ElectionTicks = electionTicks
+	m.InitialElectionTickAdvance = true
 	m.TickMs = uint(tickDuration / time.Millisecond)
 	m.QuotaBackendBytes = mcfg.quotaBackendBytes
 	m.MaxRequestBytes = mcfg.maxRequestBytes
@@ -695,10 +697,16 @@ func (m *member) Launch() error {
 	if m.PeerTLSInfo == nil {
 		hs.Start()
 	} else {
-		hs.TLS, err = m.PeerTLSInfo.ServerConfig()
+		info := m.PeerTLSInfo
+		hs.TLS, err = info.ServerConfig()
 		if err != nil {
 			return err
 		}
+		tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, nil)
+		if err != nil {
+			return err
+		}
+		hs.TLS.Certificates = []tls.Certificate{*tlsCert}
 		hs.StartTLS()
 	}
 	m.hss = append(m.hss, hs)
@@ -711,10 +719,45 @@ func (m *member) Launch() error {
 	if m.ClientTLSInfo == nil {
 		hs.Start()
 	} else {
-		hs.TLS, err = m.ClientTLSInfo.ServerConfig()
+		info := m.ClientTLSInfo
+		hs.TLS, err = info.ServerConfig()
 		if err != nil {
 			return err
 		}
+
+		// baseConfig is called on initial TLS handshake start.
+		//
+		// Previously,
+		// 1. Server has non-empty (*tls.Config).Certificates on client hello
+		// 2. Server calls (*tls.Config).GetCertificate iff:
+		//    - Server's (*tls.Config).Certificates is not empty, or
+		//    - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
+		//
+		// When (*tls.Config).Certificates is always populated on initial handshake,
+		// client is expected to provide a valid matching SNI to pass the TLS
+		// verification, thus trigger server (*tls.Config).GetCertificate to reload
+		// TLS assets. However, a cert whose SAN field does not include domain names
+		// but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus
+		// it was never able to trigger TLS reload on initial handshake; first
+		// ceritifcate object was being used, never being updated.
+		//
+		// Now, (*tls.Config).Certificates is created empty on initial TLS client
+		// handshake, in order to trigger (*tls.Config).GetCertificate and populate
+		// rest of the certificates on every new TLS connection, even when client
+		// SNI is empty (e.g. cert only includes IPs).
+		//
+		// This introduces another problem with "httptest.Server":
+		// when server initial certificates are empty, certificates
+		// are overwritten by Go's internal test certs, which have
+		// different SAN fields (e.g. example.com). To work around,
+		// re-overwrite (*tls.Config).Certificates before starting
+		// test server.
+		tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, nil)
+		if err != nil {
+			return err
+		}
+		hs.TLS.Certificates = []tls.Certificate{*tlsCert}
+
 		hs.StartTLS()
 	}
 	m.hss = append(m.hss, hs)
@@ -35,15 +35,19 @@ const (
 	forever = monotime.Time(math.MaxInt64)
 )
 
+// MaxLeaseTTL is the maximum lease TTL value
+const MaxLeaseTTL = 9000000000
+
 var (
 	leaseBucketName = []byte("lease")
 
 	// maximum number of leases to revoke per second; configurable for tests
 	leaseRevokeRate = 1000
 
 	ErrNotPrimary       = errors.New("not a primary lessor")
 	ErrLeaseNotFound    = errors.New("lease not found")
 	ErrLeaseExists      = errors.New("lease already exists")
+	ErrLeaseTTLTooLarge = errors.New("too large lease TTL")
 )
 
 // TxnDelete is a TxnWrite that only permits deletes. Defined here
@@ -199,6 +203,10 @@ func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
 		return nil, ErrLeaseNotFound
 	}
 
+	if ttl > MaxLeaseTTL {
+		return nil, ErrLeaseTTLTooLarge
+	}
+
 	// TODO: when lessor is under high load, it should give out lease
 	// with longer TTL to reduce renew load.
 	l := &Lease{
@@ -54,6 +54,10 @@ type Backend interface {
 	Hash(ignores map[IgnoreKey]struct{}) (uint32, error)
 	// Size returns the current size of the backend.
 	Size() int64
+	// SizeInUse returns the current size of the backend logically in use.
+	// Since the backend can manage free space in a non-byte unit such as
+	// number of pages, the returned value can be not exactly accurate in bytes.
+	SizeInUse() int64
 	Defrag() error
 	ForceCommit()
 	Close() error
@@ -74,6 +78,10 @@ type backend struct {
 
 	// size is the number of bytes in the backend
 	size int64
+
+	// sizeInUse is the number of bytes actually used in the backend
+	sizeInUse int64
+
 	// commits counts number of commits since start
 	commits int64
 
@@ -244,6 +252,10 @@ func (b *backend) Size() int64 {
 	return atomic.LoadInt64(&b.size)
 }
 
+func (b *backend) SizeInUse() int64 {
+	return atomic.LoadInt64(&b.sizeInUse)
+}
+
 func (b *backend) run() {
 	defer close(b.donec)
 	t := time.NewTimer(b.batchInterval)
@@ -272,18 +284,12 @@ func (b *backend) Commits() int64 {
 }
 
 func (b *backend) Defrag() error {
-	err := b.defrag()
-	if err != nil {
-		return err
-	}
-
-	// commit to update metadata like db.size
-	b.batchTx.Commit()
-
-	return nil
+	return b.defrag()
 }
 
 func (b *backend) defrag() error {
+	now := time.Now()
+
 	// TODO: make this non-blocking?
 	// lock batchTx to ensure nobody is using previous tx, and then
 	// close previous ongoing tx.
@@ -341,7 +347,14 @@ func (b *backend) defrag() error {
 
 	b.readTx.buf.reset()
 	b.readTx.tx = b.unsafeBegin(false)
-	atomic.StoreInt64(&b.size, b.readTx.tx.Size())
+
+	size := b.readTx.tx.Size()
+	db := b.db
+	atomic.StoreInt64(&b.size, size)
+	atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
+
+	took := time.Since(now)
+	defragDurations.Observe(took.Seconds())
+
 	return nil
 }
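SizeInUse is derived from boltdb's own accounting: physically allocated bytes (tx.Size()) minus the bytes held by free pages (FreePageN * PageSize). A runnable sketch of that arithmetic against a throwaway bolt database; the import path is an assumption (vendoring differs between boltdb/bolt and coreos/bbolt, both expose the same calls) and the bucket name is arbitrary:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	bolt "github.com/boltdb/bolt" // assumed import path; coreos/bbolt has the same API
)

func main() {
	dir, _ := ioutil.TempDir("", "backend-size")
	defer os.RemoveAll(dir)

	db, err := bolt.Open(filepath.Join(dir, "db"), 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	_ = db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("key"))
		return err
	})

	_ = db.View(func(tx *bolt.Tx) error {
		size := tx.Size() // physically allocated bytes: what b.size tracks
		free := int64(db.Stats().FreePageN) * int64(db.Info().PageSize)
		fmt.Println("size:", size, "in use:", size-free) // what b.sizeInUse tracks
		return nil
	})
}
```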
@@ -370,10 +383,10 @@ func defragdb(odb, tmpdb *bolt.DB, limit int) error {
 		}
 
 		tmpb, berr := tmptx.CreateBucketIfNotExists(next)
-		tmpb.FillPercent = 0.9 // for seq write in for each
 		if berr != nil {
 			return berr
 		}
+		tmpb.FillPercent = 0.9 // for seq write in for each
 
 		b.ForEach(func(k, v []byte) error {
 			count++
@@ -402,7 +415,12 @@ func (b *backend) begin(write bool) *bolt.Tx {
 	b.mu.RLock()
 	tx := b.unsafeBegin(write)
 	b.mu.RUnlock()
-	atomic.StoreInt64(&b.size, tx.Size())
+
+	size := tx.Size()
+	db := tx.DB()
+	atomic.StoreInt64(&b.size, size)
+	atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
+
 	return tx
 }
@@ -141,15 +141,15 @@ func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error)
 // Commit commits a previous tx and begins a new writable one.
 func (t *batchTx) Commit() {
 	t.Lock()
-	defer t.Unlock()
 	t.commit(false)
+	t.Unlock()
 }
 
 // CommitAndStop commits the previous tx and does not create a new one.
 func (t *batchTx) CommitAndStop() {
 	t.Lock()
-	defer t.Unlock()
 	t.commit(true)
+	t.Unlock()
 }
 
 func (t *batchTx) Unlock() {
@@ -163,21 +163,15 @@ func (t *batchTx) commit(stop bool) {
 	// commit the last tx
 	if t.tx != nil {
 		if t.pending == 0 && !stop {
-			t.backend.mu.RLock()
-			defer t.backend.mu.RUnlock()
-
-			// t.tx.DB()==nil if 'CommitAndStop' calls 'batchTx.commit(true)',
-			// which initializes *bolt.Tx.db and *bolt.Tx.meta as nil; panics t.tx.Size().
-			// Server must make sure 'batchTx.commit(false)' does not follow
-			// 'batchTx.commit(true)' (e.g. stopping backend, and inflight Hash call).
-			atomic.StoreInt64(&t.backend.size, t.tx.Size())
 			return
 		}
 
 		start := time.Now()
 
 		// gofail: var beforeCommit struct{}
 		err := t.tx.Commit()
 		// gofail: var afterCommit struct{}
 
 		commitDurations.Observe(time.Since(start).Seconds())
 		atomic.AddInt64(&t.backend.commits, 1)
 
@@ -222,21 +216,21 @@ func (t *batchTxBuffered) Unlock() {
 
 func (t *batchTxBuffered) Commit() {
 	t.Lock()
-	defer t.Unlock()
 	t.commit(false)
+	t.Unlock()
 }
 
 func (t *batchTxBuffered) CommitAndStop() {
 	t.Lock()
-	defer t.Unlock()
 	t.commit(true)
+	t.Unlock()
 }
 
 func (t *batchTxBuffered) commit(stop bool) {
 	// all read txs must be closed to acquire boltdb commit rwlock
 	t.backend.readTx.mu.Lock()
-	defer t.backend.readTx.mu.Unlock()
 	t.unsafeCommit(stop)
+	t.backend.readTx.mu.Unlock()
 }
 
 func (t *batchTxBuffered) unsafeCommit(stop bool) {
@@ -22,7 +22,22 @@ var (
 		Subsystem: "disk",
 		Name:      "backend_commit_duration_seconds",
 		Help:      "The latency distributions of commit called by backend.",
-		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
+
+		// lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+		// highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+		Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+	})
+
+	defragDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "disk",
+		Name:      "backend_defrag_duration_seconds",
+		Help:      "The latency distribution of backend defragmentation.",
+
+		// 100 MB usually takes 1 sec, so start with 10 MB of 100 ms
+		// lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+		// highest bucket start of 0.1 sec * 2^12 == 409.6 sec
+		Buckets: prometheus.ExponentialBuckets(.1, 2, 13),
 	})
 
 	snapshotDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
@@ -30,12 +45,15 @@ var (
 		Subsystem: "disk",
 		Name:      "backend_snapshot_duration_seconds",
 		Help:      "The latency distribution of backend snapshots.",
-		// 10 ms -> 655 seconds
+
+		// lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+		// highest bucket start of 0.01 sec * 2^16 == 655.36 sec
 		Buckets: prometheus.ExponentialBuckets(.01, 2, 17),
 	})
 )
 
 func init() {
 	prometheus.MustRegister(commitDurations)
+	prometheus.MustRegister(defragDurations)
 	prometheus.MustRegister(snapshotDurations)
 }
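The bucket comments above simply spell out what prometheus.ExponentialBuckets produces: the requested number of buckets starting at the given upper bound and doubling each time, so 0.001*2^13 = 8.192s for commits, 0.1*2^12 = 409.6s for defrag, and 0.01*2^16 = 655.36s for snapshots. A one-off sketch that prints those layouts to confirm the arithmetic (it assumes only the standard Prometheus Go client):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Reproduce the bucket layouts documented in the histogram comments above.
	fmt.Println(prometheus.ExponentialBuckets(0.001, 2, 14)) // commit:   0.001 ... 8.192
	fmt.Println(prometheus.ExponentialBuckets(.1, 2, 13))    // defrag:   0.1   ... 409.6
	fmt.Println(prometheus.ExponentialBuckets(.01, 2, 17))   // snapshot: 0.01  ... 655.36
}
```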
@@ -150,8 +150,12 @@ func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
 }
 
 func (s *store) Hash() (hash uint32, revision int64, err error) {
+	start := time.Now()
+
 	s.b.ForceCommit()
 	h, err := s.b.Hash(DefaultIgnores)
+
+	hashDurations.Observe(time.Since(start).Seconds())
 	return h, s.currentRev, err
 }
 
@@ -245,10 +249,14 @@ func (s *store) Restore(b backend.Backend) error {
 }
 
 func (s *store) restore() error {
-	reportDbTotalSizeInBytesMu.Lock()
 	b := s.b
+
+	reportDbTotalSizeInBytesMu.Lock()
 	reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
 	reportDbTotalSizeInBytesMu.Unlock()
+	reportDbTotalSizeInUseInBytesMu.Lock()
+	reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) }
+	reportDbTotalSizeInUseInBytesMu.Unlock()
 
 	min, max := newRevBytes(), newRevBytes()
 	revToBytes(revision{main: 1}, min)
@@ -131,11 +131,23 @@ var (
 		Buckets: prometheus.ExponentialBuckets(100, 2, 14),
 	})
 
-	dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+	dbTotalSizeDebugging = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
 		Namespace: "etcd_debugging",
 		Subsystem: "mvcc",
 		Name:      "db_total_size_in_bytes",
-		Help:      "Total size of the underlying database in bytes.",
+		Help:      "Total size of the underlying database physically allocated in bytes. Use etcd_mvcc_db_total_size_in_bytes",
+	},
+		func() float64 {
+			reportDbTotalSizeInBytesMu.RLock()
+			defer reportDbTotalSizeInBytesMu.RUnlock()
+			return reportDbTotalSizeInBytes()
+		},
+	)
+	dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "mvcc",
+		Name:      "db_total_size_in_bytes",
+		Help:      "Total size of the underlying database physically allocated in bytes.",
 	},
 		func() float64 {
 			reportDbTotalSizeInBytesMu.RLock()
@@ -145,7 +157,35 @@ var (
 	)
 	// overridden by mvcc initialization
 	reportDbTotalSizeInBytesMu sync.RWMutex
-	reportDbTotalSizeInBytes func() float64 = func() float64 { return 0 }
+	reportDbTotalSizeInBytes   = func() float64 { return 0 }
+
+	dbTotalSizeInUse = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "mvcc",
+		Name:      "db_total_size_in_use_in_bytes",
+		Help:      "Total size of the underlying database logically in use in bytes.",
+	},
+		func() float64 {
+			reportDbTotalSizeInUseInBytesMu.RLock()
+			defer reportDbTotalSizeInUseInBytesMu.RUnlock()
+			return reportDbTotalSizeInUseInBytes()
+		},
+	)
+	// overridden by mvcc initialization
+	reportDbTotalSizeInUseInBytesMu sync.RWMutex
+	reportDbTotalSizeInUseInBytes func() float64 = func() float64 { return 0 }
+
+	hashDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "mvcc",
+		Name:      "hash_duration_seconds",
+		Help:      "The latency distribution of storage hash operation.",
+
+		// 100 MB usually takes 100 ms, so start with 10 MB of 10 ms
+		// lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+		// highest bucket start of 0.01 sec * 2^14 == 163.84 sec
+		Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
+	})
 )
 
 func init() {
@@ -162,7 +202,10 @@ func init() {
 	prometheus.MustRegister(indexCompactionPauseDurations)
 	prometheus.MustRegister(dbCompactionPauseDurations)
 	prometheus.MustRegister(dbCompactionTotalDurations)
+	prometheus.MustRegister(dbTotalSizeDebugging)
 	prometheus.MustRegister(dbTotalSize)
+	prometheus.MustRegister(dbTotalSizeInUse)
+	prometheus.MustRegister(hashDurations)
 }
 
 // ReportEventReceived reports that an event is received.
@@ -188,7 +188,8 @@ func (s *watchableStore) Restore(b backend.Backend) error {
 	}
 
 	for wa := range s.synced.watchers {
-		s.unsynced.watchers.add(wa)
+		wa.restore = true
+		s.unsynced.add(wa)
 	}
 	s.synced = newWatcherGroup()
 	return nil
@@ -479,6 +480,14 @@ type watcher struct {
 	// compacted is set when the watcher is removed because of compaction
 	compacted bool
 
+	// restore is true when the watcher is being restored from leader snapshot
+	// which means that this watcher has just been moved from "synced" to "unsynced"
+	// watcher group, possibly with a future revision when it was first added
+	// to the synced watcher
+	// "unsynced" watcher revision must always be <= current revision,
+	// except when the watcher were to be moved from "synced" watcher group
+	restore bool
+
 	// minRev is the minimum revision update the watcher will accept
 	minRev int64
 	id     WatchID
@@ -15,6 +15,7 @@
 package mvcc
 
 import (
+	"fmt"
 	"math"
 
 	"github.com/coreos/etcd/mvcc/mvccpb"
@@ -238,7 +239,15 @@ func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 {
 	minRev := int64(math.MaxInt64)
 	for w := range wg.watchers {
 		if w.minRev > curRev {
-			panic("watcher current revision should not exceed current revision")
+			// after network partition, possibly choosing future revision watcher from restore operation
+			// with watch key "proxy-namespace__lostleader" and revision "math.MaxInt64 - 2"
+			// do not panic when such watcher had been moved from "synced" watcher during restore operation
+			if !w.restore {
+				panic(fmt.Errorf("watcher minimum revision %d should not exceed current revision %d", w.minRev, curRev))
+			}
+
+			// mark 'restore' done, since it's chosen
+			w.restore = false
 		}
 		if w.minRev < compactRev {
 			select {
@@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
 go_library(
     name = "go_default_library",
     srcs = [
+        "cipher_suites.go",
        "doc.go",
        "tlsutil.go",
     ],
@@ -0,0 +1,51 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import "crypto/tls"
+
+// cipher suites implemented by Go
+// https://github.com/golang/go/blob/dev.boringcrypto.go1.10/src/crypto/tls/cipher_suites.go
+var cipherSuites = map[string]uint16{
+	"TLS_RSA_WITH_RC4_128_SHA":                tls.TLS_RSA_WITH_RC4_128_SHA,
+	"TLS_RSA_WITH_3DES_EDE_CBC_SHA":           tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+	"TLS_RSA_WITH_AES_128_CBC_SHA":            tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+	"TLS_RSA_WITH_AES_256_CBC_SHA":            tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+	"TLS_RSA_WITH_AES_128_CBC_SHA256":         tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
+	"TLS_RSA_WITH_AES_128_GCM_SHA256":         tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
+	"TLS_RSA_WITH_AES_256_GCM_SHA384":         tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
+	"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA":        tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+	"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA":    tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+	"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA":    tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+	"TLS_ECDHE_RSA_WITH_RC4_128_SHA":          tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+	"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA":     tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+	"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA":      tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+	"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA":      tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+	"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+	"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256":   tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256":   tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+	"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+	"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384":   tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+	"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+	"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305":    tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+	"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305":  tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+}
+
+// GetCipherSuite returns the corresponding cipher suite,
+// and boolean value if it is supported.
+func GetCipherSuite(s string) (uint16, bool) {
+	v, ok := cipherSuites[s]
+	return v, ok
+}
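GetCipherSuite gives callers a way to map operator-facing cipher suite names onto the crypto/tls constants that the TLSInfo.CipherSuites field (added further down) expects. A short usage sketch; the import path matches the vendored package referenced in the BUILD rule above, and the chosen suite names are only examples:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/tlsutil"
)

func main() {
	// Translate operator-supplied cipher suite names into the uint16 IDs
	// that (transport.TLSInfo).CipherSuites expects; unknown names are rejected.
	names := []string{
		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
		"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
	}
	var ids []uint16
	for _, n := range names {
		id, ok := tlsutil.GetCipherSuite(n)
		if !ok {
			fmt.Println("unsupported cipher suite:", n)
			return
		}
		ids = append(ids, id)
	}
	fmt.Printf("%#v\n", ids)
}
```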
@@ -69,6 +69,11 @@ type TLSInfo struct {
 	// connection will be closed immediately afterwards.
 	HandshakeFailure func(*tls.Conn, error)
 
+	// CipherSuites is a list of supported cipher suites.
+	// If empty, Go auto-populates it by default.
+	// Note that cipher suites are prioritized in the given order.
+	CipherSuites []uint16
+
 	selfCert bool
 
 	// parseFunc exists to simplify testing. Typically, parseFunc
@@ -162,16 +167,20 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) {
 		return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile)
 	}
 
-	tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
+	_, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
 	if err != nil {
 		return nil, err
 	}
 
 	cfg := &tls.Config{
-		Certificates: []tls.Certificate{*tlsCert},
-		MinVersion:   tls.VersionTLS12,
-		ServerName:   info.ServerName,
+		MinVersion: tls.VersionTLS12,
+		ServerName: info.ServerName,
 	}
+
+	if len(info.CipherSuites) > 0 {
+		cfg.CipherSuites = info.CipherSuites
+	}
+
 	// this only reloads certs when there's a client request
 	// TODO: support server-side refresh (e.g. inotify, SIGHUP), caching
 	cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
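The baseConfig change above deliberately leaves (*tls.Config).Certificates empty so that every handshake goes through GetCertificate and picks up re-issued certificates from disk, even when the client sends no SNI. A minimal sketch of that reload-on-handshake idea using only crypto/tls; the file paths are placeholders and the loader here is plain tls.LoadX509KeyPair rather than etcd's tlsutil.NewCert:

```go
package main

import (
	"crypto/tls"
	"log"
)

// newReloadingConfig returns a server TLS config with no static Certificates,
// so every handshake calls GetCertificate and re-reads the key material from
// disk; certFile and keyFile are placeholder paths for this sketch.
func newReloadingConfig(certFile, keyFile string) *tls.Config {
	return &tls.Config{
		MinVersion: tls.VersionTLS12,
		GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
			cert, err := tls.LoadX509KeyPair(certFile, keyFile)
			if err != nil {
				return nil, err
			}
			return &cert, nil
		},
	}
}

func main() {
	cfg := newReloadingConfig("server.crt", "server.key")
	// Zero static certificates means the handshake must go through GetCertificate.
	log.Println("statically configured certificates:", len(cfg.Certificates))
}
```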
@@ -225,6 +225,7 @@ func (p *peer) send(m raftpb.Message) {
 			plog.MergeWarningf("dropped internal raft message to %s since %s's sending buffer is full (bad/overloaded network)", p.id, name)
 		}
 		plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name)
+		sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
 	}
 }
 
@@ -53,6 +53,7 @@ func (g *remote) send(m raftpb.Message) {
 			plog.MergeWarningf("dropped internal raft message to %s since sending buffer is full (bad/overloaded network)", g.id)
 		}
 		plog.Debugf("dropped %s to %s since sending buffer is full", m.Type, g.id)
+		sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
 	}
 }
@@ -83,6 +83,8 @@ type Transporter interface {
 	// If the connection is active since peer was added, it returns the adding time.
 	// If the connection is currently inactive, it returns zero time.
 	ActiveSince(id types.ID) time.Time
+	// ActivePeers returns the number of active peers.
+	ActivePeers() int
 	// Stop closes the connections and stops the transporter.
 	Stop()
 }
@@ -362,6 +364,20 @@ func (t *Transport) Resume() {
 	}
 }
 
+// ActivePeers returns a channel that closes when an initial
+// peer connection has been established. Use this to wait until the
+// first peer connection becomes active.
+func (t *Transport) ActivePeers() (cnt int) {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	for _, p := range t.peers {
+		if !p.activeSince().IsZero() {
+			cnt++
+		}
+	}
+	return cnt
+}
+
 type nopTransporter struct{}
 
 func NewNopTransporter() Transporter {
@@ -378,6 +394,7 @@ func (s *nopTransporter) RemovePeer(id types.ID) {}
 func (s *nopTransporter) RemoveAllPeers()                     {}
 func (s *nopTransporter) UpdatePeer(id types.ID, us []string) {}
 func (s *nopTransporter) ActiveSince(id types.ID) time.Time   { return time.Time{} }
+func (s *nopTransporter) ActivePeers() int                    { return 0 }
 func (s *nopTransporter) Stop()                               {}
 func (s *nopTransporter) Pause()                              {}
 func (s *nopTransporter) Resume()                             {}
@@ -26,7 +26,7 @@ import (
 var (
 	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
 	MinClusterVersion = "3.0.0"
-	Version           = "3.2.13"
+	Version           = "3.2.24"
 	APIVersion        = "unknown"
 
 	// Git SHA Value will be set during build
@@ -1 +1 @@
-../../staging/src/k8s.io/cli-runtime/
+../../staging/src/k8s.io/cli-runtime
@@ -1 +1 @@
-../../staging/src/k8s.io/kube-proxy/
+../../staging/src/k8s.io/kube-proxy
@@ -1 +1 @@
-../../staging/src/k8s.io/kube-scheduler/
+../../staging/src/k8s.io/kube-scheduler