mirror of https://github.com/k3s-io/k3s

Update etcd to v3.5.0

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>

branch: pull/3898/head
parent: e204d863a5
commit: 872855015c

go.mod: 12 changed lines
@@ -21,7 +21,7 @@ replace (
 	github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible
 	github.com/docker/docker => github.com/docker/docker v20.10.2+incompatible
 	github.com/docker/libnetwork => github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34
-	github.com/golang/protobuf => github.com/k3s-io/protobuf v1.4.3-k3s1
+	github.com/golang/protobuf => github.com/golang/protobuf v1.5.2
 	github.com/juju/errors => github.com/k3s-io/nocode v0.0.0-20200630202308-cb097102c09f
 	github.com/kubernetes-sigs/cri-tools => github.com/k3s-io/cri-tools v1.21.0-k3s1
 	github.com/matryer/moq => github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009
@@ -29,7 +29,10 @@ replace (
 	github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.1
 	github.com/opencontainers/runtime-spec => github.com/opencontainers/runtime-spec v1.0.3-0.20210316141917-a8c4a9ee0f6b
 	github.com/rancher/k3s/pkg/data => ./pkg/data
-	go.etcd.io/etcd => github.com/k3s-io/etcd v0.5.0-alpha.5.0.20201208200253-50621aee4aea
+	go.etcd.io/etcd/api/v3 => github.com/k3s-io/etcd/api/v3 v3.5.0-k3s1
+	go.etcd.io/etcd/client/v3 => github.com/k3s-io/etcd/client/v3 v3.5.0-k3s1
+	go.etcd.io/etcd/etcdutl/v3 => github.com/k3s-io/etcd/etcdutl/v3 v3.5.0-k3s1
+	go.etcd.io/etcd/server/v3 => github.com/k3s-io/etcd/server/v3 v3.5.0-k3s1
 	golang.org/x/crypto => golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
 	golang.org/x/net => golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
 	golang.org/x/sys => golang.org/x/sys v0.0.0-20210616094352-59db8d763f22
@@ -117,7 +120,10 @@ require (
 	github.com/stretchr/testify v1.7.0
 	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
 	github.com/urfave/cli v1.22.2
-	go.etcd.io/etcd v0.5.0-alpha.5.0.20201208200253-50621aee4aea
+	go.etcd.io/etcd/api/v3 v3.5.0
+	go.etcd.io/etcd/client/v3 v3.5.0
+	go.etcd.io/etcd/etcdutl/v3 v3.5.0
+	go.etcd.io/etcd/server/v3 v3.5.0
 	golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
 	golang.org/x/net v0.0.0-20210520170846-37e1c6afe023
 	golang.org/x/sys v0.0.0-20210616094352-59db8d763f22
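etcd v3.5 splits the old single go.etcd.io/etcd module into per-component Go modules, which is why one replace line and one require line above become four of each. The sketch below is illustrative only (it is not taken from the k3s tree); it shows how code built against this go.mod consumes the split modules under their upstream import paths, with the old v3.4-era paths noted in comments.

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/api/v3/etcdserverpb"   // message types (was go.etcd.io/etcd/etcdserver/etcdserverpb)
	clientv3 "go.etcd.io/etcd/client/v3"    // client (was go.etcd.io/etcd/clientv3)
	_ "go.etcd.io/etcd/etcdutl/v3/snapshot" // snapshot helpers (was go.etcd.io/etcd/clientv3/snapshot)
	_ "go.etcd.io/etcd/server/v3/embed"     // embedded server (was go.etcd.io/etcd/embed)
)

func main() {
	// Each type below is resolved through its own module in go.mod, and in the
	// k3s build the replace directives above redirect it to the k3s-io fork.
	var cfg clientv3.Config        // from go.etcd.io/etcd/client/v3
	var member etcdserverpb.Member // from go.etcd.io/etcd/api/v3
	fmt.Printf("%T %T\n", cfg, member)
}
```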
go.sum: 24 changed lines

@@ -364,6 +364,8 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
 github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
 github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
@@ -519,8 +521,14 @@ github.com/k3s-io/cri v1.4.0-k3s.7 h1:1ycdF3dMDJMW/k/UxDC6eMsyGSMZ/p0AoUBVdJvNGQ
 github.com/k3s-io/cri v1.4.0-k3s.7/go.mod h1:fGPUUHMKQik/vIegSe05DtX/m4miovdtvVLqRUFAkK0=
 github.com/k3s-io/cri-tools v1.21.0-k3s1 h1:MWQtAsx4HCNXenqU/B4V9eU6HMyafkd1PnW6d4HCfos=
 github.com/k3s-io/cri-tools v1.21.0-k3s1/go.mod h1:Qsz54zxINPR+WVWX9Kc3CTmuDFB1dNLCNV8jE8lUbtU=
-github.com/k3s-io/etcd v0.5.0-alpha.5.0.20201208200253-50621aee4aea h1:7cwby0GoNAi8IsVrT0q+JfQpB6V76ZaEGhj6qts/mvU=
-github.com/k3s-io/etcd v0.5.0-alpha.5.0.20201208200253-50621aee4aea/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+github.com/k3s-io/etcd/api/v3 v3.5.0-k3s1 h1:hbVjpbEfKjttv+ZmYnqgR8wK7vtKIDrPxF8mcGVYWSU=
+github.com/k3s-io/etcd/api/v3 v3.5.0-k3s1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+github.com/k3s-io/etcd/client/v3 v3.5.0-k3s1 h1:RAXJQReqdjR6B+wL3HwWuBAdMO31fi7CGft0BdEyI5Q=
+github.com/k3s-io/etcd/client/v3 v3.5.0-k3s1/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+github.com/k3s-io/etcd/etcdutl/v3 v3.5.0-k3s1 h1:G0u+ShI11UDcP4Z8OP3CSl4/Wg63OgRBqYEiKMsUDP4=
+github.com/k3s-io/etcd/etcdutl/v3 v3.5.0-k3s1/go.mod h1:o98rKMCibbFAG8QS9KmvlYDGDShmmIbmRE8vSofzYNg=
+github.com/k3s-io/etcd/server/v3 v3.5.0-k3s1 h1:be2d2LS1w+OxyHE/PF6tmGVzF72+7L2FZ6BmxK1Q0ow=
+github.com/k3s-io/etcd/server/v3 v3.5.0-k3s1/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
 github.com/k3s-io/helm-controller v0.10.5 h1:zrStmx4ZkhtFU/OqJYoAZFGFB1Bu+jZs0N8dtlVRxDk=
 github.com/k3s-io/helm-controller v0.10.5/go.mod h1:nZP8FH3KZrNNUf5r+SwwiMR63HS6lxdHdpHijgPfF74=
 github.com/k3s-io/kine v0.6.2 h1:1aJTPfB8HG4exqMKFVE5H0z4bepF05tJHtYNXotWXa4=
@@ -576,8 +584,6 @@ github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.1-k3s1/go.mod
 github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.1-k3s1 h1:FUdS9NJfFIywAoONqeUVE56BUA9Y4k7QC3dD9o2Kz9Q=
 github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.1-k3s1/go.mod h1:MfbK6LD+Nhyzoy2TEg4jjcicjhy8UQp9IXrCxLIJhAE=
 github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.1-k3s1/go.mod h1:mpLHq04wAiOpaWE4BI8ArSQp82DIgRirioGL6CryJDg=
-github.com/k3s-io/protobuf v1.4.3-k3s1 h1:gduXrSm/6KkbTuctP6bASYqKQ/tyC/PNYqxBmJnk4Tc=
-github.com/k3s-io/protobuf v1.4.3-k3s1/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
 github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
@@ -630,6 +636,7 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/mattn/go-sqlite3 v1.14.4 h1:4rQjbDxdu9fSgI/r3KN72G3c2goxknAqHHgPWWs8UlI=
 github.com/mattn/go-sqlite3 v1.14.4/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
@@ -714,6 +721,7 @@ github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
 github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
 github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -934,20 +942,16 @@ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
 go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
 go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
 go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU=
 go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
 go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs=
 go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek=
-go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
 go.etcd.io/etcd/pkg/v3 v3.5.0 h1:ntrg6vvKRW26JRmHTE0iNlDgYK6JX3hg/4cD62X0ixk=
 go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
 go.etcd.io/etcd/raft/v3 v3.5.0 h1:kw2TmO3yFTgE+F0mdKkG7xMxkit2duBDa2Hu6D/HMlw=
 go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
-go.etcd.io/etcd/server/v3 v3.5.0 h1:jk8D/lwGEDlQU9kZXUFMSANkE22Sg5+mW27ip8xcF9E=
-go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -10,7 +10,7 @@ import (
 	"github.com/rancher/k3s/pkg/util"
 	"github.com/rancher/k3s/pkg/version"
 	controllerv1 "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
-	etcdv3 "go.etcd.io/etcd/clientv3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	v1 "k8s.io/api/core/v1"
 )

@@ -38,7 +38,7 @@ type handler struct {
 	endpointsController controllerv1.EndpointsController
 	runtime             *config.ControlRuntime
 	ctx                 context.Context
-	etcdClient          *etcdv3.Client
+	etcdClient          *clientv3.Client
 }

 // This controller will update the version.program/apiaddresses etcd key with a list of
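In the controller above only the import alias and the client field type change; calls against the client keep the same shape. A minimal hedged sketch of the pattern, using a placeholder key and helper name rather than the actual k3s function, is:

```go
package main

import (
	"context"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3" // was: etcdv3 "go.etcd.io/etcd/clientv3"
)

// putAddresses writes a serialized value under an etcd key, the same way a
// controller holding a *clientv3.Client field would. Key and value here are
// placeholders, not the real version.program/apiaddresses payload.
func putAddresses(ctx context.Context, c *clientv3.Client, key string, value []byte) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	_, err := c.Put(ctx, key, string(value))
	return err
}
```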
@@ -3,14 +3,14 @@
 package executor

 import (
+	"errors"
 	"io/ioutil"
 	"path/filepath"
-	"strings"

 	"github.com/rancher/k3s/pkg/version"
 	"github.com/sirupsen/logrus"
-	"go.etcd.io/etcd/embed"
-	"go.etcd.io/etcd/etcdserver"
+	"go.etcd.io/etcd/server/v3/embed"
+	"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
 )

 func (e Embedded) CurrentETCDOptions() (InitialOptions, error) {
@@ -34,7 +34,7 @@ func (e Embedded) ETCD(args ETCDConfig) error {
 	go func() {
 		select {
 		case err := <-etcd.Server.ErrNotify():
-			if strings.Contains(err.Error(), etcdserver.ErrMemberRemoved.Error()) {
+			if errors.Is(err, rafthttp.ErrMemberRemoved) {
 				tombstoneFile := filepath.Join(args.DataDir, "tombstone")
 				if err := ioutil.WriteFile(tombstoneFile, []byte{}, 0600); err != nil {
 					logrus.Fatalf("failed to write tombstone file to %s", tombstoneFile)
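The substantive change in this hunk is error handling, not just an import path: instead of substring-matching the error text against etcdserver.ErrMemberRemoved, the code now compares with errors.Is against the sentinel exported by the v3.5 rafthttp package. A standalone sketch of the pattern follows; the wrapped error is fabricated for illustration.

```go
package main

import (
	"errors"
	"fmt"

	"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
)

func main() {
	// Pretend this came out of etcd.Server.ErrNotify(): possibly a wrapped sentinel.
	err := fmt.Errorf("raft transport stopped: %w", rafthttp.ErrMemberRemoved)

	// Old approach (pre-3.5): strings.Contains(err.Error(), etcdserver.ErrMemberRemoved.Error())
	// New approach: errors.Is unwraps the chain and compares against the sentinel value.
	if errors.Is(err, rafthttp.ErrMemberRemoved) {
		fmt.Println("member removed from cluster: write tombstone and shut down")
	}
}
```

errors.Is also matches when the notified error is the sentinel itself rather than a wrapped one, so it is strictly more robust than comparing message strings.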
@@ -29,10 +29,10 @@ import (
 	"github.com/rancher/k3s/pkg/version"
 	"github.com/robfig/cron/v3"
 	"github.com/sirupsen/logrus"
-	etcd "go.etcd.io/etcd/clientv3"
-	"go.etcd.io/etcd/clientv3/snapshot"
-	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
-	"go.etcd.io/etcd/etcdserver/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+	clientv3 "go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/etcdutl/v3/snapshot"
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -65,7 +65,7 @@ var (
 )

 type ETCD struct {
-	client  *etcd.Client
+	client  *clientv3.Client
 	config  *config.Control
 	name    string
 	runtime *config.ControlRuntime
@@ -303,7 +303,7 @@ func (e *ETCD) join(ctx context.Context, clientAccessInfo *clientaccess.Info) er
 	members, err := client.MemberList(ctx)
 	if err != nil {
 		logrus.Errorf("Failed to get member list from etcd cluster. Will assume this member is already added")
-		members = &etcd.MemberListResponse{
+		members = &clientv3.MemberListResponse{
 			Members: append(memberList.Members, &etcdserverpb.Member{
 				Name:     e.name,
 				PeerURLs: []string{e.peerURL()},
@@ -444,21 +444,21 @@ func (e *ETCD) infoHandler() http.Handler {
 }

 // getClient returns an etcd client connected to the specified endpoints
-func GetClient(ctx context.Context, runtime *config.ControlRuntime, endpoints ...string) (*etcd.Client, error) {
+func GetClient(ctx context.Context, runtime *config.ControlRuntime, endpoints ...string) (*clientv3.Client, error) {
 	cfg, err := getClientConfig(ctx, runtime, endpoints...)
 	if err != nil {
 		return nil, err
 	}
-	return etcd.New(*cfg)
+	return clientv3.New(*cfg)
 }

 //getClientConfig generates an etcd client config connected to the specified endpoints
-func getClientConfig(ctx context.Context, runtime *config.ControlRuntime, endpoints ...string) (*etcd.Config, error) {
+func getClientConfig(ctx context.Context, runtime *config.ControlRuntime, endpoints ...string) (*clientv3.Config, error) {
 	tlsConfig, err := toTLSConfig(runtime)
 	if err != nil {
 		return nil, err
 	}
-	cfg := &etcd.Config{
+	cfg := &clientv3.Config{
 		Endpoints: endpoints,
 		TLS:       tlsConfig,
 		Context:   ctx,
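GetClient and getClientConfig keep their structure; only the package identifier moves from the old etcd alias to clientv3. A sketch of the equivalent construction against the v3.5 client package, with a plain *tls.Config parameter standing in for the TLS material that k3s derives from its ControlRuntime:

```go
package main

import (
	"context"
	"crypto/tls"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// newEtcdClient mirrors the GetClient/getClientConfig split shown above:
// build a clientv3.Config first, then hand it to clientv3.New. Certificate
// loading is omitted; the caller supplies whatever tls.Config it needs.
func newEtcdClient(ctx context.Context, tlsConfig *tls.Config, endpoints ...string) (*clientv3.Client, error) {
	cfg := clientv3.Config{
		Endpoints:   endpoints,
		TLS:         tlsConfig,
		Context:     ctx,
		DialTimeout: 5 * time.Second, // illustrative; not part of the k3s config shown above
	}
	return clientv3.New(cfg)
}
```

Keeping config construction separate from client creation, as the k3s code does, makes it easy to unit-test the endpoint and TLS wiring without dialing a real cluster.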
@@ -12,7 +12,7 @@ import (
 	"github.com/rancher/k3s/pkg/daemons/config"
 	testutil "github.com/rancher/k3s/tests/util"
 	"github.com/robfig/cron/v3"
-	etcd "go.etcd.io/etcd/clientv3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 )

 func generateTestConfig() *config.Control {
@@ -193,7 +193,7 @@ func Test_UnitETCD_Register(t *testing.T) {

 func Test_UnitETCD_Start(t *testing.T) {
 	type fields struct {
-		client  *etcd.Client
+		client  *clientv3.Client
 		config  *config.Control
 		name    string
 		runtime *config.ControlRuntime
@@ -74,7 +74,10 @@ func (c *Clientset) Tracker() testing.ObjectTracker {
 	return c.tracker
 }

-var _ clientset.Interface = &Clientset{}
+var (
+	_ clientset.Interface = &Clientset{}
+	_ testing.FakeClient  = &Clientset{}
+)

 // K3sV1 retrieves the K3sV1Client
 func (c *Clientset) K3sV1() k3sv1.K3sV1Interface {
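The generated fake clientset gains a second compile-time assertion: the blank-identifier var block makes the build fail if *Clientset ever stops implementing either interface. The idiom in isolation, using toy interfaces since the real clientset.Interface and testing.FakeClient are not reproduced here:

```go
package main

import "fmt"

// Toy stand-ins for clientset.Interface and testing.FakeClient.
type Interface interface{ Discovery() string }
type FakeClient interface{ Tracker() string }

type Clientset struct{}

func (c *Clientset) Discovery() string { return "discovery" }
func (c *Clientset) Tracker() string   { return "tracker" }

// Compile-time assertions: if *Clientset drops a method, the build breaks
// here rather than at some distant call site.
var (
	_ Interface  = &Clientset{}
	_ FakeClient = &Clientset{}
)

func main() {
	c := &Clientset{}
	fmt.Println(c.Discovery(), c.Tracker())
}
```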
@@ -1,4 +0,0 @@
-.DS_Store
-bin
-
-
@@ -1,13 +0,0 @@
-language: go
-
-script:
-  - go vet ./...
-  - go test -v ./...
-
-go:
-  - 1.3
-  - 1.4
-  - 1.5
-  - 1.6
-  - 1.7
-  - tip
@@ -1,8 +0,0 @@
-Copyright (c) 2012 Dave Grijalva
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
@@ -1,97 +0,0 @@
-## Migration Guide from v2 -> v3
-
-Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
-
-### `Token.Claims` is now an interface type
-
-The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
-
-`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property.
-
-The old example for parsing a token looked like this..
-
-```go
-	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
-		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
-	}
-```
-
-is now directly mapped to...
-
-```go
-	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
-		claims := token.Claims.(jwt.MapClaims)
-		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
-	}
-```
-
-`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
-
-```go
-	type MyCustomClaims struct {
-		User string
-		*StandardClaims
-	}
-
-	if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
-		claims := token.Claims.(*MyCustomClaims)
-		fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
-	}
-```
-
-### `ParseFromRequest` has been moved
-
-To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`.
-
-`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
-
-This simple parsing example:
-
-```go
-	if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil {
-		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
-	}
-```
-
-is directly mapped to:
-
-```go
-	if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
-		claims := token.Claims.(jwt.MapClaims)
-		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
-	}
-```
-
-There are several concrete `Extractor` types provided for your convenience:
-
-* `HeaderExtractor` will search a list of headers until one contains content.
-* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
-* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
-* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
-* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument
-* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header
-
-
-### RSA signing methods no longer accept `[]byte` keys
-
-Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
-
-To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
-
-```go
-	func keyLookupFunc(*Token) (interface{}, error) {
-		// Don't forget to validate the alg is what you expect:
-		if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
-			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
-		}
-
-		// Look up key
-		key, err := lookupPublicKey(token.Header["kid"])
-		if err != nil {
-			return nil, err
-		}
-
-		// Unpack key from PEM encoded PKCS8
-		return jwt.ParseRSAPublicKeyFromPEM(key)
-	}
-```
@@ -1,100 +0,0 @@
-# jwt-go
-
-[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
-[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go)
-
-A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
-
-**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3.
-
-**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.
-
-**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
-
-## What the heck is a JWT?
-
-JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
-
-In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
-
-The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
-
-The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
-
-## What's in the box?
-
-This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
-
-## Examples
-
-See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
-
-* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
-* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
-* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
-
-## Extensions
-
-This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
-
-Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
-
-## Compliance
-
-This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
-
-* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
-
-## Project Status & Versioning
-
-This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
-
-This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
-
-While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning.
-
-**BREAKING CHANGES:***
-* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
-
-## Usage Tips
-
-### Signing vs Encryption
-
-A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data:
-
-* The author of the token was in the possession of the signing secret
-* The data has not been modified since it was signed
-
-It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
-
-### Choosing a Signing Method
-
-There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
-
-Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
-
-Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
-
-### Signing Methods and Key Types
-
-Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
-
-* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
-* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
-* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
-
-### JWT and OAuth
-
-It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
-
-Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
-
-* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
-* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
-* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
-
-## More
-
-Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
-
-The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
@@ -1,118 +0,0 @@
-## `jwt-go` Version History
-
-#### 3.2.0
-
-* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
-* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
-* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
-* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
-
-#### 3.1.0
-
-* Improvements to `jwt` command line tool
-* Added `SkipClaimsValidation` option to `Parser`
-* Documentation updates
-
-#### 3.0.0
-
-* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
-* Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
-* `ParseFromRequest` has been moved to `request` subpackage and usage has changed
-* The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
-* Other Additions and Changes
-* Added `Claims` interface type to allow users to decode the claims into a custom type
-* Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
-* Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
-* Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
-* Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
-* Added several new, more specific, validation errors to error type bitmask
-* Moved examples from README to executable example files
-* Signing method registry is now thread safe
-* Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
-
-#### 2.7.0
-
-This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
-
-* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
-* Error text for expired tokens includes how long it's been expired
-* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
-* Documentation updates
-
-#### 2.6.0
-
-* Exposed inner error within ValidationError
-* Fixed validation errors when using UseJSONNumber flag
-* Added several unit tests
-
-#### 2.5.0
-
-* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
-* Updated/fixed some documentation
-* Added more helpful error message when trying to parse tokens that begin with `BEARER `
-
-#### 2.4.0
-
-* Added new type, Parser, to allow for configuration of various parsing parameters
-* You can now specify a list of valid signing methods. Anything outside this set will be rejected.
-* You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
-* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
-* Fixed some bugs with ECDSA parsing
-
-#### 2.3.0
-
-* Added support for ECDSA signing methods
-* Added support for RSA PSS signing methods (requires go v1.4)
-
-#### 2.2.0
-
-* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
-
-#### 2.1.0
-
-Backwards compatible API change that was missed in 2.0.0.
-
-* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
-
-#### 2.0.0
-
-There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
-
-The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
-
-It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
-
-* **Compatibility Breaking Changes**
-* `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
-* `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
-* `KeyFunc` now returns `interface{}` instead of `[]byte`
-* `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
-* `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
-* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
-* Added public package global `SigningMethodHS256`
-* Added public package global `SigningMethodHS384`
-* Added public package global `SigningMethodHS512`
-* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
-* Added public package global `SigningMethodRS256`
-* Added public package global `SigningMethodRS384`
-* Added public package global `SigningMethodRS512`
-* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
-* Refactored the RSA implementation to be easier to read
-* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
-
-#### 1.0.2
-
-* Fixed bug in parsing public keys from certificates
-* Added more tests around the parsing of keys for RS256
-* Code refactoring in RS256 implementation. No functional changes
-
-#### 1.0.1
-
-* Fixed panic if RS256 signing method was passed an invalid key
-
-#### 1.0.0
-
-* First versioned release
-* API stabilized
-* Supports creating, signing, parsing, and validating JWT tokens
-* Supports RS256 and HS256 signing methods
@@ -1,134 +0,0 @@
-package jwt
-
-import (
-	"crypto/subtle"
-	"fmt"
-	"time"
-)
-
-// For a type to be a Claims object, it must just have a Valid method that determines
-// if the token is invalid for any supported reason
-type Claims interface {
-	Valid() error
-}
-
-// Structured version of Claims Section, as referenced at
-// https://tools.ietf.org/html/rfc7519#section-4.1
-// See examples for how to use this with your own claim types
-type StandardClaims struct {
-	Audience  string `json:"aud,omitempty"`
-	ExpiresAt int64  `json:"exp,omitempty"`
-	Id        string `json:"jti,omitempty"`
-	IssuedAt  int64  `json:"iat,omitempty"`
-	Issuer    string `json:"iss,omitempty"`
-	NotBefore int64  `json:"nbf,omitempty"`
-	Subject   string `json:"sub,omitempty"`
-}
-
-// Validates time based claims "exp, iat, nbf".
-// There is no accounting for clock skew.
-// As well, if any of the above claims are not in the token, it will still
-// be considered a valid claim.
-func (c StandardClaims) Valid() error {
-	vErr := new(ValidationError)
-	now := TimeFunc().Unix()
-
-	// The claims below are optional, by default, so if they are set to the
-	// default value in Go, let's not fail the verification for them.
-	if c.VerifyExpiresAt(now, false) == false {
-		delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
-		vErr.Inner = fmt.Errorf("token is expired by %v", delta)
-		vErr.Errors |= ValidationErrorExpired
-	}
-
-	if c.VerifyIssuedAt(now, false) == false {
-		vErr.Inner = fmt.Errorf("Token used before issued")
-		vErr.Errors |= ValidationErrorIssuedAt
-	}
-
-	if c.VerifyNotBefore(now, false) == false {
-		vErr.Inner = fmt.Errorf("token is not valid yet")
-		vErr.Errors |= ValidationErrorNotValidYet
-	}
-
-	if vErr.valid() {
-		return nil
-	}
-
-	return vErr
-}
-
-// Compares the aud claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
-	return verifyAud(c.Audience, cmp, req)
-}
-
-// Compares the exp claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
-	return verifyExp(c.ExpiresAt, cmp, req)
-}
-
-// Compares the iat claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
-	return verifyIat(c.IssuedAt, cmp, req)
-}
-
-// Compares the iss claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
-	return verifyIss(c.Issuer, cmp, req)
-}
-
-// Compares the nbf claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
-	return verifyNbf(c.NotBefore, cmp, req)
-}
-
-// ----- helpers
-
-func verifyAud(aud string, cmp string, required bool) bool {
-	if aud == "" {
-		return !required
-	}
-	if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
-		return true
-	} else {
-		return false
-	}
-}
-
-func verifyExp(exp int64, now int64, required bool) bool {
-	if exp == 0 {
-		return !required
-	}
-	return now <= exp
-}
-
-func verifyIat(iat int64, now int64, required bool) bool {
-	if iat == 0 {
-		return !required
-	}
-	return now >= iat
-}
-
-func verifyIss(iss string, cmp string, required bool) bool {
-	if iss == "" {
-		return !required
-	}
-	if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
-		return true
-	} else {
-		return false
-	}
-}
-
-func verifyNbf(nbf int64, now int64, required bool) bool {
-	if nbf == 0 {
-		return !required
-	}
-	return now >= nbf
-}
@@ -1,4 +0,0 @@
-// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
-//
-// See README.md for more info.
-package jwt
@ -1,148 +0,0 @@
package jwt

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/rand"
	"errors"
	"math/big"
)

var (
	// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
	ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
)

// Implements the ECDSA family of signing methods
// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
type SigningMethodECDSA struct {
	Name      string
	Hash      crypto.Hash
	KeySize   int
	CurveBits int
}

// Specific instances for EC256 and company
var (
	SigningMethodES256 *SigningMethodECDSA
	SigningMethodES384 *SigningMethodECDSA
	SigningMethodES512 *SigningMethodECDSA
)

func init() {
	// ES256
	SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
	RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
		return SigningMethodES256
	})

	// ES384
	SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
	RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
		return SigningMethodES384
	})

	// ES512
	SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
	RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
		return SigningMethodES512
	})
}

func (m *SigningMethodECDSA) Alg() string {
	return m.Name
}

// Implements the Verify method from SigningMethod
// For this verify method, key must be an ecdsa.PublicKey struct
func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
	var err error

	// Decode the signature
	var sig []byte
	if sig, err = DecodeSegment(signature); err != nil {
		return err
	}

	// Get the key
	var ecdsaKey *ecdsa.PublicKey
	switch k := key.(type) {
	case *ecdsa.PublicKey:
		ecdsaKey = k
	default:
		return ErrInvalidKeyType
	}

	if len(sig) != 2*m.KeySize {
		return ErrECDSAVerification
	}

	r := big.NewInt(0).SetBytes(sig[:m.KeySize])
	s := big.NewInt(0).SetBytes(sig[m.KeySize:])

	// Create hasher
	if !m.Hash.Available() {
		return ErrHashUnavailable
	}
	hasher := m.Hash.New()
	hasher.Write([]byte(signingString))

	// Verify the signature
	if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
		return nil
	} else {
		return ErrECDSAVerification
	}
}

// Implements the Sign method from SigningMethod
// For this signing method, key must be an ecdsa.PrivateKey struct
func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
	// Get the key
	var ecdsaKey *ecdsa.PrivateKey
	switch k := key.(type) {
	case *ecdsa.PrivateKey:
		ecdsaKey = k
	default:
		return "", ErrInvalidKeyType
	}

	// Create the hasher
	if !m.Hash.Available() {
		return "", ErrHashUnavailable
	}

	hasher := m.Hash.New()
	hasher.Write([]byte(signingString))

	// Sign the string and return r, s
	if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
		curveBits := ecdsaKey.Curve.Params().BitSize

		if m.CurveBits != curveBits {
			return "", ErrInvalidKey
		}

		keyBytes := curveBits / 8
		if curveBits%8 > 0 {
			keyBytes += 1
		}

		// We serialize the outputs (r and s) into big-endian byte arrays and pad
		// them with zeros on the left to make sure the sizes work out. Both arrays
		// must be keyBytes long, and the output must be 2*keyBytes long.
		rBytes := r.Bytes()
		rBytesPadded := make([]byte, keyBytes)
		copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)

		sBytes := s.Bytes()
		sBytesPadded := make([]byte, keyBytes)
		copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)

		out := append(rBytesPadded, sBytesPadded...)

		return EncodeSegment(out), nil
	} else {
		return "", err
	}
}
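A minimal usage sketch of the ECDSA methods deleted above, not part of the commit itself. The jwt import path is not shown in this hunk and is assumed here; the signing string would normally come from Token.SigningString().

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go" // assumed import path for the vendored package
)

func main() {
	// ES256 expects a P-256 key: *ecdsa.PrivateKey to sign, *ecdsa.PublicKey to verify.
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	signingString := "header.claims" // placeholder for Token.SigningString() output
	sig, err := jwt.SigningMethodES256.Sign(signingString, key)
	if err != nil {
		panic(err)
	}

	// Verify returns nil when the r/s signature matches the signing string.
	fmt.Println(jwt.SigningMethodES256.Verify(signingString, sig, &key.PublicKey))
}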
@ -1,67 +0,0 @@
package jwt

import (
	"crypto/ecdsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
)

var (
	ErrNotECPublicKey  = errors.New("Key is not a valid ECDSA public key")
	ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
)

// Parse PEM encoded Elliptic Curve Private Key Structure
func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
	var err error

	// Parse PEM block
	var block *pem.Block
	if block, _ = pem.Decode(key); block == nil {
		return nil, ErrKeyMustBePEMEncoded
	}

	// Parse the key
	var parsedKey interface{}
	if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
		return nil, err
	}

	var pkey *ecdsa.PrivateKey
	var ok bool
	if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
		return nil, ErrNotECPrivateKey
	}

	return pkey, nil
}

// Parse PEM encoded PKCS1 or PKCS8 public key
func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
	var err error

	// Parse PEM block
	var block *pem.Block
	if block, _ = pem.Decode(key); block == nil {
		return nil, ErrKeyMustBePEMEncoded
	}

	// Parse the key
	var parsedKey interface{}
	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
			parsedKey = cert.PublicKey
		} else {
			return nil, err
		}
	}

	var pkey *ecdsa.PublicKey
	var ok bool
	if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
		return nil, ErrNotECPublicKey
	}

	return pkey, nil
}
@ -1,59 +0,0 @@
package jwt

import (
	"errors"
)

// Error constants
var (
	ErrInvalidKey      = errors.New("key is invalid")
	ErrInvalidKeyType  = errors.New("key is of invalid type")
	ErrHashUnavailable = errors.New("the requested hash function is unavailable")
)

// The errors that might occur when parsing and validating a token
const (
	ValidationErrorMalformed        uint32 = 1 << iota // Token is malformed
	ValidationErrorUnverifiable                        // Token could not be verified because of signing problems
	ValidationErrorSignatureInvalid                    // Signature validation failed

	// Standard Claim validation errors
	ValidationErrorAudience      // AUD validation failed
	ValidationErrorExpired       // EXP validation failed
	ValidationErrorIssuedAt      // IAT validation failed
	ValidationErrorIssuer        // ISS validation failed
	ValidationErrorNotValidYet   // NBF validation failed
	ValidationErrorId            // JTI validation failed
	ValidationErrorClaimsInvalid // Generic claims validation error
)

// Helper for constructing a ValidationError with a string error message
func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
	return &ValidationError{
		text:   errorText,
		Errors: errorFlags,
	}
}

// The error from Parse if token is not valid
type ValidationError struct {
	Inner  error  // stores the error returned by external dependencies, i.e.: KeyFunc
	Errors uint32 // bitfield. see ValidationError... constants
	text   string // errors that do not have a valid error just have text
}

// Validation error is an error type
func (e ValidationError) Error() string {
	if e.Inner != nil {
		return e.Inner.Error()
	} else if e.text != "" {
		return e.text
	} else {
		return "token is invalid"
	}
}

// No errors
func (e *ValidationError) valid() bool {
	return e.Errors == 0
}
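A short sketch, not part of the commit, of how the ValidationError bitfield above is typically inspected after a failed Parse; the jwt import path is assumed.

package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go" // assumed import path for the vendored package
)

func describe(err error) {
	ve, ok := err.(*jwt.ValidationError)
	if !ok {
		fmt.Println("not a validation error:", err)
		return
	}
	// Errors is a bitfield, so individual causes are tested with a mask.
	if ve.Errors&jwt.ValidationErrorMalformed != 0 {
		fmt.Println("token is malformed")
	}
	if ve.Errors&jwt.ValidationErrorExpired != 0 {
		fmt.Println("token is expired")
	}
	if ve.Errors&jwt.ValidationErrorSignatureInvalid != 0 {
		fmt.Println("signature is invalid")
	}
}

func main() {
	describe(jwt.NewValidationError("demo", jwt.ValidationErrorExpired|jwt.ValidationErrorSignatureInvalid))
}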
@ -1,95 +0,0 @@
package jwt

import (
	"crypto"
	"crypto/hmac"
	"errors"
)

// Implements the HMAC-SHA family of signing methods
// Expects key type of []byte for both signing and validation
type SigningMethodHMAC struct {
	Name string
	Hash crypto.Hash
}

// Specific instances for HS256 and company
var (
	SigningMethodHS256  *SigningMethodHMAC
	SigningMethodHS384  *SigningMethodHMAC
	SigningMethodHS512  *SigningMethodHMAC
	ErrSignatureInvalid = errors.New("signature is invalid")
)

func init() {
	// HS256
	SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
	RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
		return SigningMethodHS256
	})

	// HS384
	SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
	RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
		return SigningMethodHS384
	})

	// HS512
	SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
	RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
		return SigningMethodHS512
	})
}

func (m *SigningMethodHMAC) Alg() string {
	return m.Name
}

// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
	// Verify the key is the right type
	keyBytes, ok := key.([]byte)
	if !ok {
		return ErrInvalidKeyType
	}

	// Decode signature, for comparison
	sig, err := DecodeSegment(signature)
	if err != nil {
		return err
	}

	// Can we use the specified hashing method?
	if !m.Hash.Available() {
		return ErrHashUnavailable
	}

	// This signing method is symmetric, so we validate the signature
	// by reproducing the signature from the signing string and key, then
	// comparing that against the provided signature.
	hasher := hmac.New(m.Hash.New, keyBytes)
	hasher.Write([]byte(signingString))
	if !hmac.Equal(sig, hasher.Sum(nil)) {
		return ErrSignatureInvalid
	}

	// No validation errors. Signature is good.
	return nil
}

// Implements the Sign method from SigningMethod for this signing method.
// Key must be []byte
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
	if keyBytes, ok := key.([]byte); ok {
		if !m.Hash.Available() {
			return "", ErrHashUnavailable
		}

		hasher := hmac.New(m.Hash.New, keyBytes)
		hasher.Write([]byte(signingString))

		return EncodeSegment(hasher.Sum(nil)), nil
	}

	return "", ErrInvalidKeyType
}
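A minimal sketch, not part of the commit, of the HMAC methods deleted above: HS256 takes a []byte key for both Sign and Verify. The jwt import path is assumed.

package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go" // assumed import path for the vendored package
)

func main() {
	key := []byte("shared-secret")
	signingString := "header.claims" // placeholder for Token.SigningString() output

	sig, err := jwt.SigningMethodHS256.Sign(signingString, key)
	if err != nil {
		panic(err)
	}

	// Verify recomputes the HMAC and compares with hmac.Equal; nil means valid.
	fmt.Println(jwt.SigningMethodHS256.Verify(signingString, sig, key))
}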
@ -1,94 +0,0 @@
package jwt

import (
	"encoding/json"
	"errors"
	// "fmt"
)

// Claims type that uses the map[string]interface{} for JSON decoding
// This is the default claims type if you don't supply one
type MapClaims map[string]interface{}

// Compares the aud claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
	aud, _ := m["aud"].(string)
	return verifyAud(aud, cmp, req)
}

// Compares the exp claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
	switch exp := m["exp"].(type) {
	case float64:
		return verifyExp(int64(exp), cmp, req)
	case json.Number:
		v, _ := exp.Int64()
		return verifyExp(v, cmp, req)
	}
	return req == false
}

// Compares the iat claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
	switch iat := m["iat"].(type) {
	case float64:
		return verifyIat(int64(iat), cmp, req)
	case json.Number:
		v, _ := iat.Int64()
		return verifyIat(v, cmp, req)
	}
	return req == false
}

// Compares the iss claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
	iss, _ := m["iss"].(string)
	return verifyIss(iss, cmp, req)
}

// Compares the nbf claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
	switch nbf := m["nbf"].(type) {
	case float64:
		return verifyNbf(int64(nbf), cmp, req)
	case json.Number:
		v, _ := nbf.Int64()
		return verifyNbf(v, cmp, req)
	}
	return req == false
}

// Validates time based claims "exp, iat, nbf".
// There is no accounting for clock skew.
// As well, if any of the above claims are not in the token, it will still
// be considered a valid claim.
func (m MapClaims) Valid() error {
	vErr := new(ValidationError)
	now := TimeFunc().Unix()

	if m.VerifyExpiresAt(now, false) == false {
		vErr.Inner = errors.New("Token is expired")
		vErr.Errors |= ValidationErrorExpired
	}

	if m.VerifyIssuedAt(now, false) == false {
		vErr.Inner = errors.New("Token used before issued")
		vErr.Errors |= ValidationErrorIssuedAt
	}

	if m.VerifyNotBefore(now, false) == false {
		vErr.Inner = errors.New("Token is not valid yet")
		vErr.Errors |= ValidationErrorNotValidYet
	}

	if vErr.valid() {
		return nil
	}

	return vErr
}
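A sketch, not part of the commit, of MapClaims validation as implemented above: exp/iat/nbf are read as float64 or json.Number and checked against TimeFunc(). The jwt import path is assumed.

package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go" // assumed import path for the vendored package
)

func main() {
	claims := jwt.MapClaims{
		"iss": "example-issuer",
		"exp": float64(time.Now().Add(-time.Minute).Unix()), // already expired
	}

	// Valid() reports ValidationErrorExpired because exp is in the past.
	fmt.Println(claims.Valid())

	// Issuer comparison is a plain string match.
	fmt.Println(claims.VerifyIssuer("example-issuer", true)) // true
}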
@ -1,52 +0,0 @@
package jwt

// Implements the none signing method. This is required by the spec
// but you probably should never use it.
var SigningMethodNone *signingMethodNone

const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"

var NoneSignatureTypeDisallowedError error

type signingMethodNone struct{}
type unsafeNoneMagicConstant string

func init() {
	SigningMethodNone = &signingMethodNone{}
	NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)

	RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
		return SigningMethodNone
	})
}

func (m *signingMethodNone) Alg() string {
	return "none"
}

// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
	// Key must be UnsafeAllowNoneSignatureType to prevent accidentally
	// accepting 'none' signing method
	if _, ok := key.(unsafeNoneMagicConstant); !ok {
		return NoneSignatureTypeDisallowedError
	}
	// If signing method is none, signature must be an empty string
	if signature != "" {
		return NewValidationError(
			"'none' signing method with non-empty signature",
			ValidationErrorSignatureInvalid,
		)
	}

	// Accept 'none' signing method.
	return nil
}

// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
	if _, ok := key.(unsafeNoneMagicConstant); ok {
		return "", nil
	}
	return "", NoneSignatureTypeDisallowedError
}
@ -1,148 +0,0 @@
package jwt

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strings"
)

type Parser struct {
	ValidMethods         []string // If populated, only these methods will be considered valid
	UseJSONNumber        bool     // Use JSON Number format in JSON decoder
	SkipClaimsValidation bool     // Skip claims validation during token parsing
}

// Parse, validate, and return a token.
// keyFunc will receive the parsed token and should return the key for validating.
// If everything is kosher, err will be nil
func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
	return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
}

func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
	token, parts, err := p.ParseUnverified(tokenString, claims)
	if err != nil {
		return token, err
	}

	// Verify signing method is in the required set
	if p.ValidMethods != nil {
		var signingMethodValid = false
		var alg = token.Method.Alg()
		for _, m := range p.ValidMethods {
			if m == alg {
				signingMethodValid = true
				break
			}
		}
		if !signingMethodValid {
			// signing method is not in the listed set
			return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
		}
	}

	// Lookup key
	var key interface{}
	if keyFunc == nil {
		// keyFunc was not provided. short circuiting validation
		return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
	}
	if key, err = keyFunc(token); err != nil {
		// keyFunc returned an error
		if ve, ok := err.(*ValidationError); ok {
			return token, ve
		}
		return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
	}

	vErr := &ValidationError{}

	// Validate Claims
	if !p.SkipClaimsValidation {
		if err := token.Claims.Valid(); err != nil {

			// If the Claims Valid returned an error, check if it is a validation error,
			// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
			if e, ok := err.(*ValidationError); !ok {
				vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
			} else {
				vErr = e
			}
		}
	}

	// Perform validation
	token.Signature = parts[2]
	if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
		vErr.Inner = err
		vErr.Errors |= ValidationErrorSignatureInvalid
	}

	if vErr.valid() {
		token.Valid = true
		return token, nil
	}

	return token, vErr
}

// WARNING: Don't use this method unless you know what you're doing
//
// This method parses the token but doesn't validate the signature. It's only
// ever useful in cases where you know the signature is valid (because it has
// been checked previously in the stack) and you want to extract values from
// it.
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
	parts = strings.Split(tokenString, ".")
	if len(parts) != 3 {
		return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
	}

	token = &Token{Raw: tokenString}

	// parse Header
	var headerBytes []byte
	if headerBytes, err = DecodeSegment(parts[0]); err != nil {
		if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
			return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
		}
		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
	}
	if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
	}

	// parse Claims
	var claimBytes []byte
	token.Claims = claims

	if claimBytes, err = DecodeSegment(parts[1]); err != nil {
		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
	}
	dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
	if p.UseJSONNumber {
		dec.UseNumber()
	}
	// JSON Decode. Special case for map type to avoid weird pointer behavior
	if c, ok := token.Claims.(MapClaims); ok {
		err = dec.Decode(&c)
	} else {
		err = dec.Decode(&claims)
	}
	// Handle decode error
	if err != nil {
		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
	}

	// Lookup signature method
	if method, ok := token.Header["alg"].(string); ok {
		if token.Method = GetSigningMethod(method); token.Method == nil {
			return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
		}
	} else {
		return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
	}

	return token, parts, nil
}
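A sketch, not part of the commit, of the Parser deleted above: restricting accepted algorithms with ValidMethods and supplying the verification key through a Keyfunc. The jwt import path is assumed; NewWithClaims and SignedString come from token.go later in this diff.

package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go" // assumed import path for the vendored package
)

func main() {
	key := []byte("shared-secret")

	// Create a signed token to parse back.
	signed, _ := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "node-1"}).SignedString(key)

	parser := &jwt.Parser{ValidMethods: []string{"HS256"}, UseJSONNumber: true}
	token, err := parser.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		// The Keyfunc sees the parsed but unverified token and returns the key.
		return key, nil
	})

	fmt.Println(token.Valid, err) // true <nil>
}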
@ -1,101 +0,0 @@
package jwt

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
)

// Implements the RSA family of signing methods
// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
type SigningMethodRSA struct {
	Name string
	Hash crypto.Hash
}

// Specific instances for RS256 and company
var (
	SigningMethodRS256 *SigningMethodRSA
	SigningMethodRS384 *SigningMethodRSA
	SigningMethodRS512 *SigningMethodRSA
)

func init() {
	// RS256
	SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
	RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
		return SigningMethodRS256
	})

	// RS384
	SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
	RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
		return SigningMethodRS384
	})

	// RS512
	SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
	RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
		return SigningMethodRS512
	})
}

func (m *SigningMethodRSA) Alg() string {
	return m.Name
}

// Implements the Verify method from SigningMethod
// For this signing method, key must be an *rsa.PublicKey structure.
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
	var err error

	// Decode the signature
	var sig []byte
	if sig, err = DecodeSegment(signature); err != nil {
		return err
	}

	var rsaKey *rsa.PublicKey
	var ok bool

	if rsaKey, ok = key.(*rsa.PublicKey); !ok {
		return ErrInvalidKeyType
	}

	// Create hasher
	if !m.Hash.Available() {
		return ErrHashUnavailable
	}
	hasher := m.Hash.New()
	hasher.Write([]byte(signingString))

	// Verify the signature
	return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
}

// Implements the Sign method from SigningMethod
// For this signing method, key must be an *rsa.PrivateKey structure.
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
	var rsaKey *rsa.PrivateKey
	var ok bool

	// Validate type of key
	if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
		return "", ErrInvalidKey
	}

	// Create the hasher
	if !m.Hash.Available() {
		return "", ErrHashUnavailable
	}

	hasher := m.Hash.New()
	hasher.Write([]byte(signingString))

	// Sign the string and return the encoded bytes
	if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
		return EncodeSegment(sigBytes), nil
	} else {
		return "", err
	}
}
@ -1,126 +0,0 @@
// +build go1.4

package jwt

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
)

// Implements the RSAPSS family of signing methods
type SigningMethodRSAPSS struct {
	*SigningMethodRSA
	Options *rsa.PSSOptions
}

// Specific instances for RS/PS and company
var (
	SigningMethodPS256 *SigningMethodRSAPSS
	SigningMethodPS384 *SigningMethodRSAPSS
	SigningMethodPS512 *SigningMethodRSAPSS
)

func init() {
	// PS256
	SigningMethodPS256 = &SigningMethodRSAPSS{
		&SigningMethodRSA{
			Name: "PS256",
			Hash: crypto.SHA256,
		},
		&rsa.PSSOptions{
			SaltLength: rsa.PSSSaltLengthAuto,
			Hash:       crypto.SHA256,
		},
	}
	RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
		return SigningMethodPS256
	})

	// PS384
	SigningMethodPS384 = &SigningMethodRSAPSS{
		&SigningMethodRSA{
			Name: "PS384",
			Hash: crypto.SHA384,
		},
		&rsa.PSSOptions{
			SaltLength: rsa.PSSSaltLengthAuto,
			Hash:       crypto.SHA384,
		},
	}
	RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
		return SigningMethodPS384
	})

	// PS512
	SigningMethodPS512 = &SigningMethodRSAPSS{
		&SigningMethodRSA{
			Name: "PS512",
			Hash: crypto.SHA512,
		},
		&rsa.PSSOptions{
			SaltLength: rsa.PSSSaltLengthAuto,
			Hash:       crypto.SHA512,
		},
	}
	RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
		return SigningMethodPS512
	})
}

// Implements the Verify method from SigningMethod
// For this verify method, key must be an rsa.PublicKey struct
func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
	var err error

	// Decode the signature
	var sig []byte
	if sig, err = DecodeSegment(signature); err != nil {
		return err
	}

	var rsaKey *rsa.PublicKey
	switch k := key.(type) {
	case *rsa.PublicKey:
		rsaKey = k
	default:
		return ErrInvalidKey
	}

	// Create hasher
	if !m.Hash.Available() {
		return ErrHashUnavailable
	}
	hasher := m.Hash.New()
	hasher.Write([]byte(signingString))

	return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
}

// Implements the Sign method from SigningMethod
// For this signing method, key must be an rsa.PrivateKey struct
func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
	var rsaKey *rsa.PrivateKey

	switch k := key.(type) {
	case *rsa.PrivateKey:
		rsaKey = k
	default:
		return "", ErrInvalidKeyType
	}

	// Create the hasher
	if !m.Hash.Available() {
		return "", ErrHashUnavailable
	}

	hasher := m.Hash.New()
	hasher.Write([]byte(signingString))

	// Sign the string and return the encoded bytes
	if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
		return EncodeSegment(sigBytes), nil
	} else {
		return "", err
	}
}
@ -1,101 +0,0 @@
package jwt

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
)

var (
	ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
	ErrNotRSAPrivateKey    = errors.New("Key is not a valid RSA private key")
	ErrNotRSAPublicKey     = errors.New("Key is not a valid RSA public key")
)

// Parse PEM encoded PKCS1 or PKCS8 private key
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
	var err error

	// Parse PEM block
	var block *pem.Block
	if block, _ = pem.Decode(key); block == nil {
		return nil, ErrKeyMustBePEMEncoded
	}

	var parsedKey interface{}
	if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
		if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
			return nil, err
		}
	}

	var pkey *rsa.PrivateKey
	var ok bool
	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
		return nil, ErrNotRSAPrivateKey
	}

	return pkey, nil
}

// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
	var err error

	// Parse PEM block
	var block *pem.Block
	if block, _ = pem.Decode(key); block == nil {
		return nil, ErrKeyMustBePEMEncoded
	}

	var parsedKey interface{}

	var blockDecrypted []byte
	if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
		return nil, err
	}

	if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
		if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
			return nil, err
		}
	}

	var pkey *rsa.PrivateKey
	var ok bool
	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
		return nil, ErrNotRSAPrivateKey
	}

	return pkey, nil
}

// Parse PEM encoded PKCS1 or PKCS8 public key
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
	var err error

	// Parse PEM block
	var block *pem.Block
	if block, _ = pem.Decode(key); block == nil {
		return nil, ErrKeyMustBePEMEncoded
	}

	// Parse the key
	var parsedKey interface{}
	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
			parsedKey = cert.PublicKey
		} else {
			return nil, err
		}
	}

	var pkey *rsa.PublicKey
	var ok bool
	if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
		return nil, ErrNotRSAPublicKey
	}

	return pkey, nil
}
@ -1,35 +0,0 @@
package jwt

import (
	"sync"
)

var signingMethods = map[string]func() SigningMethod{}
var signingMethodLock = new(sync.RWMutex)

// Implement SigningMethod to add new methods for signing or verifying tokens.
type SigningMethod interface {
	Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
	Sign(signingString string, key interface{}) (string, error)    // Returns encoded signature or error
	Alg() string                                                    // returns the alg identifier for this method (example: 'HS256')
}

// Register the "alg" name and a factory function for signing method.
// This is typically done during init() in the method's implementation
func RegisterSigningMethod(alg string, f func() SigningMethod) {
	signingMethodLock.Lock()
	defer signingMethodLock.Unlock()

	signingMethods[alg] = f
}

// Get a signing method from an "alg" string
func GetSigningMethod(alg string) (method SigningMethod) {
	signingMethodLock.RLock()
	defer signingMethodLock.RUnlock()

	if methodF, ok := signingMethods[alg]; ok {
		method = methodF()
	}
	return
}
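A small sketch, not part of the commit, of the registry above: the standard methods register themselves in init(), so lookup by the "alg" header value just works. The jwt import path is assumed.

package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go" // assumed import path for the vendored package
)

func main() {
	m := jwt.GetSigningMethod("HS256")
	fmt.Println(m.Alg()) // HS256

	// Unknown algorithms yield a nil SigningMethod.
	fmt.Println(jwt.GetSigningMethod("not-an-alg")) // <nil>
}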
@ -1,108 +0,0 @@
package jwt

import (
	"encoding/base64"
	"encoding/json"
	"strings"
	"time"
)

// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
// You can override it to use another time value. This is useful for testing or if your
// server uses a different time zone than your tokens.
var TimeFunc = time.Now

// Parse methods use this callback function to supply
// the key for verification. The function receives the parsed,
// but unverified Token. This allows you to use properties in the
// Header of the token (such as `kid`) to identify which key to use.
type Keyfunc func(*Token) (interface{}, error)

// A JWT Token. Different fields will be used depending on whether you're
// creating or parsing/verifying a token.
type Token struct {
	Raw       string                 // The raw token. Populated when you Parse a token
	Method    SigningMethod          // The signing method used or to be used
	Header    map[string]interface{} // The first segment of the token
	Claims    Claims                 // The second segment of the token
	Signature string                 // The third segment of the token. Populated when you Parse a token
	Valid     bool                   // Is the token valid? Populated when you Parse/Verify a token
}

// Create a new Token. Takes a signing method
func New(method SigningMethod) *Token {
	return NewWithClaims(method, MapClaims{})
}

func NewWithClaims(method SigningMethod, claims Claims) *Token {
	return &Token{
		Header: map[string]interface{}{
			"typ": "JWT",
			"alg": method.Alg(),
		},
		Claims: claims,
		Method: method,
	}
}

// Get the complete, signed token
func (t *Token) SignedString(key interface{}) (string, error) {
	var sig, sstr string
	var err error
	if sstr, err = t.SigningString(); err != nil {
		return "", err
	}
	if sig, err = t.Method.Sign(sstr, key); err != nil {
		return "", err
	}
	return strings.Join([]string{sstr, sig}, "."), nil
}

// Generate the signing string. This is the
// most expensive part of the whole deal. Unless you
// need this for something special, just go straight for
// the SignedString.
func (t *Token) SigningString() (string, error) {
	var err error
	parts := make([]string, 2)
	for i, _ := range parts {
		var jsonValue []byte
		if i == 0 {
			if jsonValue, err = json.Marshal(t.Header); err != nil {
				return "", err
			}
		} else {
			if jsonValue, err = json.Marshal(t.Claims); err != nil {
				return "", err
			}
		}

		parts[i] = EncodeSegment(jsonValue)
	}
	return strings.Join(parts, "."), nil
}

// Parse, validate, and return a token.
// keyFunc will receive the parsed token and should return the key for validating.
// If everything is kosher, err will be nil
func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
	return new(Parser).Parse(tokenString, keyFunc)
}

func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
	return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
}

// Encode JWT specific base64url encoding with padding stripped
func EncodeSegment(seg []byte) string {
	return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
}

// Decode JWT specific base64url encoding with padding stripped
func DecodeSegment(seg string) ([]byte, error) {
	if l := len(seg) % 4; l > 0 {
		seg += strings.Repeat("=", 4-l)
	}

	return base64.URLEncoding.DecodeString(seg)
}
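A sketch, not part of the commit, tying together the Token helpers above: SigningString joins the base64url-encoded header and claims, and SignedString appends the signature segment. The jwt import path is assumed.

package main

import (
	"fmt"
	"strings"

	jwt "github.com/dgrijalva/jwt-go" // assumed import path for the vendored package
)

func main() {
	tok := jwt.New(jwt.SigningMethodHS256) // header: {"typ":"JWT","alg":"HS256"}, empty MapClaims

	sstr, _ := tok.SigningString()
	fmt.Println(len(strings.Split(sstr, "."))) // 2: header.claims

	signed, _ := tok.SignedString([]byte("shared-secret"))
	fmt.Println(len(strings.Split(signed, "."))) // 3: header.claims.signature
}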
@ -79,14 +79,9 @@ func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) {
 	}
 
 	// Obtain the raw file descriptor.
-	var raw []byte
-	switch fd := d.(type) {
-	case interface{ ProtoLegacyRawDesc() []byte }:
-		raw = fd.ProtoLegacyRawDesc()
-	case protoreflect.FileDescriptor:
-		raw, _ = proto.Marshal(protodesc.ToFileDescriptorProto(fd))
-	}
-	file := protoimpl.X.CompressGZIP(raw)
+	fd := d.(protoreflect.FileDescriptor)
+	b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd))
+	file := protoimpl.X.CompressGZIP(b)
 
 	// Reverse the indexes, since we populated it in reverse.
 	for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 {
@ -135,14 +135,14 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error
 	md := m.Descriptor()
 	fds := md.Fields()
 
-	if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
-		return nil
-	}
-
 	if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
 		return jsu.UnmarshalJSONPB(u, in)
 	}
 
+	if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
+		return nil
+	}
+
 	switch wellKnownType(md.FullName()) {
 	case "Any":
 		var jsonObject map[string]json.RawMessage
@ -332,11 +332,12 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error
 			raw = v
 		}
 
+		field := m.NewField(fd)
 		// Unmarshal the field value.
-		if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd)) {
+		if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
 			continue
 		}
-		v, err := u.unmarshalValue(m.NewField(fd), raw, fd)
+		v, err := u.unmarshalValue(field, raw, fd)
 		if err != nil {
 			return err
 		}
@ -364,11 +365,12 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error
 			return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
 		}
 
+		field := m.NewField(fd)
 		// Unmarshal the field value.
-		if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd)) {
+		if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
 			continue
 		}
-		v, err := u.unmarshalValue(m.NewField(fd), raw, fd)
+		v, err := u.unmarshalValue(field, raw, fd)
 		if err != nil {
 			return err
 		}
@ -390,6 +392,14 @@ func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
 	return false
 }
 
+func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
+	if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
+		_, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
+		return ok
+	}
+	return false
+}
+
 func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
 	switch {
 	case fd.IsList():
@ -13,6 +13,7 @@ import (
 	"strings"
 	"sync"
 
+	"google.golang.org/protobuf/reflect/protodesc"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/reflect/protoregistry"
 	"google.golang.org/protobuf/runtime/protoimpl"
@ -62,14 +63,7 @@ func FileDescriptor(s filePath) fileDescGZIP {
 	// Find the descriptor in the v2 registry.
 	var b []byte
 	if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
-		if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok {
-			b = fd.ProtoLegacyRawDesc()
-		} else {
-			// TODO: Use protodesc.ToFileDescriptorProto to construct
-			// a descriptorpb.FileDescriptorProto and marshal it.
-			// However, doing so causes the proto package to have a dependency
-			// on descriptorpb, leading to cyclic dependency issues.
-		}
+		b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
 	}
 
 	// Locally cache the raw descriptor form for the file.
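A sketch, not part of the commit, of the pattern the two hunks above switch to: with golang/protobuf v1.5.x the legacy ProtoLegacyRawDesc accessor is gone, and the raw descriptor bytes are obtained by converting the reflection descriptor through protodesc. The Duration message is used purely for illustration.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Any generated message exposes its file descriptor via reflection.
	fd := (&durationpb.Duration{}).ProtoReflect().Descriptor().ParentFile()

	// Equivalent of the new code path: FileDescriptor -> FileDescriptorProto -> bytes.
	b, err := proto.Marshal(protodesc.ToFileDescriptorProto(fd))
	fmt.Println(len(b) > 0, err)
}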
@ -19,6 +19,8 @@ const urlPrefix = "type.googleapis.com/"
 
 // AnyMessageName returns the message name contained in an anypb.Any message.
 // Most type assertions should use the Is function instead.
+//
+// Deprecated: Call the any.MessageName method instead.
 func AnyMessageName(any *anypb.Any) (string, error) {
 	name, err := anyMessageName(any)
 	return string(name), err
@ -38,6 +40,8 @@ func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
 }
 
 // MarshalAny marshals the given message m into an anypb.Any message.
+//
+// Deprecated: Call the anypb.New function instead.
 func MarshalAny(m proto.Message) (*anypb.Any, error) {
 	switch dm := m.(type) {
 	case DynamicAny:
@ -58,6 +62,9 @@ func MarshalAny(m proto.Message) (*anypb.Any, error) {
 // Empty returns a new message of the type specified in an anypb.Any message.
 // It returns protoregistry.NotFound if the corresponding message type could not
 // be resolved in the global registry.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
+// to resolve the message name and create a new instance of it.
 func Empty(any *anypb.Any) (proto.Message, error) {
 	name, err := anyMessageName(any)
 	if err != nil {
@ -76,6 +83,8 @@ func Empty(any *anypb.Any) (proto.Message, error) {
 //
 // The target message m may be a *DynamicAny message. If the underlying message
 // type could not be resolved, then this returns protoregistry.NotFound.
+//
+// Deprecated: Call the any.UnmarshalTo method instead.
 func UnmarshalAny(any *anypb.Any, m proto.Message) error {
 	if dm, ok := m.(*DynamicAny); ok {
 		if dm.Message == nil {
@ -100,6 +109,8 @@ func UnmarshalAny(any *anypb.Any, m proto.Message) error {
 }
 
 // Is reports whether the Any message contains a message of the specified type.
+//
+// Deprecated: Call the any.MessageIs method instead.
 func Is(any *anypb.Any, m proto.Message) bool {
 	if any == nil || m == nil {
 		return false
@ -119,6 +130,9 @@ func Is(any *anypb.Any, m proto.Message) bool {
 //   var x ptypes.DynamicAny
 //   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
 //   fmt.Printf("unmarshaled message: %v", x.Message)
+//
+// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
+// the any message contents into a new instance of the underlying message.
 type DynamicAny struct{ proto.Message }
 
 func (m DynamicAny) String() string {
@ -3,4 +3,8 @@
 // license that can be found in the LICENSE file.
 
 // Package ptypes provides functionality for interacting with well-known types.
+//
+// Deprecated: Well-known types have specialized functionality directly
+// injected into the generated packages for each message type.
+// See the deprecation notice for each function for the suggested alternative.
 package ptypes
@ -21,6 +21,8 @@ const (
 
 // Duration converts a durationpb.Duration to a time.Duration.
 // Duration returns an error if dur is invalid or overflows a time.Duration.
+//
+// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
 func Duration(dur *durationpb.Duration) (time.Duration, error) {
 	if err := validateDuration(dur); err != nil {
 		return 0, err
@ -39,6 +41,8 @@ func Duration(dur *durationpb.Duration) (time.Duration, error) {
 }
 
 // DurationProto converts a time.Duration to a durationpb.Duration.
+//
+// Deprecated: Call the durationpb.New function instead.
 func DurationProto(d time.Duration) *durationpb.Duration {
 	nanos := d.Nanoseconds()
 	secs := nanos / 1e9
@ -33,6 +33,8 @@ const (
 //
 // A nil Timestamp returns an error. The first return value in that case is
 // undefined.
+//
+// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
 func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
 	// Don't return the zero value on error, because corresponds to a valid
 	// timestamp. Instead return whatever time.Unix gives us.
@ -46,6 +48,8 @@ func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
 }
 
 // TimestampNow returns a google.protobuf.Timestamp for the current time.
+//
+// Deprecated: Call the timestamppb.Now function instead.
 func TimestampNow() *timestamppb.Timestamp {
 	ts, err := TimestampProto(time.Now())
 	if err != nil {
@ -56,6 +60,8 @@ func TimestampNow() *timestamppb.Timestamp {
 
 // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
 // It returns an error if the resulting Timestamp is invalid.
+//
+// Deprecated: Call the timestamppb.New function instead.
 func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
 	ts := &timestamppb.Timestamp{
 		Seconds: t.Unix(),
@ -69,6 +75,9 @@ func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
 
 // TimestampString returns the RFC 3339 string for valid Timestamps.
 // For invalid Timestamps, it returns an error message in parentheses.
+//
+// Deprecated: Call the ts.AsTime method instead,
+// followed by a call to the Format method on the time.Time value.
 func TimestampString(ts *timestamppb.Timestamp) string {
 	t, err := Timestamp(ts)
 	if err != nil {
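A sketch, not part of the commit, contrasting the ptypes helpers deprecated above with the replacements named in the new deprecation notices.

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Deprecated path, still functional in v1.5.x:
	tsOld, _ := ptypes.TimestampProto(time.Unix(0, 0))

	// Replacement suggested by the deprecation comments:
	tsNew := timestamppb.New(time.Unix(0, 0))

	fmt.Println(tsOld.GetSeconds() == tsNew.GetSeconds(), tsNew.AsTime().UTC())
}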
File diff suppressed because it is too large
@ -21,7 +21,7 @@ import (
 	"net/url"
 	"strings"
 
-	"go.etcd.io/etcd/pkg/types"
+	"go.etcd.io/etcd/client/pkg/v3/types"
 )
 
 var (
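A sketch, not part of the commit, of the import-path move shown above: etcd v3.5 splits the old go.etcd.io/etcd module into per-component modules, so the shared types helpers now live under client/pkg/v3.

package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/types"
)

func main() {
	// Same API as the old go.etcd.io/etcd/pkg/types package, new module path.
	urls, err := types.NewURLs([]string{"https://127.0.0.1:2380"})
	fmt.Println(urls, err)
}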
@@ -0,0 +1,202 @@
(new vendored file: the full text of the Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/ — standard license boilerplate, omitted here)
vendor/go.etcd.io/etcd/client/README.md → vendor/go.etcd.io/etcd/client/v2/README.md (6 changes; generated; vendored)

@@ -4,12 +4,12 @@ etcd/client is the Go client library for etcd.
 
 [![GoDoc](https://godoc.org/go.etcd.io/etcd/client?status.png)](https://godoc.org/go.etcd.io/etcd/client)
 
-For full compatibility, it is recommended to vendor builds using etcd's vendored packages, using tools like `golang/dep`, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
+For full compatibility, it is recommended to install released versions of clients using go modules.
 
 ## Install
 
 ```bash
-go get go.etcd.io/etcd/client
+go get go.etcd.io/etcd/v3/client
 ```
 
 ## Usage
 
@@ -22,7 +22,7 @@ import (
 	"time"
 	"context"
 
-	"go.etcd.io/etcd/client"
+	"go.etcd.io/etcd/v3/client"
 )
 
 func main() {
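Not part of this commit, but for orientation: a minimal sketch of how a consumer exercises the relocated v2 client under its new `go.etcd.io/etcd/client/v2` path; the endpoint address and key name are illustrative assumptions.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	client "go.etcd.io/etcd/client/v2"
)

func main() {
	cfg := client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"}, // assumed local endpoint
		Transport:               client.DefaultTransport,           // now wired through DialContext (see the client.go hunk below)
		HeaderTimeoutPerRequest: time.Second,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)
	// Write and read back a key through the v2 keys API.
	if _, err := kapi.Set(context.Background(), "/demo", "bar", nil); err != nil {
		log.Fatal(err)
	}
	resp, err := kapi.Get(context.Background(), "/demo", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Node.Value)
}
```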
vendor/go.etcd.io/etcd/client/client.go → vendor/go.etcd.io/etcd/client/v2/client.go (15 changes; generated; vendored)

@@ -29,7 +29,7 @@ import (
 	"sync"
 	"time"
 
-	"go.etcd.io/etcd/version"
+	"go.etcd.io/etcd/api/v3/version"
 )
 
 var (
 
@@ -48,10 +48,10 @@ var DefaultRequestTimeout = 5 * time.Second
 
 var DefaultTransport CancelableTransport = &http.Transport{
 	Proxy: http.ProxyFromEnvironment,
-	Dial: (&net.Dialer{
+	DialContext: (&net.Dialer{
 		Timeout:   30 * time.Second,
 		KeepAlive: 30 * time.Second,
-	}).Dial,
+	}).DialContext,
 	TLSHandshakeTimeout: 10 * time.Second,
 }
 
@@ -521,15 +521,22 @@ type simpleHTTPClient struct {
 	headerTimeout time.Duration
 }
 
+// ErrNoRequest indicates that the HTTPRequest object could not be found
+// or was nil. No processing could continue.
+var ErrNoRequest = errors.New("no HTTPRequest was available")
+
 func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
 	req := act.HTTPRequest(c.endpoint)
+	if req == nil {
+		return nil, nil, ErrNoRequest
+	}
 
 	if err := printcURL(req); err != nil {
 		return nil, nil, err
 	}
 
 	isWait := false
-	if req != nil && req.URL != nil {
+	if req.URL != nil {
 		ws := req.URL.Query().Get("wait")
 		if len(ws) != 0 {
 			var err error
vendor/go.etcd.io/etcd/client/curl.go → vendor/go.etcd.io/etcd/client/v2/curl.go (renamed, 0 changes; generated; vendored)

@@ -15,7 +15,7 @@
 package client
 
 import (
-	"go.etcd.io/etcd/pkg/srv"
+	"go.etcd.io/etcd/client/pkg/v3/srv"
 )
 
 // Discoverer is an interface that wraps the Discover method.
@@ -21,7 +21,7 @@ Create a Config and exchange it for a Client:
 	"net/http"
 	"context"
 
-	"go.etcd.io/etcd/client"
+	"go.etcd.io/etcd/client/v2"
 )
 
 	cfg := client.Config{
@@ -0,0 +1,24 @@
+module go.etcd.io/etcd/client/v2
+
+go 1.16
+
+require (
+	github.com/json-iterator/go v1.1.11
+	github.com/modern-go/reflect2 v1.0.1
+	go.etcd.io/etcd/api/v3 v3.5.0
+	go.etcd.io/etcd/client/pkg/v3 v3.5.0
+)
+
+replace (
+	go.etcd.io/etcd/api/v3 => ../../api
+	go.etcd.io/etcd/client/pkg/v3 => ../pkg
+)
+
+// Bad imports are sometimes causing attempts to pull that code.
+// This makes the error more explicit.
+replace (
+	go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY
+	go.etcd.io/etcd/pkg/v3 => ./FORBIDDED_DEPENDENCY
+	go.etcd.io/etcd/tests/v3 => ./FORBIDDEN_DEPENDENCY
+	go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY
+)
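For reference only (not in the diff): a sketch of the require block a downstream go.mod might carry once it tracks the split etcd 3.5 modules. The module name is hypothetical, and the v2 client tag is assumed to follow etcd's v2.305.x line for the 3.5.0 release.

```
module example.com/uses-etcd // hypothetical consumer

go 1.16

require (
	go.etcd.io/etcd/api/v3 v3.5.0
	go.etcd.io/etcd/client/pkg/v3 v3.5.0
	go.etcd.io/etcd/client/v2 v2.305.0 // v2 client is tagged on its own v2.305.x line
	go.etcd.io/etcd/client/v3 v3.5.0
)
```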
@@ -0,0 +1,163 @@
(new vendored file: go.sum for go.etcd.io/etcd/client/v2 — 163 go.mod/h1 checksum entries for its direct and transitive dependencies, omitted here)
vendor/go.etcd.io/etcd/client/json.go → vendor/go.etcd.io/etcd/client/v2/json.go (5 changes; generated; vendored)

@@ -15,10 +15,11 @@
 package client
 
 import (
-	"github.com/json-iterator/go"
-	"github.com/modern-go/reflect2"
 	"strconv"
 	"unsafe"
+
+	"github.com/json-iterator/go"
+	"github.com/modern-go/reflect2"
 )
 
 type customNumberExtension struct {
vendor/go.etcd.io/etcd/client/keys.go → vendor/go.etcd.io/etcd/client/v2/keys.go (3 changes; generated; vendored)

@@ -19,12 +19,13 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"go.etcd.io/etcd/pkg/pathutil"
 	"net/http"
 	"net/url"
 	"strconv"
 	"strings"
 	"time"
 
+	"go.etcd.io/etcd/client/pkg/v3/pathutil"
 )
 
 const (
@@ -23,7 +23,7 @@ import (
 	"net/url"
 	"path"
 
-	"go.etcd.io/etcd/pkg/types"
+	"go.etcd.io/etcd/client/pkg/v3/types"
 )
 
 var (
vendor/go.etcd.io/etcd/client/util.go → vendor/go.etcd.io/etcd/client/v2/util.go (renamed, 0 changes; generated; vendored)
@@ -19,9 +19,9 @@ import (
 	"errors"
 	"fmt"
 
-	v3 "go.etcd.io/etcd/clientv3"
-	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
-	"go.etcd.io/etcd/mvcc/mvccpb"
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/mvccpb"
+	v3 "go.etcd.io/etcd/client/v3"
 )
 
 var (
@@ -18,9 +18,9 @@ import (
 	"context"
 	"fmt"
 
-	v3 "go.etcd.io/etcd/clientv3"
-	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
-	"go.etcd.io/etcd/mvcc/mvccpb"
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/mvccpb"
+	v3 "go.etcd.io/etcd/client/v3"
 )
 
 func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
@@ -16,13 +16,18 @@ package concurrency
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sync"
 
-	v3 "go.etcd.io/etcd/clientv3"
-	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	v3 "go.etcd.io/etcd/client/v3"
 )
 
+// ErrLocked is returned by TryLock when Mutex is already locked by another session.
+var ErrLocked = errors.New("mutex: Locked by another session")
+var ErrSessionExpired = errors.New("mutex: session is expired")
+
 // Mutex implements the sync Locker interface with etcd
 type Mutex struct {
 	s *Session
@@ -37,9 +42,69 @@ func NewMutex(s *Session, pfx string) *Mutex {
 	return &Mutex{s, pfx + "/", "", -1, nil}
 }
 
+// TryLock locks the mutex if not already locked by another session.
+// If lock is held by another session, return immediately after attempting necessary cleanup
+// The ctx argument is used for the sending/receiving Txn RPC.
+func (m *Mutex) TryLock(ctx context.Context) error {
+	resp, err := m.tryAcquire(ctx)
+	if err != nil {
+		return err
+	}
+	// if no key on prefix / the minimum rev is key, already hold the lock
+	ownerKey := resp.Responses[1].GetResponseRange().Kvs
+	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+		m.hdr = resp.Header
+		return nil
+	}
+	client := m.s.Client()
+	// Cannot lock, so delete the key
+	if _, err := client.Delete(ctx, m.myKey); err != nil {
+		return err
+	}
+	m.myKey = "\x00"
+	m.myRev = -1
+	return ErrLocked
+}
+
 // Lock locks the mutex with a cancelable context. If the context is canceled
 // while trying to acquire the lock, the mutex tries to clean its stale lock entry.
 func (m *Mutex) Lock(ctx context.Context) error {
+	resp, err := m.tryAcquire(ctx)
+	if err != nil {
+		return err
+	}
+	// if no key on prefix / the minimum rev is key, already hold the lock
+	ownerKey := resp.Responses[1].GetResponseRange().Kvs
+	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+		m.hdr = resp.Header
+		return nil
+	}
+	client := m.s.Client()
+	// wait for deletion revisions prior to myKey
+	// TODO: early termination if the session key is deleted before other session keys with smaller revisions.
+	_, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
+	// release lock key if wait failed
+	if werr != nil {
+		m.Unlock(client.Ctx())
+		return werr
+	}
+
+	// make sure the session is not expired, and the owner key still exists.
+	gresp, werr := client.Get(ctx, m.myKey)
+	if werr != nil {
+		m.Unlock(client.Ctx())
+		return werr
+	}
+
+	if len(gresp.Kvs) == 0 { // is the session key lost?
+		return ErrSessionExpired
+	}
+	m.hdr = gresp.Header
+
+	return nil
+}
+
+func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) {
 	s := m.s
 	client := m.s.Client()
 
@@ -53,28 +118,13 @@ func (m *Mutex) Lock(ctx context.Context) error {
 	getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
 	resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
 	if err != nil {
-		return err
+		return nil, err
 	}
 	m.myRev = resp.Header.Revision
 	if !resp.Succeeded {
 		m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
 	}
-	// if no key on prefix / the minimum rev is key, already hold the lock
-	ownerKey := resp.Responses[1].GetResponseRange().Kvs
-	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
-		m.hdr = resp.Header
-		return nil
-	}
-
-	// wait for deletion revisions prior to myKey
-	hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
-	// release lock key if wait failed
-	if werr != nil {
-		m.Unlock(client.Ctx())
-	} else {
-		m.hdr = hdr
-	}
-	return werr
+	return resp, nil
 }
 
 func (m *Mutex) Unlock(ctx context.Context) error {
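Not part of the diff: a minimal sketch of the new TryLock API added above, using the v3 concurrency package; the endpoint and lock prefix are illustrative.

```go
package main

import (
	"context"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	sess, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer sess.Close()

	m := concurrency.NewMutex(sess, "/locks/demo")
	// TryLock returns concurrency.ErrLocked immediately instead of blocking
	// when another session already holds the lock.
	switch err := m.TryLock(context.TODO()); err {
	case nil:
		defer m.Unlock(context.TODO())
		log.Println("acquired lock")
	case concurrency.ErrLocked:
		log.Println("lock is held by another session; not waiting")
	default:
		log.Fatal(err)
	}
}
```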
@@ -18,7 +18,7 @@ import (
 	"context"
 	"time"
 
-	v3 "go.etcd.io/etcd/clientv3"
+	v3 "go.etcd.io/etcd/client/v3"
 )
 
 const defaultSessionTTL = 60
@@ -18,7 +18,7 @@ import (
 	"context"
 	"math"
 
-	v3 "go.etcd.io/etcd/clientv3"
+	v3 "go.etcd.io/etcd/client/v3"
 )
 
 // STM is an interface for software transactional memory.
@@ -0,0 +1,102 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snapshot
+
+import (
+	"context"
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"os"
+	"time"
+
+	"github.com/dustin/go-humanize"
+	"go.etcd.io/etcd/client/pkg/v3/fileutil"
+	"go.etcd.io/etcd/client/v3"
+	"go.uber.org/zap"
+)
+
+// hasChecksum returns "true" if the file size "n"
+// has appended sha256 hash digest.
+func hasChecksum(n int64) bool {
+	// 512 is chosen because it's a minimum disk sector size
+	// smaller than (and multiplies to) OS page size in most systems
+	return (n % 512) == sha256.Size
+}
+
+// Save fetches snapshot from remote etcd server and saves data
+// to target path. If the context "ctx" is canceled or timed out,
+// snapshot save stream will error out (e.g. context.Canceled,
+// context.DeadlineExceeded). Make sure to specify only one endpoint
+// in client configuration. Snapshot API must be requested to a
+// selected node, and saved snapshot is the point-in-time state of
+// the selected node.
+func Save(ctx context.Context, lg *zap.Logger, cfg clientv3.Config, dbPath string) error {
+	if lg == nil {
+		lg = zap.NewExample()
+	}
+	cfg.Logger = lg.Named("client")
+	if len(cfg.Endpoints) != 1 {
+		return fmt.Errorf("snapshot must be requested to one selected node, not multiple %v", cfg.Endpoints)
+	}
+	cli, err := clientv3.New(cfg)
+	if err != nil {
+		return err
+	}
+	defer cli.Close()
+
+	partpath := dbPath + ".part"
+	defer os.RemoveAll(partpath)
+
+	var f *os.File
+	f, err = os.OpenFile(partpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fileutil.PrivateFileMode)
+	if err != nil {
+		return fmt.Errorf("could not open %s (%v)", partpath, err)
+	}
+	lg.Info("created temporary db file", zap.String("path", partpath))
+
+	now := time.Now()
+	var rd io.ReadCloser
+	rd, err = cli.Snapshot(ctx)
+	if err != nil {
+		return err
+	}
+	lg.Info("fetching snapshot", zap.String("endpoint", cfg.Endpoints[0]))
+	var size int64
+	size, err = io.Copy(f, rd)
+	if err != nil {
+		return err
+	}
+	if !hasChecksum(size) {
+		return fmt.Errorf("sha256 checksum not found [bytes: %d]", size)
+	}
+	if err = fileutil.Fsync(f); err != nil {
+		return err
+	}
+	if err = f.Close(); err != nil {
+		return err
+	}
+	lg.Info("fetched snapshot",
+		zap.String("endpoint", cfg.Endpoints[0]),
+		zap.String("size", humanize.Bytes(uint64(size))),
+		zap.String("took", humanize.Time(now)),
+	)
+
+	if err = os.Rename(partpath, dbPath); err != nil {
+		return fmt.Errorf("could not rename %s to %s (%v)", partpath, dbPath, err)
+	}
+	lg.Info("saved", zap.String("path", dbPath))
+	return nil
+}
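Not part of the diff: a sketch of calling the Save helper added above. It assumes the package is imported as go.etcd.io/etcd/client/v3/snapshot, and the endpoint and output path are illustrative.

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/snapshot" // assumed import path for the package above
	"go.uber.org/zap"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Save requires exactly one endpoint: the snapshot is the point-in-time
	// state of that selected member.
	cfg := clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}}
	if err := snapshot.Save(ctx, zap.NewExample(), cfg, "/tmp/etcd-backup.db"); err != nil {
		log.Fatal(err)
	}
}
```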
@@ -34,6 +34,6 @@ func (ep *errPicker) String() string {
 	return ep.p.String()
 }
 
-func (ep *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (ep *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
 	return nil, nil, ep.err
 }
 
@@ -52,7 +52,7 @@ type rrBalanced struct {
 func (rb *rrBalanced) String() string { return rb.p.String() }
 
 // Pick is called for every client request.
-func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
 	rb.mu.RLock()
 	n := len(rb.scs)
 	rb.mu.RUnlock()
 
@@ -111,7 +111,7 @@ func (e *ResolverGroup) Close() {
 }
 
 // Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target.
-func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
 	if len(target.Authority) < 1 {
 		return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to")
 	}
@@ -179,7 +179,7 @@ func epsToAddrs(eps ...string) (addrs []resolver.Address) {
 	return addrs
 }
 
-func (*Resolver) ResolveNow(o resolver.ResolveNowOptions) {}
+func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}
 
 func (r *Resolver) Close() {
 	es, err := bldr.getResolverGroup(r.endpointID)
@@ -37,6 +37,7 @@ import (
 	"google.golang.org/grpc/codes"
 	grpccredentials "google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/status"
 )
 
@@ -396,6 +397,13 @@ func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials {
 	return creds
 }
 
+// WithRequireLeader requires client requests to only succeed
+// when the cluster has a leader.
+func WithRequireLeader(ctx context.Context) context.Context {
+	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
+	return metadata.NewOutgoingContext(ctx, md)
+}
+
 func newClient(cfg *Config) (*Client, error) {
 	if cfg == nil {
 		cfg = &Config{}
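Not part of the diff: a short sketch of using the WithRequireLeader helper reintroduced above to fail requests fast when the contacted member has no leader; the endpoint and key are illustrative.

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// getWithLeader fails fast when the contacted member reports no cluster leader,
// instead of blocking until the context times out.
func getWithLeader(cli *clientv3.Client, key string) (*clientv3.GetResponse, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return cli.Get(clientv3.WithRequireLeader(ctx), key)
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := getWithLeader(cli, "/health/probe")
	if err != nil {
		log.Fatal(err) // with no leader this surfaces as etcd's "no leader" error
	}
	log.Printf("%d kvs", len(resp.Kvs))
}
```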
@@ -1,64 +0,0 @@
-// Copyright 2020 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"context"
-	"strings"
-
-	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
-	"go.etcd.io/etcd/version"
-	"google.golang.org/grpc/metadata"
-)
-
-// WithRequireLeader requires client requests to only succeed
-// when the cluster has a leader.
-func WithRequireLeader(ctx context.Context) context.Context {
-	md, ok := metadata.FromOutgoingContext(ctx)
-	if !ok { // no outgoing metadata ctx key, create one
-		md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
-		return metadata.NewOutgoingContext(ctx, md)
-	}
-	copied := md.Copy() // avoid racey updates
-	// overwrite/add 'hasleader' key/value
-	metadataSet(copied, rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
-	return metadata.NewOutgoingContext(ctx, copied)
-}
-
-// embeds client version
-func withVersion(ctx context.Context) context.Context {
-	md, ok := metadata.FromOutgoingContext(ctx)
-	if !ok { // no outgoing metadata ctx key, create one
-		md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
-		return metadata.NewOutgoingContext(ctx, md)
-	}
-	copied := md.Copy() // avoid racey updates
-	// overwrite/add version key/value
-	metadataSet(copied, rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
-	return metadata.NewOutgoingContext(ctx, copied)
-}
-
-func metadataGet(md metadata.MD, k string) []string {
-	k = strings.ToLower(k)
-	return md[k]
-}
-
-func metadataSet(md metadata.MD, k string, vals ...string) {
-	if len(vals) == 0 {
-		return
-	}
-	k = strings.ToLower(k)
-	md[k] = vals
-}
@@ -20,7 +20,6 @@ import (
 	"io"
 
 	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
-	"go.uber.org/zap"
 
 	"google.golang.org/grpc"
 )
@@ -69,7 +68,6 @@ type Maintenance interface {
 }
 
 type maintenance struct {
-	lg       *zap.Logger
 	dial     func(endpoint string) (pb.MaintenanceClient, func(), error)
 	remote   pb.MaintenanceClient
 	callOpts []grpc.CallOption
@@ -77,7 +75,6 @@ type maintenance struct {
 
 func NewMaintenance(c *Client) Maintenance {
 	api := &maintenance{
-		lg: c.lg,
 		dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
 			conn, err := c.Dial(endpoint)
 			if err != nil {
@@ -96,7 +93,6 @@ func NewMaintenance(c *Client) Maintenance {
 
 func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
 	api := &maintenance{
-		lg: c.lg,
 		dial: func(string) (pb.MaintenanceClient, func(), error) {
 			return remote, func() {}, nil
 		},
@@ -197,32 +193,23 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
 		return nil, toErr(ctx, err)
 	}
 
-	m.lg.Info("opened snapshot stream; downloading")
 	pr, pw := io.Pipe()
 	go func() {
 		for {
 			resp, err := ss.Recv()
 			if err != nil {
-				switch err {
-				case io.EOF:
-					m.lg.Info("completed snapshot read; closing")
-				default:
-					m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err))
-				}
 				pw.CloseWithError(err)
 				return
 			}
-			// can "resp == nil && err == nil"
-			// before we receive snapshot SHA digest?
-			// No, server sends EOF with an empty response
-			// after it sends SHA digest at the end
+			if resp == nil && err == nil {
+				break
+			}
 
 			if _, werr := pw.Write(resp.Blob); werr != nil {
 				pw.CloseWithError(werr)
 				return
 			}
 		}
+		pw.Close()
 	}()
 	return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
 }
 
@@ -38,7 +38,6 @@ import (
 func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
 	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
 	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
-		ctx = withVersion(ctx)
 		grpcOpts, retryOpts := filterCallOptions(opts)
 		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
 		// short circuit for simplicity, and avoiding allocations.
@@ -104,7 +103,6 @@ func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
 func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
 	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
 	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
-		ctx = withVersion(ctx)
 		grpcOpts, retryOpts := filterCallOptions(opts)
 		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
 		// short circuit for simplicity, and avoiding allocations.
@ -115,9 +113,10 @@ func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOp
|
||||||
return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
|
return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
|
||||||
}
|
}
|
||||||
newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
|
newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
|
||||||
|
logger.Warn("retry stream intercept", zap.Error(err))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("streamer failed to create ClientStream", zap.Error(err))
|
// TODO(mwitkow): Maybe dial and transport errors should be retriable?
|
||||||
return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
|
return nil, err
|
||||||
}
|
}
|
||||||
retryingStreamer := &serverStreamingRetryingStream{
|
retryingStreamer := &serverStreamingRetryingStream{
|
||||||
client: c,
|
client: c,
|
||||||
|
@ -186,7 +185,6 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
|
||||||
if !attemptRetry {
|
if !attemptRetry {
|
||||||
return lastErr // success or hard failure
|
return lastErr // success or hard failure
|
||||||
}
|
}
|
||||||
|
|
||||||
// We start off from attempt 1, because zeroth was already made on normal SendMsg().
|
// We start off from attempt 1, because zeroth was already made on normal SendMsg().
|
||||||
for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
|
for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
|
||||||
if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
|
if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
|
||||||
|
@ -194,13 +192,12 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
|
||||||
}
|
}
|
||||||
newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
|
newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err))
|
// TODO(mwitkow): Maybe dial and transport errors should be retriable?
|
||||||
return err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
|
return err
|
||||||
}
|
}
|
||||||
s.setStream(newStream)
|
s.setStream(newStream)
|
||||||
|
|
||||||
s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr))
|
|
||||||
attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
|
attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
|
||||||
|
//fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr)
|
||||||
if !attemptRetry {
|
if !attemptRetry {
|
||||||
return lastErr
|
return lastErr
|
||||||
}
|
}
|
||||||
|
|
|
@ -25,7 +25,6 @@ import (
|
||||||
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
||||||
mvccpb "go.etcd.io/etcd/mvcc/mvccpb"
|
mvccpb "go.etcd.io/etcd/mvcc/mvccpb"
|
||||||
|
|
||||||
"go.uber.org/zap"
|
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
|
@ -141,7 +140,6 @@ type watcher struct {
|
||||||
|
|
||||||
// streams holds all the active grpc streams keyed by ctx value.
|
// streams holds all the active grpc streams keyed by ctx value.
|
||||||
streams map[string]*watchGrpcStream
|
streams map[string]*watchGrpcStream
|
||||||
lg *zap.Logger
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// watchGrpcStream tracks all watch resources attached to a single grpc stream.
|
// watchGrpcStream tracks all watch resources attached to a single grpc stream.
|
||||||
|
@ -178,8 +176,6 @@ type watchGrpcStream struct {
|
||||||
resumec chan struct{}
|
resumec chan struct{}
|
||||||
// closeErr is the error that closed the watch stream
|
// closeErr is the error that closed the watch stream
|
||||||
closeErr error
|
closeErr error
|
||||||
|
|
||||||
lg *zap.Logger
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// watchStreamRequest is a union of the supported watch request operation types
|
// watchStreamRequest is a union of the supported watch request operation types
|
||||||
|
@ -246,7 +242,6 @@ func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
|
||||||
}
|
}
|
||||||
if c != nil {
|
if c != nil {
|
||||||
w.callOpts = c.callOpts
|
w.callOpts = c.callOpts
|
||||||
w.lg = c.lg
|
|
||||||
}
|
}
|
||||||
return w
|
return w
|
||||||
}
|
}
|
||||||
|
@ -278,7 +273,6 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
|
||||||
errc: make(chan error, 1),
|
errc: make(chan error, 1),
|
||||||
closingc: make(chan *watcherStream),
|
closingc: make(chan *watcherStream),
|
||||||
resumec: make(chan struct{}),
|
resumec: make(chan struct{}),
|
||||||
lg: w.lg,
|
|
||||||
}
|
}
|
||||||
go wgs.run()
|
go wgs.run()
|
||||||
return wgs
|
return wgs
|
||||||
|
@ -550,18 +544,10 @@ func (w *watchGrpcStream) run() {
|
||||||
w.resuming = append(w.resuming, ws)
|
w.resuming = append(w.resuming, ws)
|
||||||
if len(w.resuming) == 1 {
|
if len(w.resuming) == 1 {
|
||||||
// head of resume queue, can register a new watcher
|
// head of resume queue, can register a new watcher
|
||||||
if err := wc.Send(ws.initReq.toPB()); err != nil {
|
wc.Send(ws.initReq.toPB())
|
||||||
if w.lg != nil {
|
|
||||||
w.lg.Debug("error when sending request", zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
case *progressRequest:
|
case *progressRequest:
|
||||||
if err := wc.Send(wreq.toPB()); err != nil {
|
wc.Send(wreq.toPB())
|
||||||
if w.lg != nil {
|
|
||||||
w.lg.Debug("error when sending request", zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// new events from the watch client
|
// new events from the watch client
|
||||||
|
@ -585,11 +571,7 @@ func (w *watchGrpcStream) run() {
|
||||||
}
|
}
|
||||||
|
|
||||||
if ws := w.nextResume(); ws != nil {
|
if ws := w.nextResume(); ws != nil {
|
||||||
if err := wc.Send(ws.initReq.toPB()); err != nil {
|
wc.Send(ws.initReq.toPB())
|
||||||
if w.lg != nil {
|
|
||||||
w.lg.Debug("error when sending request", zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// reset for next iteration
|
// reset for next iteration
|
||||||
|
@ -634,14 +616,7 @@ func (w *watchGrpcStream) run() {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
req := &pb.WatchRequest{RequestUnion: cr}
|
req := &pb.WatchRequest{RequestUnion: cr}
|
||||||
if w.lg != nil {
|
wc.Send(req)
|
||||||
w.lg.Debug("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId))
|
|
||||||
}
|
|
||||||
if err := wc.Send(req); err != nil {
|
|
||||||
if w.lg != nil {
|
|
||||||
w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// watch client failed on Recv; spawn another if possible
|
// watch client failed on Recv; spawn another if possible
|
||||||
|
@ -654,11 +629,7 @@ func (w *watchGrpcStream) run() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if ws := w.nextResume(); ws != nil {
|
if ws := w.nextResume(); ws != nil {
|
||||||
if err := wc.Send(ws.initReq.toPB()); err != nil {
|
wc.Send(ws.initReq.toPB())
|
||||||
if w.lg != nil {
|
|
||||||
w.lg.Debug("error when sending request", zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
cancelSet = make(map[int64]struct{})
|
cancelSet = make(map[int64]struct{})
|
||||||
|
|
||||||
|
@ -666,25 +637,6 @@ func (w *watchGrpcStream) run() {
|
||||||
return
|
return
|
||||||
|
|
||||||
case ws := <-w.closingc:
|
case ws := <-w.closingc:
|
||||||
if ws.id != -1 {
|
|
||||||
// client is closing an established watch; close it on the server proactively instead of waiting
|
|
||||||
// to close when the next message arrives
|
|
||||||
cancelSet[ws.id] = struct{}{}
|
|
||||||
cr := &pb.WatchRequest_CancelRequest{
|
|
||||||
CancelRequest: &pb.WatchCancelRequest{
|
|
||||||
WatchId: ws.id,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
req := &pb.WatchRequest{RequestUnion: cr}
|
|
||||||
if w.lg != nil {
|
|
||||||
w.lg.Debug("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id))
|
|
||||||
}
|
|
||||||
if err := wc.Send(req); err != nil {
|
|
||||||
if w.lg != nil {
|
|
||||||
w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.closeSubstream(ws)
|
w.closeSubstream(ws)
|
||||||
delete(closing, ws)
|
delete(closing, ws)
|
||||||
// no more watchers on this stream, shutdown
|
// no more watchers on this stream, shutdown
|
||||||
|
|
|
@ -1,312 +0,0 @@
|
||||||
// Copyright 2018 The etcd Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package embed
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"go.etcd.io/etcd/pkg/logutil"
|
|
||||||
|
|
||||||
"github.com/coreos/pkg/capnslog"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
"go.uber.org/zap/zapcore"
|
|
||||||
"google.golang.org/grpc"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetLogger returns the logger.
|
|
||||||
func (cfg Config) GetLogger() *zap.Logger {
|
|
||||||
cfg.loggerMu.RLock()
|
|
||||||
l := cfg.logger
|
|
||||||
cfg.loggerMu.RUnlock()
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// for testing
|
|
||||||
var grpcLogOnce = new(sync.Once)
|
|
||||||
|
|
||||||
// setupLogging initializes etcd logging.
|
|
||||||
// Must be called after flag parsing or finishing configuring embed.Config.
|
|
||||||
func (cfg *Config) setupLogging() error {
|
|
||||||
// handle "DeprecatedLogOutput" in v3.4
|
|
||||||
// TODO: remove "DeprecatedLogOutput" in v3.5
|
|
||||||
len1 := len(cfg.DeprecatedLogOutput)
|
|
||||||
len2 := len(cfg.LogOutputs)
|
|
||||||
if len1 != len2 {
|
|
||||||
switch {
|
|
||||||
case len1 > len2: // deprecate "log-output" flag is used
|
|
||||||
fmt.Fprintln(os.Stderr, "'--log-output' flag has been deprecated! Please use '--log-outputs'!")
|
|
||||||
cfg.LogOutputs = cfg.DeprecatedLogOutput
|
|
||||||
case len1 < len2: // "--log-outputs" flag has been set with multiple writers
|
|
||||||
cfg.DeprecatedLogOutput = []string{}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if len1 > 1 {
|
|
||||||
return errors.New("both '--log-output' and '--log-outputs' are set; only set '--log-outputs'")
|
|
||||||
}
|
|
||||||
if len1 < 1 {
|
|
||||||
return errors.New("either '--log-output' or '--log-outputs' flag must be set")
|
|
||||||
}
|
|
||||||
if reflect.DeepEqual(cfg.DeprecatedLogOutput, cfg.LogOutputs) && cfg.DeprecatedLogOutput[0] != DefaultLogOutput {
|
|
||||||
return fmt.Errorf("'--log-output=%q' and '--log-outputs=%q' are incompatible; only set --log-outputs", cfg.DeprecatedLogOutput, cfg.LogOutputs)
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(cfg.DeprecatedLogOutput, []string{DefaultLogOutput}) {
|
|
||||||
fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--log-output' flag is set to %q\n", cfg.DeprecatedLogOutput)
|
|
||||||
fmt.Fprintln(os.Stderr, "Please use '--log-outputs' flag")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: remove after deprecating log related flags in v3.5
|
|
||||||
if cfg.Debug {
|
|
||||||
fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--debug' flag is set to %v (use '--log-level=debug' instead\n", cfg.Debug)
|
|
||||||
}
|
|
||||||
if cfg.Debug && cfg.LogLevel != "debug" {
|
|
||||||
fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--debug' flag is set to %v with inconsistent '--log-level=%s' flag\n", cfg.Debug, cfg.LogLevel)
|
|
||||||
}
|
|
||||||
if cfg.Logger == "capnslog" {
|
|
||||||
fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--logger=%s' flag is set; use '--logger=zap' flag instead\n", cfg.Logger)
|
|
||||||
}
|
|
||||||
if cfg.LogPkgLevels != "" {
|
|
||||||
fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--log-package-levels=%s' flag is set; use '--logger=zap' flag instead\n", cfg.LogPkgLevels)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch cfg.Logger {
|
|
||||||
case "capnslog": // TODO: deprecate this in v3.5
|
|
||||||
cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
|
|
||||||
cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure
|
|
||||||
|
|
||||||
if cfg.Debug {
|
|
||||||
capnslog.SetGlobalLogLevel(capnslog.DEBUG)
|
|
||||||
grpc.EnableTracing = true
|
|
||||||
// enable info, warning, error
|
|
||||||
grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
|
|
||||||
} else {
|
|
||||||
capnslog.SetGlobalLogLevel(logutil.ConvertToCapnslogLogLevel(cfg.LogLevel))
|
|
||||||
// only discard info
|
|
||||||
grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: deprecate with "capnslog"
|
|
||||||
if cfg.LogPkgLevels != "" {
|
|
||||||
repoLog := capnslog.MustRepoLogger("go.etcd.io/etcd")
|
|
||||||
settings, err := repoLog.ParseLogLevelConfig(cfg.LogPkgLevels)
|
|
||||||
if err != nil {
|
|
||||||
plog.Warningf("couldn't parse log level string: %s, continuing with default levels", err.Error())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
repoLog.SetLogLevel(settings)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cfg.LogOutputs) != 1 {
|
|
||||||
return fmt.Errorf("--logger=capnslog supports only 1 value in '--log-outputs', got %q", cfg.LogOutputs)
|
|
||||||
}
|
|
||||||
// capnslog initially SetFormatter(NewDefaultFormatter(os.Stderr))
|
|
||||||
// where NewDefaultFormatter returns NewJournaldFormatter when syscall.Getppid() == 1
|
|
||||||
// specify 'stdout' or 'stderr' to skip journald logging even when running under systemd
|
|
||||||
output := cfg.LogOutputs[0]
|
|
||||||
switch output {
|
|
||||||
case StdErrLogOutput:
|
|
||||||
capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, cfg.Debug))
|
|
||||||
case StdOutLogOutput:
|
|
||||||
capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, cfg.Debug))
|
|
||||||
case DefaultLogOutput:
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unknown log-output %q (only supports %q, %q, %q)", output, DefaultLogOutput, StdErrLogOutput, StdOutLogOutput)
|
|
||||||
}
|
|
||||||
|
|
||||||
case "zap":
|
|
||||||
if len(cfg.LogOutputs) == 0 {
|
|
||||||
cfg.LogOutputs = []string{DefaultLogOutput}
|
|
||||||
}
|
|
||||||
if len(cfg.LogOutputs) > 1 {
|
|
||||||
for _, v := range cfg.LogOutputs {
|
|
||||||
if v == DefaultLogOutput {
|
|
||||||
return fmt.Errorf("multi logoutput for %q is not supported yet", DefaultLogOutput)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
outputPaths, errOutputPaths := make([]string, 0), make([]string, 0)
|
|
||||||
isJournal := false
|
|
||||||
for _, v := range cfg.LogOutputs {
|
|
||||||
switch v {
|
|
||||||
case DefaultLogOutput:
|
|
||||||
outputPaths = append(outputPaths, StdErrLogOutput)
|
|
||||||
errOutputPaths = append(errOutputPaths, StdErrLogOutput)
|
|
||||||
|
|
||||||
case JournalLogOutput:
|
|
||||||
isJournal = true
|
|
||||||
|
|
||||||
case StdErrLogOutput:
|
|
||||||
outputPaths = append(outputPaths, StdErrLogOutput)
|
|
||||||
errOutputPaths = append(errOutputPaths, StdErrLogOutput)
|
|
||||||
|
|
||||||
case StdOutLogOutput:
|
|
||||||
outputPaths = append(outputPaths, StdOutLogOutput)
|
|
||||||
errOutputPaths = append(errOutputPaths, StdOutLogOutput)
|
|
||||||
|
|
||||||
default:
|
|
||||||
outputPaths = append(outputPaths, v)
|
|
||||||
errOutputPaths = append(errOutputPaths, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !isJournal {
|
|
||||||
copied := logutil.DefaultZapLoggerConfig
|
|
||||||
copied.OutputPaths = outputPaths
|
|
||||||
copied.ErrorOutputPaths = errOutputPaths
|
|
||||||
copied = logutil.MergeOutputPaths(copied)
|
|
||||||
copied.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))
|
|
||||||
if cfg.Debug || cfg.LogLevel == "debug" {
|
|
||||||
// enable tracing even when "--debug --log-level info"
|
|
||||||
// in order to keep backward compatibility with <= v3.3
|
|
||||||
// TODO: remove "Debug" check in v3.5
|
|
||||||
grpc.EnableTracing = true
|
|
||||||
}
|
|
||||||
if cfg.ZapLoggerBuilder == nil {
|
|
||||||
cfg.ZapLoggerBuilder = func(c *Config) error {
|
|
||||||
var err error
|
|
||||||
c.logger, err = copied.Build()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
c.loggerMu.Lock()
|
|
||||||
defer c.loggerMu.Unlock()
|
|
||||||
c.loggerConfig = &copied
|
|
||||||
c.loggerCore = nil
|
|
||||||
c.loggerWriteSyncer = nil
|
|
||||||
grpcLogOnce.Do(func() {
|
|
||||||
// debug true, enable info, warning, error
|
|
||||||
// debug false, only discard info
|
|
||||||
var gl grpclog.LoggerV2
|
|
||||||
gl, err = logutil.NewGRPCLoggerV2(copied)
|
|
||||||
if err == nil {
|
|
||||||
grpclog.SetLoggerV2(gl)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if len(cfg.LogOutputs) > 1 {
|
|
||||||
for _, v := range cfg.LogOutputs {
|
|
||||||
if v != DefaultLogOutput {
|
|
||||||
return fmt.Errorf("running with systemd/journal but other '--log-outputs' values (%q) are configured with 'default'; override 'default' value with something else", cfg.LogOutputs)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// use stderr as fallback
|
|
||||||
syncer, lerr := getJournalWriteSyncer()
|
|
||||||
if lerr != nil {
|
|
||||||
return lerr
|
|
||||||
}
|
|
||||||
|
|
||||||
lvl := zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))
|
|
||||||
if cfg.Debug || cfg.LogLevel == "debug" {
|
|
||||||
// enable tracing even when "--debug --log-level info"
|
|
||||||
// in order to keep backward compatibility with <= v3.3
|
|
||||||
// TODO: remove "Debug" check in v3.5
|
|
||||||
grpc.EnableTracing = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// WARN: do not change field names in encoder config
|
|
||||||
// journald logging writer assumes field names of "level" and "caller"
|
|
||||||
cr := zapcore.NewCore(
|
|
||||||
zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig),
|
|
||||||
syncer,
|
|
||||||
lvl,
|
|
||||||
)
|
|
||||||
if cfg.ZapLoggerBuilder == nil {
|
|
||||||
cfg.ZapLoggerBuilder = func(c *Config) error {
|
|
||||||
c.logger = zap.New(cr, zap.AddCaller(), zap.ErrorOutput(syncer))
|
|
||||||
c.loggerMu.Lock()
|
|
||||||
defer c.loggerMu.Unlock()
|
|
||||||
c.loggerConfig = nil
|
|
||||||
c.loggerCore = cr
|
|
||||||
c.loggerWriteSyncer = syncer
|
|
||||||
|
|
||||||
grpcLogOnce.Do(func() {
|
|
||||||
grpclog.SetLoggerV2(logutil.NewGRPCLoggerV2FromZapCore(cr, syncer))
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
err := cfg.ZapLoggerBuilder(cfg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
logTLSHandshakeFailure := func(conn *tls.Conn, err error) {
|
|
||||||
state := conn.ConnectionState()
|
|
||||||
remoteAddr := conn.RemoteAddr().String()
|
|
||||||
serverName := state.ServerName
|
|
||||||
if len(state.PeerCertificates) > 0 {
|
|
||||||
cert := state.PeerCertificates[0]
|
|
||||||
ips := make([]string, len(cert.IPAddresses))
|
|
||||||
for i := range cert.IPAddresses {
|
|
||||||
ips[i] = cert.IPAddresses[i].String()
|
|
||||||
}
|
|
||||||
cfg.logger.Warn(
|
|
||||||
"rejected connection",
|
|
||||||
zap.String("remote-addr", remoteAddr),
|
|
||||||
zap.String("server-name", serverName),
|
|
||||||
zap.Strings("ip-addresses", ips),
|
|
||||||
zap.Strings("dns-names", cert.DNSNames),
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
cfg.logger.Warn(
|
|
||||||
"rejected connection",
|
|
||||||
zap.String("remote-addr", remoteAddr),
|
|
||||||
zap.String("server-name", serverName),
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
|
|
||||||
cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure
|
|
||||||
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unknown logger option %q", cfg.Logger)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewZapCoreLoggerBuilder generates a zap core logger builder.
|
|
||||||
func NewZapCoreLoggerBuilder(lg *zap.Logger, cr zapcore.Core, syncer zapcore.WriteSyncer) func(*Config) error {
|
|
||||||
return func(cfg *Config) error {
|
|
||||||
cfg.loggerMu.Lock()
|
|
||||||
defer cfg.loggerMu.Unlock()
|
|
||||||
cfg.logger = lg
|
|
||||||
cfg.loggerConfig = nil
|
|
||||||
cfg.loggerCore = cr
|
|
||||||
cfg.loggerWriteSyncer = syncer
|
|
||||||
|
|
||||||
grpcLogOnce.Do(func() {
|
|
||||||
grpclog.SetLoggerV2(logutil.NewGRPCLoggerV2FromZapCore(cr, syncer))
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,832 +0,0 @@
|
||||||
// Copyright 2016 The etcd Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package embed
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/tls"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
defaultLog "log"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"runtime"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"go.etcd.io/etcd/etcdserver"
|
|
||||||
"go.etcd.io/etcd/etcdserver/api/etcdhttp"
|
|
||||||
"go.etcd.io/etcd/etcdserver/api/rafthttp"
|
|
||||||
"go.etcd.io/etcd/etcdserver/api/v2http"
|
|
||||||
"go.etcd.io/etcd/etcdserver/api/v2v3"
|
|
||||||
"go.etcd.io/etcd/etcdserver/api/v3client"
|
|
||||||
"go.etcd.io/etcd/etcdserver/api/v3rpc"
|
|
||||||
"go.etcd.io/etcd/pkg/debugutil"
|
|
||||||
runtimeutil "go.etcd.io/etcd/pkg/runtime"
|
|
||||||
"go.etcd.io/etcd/pkg/transport"
|
|
||||||
"go.etcd.io/etcd/pkg/types"
|
|
||||||
"go.etcd.io/etcd/version"
|
|
||||||
|
|
||||||
"github.com/coreos/pkg/capnslog"
|
|
||||||
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
|
||||||
"github.com/soheilhy/cmux"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
"google.golang.org/grpc"
|
|
||||||
"google.golang.org/grpc/keepalive"
|
|
||||||
)
|
|
||||||
|
|
||||||
var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "embed")
|
|
||||||
|
|
||||||
const (
|
|
||||||
// internal fd usage includes disk usage and transport usage.
|
|
||||||
// To read/write snapshot, snap pkg needs 1. In normal case, wal pkg needs
|
|
||||||
// at most 2 to read/lock/write WALs. One case that it needs to 2 is to
|
|
||||||
// read all logs after some snapshot index, which locates at the end of
|
|
||||||
// the second last and the head of the last. For purging, it needs to read
|
|
||||||
// directory, so it needs 1. For fd monitor, it needs 1.
|
|
||||||
// For transport, rafthttp builds two long-polling connections and at most
|
|
||||||
// four temporary connections with each member. There are at most 9 members
|
|
||||||
// in a cluster, so it should reserve 96.
|
|
||||||
// For the safety, we set the total reserved number to 150.
|
|
||||||
reservedInternalFDNum = 150
|
|
||||||
)
|
|
||||||
|
|
||||||
// Etcd contains a running etcd server and its listeners.
|
|
||||||
type Etcd struct {
|
|
||||||
Peers []*peerListener
|
|
||||||
Clients []net.Listener
|
|
||||||
// a map of contexts for the servers that serves client requests.
|
|
||||||
sctxs map[string]*serveCtx
|
|
||||||
metricsListeners []net.Listener
|
|
||||||
|
|
||||||
Server *etcdserver.EtcdServer
|
|
||||||
|
|
||||||
cfg Config
|
|
||||||
stopc chan struct{}
|
|
||||||
errc chan error
|
|
||||||
|
|
||||||
closeOnce sync.Once
|
|
||||||
}
|
|
||||||
|
|
||||||
type peerListener struct {
|
|
||||||
net.Listener
|
|
||||||
serve func() error
|
|
||||||
close func(context.Context) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartEtcd launches the etcd server and HTTP handlers for client/server communication.
|
|
||||||
// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait
|
|
||||||
// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use.
|
|
||||||
func StartEtcd(inCfg *Config) (e *Etcd, err error) {
|
|
||||||
if err = inCfg.Validate(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
serving := false
|
|
||||||
e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})}
|
|
||||||
cfg := &e.cfg
|
|
||||||
defer func() {
|
|
||||||
if e == nil || err == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !serving {
|
|
||||||
// errored before starting gRPC server for serveCtx.serversC
|
|
||||||
for _, sctx := range e.sctxs {
|
|
||||||
close(sctx.serversC)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
e.Close()
|
|
||||||
e = nil
|
|
||||||
}()
|
|
||||||
|
|
||||||
if e.cfg.logger != nil {
|
|
||||||
e.cfg.logger.Info(
|
|
||||||
"configuring peer listeners",
|
|
||||||
zap.Strings("listen-peer-urls", e.cfg.getLPURLs()),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
if e.Peers, err = configurePeerListeners(cfg); err != nil {
|
|
||||||
return e, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if e.cfg.logger != nil {
|
|
||||||
e.cfg.logger.Info(
|
|
||||||
"configuring client listeners",
|
|
||||||
zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
if e.sctxs, err = configureClientListeners(cfg); err != nil {
|
|
||||||
return e, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, sctx := range e.sctxs {
|
|
||||||
e.Clients = append(e.Clients, sctx.l)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
urlsmap types.URLsMap
|
|
||||||
token string
|
|
||||||
)
|
|
||||||
memberInitialized := true
|
|
||||||
if !isMemberInitialized(cfg) {
|
|
||||||
memberInitialized = false
|
|
||||||
urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd")
|
|
||||||
if err != nil {
|
|
||||||
return e, fmt.Errorf("error setting up initial cluster: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AutoCompactionRetention defaults to "0" if not set.
|
|
||||||
if len(cfg.AutoCompactionRetention) == 0 {
|
|
||||||
cfg.AutoCompactionRetention = "0"
|
|
||||||
}
|
|
||||||
autoCompactionRetention, err := parseCompactionRetention(cfg.AutoCompactionMode, cfg.AutoCompactionRetention)
|
|
||||||
if err != nil {
|
|
||||||
return e, err
|
|
||||||
}
|
|
||||||
|
|
||||||
backendFreelistType := parseBackendFreelistType(cfg.ExperimentalBackendFreelistType)
|
|
||||||
|
|
||||||
srvcfg := etcdserver.ServerConfig{
|
|
||||||
Name: cfg.Name,
|
|
||||||
ClientURLs: cfg.ACUrls,
|
|
||||||
PeerURLs: cfg.APUrls,
|
|
||||||
DataDir: cfg.Dir,
|
|
||||||
DedicatedWALDir: cfg.WalDir,
|
|
||||||
SnapshotCount: cfg.SnapshotCount,
|
|
||||||
SnapshotCatchUpEntries: cfg.SnapshotCatchUpEntries,
|
|
||||||
MaxSnapFiles: cfg.MaxSnapFiles,
|
|
||||||
MaxWALFiles: cfg.MaxWalFiles,
|
|
||||||
InitialPeerURLsMap: urlsmap,
|
|
||||||
InitialClusterToken: token,
|
|
||||||
DiscoveryURL: cfg.Durl,
|
|
||||||
DiscoveryProxy: cfg.Dproxy,
|
|
||||||
NewCluster: cfg.IsNewCluster(),
|
|
||||||
PeerTLSInfo: cfg.PeerTLSInfo,
|
|
||||||
TickMs: cfg.TickMs,
|
|
||||||
ElectionTicks: cfg.ElectionTicks(),
|
|
||||||
InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
|
|
||||||
AutoCompactionRetention: autoCompactionRetention,
|
|
||||||
AutoCompactionMode: cfg.AutoCompactionMode,
|
|
||||||
QuotaBackendBytes: cfg.QuotaBackendBytes,
|
|
||||||
BackendBatchLimit: cfg.BackendBatchLimit,
|
|
||||||
BackendFreelistType: backendFreelistType,
|
|
||||||
BackendBatchInterval: cfg.BackendBatchInterval,
|
|
||||||
MaxTxnOps: cfg.MaxTxnOps,
|
|
||||||
MaxRequestBytes: cfg.MaxRequestBytes,
|
|
||||||
StrictReconfigCheck: cfg.StrictReconfigCheck,
|
|
||||||
ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth,
|
|
||||||
AuthToken: cfg.AuthToken,
|
|
||||||
BcryptCost: cfg.BcryptCost,
|
|
||||||
TokenTTL: cfg.AuthTokenTTL,
|
|
||||||
CORS: cfg.CORS,
|
|
||||||
HostWhitelist: cfg.HostWhitelist,
|
|
||||||
InitialCorruptCheck: cfg.ExperimentalInitialCorruptCheck,
|
|
||||||
CorruptCheckTime: cfg.ExperimentalCorruptCheckTime,
|
|
||||||
PreVote: cfg.PreVote,
|
|
||||||
Logger: cfg.logger,
|
|
||||||
LoggerConfig: cfg.loggerConfig,
|
|
||||||
LoggerCore: cfg.loggerCore,
|
|
||||||
LoggerWriteSyncer: cfg.loggerWriteSyncer,
|
|
||||||
Debug: cfg.Debug,
|
|
||||||
ForceNewCluster: cfg.ForceNewCluster,
|
|
||||||
EnableGRPCGateway: cfg.EnableGRPCGateway,
|
|
||||||
UnsafeNoFsync: cfg.UnsafeNoFsync,
|
|
||||||
EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint,
|
|
||||||
CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit,
|
|
||||||
WatchProgressNotifyInterval: cfg.ExperimentalWatchProgressNotifyInterval,
|
|
||||||
}
|
|
||||||
print(e.cfg.logger, *cfg, srvcfg, memberInitialized)
|
|
||||||
if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
|
|
||||||
return e, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// buffer channel so goroutines on closed connections won't wait forever
|
|
||||||
e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))
|
|
||||||
|
|
||||||
// newly started member ("memberInitialized==false")
|
|
||||||
// does not need corruption check
|
|
||||||
if memberInitialized {
|
|
||||||
if err = e.Server.CheckInitialHashKV(); err != nil {
|
|
||||||
// set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()"
|
|
||||||
// (nothing to close since rafthttp transports have not been started)
|
|
||||||
e.Server = nil
|
|
||||||
return e, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
e.Server.Start()
|
|
||||||
|
|
||||||
if err = e.servePeers(); err != nil {
|
|
||||||
return e, err
|
|
||||||
}
|
|
||||||
if err = e.serveClients(); err != nil {
|
|
||||||
return e, err
|
|
||||||
}
|
|
||||||
if err = e.serveMetrics(); err != nil {
|
|
||||||
return e, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if e.cfg.logger != nil {
|
|
||||||
e.cfg.logger.Info(
|
|
||||||
"now serving peer/client/metrics",
|
|
||||||
zap.String("local-member-id", e.Server.ID().String()),
|
|
||||||
zap.Strings("initial-advertise-peer-urls", e.cfg.getAPURLs()),
|
|
||||||
zap.Strings("listen-peer-urls", e.cfg.getLPURLs()),
|
|
||||||
zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
|
|
||||||
zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
|
|
||||||
zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
serving = true
|
|
||||||
return e, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func print(lg *zap.Logger, ec Config, sc etcdserver.ServerConfig, memberInitialized bool) {
|
|
||||||
// TODO: remove this after dropping "capnslog"
|
|
||||||
if lg == nil {
|
|
||||||
plog.Infof("name = %s", ec.Name)
|
|
||||||
if sc.ForceNewCluster {
|
|
||||||
plog.Infof("force new cluster")
|
|
||||||
}
|
|
||||||
plog.Infof("data dir = %s", sc.DataDir)
|
|
||||||
plog.Infof("member dir = %s", sc.MemberDir())
|
|
||||||
if sc.DedicatedWALDir != "" {
|
|
||||||
plog.Infof("dedicated WAL dir = %s", sc.DedicatedWALDir)
|
|
||||||
}
|
|
||||||
plog.Infof("heartbeat = %dms", sc.TickMs)
|
|
||||||
plog.Infof("election = %dms", sc.ElectionTicks*int(sc.TickMs))
|
|
||||||
plog.Infof("snapshot count = %d", sc.SnapshotCount)
|
|
||||||
if len(sc.DiscoveryURL) != 0 {
|
|
||||||
plog.Infof("discovery URL= %s", sc.DiscoveryURL)
|
|
||||||
if len(sc.DiscoveryProxy) != 0 {
|
|
||||||
plog.Infof("discovery proxy = %s", sc.DiscoveryProxy)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
plog.Infof("advertise client URLs = %s", sc.ClientURLs)
|
|
||||||
if memberInitialized {
|
|
||||||
plog.Infof("initial advertise peer URLs = %s", sc.PeerURLs)
|
|
||||||
plog.Infof("initial cluster = %s", sc.InitialPeerURLsMap)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
cors := make([]string, 0, len(ec.CORS))
|
|
||||||
for v := range ec.CORS {
|
|
||||||
cors = append(cors, v)
|
|
||||||
}
|
|
||||||
sort.Strings(cors)
|
|
||||||
|
|
||||||
hss := make([]string, 0, len(ec.HostWhitelist))
|
|
||||||
for v := range ec.HostWhitelist {
|
|
||||||
hss = append(hss, v)
|
|
||||||
}
|
|
||||||
sort.Strings(hss)
|
|
||||||
|
|
||||||
quota := ec.QuotaBackendBytes
|
|
||||||
if quota == 0 {
|
|
||||||
quota = etcdserver.DefaultQuotaBytes
|
|
||||||
}
|
|
||||||
|
|
||||||
lg.Info(
|
|
||||||
"starting an etcd server",
|
|
||||||
zap.String("etcd-version", version.Version),
|
|
||||||
zap.String("git-sha", version.GitSHA),
|
|
||||||
zap.String("go-version", runtime.Version()),
|
|
||||||
zap.String("go-os", runtime.GOOS),
|
|
||||||
zap.String("go-arch", runtime.GOARCH),
|
|
||||||
zap.Int("max-cpu-set", runtime.GOMAXPROCS(0)),
|
|
||||||
zap.Int("max-cpu-available", runtime.NumCPU()),
|
|
||||||
zap.Bool("member-initialized", memberInitialized),
|
|
||||||
zap.String("name", sc.Name),
|
|
||||||
zap.String("data-dir", sc.DataDir),
|
|
||||||
zap.String("wal-dir", ec.WalDir),
|
|
||||||
zap.String("wal-dir-dedicated", sc.DedicatedWALDir),
|
|
||||||
zap.String("member-dir", sc.MemberDir()),
|
|
||||||
zap.Bool("force-new-cluster", sc.ForceNewCluster),
|
|
||||||
zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(sc.TickMs)*time.Millisecond)),
|
|
||||||
zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)),
|
|
||||||
zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance),
|
|
||||||
zap.Uint64("snapshot-count", sc.SnapshotCount),
|
|
||||||
zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries),
|
|
||||||
zap.Strings("initial-advertise-peer-urls", ec.getAPURLs()),
|
|
||||||
zap.Strings("listen-peer-urls", ec.getLPURLs()),
|
|
||||||
zap.Strings("advertise-client-urls", ec.getACURLs()),
|
|
||||||
zap.Strings("listen-client-urls", ec.getLCURLs()),
|
|
||||||
zap.Strings("listen-metrics-urls", ec.getMetricsURLs()),
|
|
||||||
zap.Strings("cors", cors),
|
|
||||||
zap.Strings("host-whitelist", hss),
|
|
||||||
zap.String("initial-cluster", sc.InitialPeerURLsMap.String()),
|
|
||||||
zap.String("initial-cluster-state", ec.ClusterState),
|
|
||||||
zap.String("initial-cluster-token", sc.InitialClusterToken),
|
|
||||||
zap.Int64("quota-size-bytes", quota),
|
|
||||||
zap.Bool("pre-vote", sc.PreVote),
|
|
||||||
zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck),
|
|
||||||
zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()),
|
|
||||||
zap.String("auto-compaction-mode", sc.AutoCompactionMode),
|
|
||||||
zap.Duration("auto-compaction-retention", sc.AutoCompactionRetention),
|
|
||||||
zap.String("auto-compaction-interval", sc.AutoCompactionRetention.String()),
|
|
||||||
zap.String("discovery-url", sc.DiscoveryURL),
|
|
||||||
zap.String("discovery-proxy", sc.DiscoveryProxy),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config returns the current configuration.
|
|
||||||
func (e *Etcd) Config() Config {
|
|
||||||
return e.cfg
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close gracefully shuts down all servers/listeners.
|
|
||||||
// Client requests will be terminated with request timeout.
|
|
||||||
// After timeout, enforce remaning requests be closed immediately.
|
|
||||||
func (e *Etcd) Close() {
|
|
||||||
fields := []zap.Field{
|
|
||||||
zap.String("name", e.cfg.Name),
|
|
||||||
zap.String("data-dir", e.cfg.Dir),
|
|
||||||
zap.Strings("advertise-peer-urls", e.cfg.getAPURLs()),
|
|
||||||
zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
|
|
||||||
}
|
|
||||||
lg := e.GetLogger()
|
|
||||||
if lg != nil {
|
|
||||||
lg.Info("closing etcd server", fields...)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if lg != nil {
|
|
||||||
lg.Info("closed etcd server", fields...)
|
|
||||||
lg.Sync()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
e.closeOnce.Do(func() { close(e.stopc) })
|
|
||||||
|
|
||||||
// close client requests with request timeout
|
|
||||||
timeout := 2 * time.Second
|
|
||||||
if e.Server != nil {
|
|
||||||
timeout = e.Server.Cfg.ReqTimeout()
|
|
||||||
}
|
|
||||||
for _, sctx := range e.sctxs {
|
|
||||||
for ss := range sctx.serversC {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
|
||||||
stopServers(ctx, ss)
|
|
||||||
cancel()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, sctx := range e.sctxs {
|
|
||||||
sctx.cancel()
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range e.Clients {
|
|
||||||
if e.Clients[i] != nil {
|
|
||||||
e.Clients[i].Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range e.metricsListeners {
|
|
||||||
e.metricsListeners[i].Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// close rafthttp transports
|
|
||||||
if e.Server != nil {
|
|
||||||
e.Server.Stop()
|
|
||||||
}
|
|
||||||
|
|
||||||
// close all idle connections in peer handler (wait up to 1-second)
|
|
||||||
for i := range e.Peers {
|
|
||||||
if e.Peers[i] != nil && e.Peers[i].close != nil {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
|
||||||
e.Peers[i].close(ctx)
|
|
||||||
cancel()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func stopServers(ctx context.Context, ss *servers) {
|
|
||||||
shutdownNow := func() {
|
|
||||||
// first, close the http.Server
|
|
||||||
ss.http.Shutdown(ctx)
|
|
||||||
// then close grpc.Server; cancels all active RPCs
|
|
||||||
ss.grpc.Stop()
|
|
||||||
}
|
|
||||||
|
|
||||||
// do not grpc.Server.GracefulStop with TLS enabled etcd server
|
|
||||||
// See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
|
|
||||||
// and https://github.com/etcd-io/etcd/issues/8916
|
|
||||||
if ss.secure {
|
|
||||||
shutdownNow()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ch := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
defer close(ch)
|
|
||||||
// close listeners to stop accepting new connections,
|
|
||||||
// will block on any existing transports
|
|
||||||
ss.grpc.GracefulStop()
|
|
||||||
}()
|
|
||||||
|
|
||||||
// wait until all pending RPCs are finished
|
|
||||||
select {
|
|
||||||
case <-ch:
|
|
||||||
case <-ctx.Done():
|
|
||||||
// took too long, manually close open transports
|
|
||||||
// e.g. watch streams
|
|
||||||
shutdownNow()
|
|
||||||
|
|
||||||
// concurrent GracefulStop should be interrupted
|
|
||||||
<-ch
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Etcd) Err() <-chan error { return e.errc }
|
|
||||||
|
|
||||||
func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
|
|
||||||
if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err = cfg.PeerSelfCert(); err != nil {
|
|
||||||
if cfg.logger != nil {
|
|
||||||
cfg.logger.Fatal("failed to get peer self-signed certs", zap.Error(err))
|
|
||||||
} else {
|
|
||||||
plog.Fatalf("could not get certs (%v)", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !cfg.PeerTLSInfo.Empty() {
|
|
||||||
if cfg.logger != nil {
|
|
||||||
cfg.logger.Info(
|
|
||||||
"starting with peer TLS",
|
|
||||||
zap.String("tls-info", fmt.Sprintf("%+v", cfg.PeerTLSInfo)),
|
|
||||||
zap.Strings("cipher-suites", cfg.CipherSuites),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
plog.Infof("peerTLS: %s", cfg.PeerTLSInfo)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
peers = make([]*peerListener, len(cfg.LPUrls))
|
|
||||||
defer func() {
|
|
||||||
if err == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for i := range peers {
|
|
||||||
if peers[i] != nil && peers[i].close != nil {
|
|
||||||
if cfg.logger != nil {
|
|
||||||
cfg.logger.Warn(
|
|
||||||
"closing peer listener",
|
|
||||||
zap.String("address", cfg.LPUrls[i].String()),
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
|
|
||||||
}
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
|
||||||
peers[i].close(ctx)
|
|
||||||
cancel()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
for i, u := range cfg.LPUrls {
|
|
||||||
if u.Scheme == "http" {
|
|
||||||
if !cfg.PeerTLSInfo.Empty() {
|
|
||||||
if cfg.logger != nil {
|
|
||||||
cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("peer-url", u.String()))
|
|
||||||
} else {
|
|
||||||
plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if cfg.PeerTLSInfo.ClientCertAuth {
|
|
||||||
if cfg.logger != nil {
|
|
||||||
cfg.logger.Warn("scheme is HTTP while --peer-client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("peer-url", u.String()))
|
|
||||||
} else {
|
|
||||||
plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
peers[i] = &peerListener{close: func(context.Context) error { return nil }}
|
|
||||||
peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// once serve, overwrite with 'http.Server.Shutdown'
|
|
||||||
peers[i].close = func(context.Context) error {
|
|
||||||
return peers[i].Listener.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return peers, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// configure peer handlers after rafthttp.Transport started
|
|
||||||
func (e *Etcd) servePeers() (err error) {
|
|
||||||
ph := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server)
|
|
||||||
var peerTLScfg *tls.Config
|
|
||||||
if !e.cfg.PeerTLSInfo.Empty() {
|
|
||||||
if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, p := range e.Peers {
|
|
||||||
u := p.Listener.Addr().String()
|
|
||||||
gs := v3rpc.Server(e.Server, peerTLScfg)
|
|
||||||
m := cmux.New(p.Listener)
|
|
||||||
go gs.Serve(m.Match(cmux.HTTP2()))
|
|
||||||
srv := &http.Server{
|
|
||||||
Handler: grpcHandlerFunc(gs, ph),
|
|
||||||
ReadTimeout: 5 * time.Minute,
|
|
||||||
ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error
|
|
||||||
}
|
|
||||||
go srv.Serve(m.Match(cmux.Any()))
|
|
||||||
p.serve = func() error { return m.Serve() }
|
|
||||||
p.close = func(ctx context.Context) error {
|
|
||||||
// gracefully shutdown http.Server
|
|
||||||
// close open listeners, idle connections
|
|
||||||
// until context cancel or time-out
|
|
||||||
if e.cfg.logger != nil {
|
|
||||||
e.cfg.logger.Info(
|
|
||||||
"stopping serving peer traffic",
|
|
||||||
zap.String("address", u),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv})
|
|
||||||
if e.cfg.logger != nil {
|
|
||||||
e.cfg.logger.Info(
|
|
||||||
"stopped serving peer traffic",
|
|
||||||
zap.String("address", u),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// start peer servers in a goroutine
|
|
||||||
for _, pl := range e.Peers {
|
|
||||||
go func(l *peerListener) {
|
|
||||||
u := l.Addr().String()
|
|
||||||
if e.cfg.logger != nil {
|
|
||||||
e.cfg.logger.Info(
|
|
||||||
"serving peer traffic",
|
|
||||||
zap.String("address", u),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
plog.Info("listening for peers on ", u)
|
|
||||||
}
|
|
||||||
e.errHandler(l.serve())
|
|
||||||
}(pl)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
|
|
||||||
if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err = cfg.ClientSelfCert(); err != nil {
|
|
||||||
if cfg.logger != nil {
|
|
||||||
cfg.logger.Fatal("failed to get client self-signed certs", zap.Error(err))
|
|
||||||
} else {
|
|
||||||
plog.Fatalf("could not get certs (%v)", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if cfg.EnablePprof {
|
|
||||||
if cfg.logger != nil {
|
|
||||||
cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf))
|
|
||||||
} else {
|
|
||||||
plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sctxs = make(map[string]*serveCtx)
|
|
||||||
for _, u := range cfg.LCUrls {
|
|
||||||
sctx := newServeCtx(cfg.logger)
|
|
||||||
if u.Scheme == "http" || u.Scheme == "unix" {
|
|
||||||
if !cfg.ClientTLSInfo.Empty() {
|
|
||||||
if cfg.logger != nil {
|
|
||||||
cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("client-url", u.String()))
|
|
||||||
} else {
|
|
||||||
plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if cfg.ClientTLSInfo.ClientCertAuth {
|
|
||||||
if cfg.logger != nil {
|
|
||||||
cfg.logger.Warn("scheme is HTTP while --client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("client-url", u.String()))
|
|
||||||
} else {
|
|
||||||
plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
|
|
||||||
return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
network := "tcp"
|
|
||||||
addr := u.Host
|
|
||||||
if u.Scheme == "unix" || u.Scheme == "unixs" {
|
|
||||||
network = "unix"
|
|
||||||
addr = u.Host + u.Path
|
|
||||||
}
|
|
||||||
sctx.network = network
|
|
||||||
|
|
||||||
sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
|
|
||||||
sctx.insecure = !sctx.secure
|
|
||||||
if oldctx := sctxs[addr]; oldctx != nil {
|
|
||||||
oldctx.secure = oldctx.secure || sctx.secure
|
|
||||||
oldctx.insecure = oldctx.insecure || sctx.insecure
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if sctx.l, err = net.Listen(network, addr); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
|
|
||||||
// hosts that disable ipv6. So, use the address given by the user.
|
|
||||||
sctx.addr = addr
|
|
||||||
|
|
||||||
if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
|
|
||||||
if fdLimit <= reservedInternalFDNum {
|
|
||||||
if cfg.logger != nil {
|
|
||||||
cfg.logger.Fatal(
|
|
||||||
"file descriptor limit of etcd process is too low; please set higher",
|
|
||||||
zap.Uint64("limit", fdLimit),
|
|
||||||
zap.Int("recommended-limit", reservedInternalFDNum),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
|
|
||||||
}
|
|
||||||
|
|
||||||
if network == "tcp" {
|
|
||||||
if sctx.l, err = transport.NewKeepAliveListener(sctx.l, network, nil); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
sctx.l.Close()
|
|
||||||
if cfg.logger != nil {
|
|
||||||
cfg.logger.Warn(
|
|
||||||
"closing peer listener",
|
|
||||||
zap.String("address", u.Host),
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
plog.Info("stopping listening for client requests on ", u.Host)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
for k := range cfg.UserHandlers {
|
|
||||||
sctx.userHandlers[k] = cfg.UserHandlers[k]
|
|
||||||
}
|
|
||||||
sctx.serviceRegister = cfg.ServiceRegister
|
|
||||||
if cfg.EnablePprof || cfg.Debug {
|
|
||||||
sctx.registerPprof()
|
|
||||||
}
|
|
||||||
if cfg.Debug {
|
|
||||||
sctx.registerTrace()
|
|
||||||
}
|
|
||||||
sctxs[addr] = sctx
|
|
||||||
}
|
|
||||||
return sctxs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Etcd) serveClients() (err error) {
|
|
||||||
if !e.cfg.ClientTLSInfo.Empty() {
|
|
||||||
if e.cfg.logger != nil {
|
|
||||||
e.cfg.logger.Info(
|
|
||||||
"starting with client TLS",
|
|
||||||
zap.String("tls-info", fmt.Sprintf("%+v", e.cfg.ClientTLSInfo)),
|
|
||||||
zap.Strings("cipher-suites", e.cfg.CipherSuites),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start a client server goroutine for each listen address
|
|
||||||
var h http.Handler
|
|
||||||
if e.Config().EnableV2 {
|
|
||||||
if len(e.Config().ExperimentalEnableV2V3) > 0 {
|
|
||||||
srv := v2v3.NewServer(e.cfg.logger, v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3)
|
|
||||||
h = v2http.NewClientHandler(e.GetLogger(), srv, e.Server.Cfg.ReqTimeout())
|
|
||||||
} else {
|
|
||||||
h = v2http.NewClientHandler(e.GetLogger(), e.Server, e.Server.Cfg.ReqTimeout())
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
mux := http.NewServeMux()
|
|
||||||
etcdhttp.HandleBasic(mux, e.Server)
|
|
||||||
h = mux
|
|
||||||
}
|
|
||||||
|
|
||||||
gopts := []grpc.ServerOption{}
|
|
||||||
if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
|
|
||||||
gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
|
|
||||||
MinTime: e.cfg.GRPCKeepAliveMinTime,
|
|
||||||
PermitWithoutStream: false,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
if e.cfg.GRPCKeepAliveInterval > time.Duration(0) &&
|
|
||||||
e.cfg.GRPCKeepAliveTimeout > time.Duration(0) {
|
|
||||||
gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
|
|
||||||
Time: e.cfg.GRPCKeepAliveInterval,
|
|
||||||
Timeout: e.cfg.GRPCKeepAliveTimeout,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
// start client servers in each goroutine
|
|
||||||
for _, sctx := range e.sctxs {
|
|
||||||
go func(s *serveCtx) {
|
|
||||||
e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...))
|
|
||||||
}(sctx)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Etcd) serveMetrics() (err error) {
|
|
||||||
if e.cfg.Metrics == "extensive" {
|
|
||||||
grpc_prometheus.EnableHandlingTimeHistogram()
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(e.cfg.ListenMetricsUrls) > 0 {
|
|
||||||
metricsMux := http.NewServeMux()
|
|
||||||
etcdhttp.HandleMetricsHealth(metricsMux, e.Server)
|
|
||||||
|
|
||||||
for _, murl := range e.cfg.ListenMetricsUrls {
|
|
||||||
tlsInfo := &e.cfg.ClientTLSInfo
|
|
||||||
if murl.Scheme == "http" {
|
|
||||||
tlsInfo = nil
|
|
||||||
}
|
|
||||||
ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsInfo)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
e.metricsListeners = append(e.metricsListeners, ml)
|
|
||||||
go func(u url.URL, ln net.Listener) {
|
|
||||||
if e.cfg.logger != nil {
|
|
||||||
e.cfg.logger.Info(
|
|
||||||
"serving metrics",
|
|
||||||
zap.String("address", u.String()),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
plog.Info("listening for metrics on ", u.String())
|
|
||||||
}
|
|
||||||
e.errHandler(http.Serve(ln, metricsMux))
|
|
||||||
}(murl, ml)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Etcd) errHandler(err error) {
|
|
||||||
select {
|
|
||||||
case <-e.stopc:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case <-e.stopc:
|
|
||||||
case e.errc <- err:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLogger returns the logger.
|
|
||||||
func (e *Etcd) GetLogger() *zap.Logger {
|
|
||||||
e.cfg.loggerMu.RLock()
|
|
||||||
l := e.cfg.logger
|
|
||||||
e.cfg.loggerMu.RUnlock()
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
|
|
||||||
h, err := strconv.Atoi(retention)
|
|
||||||
if err == nil && h >= 0 {
|
|
||||||
switch mode {
|
|
||||||
case CompactorModeRevision:
|
|
||||||
ret = time.Duration(int64(h))
|
|
||||||
case CompactorModePeriodic:
|
|
||||||
ret = time.Duration(int64(h)) * time.Hour
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// periodic compaction
|
|
||||||
ret, err = time.ParseDuration(retention)
|
|
||||||
if err != nil {
|
|
||||||
return 0, fmt.Errorf("error parsing CompactionRetention: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ret, nil
|
|
||||||
}
|
|
@ -1,130 +0,0 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdhttp

import (
    "context"
    "encoding/json"
    "net/http"
    "time"

    "go.etcd.io/etcd/etcdserver"
    "go.etcd.io/etcd/etcdserver/etcdserverpb"
    "go.etcd.io/etcd/raft"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

const (
    PathMetrics = "/metrics"
    PathHealth  = "/health"
)

// HandleMetricsHealth registers metrics and health handlers.
func HandleMetricsHealth(mux *http.ServeMux, srv etcdserver.ServerV2) {
    mux.Handle(PathMetrics, promhttp.Handler())
    mux.Handle(PathHealth, NewHealthHandler(func() Health { return checkHealth(srv) }))
}

// HandlePrometheus registers prometheus handler on '/metrics'.
func HandlePrometheus(mux *http.ServeMux) {
    mux.Handle(PathMetrics, promhttp.Handler())
}

// NewHealthHandler handles '/health' requests.
func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if r.Method != http.MethodGet {
            w.Header().Set("Allow", http.MethodGet)
            http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
            plog.Warningf("/health error (status code %d)", http.StatusMethodNotAllowed)
            return
        }
        h := hfunc()
        d, _ := json.Marshal(h)
        if h.Health != "true" {
            http.Error(w, string(d), http.StatusServiceUnavailable)
            return
        }
        w.WriteHeader(http.StatusOK)
        w.Write(d)
    }
}

var (
    healthSuccess = prometheus.NewCounter(prometheus.CounterOpts{
        Namespace: "etcd",
        Subsystem: "server",
        Name:      "health_success",
        Help:      "The total number of successful health checks",
    })
    healthFailed = prometheus.NewCounter(prometheus.CounterOpts{
        Namespace: "etcd",
        Subsystem: "server",
        Name:      "health_failures",
        Help:      "The total number of failed health checks",
    })
)

func init() {
    prometheus.MustRegister(healthSuccess)
    prometheus.MustRegister(healthFailed)
}

// Health defines etcd server health status.
// TODO: remove manual parsing in etcdctl cluster-health
type Health struct {
    Health string `json:"health"`
}

// TODO: server NOSPACE, etcdserver.ErrNoLeader in health API

func checkHealth(srv etcdserver.ServerV2) Health {
    h := Health{Health: "true"}

    as := srv.Alarms()
    if len(as) > 0 {
        h.Health = "false"
        for _, v := range as {
            plog.Warningf("/health error due to an alarm %s", v.String())
        }
    }

    if h.Health == "true" {
        if uint64(srv.Leader()) == raft.None {
            h.Health = "false"
            plog.Warningf("/health error; no leader (status code %d)", http.StatusServiceUnavailable)
        }
    }

    if h.Health == "true" {
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        _, err := srv.Do(ctx, etcdserverpb.Request{Method: "QGET"})
        cancel()
        if err != nil {
            h.Health = "false"
            plog.Warningf("/health error; QGET failed %v (status code %d)", err, http.StatusServiceUnavailable)
        }
    }

    if h.Health == "true" {
        healthSuccess.Inc()
        plog.Infof("/health OK (status code %d)", http.StatusOK)
    } else {
        healthFailed.Inc()
    }
    return h
}
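The deleted handler above returns a JSON body of the form `{"health":"true"}` with HTTP 200, or 503 when an alarm is raised, there is no leader, or a quorum read times out. A minimal client-side probe, assuming a member serving client traffic on 127.0.0.1:2379 over plain HTTP (adjust for TLS clusters):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder endpoint; the /health path is the one registered above.
	resp, err := http.Get("http://127.0.0.1:2379/health")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var h struct {
		Health string `json:"health"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&h); err != nil {
		panic(err)
	}
	fmt.Println(resp.StatusCode, h.Health) // e.g. 200 true
}
```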
@ -1,193 +0,0 @@
|
||||||
// Copyright 2016 The etcd Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package membership
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"path"
|
|
||||||
|
|
||||||
"go.etcd.io/etcd/etcdserver/api/v2store"
|
|
||||||
"go.etcd.io/etcd/mvcc/backend"
|
|
||||||
"go.etcd.io/etcd/pkg/types"
|
|
||||||
|
|
||||||
"github.com/coreos/go-semver/semver"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
attributesSuffix = "attributes"
|
|
||||||
raftAttributesSuffix = "raftAttributes"
|
|
||||||
|
|
||||||
// the prefix for stroing membership related information in store provided by store pkg.
|
|
||||||
storePrefix = "/0"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
membersBucketName = []byte("members")
|
|
||||||
membersRemovedBucketName = []byte("members_removed")
|
|
||||||
clusterBucketName = []byte("cluster")
|
|
||||||
|
|
||||||
StoreMembersPrefix = path.Join(storePrefix, "members")
|
|
||||||
storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members")
|
|
||||||
)
|
|
||||||
|
|
||||||
func mustSaveMemberToBackend(be backend.Backend, m *Member) {
|
|
||||||
mkey := backendMemberKey(m.ID)
|
|
||||||
mvalue, err := json.Marshal(m)
|
|
||||||
if err != nil {
|
|
||||||
plog.Panicf("marshal raftAttributes should never fail: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
tx := be.BatchTx()
|
|
||||||
tx.Lock()
|
|
||||||
tx.UnsafePut(membersBucketName, mkey, mvalue)
|
|
||||||
tx.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) {
|
|
||||||
mkey := backendMemberKey(id)
|
|
||||||
|
|
||||||
tx := be.BatchTx()
|
|
||||||
tx.Lock()
|
|
||||||
tx.UnsafeDelete(membersBucketName, mkey)
|
|
||||||
tx.UnsafePut(membersRemovedBucketName, mkey, []byte("removed"))
|
|
||||||
tx.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) {
|
|
||||||
ckey := backendClusterVersionKey()
|
|
||||||
|
|
||||||
tx := be.BatchTx()
|
|
||||||
tx.Lock()
|
|
||||||
defer tx.Unlock()
|
|
||||||
tx.UnsafePut(clusterBucketName, ckey, []byte(ver.String()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustSaveMemberToStore(s v2store.Store, m *Member) {
|
|
||||||
b, err := json.Marshal(m.RaftAttributes)
|
|
||||||
if err != nil {
|
|
||||||
plog.Panicf("marshal raftAttributes should never fail: %v", err)
|
|
||||||
}
|
|
||||||
p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
|
|
||||||
if _, err := s.Create(p, false, string(b), false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
|
|
||||||
plog.Panicf("create raftAttributes should never fail: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustDeleteMemberFromStore(s v2store.Store, id types.ID) {
|
|
||||||
if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil {
|
|
||||||
plog.Panicf("delete member should never fail: %v", err)
|
|
||||||
}
|
|
||||||
if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
|
|
||||||
plog.Panicf("create removedMember should never fail: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustUpdateMemberInStore(s v2store.Store, m *Member) {
|
|
||||||
b, err := json.Marshal(m.RaftAttributes)
|
|
||||||
if err != nil {
|
|
||||||
plog.Panicf("marshal raftAttributes should never fail: %v", err)
|
|
||||||
}
|
|
||||||
p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
|
|
||||||
if _, err := s.Update(p, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
|
|
||||||
plog.Panicf("update raftAttributes should never fail: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustUpdateMemberAttrInStore(s v2store.Store, m *Member) {
|
|
||||||
b, err := json.Marshal(m.Attributes)
|
|
||||||
if err != nil {
|
|
||||||
plog.Panicf("marshal raftAttributes should never fail: %v", err)
|
|
||||||
}
|
|
||||||
p := path.Join(MemberStoreKey(m.ID), attributesSuffix)
|
|
||||||
if _, err := s.Set(p, false, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
|
|
||||||
plog.Panicf("update raftAttributes should never fail: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustSaveClusterVersionToStore(s v2store.Store, ver *semver.Version) {
|
|
||||||
if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
|
|
||||||
plog.Panicf("save cluster version should never fail: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// nodeToMember builds member from a key value node.
|
|
||||||
// the child nodes of the given node MUST be sorted by key.
|
|
||||||
func nodeToMember(n *v2store.NodeExtern) (*Member, error) {
|
|
||||||
m := &Member{ID: MustParseMemberIDFromKey(n.Key)}
|
|
||||||
attrs := make(map[string][]byte)
|
|
||||||
raftAttrKey := path.Join(n.Key, raftAttributesSuffix)
|
|
||||||
attrKey := path.Join(n.Key, attributesSuffix)
|
|
||||||
for _, nn := range n.Nodes {
|
|
||||||
if nn.Key != raftAttrKey && nn.Key != attrKey {
|
|
||||||
return nil, fmt.Errorf("unknown key %q", nn.Key)
|
|
||||||
}
|
|
||||||
attrs[nn.Key] = []byte(*nn.Value)
|
|
||||||
}
|
|
||||||
if data := attrs[raftAttrKey]; data != nil {
|
|
||||||
if err := json.Unmarshal(data, &m.RaftAttributes); err != nil {
|
|
||||||
return nil, fmt.Errorf("unmarshal raftAttributes error: %v", err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return nil, fmt.Errorf("raftAttributes key doesn't exist")
|
|
||||||
}
|
|
||||||
if data := attrs[attrKey]; data != nil {
|
|
||||||
if err := json.Unmarshal(data, &m.Attributes); err != nil {
|
|
||||||
return m, fmt.Errorf("unmarshal attributes error: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func backendMemberKey(id types.ID) []byte {
|
|
||||||
return []byte(id.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func backendClusterVersionKey() []byte {
|
|
||||||
return []byte("clusterVersion")
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustCreateBackendBuckets(be backend.Backend) {
|
|
||||||
tx := be.BatchTx()
|
|
||||||
tx.Lock()
|
|
||||||
defer tx.Unlock()
|
|
||||||
tx.UnsafeCreateBucket(membersBucketName)
|
|
||||||
tx.UnsafeCreateBucket(membersRemovedBucketName)
|
|
||||||
tx.UnsafeCreateBucket(clusterBucketName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func MemberStoreKey(id types.ID) string {
|
|
||||||
return path.Join(StoreMembersPrefix, id.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func StoreClusterVersionKey() string {
|
|
||||||
return path.Join(storePrefix, "version")
|
|
||||||
}
|
|
||||||
|
|
||||||
func MemberAttributesStorePath(id types.ID) string {
|
|
||||||
return path.Join(MemberStoreKey(id), attributesSuffix)
|
|
||||||
}
|
|
||||||
|
|
||||||
func MustParseMemberIDFromKey(key string) types.ID {
|
|
||||||
id, err := types.IDFromString(path.Base(key))
|
|
||||||
if err != nil {
|
|
||||||
plog.Panicf("unexpected parse member id error: %v", err)
|
|
||||||
}
|
|
||||||
return id
|
|
||||||
}
|
|
||||||
|
|
||||||
func RemovedMemberStoreKey(id types.ID) string {
|
|
||||||
return path.Join(storeRemovedMembersPrefix, id.String())
|
|
||||||
}
|
|
|
@ -17,6 +17,4 @@ package rpctypes
var (
var (
    MetadataRequireLeaderKey = "hasleader"
    MetadataRequireLeaderKey = "hasleader"
    MetadataHasLeader        = "true"
    MetadataHasLeader        = "true"

    MetadataClientAPIVersionKey = "client-api-version"
)
)
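The `hasleader` metadata key above is what the v3 client attaches when a caller wants requests to fail fast on members that have lost their leader. A minimal sketch using the public clientv3 helper (the endpoint is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// WithRequireLeader adds the "hasleader" gRPC metadata, so watches and
	// requests error out instead of hanging when the member has no leader.
	ctx := clientv3.WithRequireLeader(context.Background())
	for resp := range cli.Watch(ctx, "foo") {
		fmt.Println(resp.Events)
	}
}
```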
@ -1,33 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "sync/atomic"
)

// consistentIndex represents the offset of an entry in a consistent replica log.
// It implements the mvcc.ConsistentIndexGetter interface.
// It is always set to the offset of current entry before executing the entry,
// so ConsistentWatchableKV could get the consistent index from it.
type consistentIndex uint64

func (i *consistentIndex) setConsistentIndex(v uint64) {
    atomic.StoreUint64((*uint64)(i), v)
}

func (i *consistentIndex) ConsistentIndex() uint64 {
    return atomic.LoadUint64((*uint64)(i))
}
@ -1,52 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "errors"
    "fmt"
)

var (
    ErrUnknownMethod              = errors.New("etcdserver: unknown method")
    ErrStopped                    = errors.New("etcdserver: server stopped")
    ErrCanceled                   = errors.New("etcdserver: request cancelled")
    ErrTimeout                    = errors.New("etcdserver: request timed out")
    ErrTimeoutDueToLeaderFail     = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
    ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
    ErrTimeoutLeaderTransfer      = errors.New("etcdserver: request timed out, leader transfer took too long")
    ErrLeaderChanged              = errors.New("etcdserver: leader changed")
    ErrNotEnoughStartedMembers    = errors.New("etcdserver: re-configuration failed due to not enough started members")
    ErrLearnerNotReady            = errors.New("etcdserver: can only promote a learner member which is in sync with leader")
    ErrNoLeader                   = errors.New("etcdserver: no leader")
    ErrNotLeader                  = errors.New("etcdserver: not leader")
    ErrRequestTooLarge            = errors.New("etcdserver: request is too large")
    ErrNoSpace                    = errors.New("etcdserver: no space")
    ErrTooManyRequests            = errors.New("etcdserver: too many requests")
    ErrUnhealthy                  = errors.New("etcdserver: unhealthy cluster")
    ErrKeyNotFound                = errors.New("etcdserver: key not found")
    ErrCorrupt                    = errors.New("etcdserver: corrupt cluster")
    ErrBadLeaderTransferee        = errors.New("etcdserver: bad leader transferee")
    ErrMemberRemoved              = errors.New("the member has been permanently removed from the cluster")
)

type DiscoveryError struct {
    Op  string
    Err error
}

func (e DiscoveryError) Error() string {
    return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
}
File diff suppressed because it is too large
@ -137,7 +137,7 @@ type loggableValueCompare struct {
    Result    Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
    Result    Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
    Target    Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
    Target    Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
    Key       []byte                `protobuf:"bytes,3,opt,name=key,proto3"`
    Key       []byte                `protobuf:"bytes,3,opt,name=key,proto3"`
    ValueSize int64                 `protobuf:"varint,7,opt,name=value_size,proto3"`
    ValueSize int                   `protobuf:"bytes,7,opt,name=value_size,proto3"`
    RangeEnd  []byte                `protobuf:"bytes,64,opt,name=range_end,proto3"`
    RangeEnd  []byte                `protobuf:"bytes,64,opt,name=range_end,proto3"`
}
}

@ -146,7 +146,7 @@ func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompar
    c.Result,
    c.Result,
    c.Target,
    c.Target,
    c.Key,
    c.Key,
    int64(len(cv.Value)),
    len(cv.Value),
    c.RangeEnd,
    c.RangeEnd,
}
}

@ -160,7 +160,7 @@ func (*loggableValueCompare) ProtoMessage() {}
// To preserve proto encoding of the key bytes, a faked out proto type is used here.
// To preserve proto encoding of the key bytes, a faked out proto type is used here.
type loggablePutRequest struct {
type loggablePutRequest struct {
    Key         []byte `protobuf:"bytes,1,opt,name=key,proto3"`
    Key         []byte `protobuf:"bytes,1,opt,name=key,proto3"`
    ValueSize   int64  `protobuf:"varint,2,opt,name=value_size,proto3"`
    ValueSize   int    `protobuf:"varint,2,opt,name=value_size,proto3"`
    Lease       int64  `protobuf:"varint,3,opt,name=lease,proto3"`
    Lease       int64  `protobuf:"varint,3,opt,name=lease,proto3"`
    PrevKv      bool   `protobuf:"varint,4,opt,name=prev_kv,proto3"`
    PrevKv      bool   `protobuf:"varint,4,opt,name=prev_kv,proto3"`
    IgnoreValue bool   `protobuf:"varint,5,opt,name=ignore_value,proto3"`
    IgnoreValue bool   `protobuf:"varint,5,opt,name=ignore_value,proto3"`

@ -170,7 +170,7 @@ type loggablePutRequest struct {
func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
    return &loggablePutRequest{
    return &loggablePutRequest{
        request.Key,
        request.Key,
        int64(len(request.Value)),
        len(request.Value),
        request.Lease,
        request.Lease,
        request.PrevKv,
        request.PrevKv,
        request.IgnoreValue,
        request.IgnoreValue,

@ -104,9 +104,7 @@ var RangeRequest_SortTarget_value = map[string]int32{
func (x RangeRequest_SortTarget) String() string {
func (x RangeRequest_SortTarget) String() string {
    return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
    return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
}
}
func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) {
func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 1} }
    return fileDescriptorRpc, []int{1, 1}
}

type Compare_CompareResult int32
type Compare_CompareResult int32
@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

[Full standard Apache License 2.0 text added for the vendored module: terms and conditions sections 1-9, the appendix boilerplate copyright notice, and the closing "Licensed under the Apache License, Version 2.0" paragraph.]
@ -12,5 +12,5 @@
// See the License for the specific language governing permissions and
// See the License for the specific language governing permissions and
// limitations under the License.
// limitations under the License.

// Package fileutil implements utility functions related to files and paths.
// Package snapshot implements utilities around etcd snapshot.
package fileutil
package snapshot
@ -14,7 +14,9 @@

package snapshot
package snapshot

import "encoding/binary"
import (
    "encoding/binary"
)

type revision struct {
type revision struct {
    main int64
    main int64

@ -27,9 +29,3 @@ func bytesToRev(bytes []byte) revision {
        sub: int64(binary.BigEndian.Uint64(bytes[9:])),
        sub: int64(binary.BigEndian.Uint64(bytes[9:])),
    }
    }
}
}

// initIndex implements ConsistentIndexGetter so the snapshot won't block
// the new raft instance by waiting for a future raft index.
type initIndex int

func (i *initIndex) ConsistentIndex() uint64 { return uint64(*i) }
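The revision helper above decodes the mvcc key layout: an 8-byte big-endian main revision, a separator byte, and an 8-byte sub revision. A standalone sketch of the same decoding, assuming that 17-byte layout and the `_` separator used by mvcc keys:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

type revision struct{ main, sub int64 }

// bytesToRev mirrors the helper above: b[0:8] is the main revision,
// b[8] is a separator byte, b[9:17] is the sub revision.
func bytesToRev(b []byte) revision {
	return revision{
		main: int64(binary.BigEndian.Uint64(b[0:8])),
		sub:  int64(binary.BigEndian.Uint64(b[9:])),
	}
}

func main() {
	key := make([]byte, 17)
	binary.BigEndian.PutUint64(key[0:8], 42) // main revision
	key[8] = '_'                             // separator (assumed)
	binary.BigEndian.PutUint64(key[9:], 7)   // sub revision
	fmt.Printf("%+v\n", bytesToRev(key))     // {main:42 sub:7}
}
```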
@ -21,31 +21,29 @@ import (
    "fmt"
    "fmt"
    "hash/crc32"
    "hash/crc32"
    "io"
    "io"
    "math"
    "os"
    "os"
    "path/filepath"
    "path/filepath"
    "reflect"
    "reflect"
    "strings"
    "strings"
    "time"

    "github.com/dustin/go-humanize"
    bolt "go.etcd.io/bbolt"
    bolt "go.etcd.io/bbolt"
    "go.etcd.io/etcd/clientv3"
    "go.etcd.io/etcd/api/v3/etcdserverpb"
    "go.etcd.io/etcd/etcdserver"
    "go.etcd.io/etcd/client/pkg/v3/fileutil"
    "go.etcd.io/etcd/etcdserver/api/membership"
    "go.etcd.io/etcd/client/pkg/v3/types"
    "go.etcd.io/etcd/etcdserver/api/snap"
    "go.etcd.io/etcd/client/v3"
    "go.etcd.io/etcd/etcdserver/api/v2store"
    "go.etcd.io/etcd/client/v3/snapshot"
    "go.etcd.io/etcd/etcdserver/etcdserverpb"
    "go.etcd.io/etcd/raft/v3"
    "go.etcd.io/etcd/lease"
    "go.etcd.io/etcd/raft/v3/raftpb"
    "go.etcd.io/etcd/mvcc"
    "go.etcd.io/etcd/server/v3/config"
    "go.etcd.io/etcd/mvcc/backend"
    "go.etcd.io/etcd/server/v3/etcdserver"
    "go.etcd.io/etcd/pkg/fileutil"
    "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
    "go.etcd.io/etcd/pkg/traceutil"
    "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
    "go.etcd.io/etcd/pkg/types"
    "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
    "go.etcd.io/etcd/raft"
    "go.etcd.io/etcd/server/v3/etcdserver/cindex"
    "go.etcd.io/etcd/raft/raftpb"
    "go.etcd.io/etcd/server/v3/mvcc/backend"
    "go.etcd.io/etcd/wal"
    "go.etcd.io/etcd/server/v3/verify"
    "go.etcd.io/etcd/wal/walpb"
    "go.etcd.io/etcd/server/v3/wal"
    "go.etcd.io/etcd/server/v3/wal/walpb"
    "go.uber.org/zap"
    "go.uber.org/zap"
)
)

@ -80,11 +78,11 @@ func NewV3(lg *zap.Logger) Manager {
type v3Manager struct {
type v3Manager struct {
    lg *zap.Logger
    lg *zap.Logger

    name      string
    name      string
    dbPath    string
    srcDbPath string
    walDir    string
    walDir    string
    snapDir   string
    snapDir   string
    cl        *membership.RaftCluster
    cl        *membership.RaftCluster

    skipHashCheck bool
    skipHashCheck bool
}
}

@ -99,58 +97,7 @@ func hasChecksum(n int64) bool {

// Save fetches snapshot from remote etcd server and saves data to target path.
// Save fetches snapshot from remote etcd server and saves data to target path.
func (s *v3Manager) Save(ctx context.Context, cfg clientv3.Config, dbPath string) error {
func (s *v3Manager) Save(ctx context.Context, cfg clientv3.Config, dbPath string) error {
    if len(cfg.Endpoints) != 1 {
    return snapshot.Save(ctx, s.lg, cfg, dbPath)
        return fmt.Errorf("snapshot must be requested to one selected node, not multiple %v", cfg.Endpoints)
    }
    cli, err := clientv3.New(cfg)
    if err != nil {
        return err
    }
    defer cli.Close()

    partpath := dbPath + ".part"
    defer os.RemoveAll(partpath)

    var f *os.File
    f, err = os.OpenFile(partpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fileutil.PrivateFileMode)
    if err != nil {
        return fmt.Errorf("could not open %s (%v)", partpath, err)
    }
    s.lg.Info("created temporary db file", zap.String("path", partpath))

    now := time.Now()
    var rd io.ReadCloser
    rd, err = cli.Snapshot(ctx)
    if err != nil {
        return err
    }
    s.lg.Info("fetching snapshot", zap.String("endpoint", cfg.Endpoints[0]))
    var size int64
    size, err = io.Copy(f, rd)
    if err != nil {
        return err
    }
    if !hasChecksum(size) {
        return fmt.Errorf("sha256 checksum not found [bytes: %d]", size)
    }
    if err = fileutil.Fsync(f); err != nil {
        return err
    }
    if err = f.Close(); err != nil {
        return err
    }
    s.lg.Info(
        "fetched snapshot",
        zap.String("endpoint", cfg.Endpoints[0]),
        zap.String("size", humanize.Bytes(uint64(size))),
        zap.Duration("took", time.Since(now)),
    )

    if err = os.Rename(partpath, dbPath); err != nil {
        return fmt.Errorf("could not rename %s to %s (%v)", partpath, dbPath, err)
    }
    s.lg.Info("saved", zap.String("path", dbPath))
    return nil
}
}

// Status is the snapshot file status.
// Status is the snapshot file status.
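With etcd v3.5 the manager's Save above becomes a thin wrapper around the client-side snapshot package. A minimal sketch of calling it directly (endpoint and file names are placeholders):

```go
package main

import (
	"context"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/snapshot"
	"go.uber.org/zap"
)

func main() {
	lg, _ := zap.NewProduction()
	cfg := clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"}, // a single member
		DialTimeout: 5 * time.Second,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Streams the member's backend to snapshot.db and verifies the
	// trailing sha256 checksum, as the old hand-rolled Save body did.
	if err := snapshot.Save(ctx, lg, cfg, "snapshot.db"); err != nil {
		lg.Fatal("snapshot failed", zap.Error(err))
	}
}
```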
@ -191,18 +138,26 @@ func (s *v3Manager) Status(dbPath string) (ds Status, err error) {
|
||||||
if b == nil {
|
if b == nil {
|
||||||
return fmt.Errorf("cannot get hash of bucket %s", string(next))
|
return fmt.Errorf("cannot get hash of bucket %s", string(next))
|
||||||
}
|
}
|
||||||
h.Write(next)
|
if _, err := h.Write(next); err != nil {
|
||||||
|
return fmt.Errorf("cannot write bucket %s : %v", string(next), err)
|
||||||
|
}
|
||||||
iskeyb := (string(next) == "key")
|
iskeyb := (string(next) == "key")
|
||||||
b.ForEach(func(k, v []byte) error {
|
if err := b.ForEach(func(k, v []byte) error {
|
||||||
h.Write(k)
|
if _, err := h.Write(k); err != nil {
|
||||||
h.Write(v)
|
return fmt.Errorf("cannot write to bucket %s", err.Error())
|
||||||
|
}
|
||||||
|
if _, err := h.Write(v); err != nil {
|
||||||
|
return fmt.Errorf("cannot write to bucket %s", err.Error())
|
||||||
|
}
|
||||||
if iskeyb {
|
if iskeyb {
|
||||||
rev := bytesToRev(k)
|
rev := bytesToRev(k)
|
||||||
ds.Revision = rev.main
|
ds.Revision = rev.main
|
||||||
}
|
}
|
||||||
ds.TotalKey++
|
ds.TotalKey++
|
||||||
return nil
|
return nil
|
||||||
})
|
}); err != nil {
|
||||||
|
return fmt.Errorf("cannot write bucket %s : %v", string(next), err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
|
@ -256,7 +211,7 @@ func (s *v3Manager) Restore(cfg RestoreConfig) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
srv := etcdserver.ServerConfig{
|
srv := config.ServerConfig{
|
||||||
Logger: s.lg,
|
Logger: s.lg,
|
||||||
Name: cfg.Name,
|
Name: cfg.Name,
|
||||||
PeerURLs: pURLs,
|
PeerURLs: pURLs,
|
||||||
|
@ -276,8 +231,8 @@ func (s *v3Manager) Restore(cfg RestoreConfig) error {
|
||||||
if dataDir == "" {
|
if dataDir == "" {
|
||||||
dataDir = cfg.Name + ".etcd"
|
dataDir = cfg.Name + ".etcd"
|
||||||
}
|
}
|
||||||
if fileutil.Exist(dataDir) {
|
if fileutil.Exist(dataDir) && !fileutil.DirEmpty(dataDir) {
|
||||||
return fmt.Errorf("data-dir %q exists", dataDir)
|
return fmt.Errorf("data-dir %q not empty or could not be read", dataDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
walDir := cfg.OutputWALDir
|
walDir := cfg.OutputWALDir
|
||||||
|
@ -288,52 +243,85 @@ func (s *v3Manager) Restore(cfg RestoreConfig) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
s.name = cfg.Name
|
s.name = cfg.Name
|
||||||
s.dbPath = cfg.SnapshotPath
|
s.srcDbPath = cfg.SnapshotPath
|
||||||
s.walDir = walDir
|
s.walDir = walDir
|
||||||
s.snapDir = filepath.Join(dataDir, "member", "snap")
|
s.snapDir = filepath.Join(dataDir, "member", "snap")
|
||||||
s.skipHashCheck = cfg.SkipHashCheck
|
s.skipHashCheck = cfg.SkipHashCheck
|
||||||
|
|
||||||
s.lg.Info(
|
s.lg.Info(
|
||||||
"restoring snapshot",
|
"restoring snapshot",
|
||||||
zap.String("path", s.dbPath),
|
zap.String("path", s.srcDbPath),
|
||||||
zap.String("wal-dir", s.walDir),
|
zap.String("wal-dir", s.walDir),
|
||||||
zap.String("data-dir", dataDir),
|
zap.String("data-dir", dataDir),
|
||||||
zap.String("snap-dir", s.snapDir),
|
zap.String("snap-dir", s.snapDir),
|
||||||
|
zap.Stack("stack"),
|
||||||
)
|
)
|
||||||
|
|
||||||
if err = s.saveDB(); err != nil {
|
if err = s.saveDB(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err = s.saveWALAndSnap(); err != nil {
|
hardstate, err := s.saveWALAndSnap()
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := s.updateCIndex(hardstate.Commit, hardstate.Term); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
s.lg.Info(
|
s.lg.Info(
|
||||||
"restored snapshot",
|
"restored snapshot",
|
||||||
zap.String("path", s.dbPath),
|
zap.String("path", s.srcDbPath),
|
||||||
zap.String("wal-dir", s.walDir),
|
zap.String("wal-dir", s.walDir),
|
||||||
zap.String("data-dir", dataDir),
|
zap.String("data-dir", dataDir),
|
||||||
zap.String("snap-dir", s.snapDir),
|
zap.String("snap-dir", s.snapDir),
|
||||||
)
|
)
|
||||||
|
|
||||||
return nil
|
return verify.VerifyIfEnabled(verify.Config{
|
||||||
|
ExactIndex: true,
|
||||||
|
Logger: s.lg,
|
||||||
|
DataDir: dataDir,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *v3Manager) outDbPath() string {
|
||||||
|
return filepath.Join(s.snapDir, "db")
|
||||||
}
|
}
|
||||||
|
|
||||||
// saveDB copies the database snapshot to the snapshot directory
|
// saveDB copies the database snapshot to the snapshot directory
|
||||||
func (s *v3Manager) saveDB() error {
|
func (s *v3Manager) saveDB() error {
|
||||||
f, ferr := os.OpenFile(s.dbPath, os.O_RDONLY, 0600)
|
err := s.copyAndVerifyDB()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
be := backend.NewDefaultBackend(s.outDbPath())
|
||||||
|
defer be.Close()
|
||||||
|
|
||||||
|
err = membership.TrimMembershipFromBackend(s.lg, be)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *v3Manager) copyAndVerifyDB() error {
|
||||||
|
srcf, ferr := os.Open(s.srcDbPath)
|
||||||
if ferr != nil {
|
if ferr != nil {
|
||||||
return ferr
|
return ferr
|
||||||
}
|
}
|
||||||
defer f.Close()
|
defer srcf.Close()
|
||||||
|
|
||||||
// get snapshot integrity hash
|
// get snapshot integrity hash
|
||||||
if _, err := f.Seek(-sha256.Size, io.SeekEnd); err != nil {
|
if _, err := srcf.Seek(-sha256.Size, io.SeekEnd); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
sha := make([]byte, sha256.Size)
|
sha := make([]byte, sha256.Size)
|
||||||
if _, err := f.Read(sha); err != nil {
|
if _, err := srcf.Read(sha); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, err := f.Seek(0, io.SeekStart); err != nil {
|
if _, err := srcf.Seek(0, io.SeekStart); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -341,12 +329,20 @@ func (s *v3Manager) saveDB() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
dbpath := filepath.Join(s.snapDir, "db")
|
outDbPath := s.outDbPath()
|
||||||
db, dberr := os.OpenFile(dbpath, os.O_RDWR|os.O_CREATE, 0600)
|
|
||||||
|
db, dberr := os.OpenFile(outDbPath, os.O_RDWR|os.O_CREATE, 0600)
|
||||||
if dberr != nil {
|
if dberr != nil {
|
||||||
return dberr
|
return dberr
|
||||||
}
|
}
|
||||||
if _, err := io.Copy(db, f); err != nil {
|
dbClosed := false
|
||||||
|
defer func() {
|
||||||
|
if !dbClosed {
|
||||||
|
db.Close()
|
||||||
|
dbClosed = true
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if _, err := io.Copy(db, srcf); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -383,62 +379,36 @@ func (s *v3Manager) saveDB() error {
|
||||||
|
|
||||||
// db hash is OK, can now modify DB so it can be part of a new cluster
|
// db hash is OK, can now modify DB so it can be part of a new cluster
|
||||||
db.Close()
|
db.Close()
|
||||||
|
|
||||||
commit := len(s.cl.Members())
|
|
||||||
|
|
||||||
// update consistentIndex so applies go through on etcdserver despite
|
|
||||||
// having a new raft instance
|
|
||||||
be := backend.NewDefaultBackend(dbpath)
|
|
||||||
|
|
||||||
// a lessor never timeouts leases
|
|
||||||
lessor := lease.NewLessor(s.lg, be, lease.LessorConfig{MinLeaseTTL: math.MaxInt64})
|
|
||||||
|
|
||||||
mvs := mvcc.NewStore(s.lg, be, lessor, (*initIndex)(&commit), mvcc.StoreConfig{CompactionBatchLimit: math.MaxInt32})
|
|
||||||
txn := mvs.Write(traceutil.TODO())
|
|
||||||
btx := be.BatchTx()
|
|
||||||
del := func(k, v []byte) error {
|
|
||||||
txn.DeleteRange(k, nil)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// delete stored members from old cluster since using new members
|
|
||||||
btx.UnsafeForEach([]byte("members"), del)
|
|
||||||
|
|
||||||
// todo: add back new members when we start to deprecate old snap file.
|
|
||||||
btx.UnsafeForEach([]byte("members_removed"), del)
|
|
||||||
|
|
||||||
// trigger write-out of new consistent index
|
|
||||||
txn.End()
|
|
||||||
|
|
||||||
mvs.Commit()
|
|
||||||
mvs.Close()
|
|
||||||
be.Close()
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// saveWALAndSnap creates a WAL for the initial cluster
|
// saveWALAndSnap creates a WAL for the initial cluster
|
||||||
func (s *v3Manager) saveWALAndSnap() error {
|
//
|
||||||
|
// TODO: This code ignores learners !!!
|
||||||
|
func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
|
||||||
if err := fileutil.CreateDirAll(s.walDir); err != nil {
|
if err := fileutil.CreateDirAll(s.walDir); err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// add members again to persist them to the store we create.
|
// add members again to persist them to the store we create.
|
||||||
st := v2store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
|
st := v2store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
|
||||||
s.cl.SetStore(st)
|
s.cl.SetStore(st)
|
||||||
|
be := backend.NewDefaultBackend(s.outDbPath())
|
||||||
|
defer be.Close()
|
||||||
|
s.cl.SetBackend(be)
|
||||||
for _, m := range s.cl.Members() {
|
for _, m := range s.cl.Members() {
|
||||||
s.cl.AddMember(m)
|
s.cl.AddMember(m, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
m := s.cl.MemberByName(s.name)
|
m := s.cl.MemberByName(s.name)
|
||||||
md := &etcdserverpb.Metadata{NodeID: uint64(m.ID), ClusterID: uint64(s.cl.ID())}
|
md := &etcdserverpb.Metadata{NodeID: uint64(m.ID), ClusterID: uint64(s.cl.ID())}
|
||||||
metadata, merr := md.Marshal()
|
metadata, merr := md.Marshal()
|
||||||
if merr != nil {
|
if merr != nil {
|
||||||
return merr
|
return nil, merr
|
||||||
}
|
}
|
||||||
w, walerr := wal.Create(s.lg, s.walDir, metadata)
|
w, walerr := wal.Create(s.lg, s.walDir, metadata)
|
||||||
if walerr != nil {
|
if walerr != nil {
|
||||||
return walerr
|
return nil, walerr
|
||||||
}
|
}
|
||||||
defer w.Close()
|
defer w.Close()
|
||||||
|
|
||||||
|
@ -446,7 +416,7 @@ func (s *v3Manager) saveWALAndSnap() error {
|
||||||
for i, id := range s.cl.MemberIDs() {
|
for i, id := range s.cl.MemberIDs() {
|
||||||
ctx, err := json.Marshal((*s.cl).Member(id))
|
ctx, err := json.Marshal((*s.cl).Member(id))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
|
peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
|
||||||
}
|
}
|
||||||
|
@ -462,7 +432,7 @@ func (s *v3Manager) saveWALAndSnap() error {
|
||||||
}
|
}
|
||||||
d, err := cc.Marshal()
|
d, err := cc.Marshal()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
ents[i] = raftpb.Entry{
|
ents[i] = raftpb.Entry{
|
||||||
Type: raftpb.EntryConfChange,
|
Type: raftpb.EntryConfChange,
|
||||||
|
@ -473,31 +443,42 @@ func (s *v3Manager) saveWALAndSnap() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
commit, term := uint64(len(ents)), uint64(1)
|
commit, term := uint64(len(ents)), uint64(1)
|
||||||
if err := w.Save(raftpb.HardState{
|
hardState := raftpb.HardState{
|
||||||
Term: term,
|
Term: term,
|
||||||
Vote: peers[0].ID,
|
Vote: peers[0].ID,
|
||||||
Commit: commit,
|
Commit: commit,
|
||||||
}, ents); err != nil {
|
}
|
||||||
return err
|
if err := w.Save(hardState, ents); err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
b, berr := st.Save()
|
b, berr := st.Save()
|
||||||
if berr != nil {
|
if berr != nil {
|
||||||
return berr
|
return nil, berr
|
||||||
|
}
|
||||||
|
confState := raftpb.ConfState{
|
||||||
|
Voters: nodeIDs,
|
||||||
}
|
}
|
||||||
raftSnap := raftpb.Snapshot{
|
raftSnap := raftpb.Snapshot{
|
||||||
Data: b,
|
Data: b,
|
||||||
Metadata: raftpb.SnapshotMetadata{
|
Metadata: raftpb.SnapshotMetadata{
|
||||||
Index: commit,
|
Index: commit,
|
||||||
Term: term,
|
Term: term,
|
||||||
ConfState: raftpb.ConfState{
|
ConfState: confState,
|
||||||
Voters: nodeIDs,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
sn := snap.New(s.lg, s.snapDir)
|
sn := snap.New(s.lg, s.snapDir)
|
||||||
if err := sn.SaveSnap(raftSnap); err != nil {
|
if err := sn.SaveSnap(raftSnap); err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
return w.SaveSnapshot(walpb.Snapshot{Index: commit, Term: term})
|
snapshot := walpb.Snapshot{Index: commit, Term: term, ConfState: &confState}
|
||||||
|
return &hardState, w.SaveSnapshot(snapshot)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *v3Manager) updateCIndex(commit uint64, term uint64) error {
|
||||||
|
be := backend.NewDefaultBackend(s.outDbPath())
|
||||||
|
defer be.Close()
|
||||||
|
|
||||||
|
cindex.UpdateConsistentIndex(be.BatchTx(), commit, term, false)
|
||||||
|
return nil
|
||||||
}
|
}
|
|
@ -1,210 +0,0 @@
|
||||||
// Copyright 2017 The etcd Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package backend
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"math"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
bolt "go.etcd.io/bbolt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys;
|
|
||||||
// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket
|
|
||||||
// is known to never overwrite any key so range is safe.
|
|
||||||
var safeRangeBucket = []byte("key")
|
|
||||||
|
|
||||||
type ReadTx interface {
|
|
||||||
Lock()
|
|
||||||
Unlock()
|
|
||||||
RLock()
|
|
||||||
RUnlock()
|
|
||||||
|
|
||||||
UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte)
|
|
||||||
UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type readTx struct {
|
|
||||||
// mu protects accesses to the txReadBuffer
|
|
||||||
mu sync.RWMutex
|
|
||||||
buf txReadBuffer
|
|
||||||
|
|
||||||
// TODO: group and encapsulate {txMu, tx, buckets, txWg}, as they share the same lifecycle.
|
|
||||||
// txMu protects accesses to buckets and tx on Range requests.
|
|
||||||
txMu sync.RWMutex
|
|
||||||
tx *bolt.Tx
|
|
||||||
buckets map[string]*bolt.Bucket
|
|
||||||
// txWg protects tx from being rolled back at the end of a batch interval until all reads using this tx are done.
|
|
||||||
txWg *sync.WaitGroup
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rt *readTx) Lock() { rt.mu.Lock() }
|
|
||||||
func (rt *readTx) Unlock() { rt.mu.Unlock() }
|
|
||||||
func (rt *readTx) RLock() { rt.mu.RLock() }
|
|
||||||
func (rt *readTx) RUnlock() { rt.mu.RUnlock() }
|
|
||||||
|
|
||||||
func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
|
|
||||||
if endKey == nil {
|
|
||||||
// forbid duplicates for single keys
|
|
||||||
limit = 1
|
|
||||||
}
|
|
||||||
if limit <= 0 {
|
|
||||||
limit = math.MaxInt64
|
|
||||||
}
|
|
||||||
if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) {
|
|
||||||
panic("do not use unsafeRange on non-keys bucket")
|
|
||||||
}
|
|
||||||
keys, vals := rt.buf.Range(bucketName, key, endKey, limit)
|
|
||||||
if int64(len(keys)) == limit {
|
|
||||||
return keys, vals
|
|
||||||
}
|
|
||||||
|
|
||||||
// find/cache bucket
|
|
||||||
	bn := string(bucketName)
	rt.txMu.RLock()
	bucket, ok := rt.buckets[bn]
	rt.txMu.RUnlock()
	if !ok {
		rt.txMu.Lock()
		bucket = rt.tx.Bucket(bucketName)
		rt.buckets[bn] = bucket
		rt.txMu.Unlock()
	}

	// ignore missing bucket since may have been created in this batch
	if bucket == nil {
		return keys, vals
	}
	rt.txMu.Lock()
	c := bucket.Cursor()
	rt.txMu.Unlock()

	k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys)))
	return append(k2, keys...), append(v2, vals...)
}

func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
	dups := make(map[string]struct{})
	getDups := func(k, v []byte) error {
		dups[string(k)] = struct{}{}
		return nil
	}
	visitNoDup := func(k, v []byte) error {
		if _, ok := dups[string(k)]; ok {
			return nil
		}
		return visitor(k, v)
	}
	if err := rt.buf.ForEach(bucketName, getDups); err != nil {
		return err
	}
	rt.txMu.Lock()
	err := unsafeForEach(rt.tx, bucketName, visitNoDup)
	rt.txMu.Unlock()
	if err != nil {
		return err
	}
	return rt.buf.ForEach(bucketName, visitor)
}

func (rt *readTx) reset() {
	rt.buf.reset()
	rt.buckets = make(map[string]*bolt.Bucket)
	rt.tx = nil
	rt.txWg = new(sync.WaitGroup)
}

// TODO: create a base type for readTx and concurrentReadTx to avoid duplicated function implementation?
type concurrentReadTx struct {
	buf     txReadBuffer
	txMu    *sync.RWMutex
	tx      *bolt.Tx
	buckets map[string]*bolt.Bucket
	txWg    *sync.WaitGroup
}

func (rt *concurrentReadTx) Lock()   {}
func (rt *concurrentReadTx) Unlock() {}

// RLock is no-op. concurrentReadTx does not need to be locked after it is created.
func (rt *concurrentReadTx) RLock() {}

// RUnlock signals the end of concurrentReadTx.
func (rt *concurrentReadTx) RUnlock() { rt.txWg.Done() }

func (rt *concurrentReadTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
	dups := make(map[string]struct{})
	getDups := func(k, v []byte) error {
		dups[string(k)] = struct{}{}
		return nil
	}
	visitNoDup := func(k, v []byte) error {
		if _, ok := dups[string(k)]; ok {
			return nil
		}
		return visitor(k, v)
	}
	if err := rt.buf.ForEach(bucketName, getDups); err != nil {
		return err
	}
	rt.txMu.Lock()
	err := unsafeForEach(rt.tx, bucketName, visitNoDup)
	rt.txMu.Unlock()
	if err != nil {
		return err
	}
	return rt.buf.ForEach(bucketName, visitor)
}

func (rt *concurrentReadTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
	if endKey == nil {
		// forbid duplicates for single keys
		limit = 1
	}
	if limit <= 0 {
		limit = math.MaxInt64
	}
	if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) {
		panic("do not use unsafeRange on non-keys bucket")
	}
	keys, vals := rt.buf.Range(bucketName, key, endKey, limit)
	if int64(len(keys)) == limit {
		return keys, vals
	}

	// find/cache bucket
	bn := string(bucketName)
	rt.txMu.RLock()
	bucket, ok := rt.buckets[bn]
	rt.txMu.RUnlock()
	if !ok {
		rt.txMu.Lock()
		bucket = rt.tx.Bucket(bucketName)
		rt.buckets[bn] = bucket
		rt.txMu.Unlock()
	}

	// ignore missing bucket since may have been created in this batch
	if bucket == nil {
		return keys, vals
	}
	rt.txMu.Lock()
	c := bucket.Cursor()
	rt.txMu.Unlock()

	k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys)))
	return append(k2, keys...), append(v2, vals...)
}
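Not part of the commit itself: the deleted helpers above layer a write-buffer lookup (rt.buf.Range) in front of a BoltDB cursor scan and cache bucket handles by name so repeated reads skip tx.Bucket lookups. Below is a minimal, self-contained sketch of the underlying cursor-range pattern against go.etcd.io/bbolt; the database path, bucket name, and keys are illustrative only.

package main

import (
	"bytes"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

// rangeScan walks the cursor from key up to (but not including) endKey,
// stopping after limit entries — the same idea as the unsafeRange helper.
func rangeScan(c *bolt.Cursor, key, endKey []byte, limit int64) (keys, vals [][]byte) {
	for ck, cv := c.Seek(key); ck != nil && bytes.Compare(ck, endKey) < 0; ck, cv = c.Next() {
		keys = append(keys, ck)
		vals = append(vals, cv)
		if int64(len(keys)) >= limit {
			break
		}
	}
	return keys, vals
}

func main() {
	db, err := bolt.Open("demo.db", 0600, nil) // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Seed a "key" bucket, then range over it in a read-only transaction.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("key"))
		if err != nil {
			return err
		}
		for _, k := range []string{"a", "b", "c"} {
			if err := b.Put([]byte(k), []byte("v-"+k)); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	if err := db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket([]byte("key")).Cursor()
		ks, vs := rangeScan(c, []byte("a"), []byte("c"), 10)
		for i := range ks {
			fmt.Printf("%s=%s\n", ks[i], vs[i])
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}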
@ -1,27 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package fileutil

import "os"

const (
	// PrivateDirMode grants owner to make/remove files inside the directory.
	PrivateDirMode = 0700
)

// OpenDir opens a directory for syncing.
func OpenDir(path string) (*os.File, error) { return os.Open(path) }
@ -1,51 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build windows

package fileutil

import (
	"os"
	"syscall"
)

const (
	// PrivateDirMode grants owner to make/remove files inside the directory.
	PrivateDirMode = 0777
)

// OpenDir opens a directory in windows with write access for syncing.
func OpenDir(path string) (*os.File, error) {
	fd, err := openDir(path)
	if err != nil {
		return nil, err
	}
	return os.NewFile(uintptr(fd), path), nil
}

func openDir(path string) (fd syscall.Handle, err error) {
	if len(path) == 0 {
		return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
	}
	pathp, err := syscall.UTF16PtrFromString(path)
	if err != nil {
		return syscall.InvalidHandle, err
	}
	access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
	sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
	createmode := uint32(syscall.OPEN_EXISTING)
	fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
	return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
}
@ -1,129 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileutil

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/coreos/pkg/capnslog"
)

const (
	// PrivateFileMode grants owner to read/write a file.
	PrivateFileMode = 0600
)

var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "pkg/fileutil")

// IsDirWriteable checks if dir is writable by writing and removing a file
// to dir. It returns nil if dir is writable.
func IsDirWriteable(dir string) error {
	f := filepath.Join(dir, ".touch")
	if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
		return err
	}
	return os.Remove(f)
}

// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
// does not exists. TouchDirAll also ensures the given directory is writable.
func TouchDirAll(dir string) error {
	// If path is already a directory, MkdirAll does nothing and returns nil, so,
	// first check if dir exist with an expected permission mode.
	if Exist(dir) {
		err := CheckDirPermission(dir, PrivateDirMode)
		if err != nil {
			plog.Warningf("check file permission: %v", err)
		}
	} else {
		err := os.MkdirAll(dir, PrivateDirMode)
		if err != nil {
			// if mkdirAll("a/text") and "text" is not
			// a directory, this will return syscall.ENOTDIR
			return err
		}
	}

	return IsDirWriteable(dir)
}

// CreateDirAll is similar to TouchDirAll but returns error
// if the deepest directory was not empty.
func CreateDirAll(dir string) error {
	err := TouchDirAll(dir)
	if err == nil {
		var ns []string
		ns, err = ReadDir(dir)
		if err != nil {
			return err
		}
		if len(ns) != 0 {
			err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
		}
	}
	return err
}

// Exist returns true if a file or directory exists.
func Exist(name string) bool {
	_, err := os.Stat(name)
	return err == nil
}

// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily
// shorten the length of the file.
func ZeroToEnd(f *os.File) error {
	// TODO: support FALLOC_FL_ZERO_RANGE
	off, err := f.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	lenf, lerr := f.Seek(0, io.SeekEnd)
	if lerr != nil {
		return lerr
	}
	if err = f.Truncate(off); err != nil {
		return err
	}
	// make sure blocks remain allocated
	if err = Preallocate(f, lenf, true); err != nil {
		return err
	}
	_, err = f.Seek(off, io.SeekStart)
	return err
}

// CheckDirPermission checks permission on an existing dir.
// Returns error if dir is empty or exist with a different permission than specified.
func CheckDirPermission(dir string, perm os.FileMode) error {
	if !Exist(dir) {
		return fmt.Errorf("directory %q empty, cannot check permission.", dir)
	}
	//check the existing permission on the directory
	dirInfo, err := os.Stat(dir)
	if err != nil {
		return err
	}
	dirMode := dirInfo.Mode().Perm()
	if dirMode != perm {
		err = fmt.Errorf("directory %q exist, but the permission is %q. The recommended permission is %q to prevent possible unprivileged access to the data.", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode))
		return err
	}
	return nil
}
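Not from the commit: TouchDirAll and IsDirWriteable above combine two steps — create the directory with 0700 if missing, then prove writability with a throwaway probe file. A minimal standalone sketch of that pattern, with an illustrative directory path, looks like this.

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// ensureDir creates dir with 0700 if needed, then verifies it is writable by
// creating and removing a probe file, mirroring TouchDirAll/IsDirWriteable.
func ensureDir(dir string) error {
	if err := os.MkdirAll(dir, 0700); err != nil {
		return err
	}
	probe := filepath.Join(dir, ".touch")
	if err := os.WriteFile(probe, []byte(""), 0600); err != nil {
		return fmt.Errorf("directory %q is not writable: %w", dir, err)
	}
	return os.Remove(probe)
}

func main() {
	if err := ensureDir("./data-demo"); err != nil { // illustrative path
		log.Fatal(err)
	}
	fmt.Println("directory ready")
}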
@ -1,49 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows,!plan9,!solaris

package fileutil

import (
	"os"
	"syscall"
)

func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
	f, err := os.OpenFile(path, flag, perm)
	if err != nil {
		return nil, err
	}
	if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		f.Close()
		if err == syscall.EWOULDBLOCK {
			err = ErrLocked
		}
		return nil, err
	}
	return &LockedFile{f}, nil
}

func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
	f, err := os.OpenFile(path, flag, perm)
	if err != nil {
		return nil, err
	}
	if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil {
		f.Close()
		return nil, err
	}
	return &LockedFile{f}, err
}
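Not from the commit: the flock-based path above is the portable fallback — take an exclusive lock, non-blocking for the "try" variant, and map EWOULDBLOCK to a sentinel error. A self-contained sketch of the same technique on Linux (the lock-file path and error name are illustrative) is below.

//go:build linux

package main

import (
	"errors"
	"fmt"
	"log"
	"os"
	"syscall"
)

var errLocked = errors.New("file already locked")

// tryLock opens path and takes an exclusive, non-blocking flock on it,
// echoing flockTryLockFile above.
func tryLock(path string) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return nil, err
	}
	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		f.Close()
		if err == syscall.EWOULDBLOCK {
			return nil, errLocked
		}
		return nil, err
	}
	return f, nil
}

func main() {
	f, err := tryLock("demo.lock") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close() // closing the descriptor releases the flock
	fmt.Println("lock held by", os.Getpid())
}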
@ -1,97 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package fileutil

import (
	"fmt"
	"io"
	"os"
	"syscall"
)

// This used to call syscall.Flock() but that call fails with EBADF on NFS.
// An alternative is lockf() which works on NFS but that call lets a process lock
// the same file twice. Instead, use Linux's non-standard open file descriptor
// locks which will block if the process already holds the file lock.
//
// constants from /usr/include/bits/fcntl-linux.h
const (
	F_OFD_GETLK  = 37
	F_OFD_SETLK  = 37
	F_OFD_SETLKW = 38
)

var (
	wrlck = syscall.Flock_t{
		Type:   syscall.F_WRLCK,
		Whence: int16(io.SeekStart),
		Start:  0,
		Len:    0,
	}

	linuxTryLockFile = flockTryLockFile
	linuxLockFile    = flockLockFile
)

func init() {
	// use open file descriptor locks if the system supports it
	getlk := syscall.Flock_t{Type: syscall.F_RDLCK}
	if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil {
		linuxTryLockFile = ofdTryLockFile
		linuxLockFile = ofdLockFile
	}
}

func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
	return linuxTryLockFile(path, flag, perm)
}

func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
	f, err := os.OpenFile(path, flag, perm)
	if err != nil {
		return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%v)", path, err)
	}

	flock := wrlck
	if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil {
		f.Close()
		if err == syscall.EWOULDBLOCK {
			err = ErrLocked
		}
		return nil, err
	}
	return &LockedFile{f}, nil
}

func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
	return linuxLockFile(path, flag, perm)
}

func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
	f, err := os.OpenFile(path, flag, perm)
	if err != nil {
		return nil, fmt.Errorf("ofdLockFile failed to open %q (%v)", path, err)
	}

	flock := wrlck
	err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock)
	if err != nil {
		f.Close()
		return nil, err
	}
	return &LockedFile{f}, nil
}
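Not from the commit: from a caller's point of view the exported surface of this package is just TryLockFile/LockFile plus ErrLocked. A usage sketch against the v3.5 module layout this commit moves to is below; the import path (go.etcd.io/etcd/client/pkg/v3/fileutil) and the lock-file name are assumptions, not confirmed by the diff.

package main

import (
	"fmt"
	"log"
	"os"

	// assumed import path for etcd v3.5's relocated fileutil package
	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	// First caller wins; while the lock is held, a second TryLockFile on the
	// same path is expected to fail with fileutil.ErrLocked.
	l, err := fileutil.TryLockFile("member.lock", os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	if _, err := fileutil.TryLockFile("member.lock", os.O_WRONLY, fileutil.PrivateFileMode); err == fileutil.ErrLocked {
		fmt.Println("second lock attempt correctly refused")
	}
}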
@ -1,45 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileutil

import (
	"os"
	"syscall"
	"time"
)

func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
	if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
		return nil, err
	}
	f, err := os.OpenFile(path, flag, perm)
	if err != nil {
		return nil, ErrLocked
	}
	return &LockedFile{f}, nil
}

func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
	if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
		return nil, err
	}
	for {
		f, err := os.OpenFile(path, flag, perm)
		if err == nil {
			return &LockedFile{f}, nil
		}
		time.Sleep(10 * time.Millisecond)
	}
}
@ -1,62 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build solaris

package fileutil

import (
	"os"
	"syscall"
)

func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
	var lock syscall.Flock_t
	lock.Start = 0
	lock.Len = 0
	lock.Pid = 0
	lock.Type = syscall.F_WRLCK
	lock.Whence = 0
	lock.Pid = 0
	f, err := os.OpenFile(path, flag, perm)
	if err != nil {
		return nil, err
	}
	if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil {
		f.Close()
		if err == syscall.EAGAIN {
			err = ErrLocked
		}
		return nil, err
	}
	return &LockedFile{f}, nil
}

func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
	var lock syscall.Flock_t
	lock.Start = 0
	lock.Len = 0
	lock.Pid = 0
	lock.Type = syscall.F_WRLCK
	lock.Whence = 0
	f, err := os.OpenFile(path, flag, perm)
	if err != nil {
		return nil, err
	}
	if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil {
		f.Close()
		return nil, err
	}
	return &LockedFile{f}, nil
}
@ -1,125 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build windows

package fileutil

import (
	"errors"
	"fmt"
	"os"
	"syscall"
	"unsafe"
)

var (
	modkernel32    = syscall.NewLazyDLL("kernel32.dll")
	procLockFileEx = modkernel32.NewProc("LockFileEx")

	errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.")
)

const (
	// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
	LOCKFILE_EXCLUSIVE_LOCK   = 2
	LOCKFILE_FAIL_IMMEDIATELY = 1

	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
	errLockViolation syscall.Errno = 0x21
)

func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
	f, err := open(path, flag, perm)
	if err != nil {
		return nil, err
	}
	if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil {
		f.Close()
		return nil, err
	}
	return &LockedFile{f}, nil
}

func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
	f, err := open(path, flag, perm)
	if err != nil {
		return nil, err
	}
	if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil {
		f.Close()
		return nil, err
	}
	return &LockedFile{f}, nil
}

func open(path string, flag int, perm os.FileMode) (*os.File, error) {
	if path == "" {
		return nil, fmt.Errorf("cannot open empty filename")
	}
	var access uint32
	switch flag {
	case syscall.O_RDONLY:
		access = syscall.GENERIC_READ
	case syscall.O_WRONLY:
		access = syscall.GENERIC_WRITE
	case syscall.O_RDWR:
		access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
	case syscall.O_WRONLY | syscall.O_CREAT:
		access = syscall.GENERIC_ALL
	default:
		panic(fmt.Errorf("flag %v is not supported", flag))
	}
	fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]),
		access,
		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
		nil,
		syscall.OPEN_ALWAYS,
		syscall.FILE_ATTRIBUTE_NORMAL,
		0)
	if err != nil {
		return nil, err
	}
	return os.NewFile(uintptr(fd), path), nil
}

func lockFile(fd syscall.Handle, flags uint32) error {
	var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK
	flag |= flags
	if fd == syscall.InvalidHandle {
		return nil
	}
	err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{})
	if err == nil {
		return nil
	} else if err.Error() == errLocked.Error() {
		return ErrLocked
	} else if err != errLockViolation {
		return err
	}
	return nil
}

func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	var reserved uint32 = 0
	r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
	if r1 == 0 {
		if e1 != 0 {
			err = error(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return err
}
@ -1,54 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileutil

import (
	"io"
	"os"
)

// Preallocate tries to allocate the space for given
// file. This operation is only supported on linux by a
// few filesystems (btrfs, ext4, etc.).
// If the operation is unsupported, no error will be returned.
// Otherwise, the error encountered will be returned.
func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
	if sizeInBytes == 0 {
		// fallocate will return EINVAL if length is 0; skip
		return nil
	}
	if extendFile {
		return preallocExtend(f, sizeInBytes)
	}
	return preallocFixed(f, sizeInBytes)
}

func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
	curOff, err := f.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	size, err := f.Seek(sizeInBytes, io.SeekEnd)
	if err != nil {
		return err
	}
	if _, err = f.Seek(curOff, io.SeekStart); err != nil {
		return err
	}
	if sizeInBytes > size {
		return nil
	}
	return f.Truncate(sizeInBytes)
}
@ -1,65 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build darwin

package fileutil

import (
	"os"
	"syscall"
	"unsafe"
)

func preallocExtend(f *os.File, sizeInBytes int64) error {
	if err := preallocFixed(f, sizeInBytes); err != nil {
		return err
	}
	return preallocExtendTrunc(f, sizeInBytes)
}

func preallocFixed(f *os.File, sizeInBytes int64) error {
	// allocate all requested space or no space at all
	// TODO: allocate contiguous space on disk with F_ALLOCATECONTIG flag
	fstore := &syscall.Fstore_t{
		Flags:   syscall.F_ALLOCATEALL,
		Posmode: syscall.F_PEOFPOSMODE,
		Length:  sizeInBytes}
	p := unsafe.Pointer(fstore)
	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p))
	if errno == 0 || errno == syscall.ENOTSUP {
		return nil
	}

	// wrong argument to fallocate syscall
	if errno == syscall.EINVAL {
		// filesystem "st_blocks" are allocated in the units of
		// "Allocation Block Size" (run "diskutil info /" command)
		var stat syscall.Stat_t
		syscall.Fstat(int(f.Fd()), &stat)

		// syscall.Statfs_t.Bsize is "optimal transfer block size"
		// and contains matching 4096 value when latest OS X kernel
		// supports 4,096 KB filesystem block size
		var statfs syscall.Statfs_t
		syscall.Fstatfs(int(f.Fd()), &statfs)
		blockSize := int64(statfs.Bsize)

		if stat.Blocks*blockSize >= sizeInBytes {
			// enough blocks are already allocated
			return nil
		}
	}
	return errno
}
@ -1,49 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package fileutil

import (
	"os"
	"syscall"
)

func preallocExtend(f *os.File, sizeInBytes int64) error {
	// use mode = 0 to change size
	err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
	if err != nil {
		errno, ok := err.(syscall.Errno)
		// not supported; fallback
		// fallocate EINTRs frequently in some environments; fallback
		if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
			return preallocExtendTrunc(f, sizeInBytes)
		}
	}
	return err
}

func preallocFixed(f *os.File, sizeInBytes int64) error {
	// use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
	err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
	if err != nil {
		errno, ok := err.(syscall.Errno)
		// treat not supported as nil error
		if ok && errno == syscall.ENOTSUP {
			return nil
		}
	}
	return err
}
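Not from the commit: the Linux path above uses fallocate(2) and falls back to a plain size change when the filesystem does not support it. A compact standalone sketch of that reserve-then-fallback pattern is below; the file name and 64 MiB size are illustrative.

//go:build linux

package main

import (
	"log"
	"os"
	"syscall"
)

// preallocate reserves sizeInBytes for f with fallocate(2); if the filesystem
// rejects it (or the call is interrupted), it falls back to Truncate, echoing
// preallocExtend/preallocExtendTrunc above.
func preallocate(f *os.File, sizeInBytes int64) error {
	err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
	if err == nil {
		return nil
	}
	if errno, ok := err.(syscall.Errno); ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
		return f.Truncate(sizeInBytes)
	}
	return err
}

func main() {
	f, err := os.Create("prealloc-demo.wal") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := preallocate(f, 64*1024*1024); err != nil {
		log.Fatal(err)
	}
}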
@ -1,25 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !linux,!darwin

package fileutil

import "os"

func preallocExtend(f *os.File, sizeInBytes int64) error {
	return preallocExtendTrunc(f, sizeInBytes)
}

func preallocFixed(f *os.File, sizeInBytes int64) error { return nil }
@ -1,98 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileutil

import (
	"os"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"go.uber.org/zap"
)

func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
	return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil)
}

func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) {
	doneC := make(chan struct{})
	errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC)
	return doneC, errC
}

// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil.
// if donec is non-nil, the function closes it to notify its exit.
func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error {
	errC := make(chan error, 1)
	go func() {
		if donec != nil {
			defer close(donec)
		}
		for {
			fnames, err := ReadDir(dirname)
			if err != nil {
				errC <- err
				return
			}
			newfnames := make([]string, 0)
			for _, fname := range fnames {
				if strings.HasSuffix(fname, suffix) {
					newfnames = append(newfnames, fname)
				}
			}
			sort.Strings(newfnames)
			fnames = newfnames
			for len(newfnames) > int(max) {
				f := filepath.Join(dirname, newfnames[0])
				l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
				if err != nil {
					break
				}
				if err = os.Remove(f); err != nil {
					errC <- err
					return
				}
				if err = l.Close(); err != nil {
					if lg != nil {
						lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
					} else {
						plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
					}
					errC <- err
					return
				}
				if lg != nil {
					lg.Info("purged", zap.String("path", f))
				} else {
					plog.Infof("purged file %s successfully", f)
				}
				newfnames = newfnames[1:]
			}
			if purgec != nil {
				for i := 0; i < len(fnames)-len(newfnames); i++ {
					purgec <- fnames[i]
				}
			}
			select {
			case <-time.After(interval):
			case <-stop:
				return
			}
		}
	}()
	return errC
}
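Not from the commit: stripped of the locking, logging, and interval loop, the purge logic above is "keep the newest max files with a suffix, remove the rest in lexical order". A minimal standalone sketch of that core step follows; the ./snap directory, .snap suffix, and retention count are illustrative.

package main

import (
	"log"
	"os"
	"path/filepath"
	"sort"
	"strings"
)

// purgeOldest keeps at most max files ending in suffix inside dir and removes
// the oldest ones by lexical (name) order, like purgeFile above but without
// the lock/interval machinery.
func purgeOldest(dir, suffix string, max int) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	var names []string
	for _, e := range entries {
		if strings.HasSuffix(e.Name(), suffix) {
			names = append(names, e.Name())
		}
	}
	sort.Strings(names)
	for len(names) > max {
		if err := os.Remove(filepath.Join(dir, names[0])); err != nil {
			return err
		}
		names = names[1:]
	}
	return nil
}

func main() {
	// Illustrative: keep the 5 newest *.snap files under ./snap.
	if err := purgeOldest("./snap", ".snap", 5); err != nil {
		log.Fatal(err)
	}
}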
@ -1,70 +0,0 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileutil

import (
	"os"
	"path/filepath"
	"sort"
)

// ReadDirOp represents an read-directory operation.
type ReadDirOp struct {
	ext string
}

// ReadDirOption configures archiver operations.
type ReadDirOption func(*ReadDirOp)

// WithExt filters file names by their extensions.
// (e.g. WithExt(".wal") to list only WAL files)
func WithExt(ext string) ReadDirOption {
	return func(op *ReadDirOp) { op.ext = ext }
}

func (op *ReadDirOp) applyOpts(opts []ReadDirOption) {
	for _, opt := range opts {
		opt(op)
	}
}

// ReadDir returns the filenames in the given directory in sorted order.
func ReadDir(d string, opts ...ReadDirOption) ([]string, error) {
	op := &ReadDirOp{}
	op.applyOpts(opts)

	dir, err := os.Open(d)
	if err != nil {
		return nil, err
	}
	defer dir.Close()

	names, err := dir.Readdirnames(-1)
	if err != nil {
		return nil, err
	}
	sort.Strings(names)

	if op.ext != "" {
		tss := make([]string, 0)
		for _, v := range names {
			if filepath.Ext(v) == op.ext {
				tss = append(tss, v)
			}
		}
		names = tss
	}
	return names, nil
}
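Not from the commit: ReadDirOp/ReadDirOption/WithExt above are a small instance of the functional-options pattern — options are functions that mutate a config struct before the real work runs. A toy sketch of just that pattern (all lower-case names here are hypothetical, not part of the package) is below.

package main

import "fmt"

// listOp mirrors ReadDirOp: a small config struct the options mutate.
type listOp struct {
	ext string
}

// listOption mirrors ReadDirOption, the functional-option type.
type listOption func(*listOp)

// withExt mirrors WithExt: it returns an option that sets the extension filter.
func withExt(ext string) listOption {
	return func(op *listOp) { op.ext = ext }
}

// newListOp applies the options in order, like applyOpts above.
func newListOp(opts ...listOption) *listOp {
	op := &listOp{}
	for _, o := range opts {
		o(op)
	}
	return op
}

func main() {
	fmt.Println(newListOp(withExt(".wal")).ext) // prints ".wal"
}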
@ -1,29 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !linux,!darwin

package fileutil

import "os"

// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform.
func Fsync(f *os.File) error {
	return f.Sync()
}

// Fdatasync is a wrapper around file.Sync(). Special handling is needed on linux platform.
func Fdatasync(f *os.File) error {
	return f.Sync()
}
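Not from the commit: the reason this package exposes both Fsync and OpenDir is the classic durable-write sequence — sync the file, rename it into place, then sync the parent directory so the rename itself survives a crash. A minimal sketch of that sequence for Unix-like systems follows; the file names are illustrative.

package main

import (
	"log"
	"os"
	"path/filepath"
)

// writeDurably writes data to a temp file, fsyncs it, renames it into place,
// and then fsyncs the parent directory so the rename is durable — the pattern
// OpenDir/Fsync in this package exist to support.
func writeDurably(path string, data []byte) error {
	tmp := path + ".tmp"
	f, err := os.OpenFile(tmp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Sync(); err != nil { // flush file contents
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Rename(tmp, path); err != nil {
		return err
	}
	dir, err := os.Open(filepath.Dir(path)) // directory handle for syncing
	if err != nil {
		return err
	}
	defer dir.Close()
	return dir.Sync() // persist the directory entry
}

func main() {
	if err := writeDurably("demo.conf", []byte("hello\n")); err != nil { // illustrative path
		log.Fatal(err)
	}
}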
@ -1,40 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build darwin

package fileutil

import (
	"os"
	"syscall"
)

// Fsync on HFS/OSX flushes the data on to the physical drive but the drive
// may not write it to the persistent media for quite sometime and it may be
// written in out-of-order sequence. Using F_FULLFSYNC ensures that the
// physical drive's buffer will also get flushed to the media.
func Fsync(f *os.File) error {
	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0))
	if errno == 0 {
		return nil
	}
	return errno
}

// Fdatasync on darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence
// on physical drive media.
func Fdatasync(f *os.File) error {
	return Fsync(f)
}