Merge pull request #529 from erikwilson/vendor-v1.14.3-k3s.1

Update vendor v1.14.3-k3s.1
pull/531/head v0.6.0-rc5
Erik Wilson 2019-06-12 14:21:42 -07:00 committed by GitHub
commit 03080e6287
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
40 changed files with 601 additions and 227 deletions

View File

@@ -63,7 +63,7 @@ import:
- package: github.com/coreos/go-iptables
version: 47f22b0dd3355c0ba570ba12b0b8a36bf214c04b
- package: github.com/coreos/go-semver
version: v0.2.0-9-ge214231b295a8e
version: v0.3.0
- package: github.com/coreos/go-systemd
version: 48702e0da86bd25e76cfef347e2adeb434a0d0a6
- package: github.com/coreos/pkg
@@ -95,7 +95,7 @@ import:
- package: github.com/euank/go-kmsg-parser
version: v2.0.0
- package: github.com/evanphx/json-patch
version: v4.1.0-19-g5858425f75500d
version: v4.2.0
- package: github.com/exponent-io/jsonpath
version: d6023ce2651d8eafb5c75bb0c7167536102ec9f5
- package: github.com/fatih/camelcase
@@ -142,7 +142,7 @@ import:
- package: github.com/hashicorp/golang-lru
version: v0.5.0
- package: github.com/ibuildthecloud/kvsql
version: d37dd2b0829b44a4964e48c9396e14b0536fefb6
version: 1afc2d8ad7d7e263c1971b05cb37e83aa5562561
repo: https://github.com/erikwilson/rancher-kvsql.git
- package: github.com/imdario/mergo
version: v0.3.5
@@ -310,7 +310,7 @@ import:
- package: k8s.io/klog
version: v0.2.0-14-g8e90cee79f8237
- package: k8s.io/kubernetes
version: v1.14.1-k3s.4
version: v1.14.3-k3s.1
repo: https://github.com/rancher/k3s.git
transitive: true
staging: true

View File

@@ -9,7 +9,7 @@ package=github.com/opencontainers/runc/libcontainer/nsenter
package=github.com/opencontainers/runc/libcontainer/specconv
package=github.com/opencontainers/runc/contrib/cmd/recvtty
k8s.io/kubernetes v1.14.1-k3s.4 https://github.com/rancher/k3s.git transitive=true,staging=true
k8s.io/kubernetes v1.14.3-k3s.1 https://github.com/rancher/k3s.git transitive=true,staging=true
github.com/rancher/wrangler 4202dbfa88013c19238bb004d82e013f0593493d
github.com/rancher/wrangler-api efe26ac6a9d720e1bfa5a8cc5f8dce5ad598ce26

View File

@@ -268,6 +268,7 @@ func NewCacherFromConfig(config Config) *Cacher {
cacher.stopWg.Add(1)
go func() {
defer cacher.stopWg.Done()
defer cacher.terminateAllWatchers()
wait.Until(
func() {
if !cacher.isStopped() {

View File

@@ -10,6 +10,7 @@ go_test(
name = "go_default_test",
srcs = [
"compact_test.go",
"event_test.go",
"lease_manager_test.go",
"store_test.go",
"watcher_test.go",
@@ -36,7 +37,10 @@
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library",
"//vendor/github.com/coreos/etcd/integration:go_default_library",
"//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
"//vendor/github.com/coreos/pkg/capnslog:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
],
)

View File

@@ -17,6 +17,7 @@ limitations under the License.
package etcd3
import (
"fmt"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/mvcc/mvccpb"
)
@@ -42,7 +43,12 @@ func parseKV(kv *mvccpb.KeyValue) *event {
}
}
func parseEvent(e *clientv3.Event) *event {
func parseEvent(e *clientv3.Event) (*event, error) {
if !e.IsCreate() && e.PrevKv == nil {
// If the previous value is nil, error. One example of how this is possible is if the previous value has been compacted already.
return nil, fmt.Errorf("etcd event received with PrevKv=nil (key=%q, modRevision=%d, type=%s)", string(e.Kv.Key), e.Kv.ModRevision, e.Type.String())
}
ret := &event{
key: string(e.Kv.Key),
value: e.Kv.Value,
@@ -53,5 +59,5 @@ func parseEvent(e *clientv3.Event) *event {
if e.PrevKv != nil {
ret.prevValue = e.PrevKv.Value
}
return ret
return ret, nil
}

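The parseEvent change above guards a real failure mode: the watcher opens its etcd watch with clientv3.WithPrevKV() so that update and delete events carry the previous value, but if that revision has already been compacted, PrevKv can still arrive as nil, and silently treating that as an event with no prior state wedged informers (see #76675 in the changelog below). A minimal sketch of the same guard against the vendored clientv3 API; the endpoint and key prefix are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // illustrative endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// WithPrevKV asks etcd to attach the previous key-value to each event.
	wch := cli.Watch(context.Background(), "/registry/", clientv3.WithPrefix(), clientv3.WithPrevKV())
	for wres := range wch {
		for _, e := range wres.Events {
			// Same invariant the patch enforces: anything that is not a
			// create must carry PrevKv, otherwise the event is unusable.
			if !e.IsCreate() && e.PrevKv == nil {
				fmt.Printf("stopping watch: PrevKv=nil for key %q\n", e.Kv.Key)
				return
			}
			fmt.Printf("event %s key=%q\n", e.Type, e.Kv.Key)
		}
	}
}
```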
View File

@@ -288,19 +288,20 @@ func (s *store) GuaranteedUpdate(
ret, ttl, err := s.updateState(origState, tryUpdate)
if err != nil {
// It's possible we were working with stale data
if mustCheckData && apierrors.IsConflict(err) {
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
return err
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
data, err := runtime.Encode(s.codec, ret)

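The reordering above changes when GuaranteedUpdate refetches: previously it re-read from etcd only on a conflict error, whereas now any update failure while the state may still have come from cache triggers one authoritative re-read before the error is surfaced. The control flow, reduced to its shape (simplified signatures, not the real storage interfaces):

```go
// Sketch of the retry shape after this patch. The cached read is trusted
// once; any tryUpdate failure while mustCheckData is still true triggers a
// single live re-read before the error is returned.
func guaranteedUpdate(getCached, getCurrent func() (state int, err error), tryUpdate func(state int) error) error {
	origState, err := getCached()
	if err != nil {
		return err
	}
	mustCheckData := true
	for {
		if err := tryUpdate(origState); err != nil {
			// If our data is already up to date, return the error.
			if !mustCheckData {
				return err
			}
			// It's possible we were working with stale data: fetch and retry.
			if origState, err = getCurrent(); err != nil {
				return err
			}
			mustCheckData = false
			continue
		}
		return nil
	}
}
```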
View File

@@ -210,7 +210,13 @@ func (wc *watchChan) startWatching(watchClosedCh chan struct{}) {
return
}
for _, e := range wres.Events {
wc.sendEvent(parseEvent(e))
parsedEvent, err := parseEvent(e)
if err != nil {
klog.Errorf("watch chan error: %v", err)
wc.sendError(err)
return
}
wc.sendEvent(parsedEvent)
}
}
// When we come to this point, it's only possible that client side ends the watch.

View File

@@ -171,6 +171,7 @@ func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Conf
// blindly overwrite existing values based on precedence
if len(configAuthInfo.Token) > 0 {
config.BearerToken = configAuthInfo.Token
config.BearerTokenFile = configAuthInfo.TokenFile
} else if len(configAuthInfo.TokenFile) > 0 {
tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile)
if err != nil {

View File

@@ -172,7 +172,7 @@ func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) {
}
func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error {
if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil {
if err := os.MkdirAll(filepath.Dir(filename), 0750); err != nil {
return err
}
@@ -191,7 +191,7 @@ func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Obj
return err
}
err = os.Chmod(f.Name(), 0755)
err = os.Chmod(f.Name(), 0660)
if err != nil {
return err
}

View File

@@ -18,6 +18,7 @@ package disk
import (
"net/http"
"os"
"path/filepath"
"github.com/gregjones/httpcache"
@@ -35,6 +36,8 @@ type cacheRoundTripper struct {
// corresponding requests.
func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper {
d := diskv.New(diskv.Options{
PathPerm: os.FileMode(0750),
FilePerm: os.FileMode(0660),
BasePath: cacheDir,
TempDir: filepath.Join(cacheDir, ".diskv-temp"),
})

View File

@@ -17,6 +17,7 @@ limitations under the License.
package dynamic
import (
"fmt"
"io"
"k8s.io/apimachinery/pkg/api/meta"
@@ -94,6 +95,9 @@ func (c *dynamicResourceClient) Create(obj *unstructured.Unstructured, opts meta
return nil, err
}
name = accessor.GetName()
if len(name) == 0 {
return nil, fmt.Errorf("name is required")
}
}
result := c.client.client.
@@ -122,6 +126,10 @@ func (c *dynamicResourceClient) Update(obj *unstructured.Unstructured, opts meta
if err != nil {
return nil, err
}
name := accessor.GetName()
if len(name) == 0 {
return nil, fmt.Errorf("name is required")
}
outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
if err != nil {
return nil, err
@@ -129,7 +137,7 @@ func (c *dynamicResourceClient) Update(obj *unstructured.Unstructured, opts meta
result := c.client.client.
Put().
AbsPath(append(c.makeURLSegments(accessor.GetName()), subresources...)...).
AbsPath(append(c.makeURLSegments(name), subresources...)...).
Body(outBytes).
SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
Do()
@@ -153,6 +161,10 @@ func (c *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opt
if err != nil {
return nil, err
}
name := accessor.GetName()
if len(name) == 0 {
return nil, fmt.Errorf("name is required")
}
outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
if err != nil {
@@ -161,7 +173,7 @@ func (c *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opt
result := c.client.client.
Put().
AbsPath(append(c.makeURLSegments(accessor.GetName()), "status")...).
AbsPath(append(c.makeURLSegments(name), "status")...).
Body(outBytes).
SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
Do()
@@ -181,6 +193,9 @@ func (c *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opt
}
func (c *dynamicResourceClient) Delete(name string, opts *metav1.DeleteOptions, subresources ...string) error {
if len(name) == 0 {
return fmt.Errorf("name is required")
}
if opts == nil {
opts = &metav1.DeleteOptions{}
}
@@ -216,6 +231,9 @@ func (c *dynamicResourceClient) DeleteCollection(opts *metav1.DeleteOptions, lis
}
func (c *dynamicResourceClient) Get(name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
if len(name) == 0 {
return nil, fmt.Errorf("name is required")
}
result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do()
if err := result.Error(); err != nil {
return nil, err
@@ -284,6 +302,9 @@ func (c *dynamicResourceClient) Watch(opts metav1.ListOptions) (watch.Interface,
}
func (c *dynamicResourceClient) Patch(name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) {
if len(name) == 0 {
return nil, fmt.Errorf("name is required")
}
result := c.client.client.
Patch(pt).
AbsPath(append(c.makeURLSegments(name), subresources...)...).

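All of these checks implement the "name validation for dynamic client methods" item from the v1.14.2 changelog below: an empty accessor name previously produced a request against the collection URL (so an Update could PUT to the list endpoint), and now fails fast on the client side. A hedged usage sketch against the 1.14-era dynamic client; the kubeconfig path and GVR are illustrative:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path.
	config, err := clientcmd.BuildConfigFromFlags("", "/etc/rancher/k3s/k3s.yaml")
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	// With this change, an empty name fails fast with "name is required"
	// instead of issuing a GET against the collection URL.
	_, err = client.Resource(gvr).Namespace("default").Get("", metav1.GetOptions{})
	fmt.Println(err)
}
```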
View File

@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "14"
gitVersion = "v1.14.1-k3s.4"
gitCommit = "52f3b42401c93c36467f1fd6d294a3aba26c7def"
gitVersion = "v1.14.3-k3s.1"
gitCommit = "8343999292c55c807be4406fcaa9f047e8751ffd"
gitTreeState = "clean"
buildDate = "2019-04-15T22:13+00:00Z"
buildDate = "2019-06-12T04:56+00:00Z"
)

View File

@@ -73,9 +73,10 @@ func (c *Config) TransportConfig() (*transport.Config, error) {
KeyFile: c.KeyFile,
KeyData: c.KeyData,
},
Username: c.Username,
Password: c.Password,
BearerToken: c.BearerToken,
Username: c.Username,
Password: c.Password,
BearerToken: c.BearerToken,
BearerTokenFile: c.BearerTokenFile,
Impersonate: transport.ImpersonationConfig{
UserName: c.Impersonate.UserName,
Groups: c.Impersonate.Groups,

View File

@@ -228,6 +228,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
// blindly overwrite existing values based on precedence
if len(configAuthInfo.Token) > 0 {
mergedConfig.BearerToken = configAuthInfo.Token
mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
} else if len(configAuthInfo.TokenFile) > 0 {
tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile)
if err != nil {
@@ -499,8 +500,9 @@ func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error)
if server := config.overrides.ClusterInfo.Server; len(server) > 0 {
icc.Host = server
}
if token := config.overrides.AuthInfo.Token; len(token) > 0 {
icc.BearerToken = token
if len(config.overrides.AuthInfo.Token) > 0 || len(config.overrides.AuthInfo.TokenFile) > 0 {
icc.BearerToken = config.overrides.AuthInfo.Token
icc.BearerTokenFile = config.overrides.AuthInfo.TokenFile
}
if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 {
icc.TLSClientConfig.CAFile = certificateAuthorityFile

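Together with the TransportConfig change above, this means a kubeconfig token file is now carried through to the transport as BearerTokenFile rather than being read once into BearerToken, so the transport layer can pick up a rotated token from disk. A minimal sketch of constructing such a config directly; the host, token value, and TLS settings are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
)

func main() {
	cfg := &restclient.Config{
		Host: "https://127.0.0.1:6443", // illustrative apiserver address
		// BearerToken is the value in memory now; BearerTokenFile lets the
		// transport layer re-read a rotated token from disk later.
		BearerToken:     "initial-token", // illustrative
		BearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token",
		TLSClientConfig: restclient.TLSClientConfig{Insecure: true}, // sketch only
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(client != nil)
}
```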
View File

@@ -51,6 +51,9 @@ type KubeProxyIPVSConfiguration struct {
// excludeCIDRs is a list of CIDR's which the ipvs proxier should not touch
// when cleaning up ipvs services.
ExcludeCIDRs []string `json:"excludeCIDRs"`
// strictARP configures arp_ignore and arp_announce to avoid answering ARP queries
// from kube-ipvs0 interface
StrictARP bool `json:"strictARP"`
}
// KubeProxyConntrackConfiguration contains conntrack settings for

View File

@@ -1,14 +1,28 @@
<!-- BEGIN MUNGE: GENERATED_TOC -->
- [v1.14.0](#v1140)
- [Downloads for v1.14.0](#downloads-for-v1140)
- [v1.14.2](#v1142)
- [Downloads for v1.14.2](#downloads-for-v1142)
- [Client Binaries](#client-binaries)
- [Server Binaries](#server-binaries)
- [Node Binaries](#node-binaries)
- [Changelog since v1.14.1](#changelog-since-v1141)
- [Other notable changes](#other-notable-changes)
- [v1.14.1](#v1141)
- [Downloads for v1.14.1](#downloads-for-v1141)
- [Client Binaries](#client-binaries-1)
- [Server Binaries](#server-binaries-1)
- [Node Binaries](#node-binaries-1)
- [Changelog since v1.14.0](#changelog-since-v1140)
- [Other notable changes](#other-notable-changes-1)
- [v1.14.0](#v1140)
- [Downloads for v1.14.0](#downloads-for-v1140)
- [Client Binaries](#client-binaries-2)
- [Server Binaries](#server-binaries-2)
- [Node Binaries](#node-binaries-2)
- [Kubernetes v1.14 Release Notes](#kubernetes-v114-release-notes)
- [1.14 Whats New](#114-whats-new)
- [1.14 Whats New](#114-whats-new)
- [Known Issues](#known-issues)
- [Urgent Upgrade Notes](#urgent-upgrade-notes)
- [(No, really, you MUST do this before you upgrade)](#no-really-you-must-do-this-before-you-upgrade)
- [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade)
- [Deprecations](#deprecations)
- [Removed and deprecated metrics](#removed-and-deprecated-metrics)
- [Removed metrics](#removed-metrics)
@@ -35,57 +49,215 @@
- [External Dependencies](#external-dependencies)
- [v1.14.0-rc.1](#v1140-rc1)
- [Downloads for v1.14.0-rc.1](#downloads-for-v1140-rc1)
- [Client Binaries](#client-binaries-1)
- [Server Binaries](#server-binaries-1)
- [Node Binaries](#node-binaries-1)
- [Changelog since v1.14.0-beta.2](#changelog-since-v1140-beta2)
- [Action Required](#action-required)
- [Other notable changes](#other-notable-changes)
- [v1.14.0-beta.2](#v1140-beta2)
- [Downloads for v1.14.0-beta.2](#downloads-for-v1140-beta2)
- [Client Binaries](#client-binaries-2)
- [Server Binaries](#server-binaries-2)
- [Node Binaries](#node-binaries-2)
- [Changelog since v1.14.0-beta.1](#changelog-since-v1140-beta1)
- [Action Required](#action-required-1)
- [Other notable changes](#other-notable-changes-1)
- [v1.14.0-beta.1](#v1140-beta1)
- [Downloads for v1.14.0-beta.1](#downloads-for-v1140-beta1)
- [Client Binaries](#client-binaries-3)
- [Server Binaries](#server-binaries-3)
- [Node Binaries](#node-binaries-3)
- [Changelog since v1.14.0-alpha.3](#changelog-since-v1140-alpha3)
- [Action Required](#action-required-2)
- [Changelog since v1.14.0-beta.2](#changelog-since-v1140-beta2)
- [Action Required](#action-required)
- [Other notable changes](#other-notable-changes-2)
- [v1.14.0-alpha.3](#v1140-alpha3)
- [Downloads for v1.14.0-alpha.3](#downloads-for-v1140-alpha3)
- [v1.14.0-beta.2](#v1140-beta2)
- [Downloads for v1.14.0-beta.2](#downloads-for-v1140-beta2)
- [Client Binaries](#client-binaries-4)
- [Server Binaries](#server-binaries-4)
- [Node Binaries](#node-binaries-4)
- [Changelog since v1.14.0-alpha.2](#changelog-since-v1140-alpha2)
- [Action Required](#action-required-3)
- [Changelog since v1.14.0-beta.1](#changelog-since-v1140-beta1)
- [Action Required](#action-required-1)
- [Other notable changes](#other-notable-changes-3)
- [v1.14.0-alpha.2](#v1140-alpha2)
- [Downloads for v1.14.0-alpha.2](#downloads-for-v1140-alpha2)
- [v1.14.0-beta.1](#v1140-beta1)
- [Downloads for v1.14.0-beta.1](#downloads-for-v1140-beta1)
- [Client Binaries](#client-binaries-5)
- [Server Binaries](#server-binaries-5)
- [Node Binaries](#node-binaries-5)
- [Changelog since v1.14.0-alpha.1](#changelog-since-v1140-alpha1)
- [Action Required](#action-required-4)
- [Changelog since v1.14.0-alpha.3](#changelog-since-v1140-alpha3)
- [Action Required](#action-required-2)
- [Other notable changes](#other-notable-changes-4)
- [v1.14.0-alpha.1](#v1140-alpha1)
- [Downloads for v1.14.0-alpha.1](#downloads-for-v1140-alpha1)
- [v1.14.0-alpha.3](#v1140-alpha3)
- [Downloads for v1.14.0-alpha.3](#downloads-for-v1140-alpha3)
- [Client Binaries](#client-binaries-6)
- [Server Binaries](#server-binaries-6)
- [Node Binaries](#node-binaries-6)
- [Changelog since v1.14.0-alpha.2](#changelog-since-v1140-alpha2)
- [Action Required](#action-required-3)
- [Other notable changes](#other-notable-changes-5)
- [v1.14.0-alpha.2](#v1140-alpha2)
- [Downloads for v1.14.0-alpha.2](#downloads-for-v1140-alpha2)
- [Client Binaries](#client-binaries-7)
- [Server Binaries](#server-binaries-7)
- [Node Binaries](#node-binaries-7)
- [Changelog since v1.14.0-alpha.1](#changelog-since-v1140-alpha1)
- [Action Required](#action-required-4)
- [Other notable changes](#other-notable-changes-6)
- [v1.14.0-alpha.1](#v1140-alpha1)
- [Downloads for v1.14.0-alpha.1](#downloads-for-v1140-alpha1)
- [Client Binaries](#client-binaries-8)
- [Server Binaries](#server-binaries-8)
- [Node Binaries](#node-binaries-8)
- [Changelog since v1.13.0](#changelog-since-v1130)
- [Action Required](#action-required-5)
- [Other notable changes](#other-notable-changes-5)
- [Other notable changes](#other-notable-changes-7)
<!-- END MUNGE: GENERATED_TOC -->
<!-- NEW RELEASE NOTES ENTRY -->
# v1.14.2
[Documentation](https://docs.k8s.io)
## Downloads for v1.14.2
filename | sha512 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes.tar.gz) | `ef1228ef7cdc3a53e9a5003acb1616aff48eba53db147af82c5e318c174f14db410bb55c030acd67d7f7694b085185ca5f9ac1d3fb9bb6ec853196571e86ad2e`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-src.tar.gz) | `1721ea726dd19f06bade3e9751379764ffb16289b8902164d78a000eb22da15f11358b208f3996df09cd805f98daa540e49f156c1b7aabee6a06df13de8386ca`
### Client Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-darwin-386.tar.gz) | `f707f3293173cbb47dc8537b19d7da443e40d9c2b3945e8e0559513d227d98a97058b5ee3762fbf93e79b98bceadb23fc985bfbff33c8f4970966383d5032df1`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-darwin-amd64.tar.gz) | `dcd61588fc0b27d6539f937106a88f8ebb3f19e9a41d37a79804a2594e12860247883374d7594b52a248915820be98b0dd7f756e581f5512cf731f9992bc3950`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-386.tar.gz) | `90ad262988898cc25c2d84fdf1d62d3cdf8f16a9b7598d477a1b516b7e87e19196a4e501388e68fccc30916ac617977f6e22e4ec13fa2046bda47d386b45a0e6`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-amd64.tar.gz) | `a4394293cecdc177db7d3ef29f9d9efb7f922d193b00d83fa17c847e2aa1cd1c38eff1f4233843fededf15d99f7c434bf701d84b93a3cb834a4699cbddf02385`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-arm.tar.gz) | `265599b200f6de8d2c01ac36a33a0fca9faf36fb68e3e3dd5dad9166b9e6605db2aadd4199a05b5b9e20d065a8e59e7d0d130e5038dc01b37ed9705a8550d677`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-arm64.tar.gz) | `31799018b7840cafac0fa4f8cc474396feaab71340eb7f38a122109fdcf759afc6066e67c5a26fe234232ab9a180d7312e81b3911c153f2e949415236a7b1709`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-ppc64le.tar.gz) | `670bbe7c3142ccfa99a1eebc6d94798a8c3720165301ef615812aea64e512e03db4a9e2d80bfa073083b87c1a123a1a8e0c72fe2be26e2dfe8a499a3237deb32`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-s390x.tar.gz) | `58d161e747ec0924f3a937bd4e6856be9bad9227ca2564f2b59cdc9bfd063d78cb9c6381441aac21d3d809a1edee059697cbef5aabd344bb3fb58d4a56641415`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-windows-386.tar.gz) | `696caeb9611137bce1988c8bf7a1e326f92dbb6f9eb31f82cc2d9cf262888b220c3abed5edb8807c58d37b659a80e46f79ecb9d8ea67627cf6a7e6b9ffa3e5c6`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-windows-amd64.tar.gz) | `156ccc2102a6f92fe1078feaed835913b34eac94bbd0846726eb43fa60f0beb724355e3a3be4de87630f27f67effdd88a5014aa197ba8695bf36da2b70ee1c14`
### Server Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-amd64.tar.gz) | `f7d9687eb49ea71f0d8b1ccfac33ed05cd341d7cfacb0711fce4a722801769deb05f72f19ade10b6dc29409f0c9136653c489653ca1f20b698c1310f8a43600f`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-arm.tar.gz) | `5c2247e4cab886cbca59ef47ea32d9ab8bb5f47495f844337dadce2362b76ebedc8a912f34131f9ec2e15bcb9023d75efb561ce7e51ce5fc7d0cb6f058a96840`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-arm64.tar.gz) | `a341bb15e659d4000fe29b88371cc1c02df4715786901b870546c04cd943f5cad56bd4f014062c4ef2d601f107038bb4024c029f62b8b37456bbcf4d14cfc5d0`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-ppc64le.tar.gz) | `d179c809da68cc4530910dd1a7c3749598bd40f5b7a773b2b3a9b9d0b0e25c5a0fa8f2caa8f1874b7168d2acb708f0d5014ca4f4721252ce414e36734485e32b`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-s390x.tar.gz) | `fdc8ffccd1c5a2e225f19b52eabceae5e8fac5e599235797b96d37223df10d45f70218dcbf5027a00db0129929fe179cd16b1f42ae2a6e7a4d020a642cd03981`
### Node Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-amd64.tar.gz) | `12c6139a4b497220f239f6c5e9a9b2e864d6dc298495ef4243b780fcf6c9c3aab53c88fa33d8527ed45d79de707cbce733e0c34c06b10fe2a07b4c3daafc0f50`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-arm.tar.gz) | `53e14c9dd53562747dcfdfff7738bccdd369a2bd6f550e1ce181aa219e48c0fe92f786c4ed8d4f62fada48018917d573e4e63c0168bf205b707309ef78bac9b5`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-arm64.tar.gz) | `5917436bdafab57f6564d6e32819b28f32d373bdb22ae53a46f7c7510283ffa87199d08db31862f8db286d5e96a37e299f8a31f0fd630bfd94698ba58b16e9af`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-ppc64le.tar.gz) | `12a8ca3c87f165ef4eb493adcd3038d5689c592b411ebbbc97741b1de67a40f91fed7c83d0bf97bd59719c8d08e686c49e6d6dd9c6ef24b80010eb0777614187`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-s390x.tar.gz) | `1919f8b370199261803ec856e558ad75100cf6db8f5619be5710f528a46a6c58692d659bb11525e351fd46673765348050ea6f1a7427fd458386f807040b67eb`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-windows-amd64.tar.gz) | `86057b6ca519a6b454a4b898c7a12f12a2bb25c8be85e53fd2c9b1e4191e334611ca87e679b5832acdd37c05486972eb9c0b4c4bcbf4b688239d9482a9590745`
## Changelog since v1.14.1
### Other notable changes
* Update to use go 1.12.4 ([#76576](https://github.com/kubernetes/kubernetes/pull/76576), [@cblecker](https://github.com/cblecker))
* Update to use go 1.12.5 ([#77528](https://github.com/kubernetes/kubernetes/pull/77528), [@cblecker](https://github.com/cblecker))
* Check if container memory stats are available before accessing it ([#77656](https://github.com/kubernetes/kubernetes/pull/77656), [@yastij](https://github.com/yastij))
* Bump addon-manager to v9.0.1 ([#77282](https://github.com/kubernetes/kubernetes/pull/77282), [@MrHohn](https://github.com/MrHohn))
* - Rebase image on debian-base:v1.0.0
* If a pod has a running instance, the stats of its previously terminated instances will not show up in the kubelet summary stats any more for CRI runtimes like containerd and cri-o. ([#77426](https://github.com/kubernetes/kubernetes/pull/77426), [@Random-Liu](https://github.com/Random-Liu))
* This keeps the behavior consistent with Docker integration, and fixes an issue that some container Prometheus metrics don't work when there are summary stats for multiple instances of the same pod.
* Add name validation for dynamic client methods in client-go ([#75072](https://github.com/kubernetes/kubernetes/pull/75072), [@lblackstone](https://github.com/lblackstone))
* Fix issue in Portworx volume driver causing controller manager to crash ([#76341](https://github.com/kubernetes/kubernetes/pull/76341), [@harsh-px](https://github.com/harsh-px))
* Fixes segmentation fault issue with Protobuf library when log entries are deeply nested. ([#77224](https://github.com/kubernetes/kubernetes/pull/77224), [@qingling128](https://github.com/qingling128))
* Update Cluster Autoscaler to 1.14.2 ([#77064](https://github.com/kubernetes/kubernetes/pull/77064), [@losipiuk](https://github.com/losipiuk))
* - https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.14.2
* - https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.14.1
* Fixes an error with stuck informers when an etcd watch receives update or delete events with missing data ([#76675](https://github.com/kubernetes/kubernetes/pull/76675), [@ryanmcnamara](https://github.com/ryanmcnamara))
* [fluentd-gcp addon] Bump fluentd-gcp-scaler to v0.5.2 to pick up security fixes. ([#76762](https://github.com/kubernetes/kubernetes/pull/76762), [@serathius](https://github.com/serathius))
* specify azure file share name in azure file plugin ([#76988](https://github.com/kubernetes/kubernetes/pull/76988), [@andyzhangx](https://github.com/andyzhangx))
* Windows nodes on GCE use a known-working 1809 image rather than the latest 1809 image. ([#76722](https://github.com/kubernetes/kubernetes/pull/76722), [@pjh](https://github.com/pjh))
* kube-proxy: os exit when CleanupAndExit is set to true ([#76732](https://github.com/kubernetes/kubernetes/pull/76732), [@JieJhih](https://github.com/JieJhih))
* Clean links handling in cp's tar code ([#76788](https://github.com/kubernetes/kubernetes/pull/76788), [@soltysh](https://github.com/soltysh))
* Adds a new "storage_operation_status_count" metric for kube-controller-manager and kubelet to count success and error statuses. ([#75750](https://github.com/kubernetes/kubernetes/pull/75750), [@msau42](https://github.com/msau42))
* kubeadm: Fix a bug where if couple of CRIs are installed a user override of the CRI during join (via kubeadm join --cri-socket ...) is ignored and kubeadm bails out with an error ([#76505](https://github.com/kubernetes/kubernetes/pull/76505), [@rosti](https://github.com/rosti))
* fix detach azure disk back off issue which has too big lock in failure retry condition ([#76573](https://github.com/kubernetes/kubernetes/pull/76573), [@andyzhangx](https://github.com/andyzhangx))
* Ensure the backend pools are set correctly for Azure SLB with multiple backend pools (e.g. outbound rules) ([#76691](https://github.com/kubernetes/kubernetes/pull/76691), [@feiskyer](https://github.com/feiskyer))
* fix azure disk list corruption issue ([#77187](https://github.com/kubernetes/kubernetes/pull/77187), [@andyzhangx](https://github.com/andyzhangx))
* [IPVS] Introduces flag ipvs-strict-arp to configure stricter ARP sysctls, defaulting to false to preserve existing behaviors. This was enabled by default in 1.13.0, which impacted a few CNI plugins. ([#75295](https://github.com/kubernetes/kubernetes/pull/75295), [@lbernail](https://github.com/lbernail))
* [metrics-server addon] Restore connecting to nodes via IP addresses ([#76819](https://github.com/kubernetes/kubernetes/pull/76819), [@serathius](https://github.com/serathius))
* Fixes a NPD bug on GCI, so that it disables glog writing to files for log-counter ([#76211](https://github.com/kubernetes/kubernetes/pull/76211), [@wangzhen127](https://github.com/wangzhen127))
* Fixes bug in DaemonSetController causing it to stop processing some DaemonSets for 5 minutes after node removal. ([#76060](https://github.com/kubernetes/kubernetes/pull/76060), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski))
# v1.14.1
[Documentation](https://docs.k8s.io)
## Downloads for v1.14.1
filename | sha512 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes.tar.gz) | `87f4cb0c6c137cbd07cc0e0b7049722ed2a3f21866ac02aecf1a0e03d350d1e90d7907487bac5ef224da75d05056abfa39f5b1b3741987dde1c5165f379cb253`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-src.tar.gz) | `ef1b4ed78ed92124fbec2b2bf54ba3b293038f9c8f1df3883071ae9430450cab9c02c6111cf171ad8d61a0aef6d236fbb9f0f1526e6c00f0452323e8c7c16305`
### Client Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-client-darwin-386.tar.gz) | `f9f14293ab8c6e6d49c5be16c2bcfad640a258d3e1ce600d6b327a4ba84c369f679b8ed65f7f96c23b7277c6cbf4fa54cc75dd8d75e4c8a3b756dc02f7e99071`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-client-darwin-amd64.tar.gz) | `761c6334ff33e0487feb15f9c335e0e44f36fbb1b5b95ddb8aad0383a085ce5c31f762d60b2fc4365052221b7594b5e9c046c25c9806ca93e7af9183e4474cb2`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-client-linux-386.tar.gz) | `58c4db0219debd85ded6dd0eac2ceac3432772805826b362d915571aec0b3f93e60eaee7181bbf28bf7fb7d93011b9849fa486f7a05b53f4ac922845f2a5deeb`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-client-linux-amd64.tar.gz) | `66c3a470caecfb35ce9a995a6298e689aed5fabefbdb8aca5086adff572266ae47b997eea03ff3ce0272fdb5be8e22aced3e3ae35906b5ac90cf928d7c0c974f`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-client-linux-arm.tar.gz) | `50f76e9cca5e056d9dabe7f27de7db72539cb33c3e24bb541e35cf32293b7614d4a22447ec6d9e6a604bfe97825f023e72934993bf144c7763f76896d57595f6`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-client-linux-arm64.tar.gz) | `088621d5cbb8587896b38e6e1faa84471490a1bd2371c082143aeebc0bac6498422c9175014cba22e5190dd761d4154bec91b1d1b93a09d1fae771d3bebf2227`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-client-linux-ppc64le.tar.gz) | `d743819920dd3ac906a855af2c1a1327f991e4c295357c610b1fad5d5cd8abf5ac1296e3bf9a46fa3f8877a152e3f8fba3a5d27e51289926f7519215769c24c6`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-client-linux-s390x.tar.gz) | `71cdd44a0d5418500407e9eea6f7118b7384b8c9a4bafaefb78c107b23e0503393b5a831bbe8eaaab6a37b4b23be3e7c5f700b991bbb4e656a72c46198e40e35`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-client-windows-386.tar.gz) | `ffeacdc7658da5564811047e66e2b8e370d037b8d983a2af0ceb9cf762f6214658f633fe626d6e97810f520c664c0ab8d346a8e2ce6be330787c099693d76c83`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-client-windows-amd64.tar.gz) | `f49b8acef5c31b59dfff0d63b4e175f54f605dd5783bdd57e18cdea766719860490281d2cdf0a3ea1f77d2c3753b4ec735af06ccda7f5ca4fcab14cd26411ef2`
### Server Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-server-linux-amd64.tar.gz) | `4775257f919bf013a92d6e3b66099d85e09be32b462189ede82874ea860ccacc3f42ff2d70e9e55b9f3b7886172bf85b26a87bc51e9d42435bfd608308b84ec6`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-server-linux-arm.tar.gz) | `2806099d6bdd32940a3221a076fff51eb9c79729e058a6b5ef283cfbbf71695d35b0d121643282a089d1ce3ca8599e3a6d095ad90be931bd893ac6ddae562664`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-server-linux-arm64.tar.gz) | `1aa3c44189f9be25181b69e6ef336955f92ceb0a35c07579e386e4c0584d4bbb5b6f7cb044ccb7002ea111f8286439f02679391f66799c78e8b9d6e100bee5e5`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-server-linux-ppc64le.tar.gz) | `6e91be7bf7b3cb1dc20a395cbf32c70ad867f1300d09800bb2c6854c93ff8d9cf8c180b703f3172564f0b10336ce04740015f67738fa5401992ad6e3505b1b69`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-server-linux-s390x.tar.gz) | `ee915182eda63d67292464ed3a3e029c1749d016cd730e15dc8fd19fdcc8ee5ae7bc926681935b6e6837d555e2165c231d3554e9428ac09b051b31f5d22b07e1`
### Node Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-node-linux-amd64.tar.gz) | `df2b5010eea175fd45868c908cc769cc8fefb367899af448ef9c64db5e4a7b50db9bdba77b81943446d0abeb2d9d36d72a22a8d72042f88eecb9123c9b77c0b5`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-node-linux-arm.tar.gz) | `338ca18540c087c81b07bd341f390b78e446deb270d7e320ef108f9f293518c26580c17968c1a87fe7af2546ff56a9392009a354202dea1d2083b79652250da3`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-node-linux-arm64.tar.gz) | `dd2544dd9543cb9a556def0116fdccb8b14c0e7ae07debbf10f3b4ac0669a1f38be28327114781157cc9ae06e96140b1a0650eeb707bd883ae3509e0ee873da7`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-node-linux-ppc64le.tar.gz) | `866fd6680f0d35c747c54369e6363b0241a233a505110a899236e1390ec7365b9ae7df4ddf7087514dc5102ce757a46b4fb218db0c081bb15c200ed526209a83`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-node-linux-s390x.tar.gz) | `87e7b803f1ae05bf4652fd0b2f636ce61bd1100e40ce7c5c2530407346260435a8f649a41bfbfa5cacb7a810d007ac19323056ef175f67aee469528b0a7d7e30`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.14.1/kubernetes-node-windows-amd64.tar.gz) | `0e46f70d7f1ec542b33119a9773a98dfb46d80f96a9f31f22ccc33c902e4bb102e2d2453a0fcebcfe319b331d1a78606269816f0f239b68902f7059240ca790e`
## Changelog since v1.14.0
### Other notable changes
* GCE/Windows: disable stackdriver logging agent to prevent node startup failures ([#76099](https://github.com/kubernetes/kubernetes/pull/76099), [@yujuhong](https://github.com/yujuhong))
* Support vSphere SAML token auth when using Zones ([#75515](https://github.com/kubernetes/kubernetes/pull/75515), [@dougm](https://github.com/dougm))
* Fix empty array expansion error in cluster/gce/util.sh ([#76111](https://github.com/kubernetes/kubernetes/pull/76111), [@kewu1992](https://github.com/kewu1992))
* [stackdriver addon] Bump prometheus-to-sd to v0.5.0 to pick up security fixes. ([#75362](https://github.com/kubernetes/kubernetes/pull/75362), [@serathius](https://github.com/serathius))
* [fluentd-gcp addon] Bump fluentd-gcp-scaler to v0.5.1 to pick up security fixes.
* [fluentd-gcp addon] Bump event-exporter to v0.2.4 to pick up security fixes.
* [fluentd-gcp addon] Bump prometheus-to-sd to v0.5.0 to pick up security fixes.
* [metatada-proxy addon] Bump prometheus-to-sd v0.5.0 to pick up security fixes.
* kube-proxy no longer automatically cleans up network rules created by running kube-proxy in other modes. If you are switching the mode that kube-proxy is running in (e.g., iptables to IPVS), you will need to run `kube-proxy --cleanup`, or restart the worker node (recommended) before restarting kube-proxy. ([#76109](https://github.com/kubernetes/kubernetes/pull/76109), [@vllry](https://github.com/vllry))
* If you are not switching kube-proxy between different modes, this change should not require any action.
* This fixes a bug where restarting the iptables proxier can cause connections to fail (https://github.com/kubernetes/kubernetes/issues/75360).
* kubeadm: fixes error when upgrading from v1.13 to v1.14 clusters created with kubeadm v1.12. Please note that it is required to upgrade etcd during the final v1.13 to v1.14 upgrade. ([#75956](https://github.com/kubernetes/kubernetes/pull/75956), [@fabriziopandini](https://github.com/fabriziopandini))
* Fixes a regression proxying responses from aggregated API servers which could cause watch requests to hang until the first event was received ([#75887](https://github.com/kubernetes/kubernetes/pull/75887), [@liggitt](https://github.com/liggitt))
* Increased verbose level for local openapi aggregation logs to avoid flooding the log during normal operation ([#75781](https://github.com/kubernetes/kubernetes/pull/75781), [@roycaihw](https://github.com/roycaihw))
* Update Cluster Autoscaler to 1.14.0; changelog: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.14.0 ([#75480](https://github.com/kubernetes/kubernetes/pull/75480), [@losipiuk](https://github.com/losipiuk))
* Ensures the conformance test image saves results before exiting when ginkgo returns non-zero value. ([#76039](https://github.com/kubernetes/kubernetes/pull/76039), [@johnSchnake](https://github.com/johnSchnake))
* GCE Windows nodes will rely solely on kubernetes and kube-proxy (and not the GCE agent) for network address management. ([#75855](https://github.com/kubernetes/kubernetes/pull/75855), [@pjh](https://github.com/pjh))
* kubeadm: fix "upgrade plan" not defaulting to a "stable" version if no version argument is passed ([#75900](https://github.com/kubernetes/kubernetes/pull/75900), [@neolit123](https://github.com/neolit123))
* kubeadm: preflight checks on external etcd certificates are now skipped when joining a control-plane node with automatic copy of cluster certificates (--certificate-key) ([#75847](https://github.com/kubernetes/kubernetes/pull/75847), [@fabriziopandini](https://github.com/fabriziopandini))
* [IPVS] Allow for transparent kube-proxy restarts ([#75283](https://github.com/kubernetes/kubernetes/pull/75283), [@lbernail](https://github.com/lbernail))
# v1.14.0
[Documentation](https://docs.k8s.io)

View File

@@ -92,6 +92,7 @@ const (
// proxyRun defines the interface to run a specified ProxyServer
type proxyRun interface {
Run() error
CleanupAndExit() error
}
// Options contains everything necessary to create and run a proxy server.
@@ -166,6 +167,7 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) {
fs.DurationVar(&o.config.IPVS.SyncPeriod.Duration, "ipvs-sync-period", o.config.IPVS.SyncPeriod.Duration, "The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
fs.DurationVar(&o.config.IPVS.MinSyncPeriod.Duration, "ipvs-min-sync-period", o.config.IPVS.MinSyncPeriod.Duration, "The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').")
fs.StringSliceVar(&o.config.IPVS.ExcludeCIDRs, "ipvs-exclude-cidrs", o.config.IPVS.ExcludeCIDRs, "A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules.")
fs.BoolVar(&o.config.IPVS.StrictARP, "ipvs-strict-arp", o.config.IPVS.StrictARP, "Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2")
fs.DurationVar(&o.config.ConfigSyncPeriod.Duration, "config-sync-period", o.config.ConfigSyncPeriod.Duration, "How often configuration from the apiserver is refreshed. Must be greater than 0.")
fs.BoolVar(&o.config.IPTables.MasqueradeAll, "masquerade-all", o.config.IPTables.MasqueradeAll, "If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed)")
fs.StringVar(&o.config.ClusterCIDR, "cluster-cidr", o.config.ClusterCIDR, "The CIDR range of pods in the cluster. When configured, traffic sent to a Service cluster IP from outside this range will be masqueraded and traffic sent from pods to an external LoadBalancer IP will be directed to the respective cluster IP instead")
@@ -305,6 +307,11 @@ func (o *Options) Run() error {
if err != nil {
return err
}
if o.CleanupAndExit {
return proxyServer.CleanupAndExit()
}
o.proxyServer = proxyServer
return o.runLoop()
}
@@ -498,7 +505,6 @@ type ProxyServer struct {
Conntracker Conntracker // if nil, ignored
ProxyMode string
NodeRef *v1.ObjectReference
CleanupAndExit bool
CleanupIPVS bool
MetricsBindAddress string
EnableProfiling bool
@@ -550,19 +556,10 @@ func createClients(config componentbaseconfig.ClientConnectionConfiguration, mas
}
// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
// TODO: At the moment, Run() cannot return a nil error, otherwise it's caller will never exit. Update callers of Run to handle nil errors.
func (s *ProxyServer) Run() error {
// To help debugging, immediately log version
klog.Infof("Version: %+v", version.Get())
// remove iptables rules and exit
if s.CleanupAndExit {
encounteredError := userspace.CleanupLeftovers(s.IptInterface)
encounteredError = iptables.CleanupLeftovers(s.IptInterface) || encounteredError
encounteredError = ipvs.CleanupLeftovers(s.IpvsInterface, s.IptInterface, s.IpsetInterface, s.CleanupIPVS) || encounteredError
if encounteredError {
return errors.New("encountered an error while tearing down rules.")
}
return nil
}
// TODO(vmarmol): Use container config for this.
var oomAdjuster *oom.OOMAdjuster
@@ -708,3 +705,15 @@ func getConntrackMax(config kubeproxyconfig.KubeProxyConntrackConfiguration) (in
}
return 0, nil
}
// CleanupAndExit removes iptables rules and, on success, returns nil
func (s *ProxyServer) CleanupAndExit() error {
encounteredError := userspace.CleanupLeftovers(s.IptInterface)
encounteredError = iptables.CleanupLeftovers(s.IptInterface) || encounteredError
encounteredError = ipvs.CleanupLeftovers(s.IpvsInterface, s.IptInterface, s.IpsetInterface, s.CleanupIPVS) || encounteredError
if encounteredError {
return errors.New("encountered an error while tearing down rules")
}
return nil
}

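The net effect of this refactor: cleanup is promoted from a flag-guarded branch inside Run to a method on the proxyRun interface, and Options.Run dispatches to it before a full ProxyServer is ever wired up. The shape of the dispatch, simplified:

```go
// Shape of the refactor (simplified; not the real kube-proxy types).
// Cleanup is now a first-class method instead of a branch inside Run.
type proxyRun interface {
	Run() error
	CleanupAndExit() error
}

func runOptions(cleanupAndExit bool, s proxyRun) error {
	if cleanupAndExit {
		// Tear down iptables/IPVS leftovers and return; Run is never entered.
		return s.CleanupAndExit()
	}
	return s.Run()
}
```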
View File

@@ -104,7 +104,6 @@ func newProxyServer(
IptInterface: iptInterface,
IpvsInterface: ipvsInterface,
IpsetInterface: ipsetInterface,
CleanupAndExit: cleanupAndExit,
}, nil
}
@@ -185,6 +184,7 @@ func newProxyServer(
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
config.ClusterCIDR,

View File

@@ -24,6 +24,8 @@ import (
"errors"
"fmt"
"net"
// Enable pprof HTTP handlers.
_ "net/http/pprof"
"k8s.io/api/core/v1"
@@ -63,7 +65,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi
// We omit creation of pretty much everything if we run in cleanup mode
if cleanupAndExit {
return &ProxyServer{CleanupAndExit: cleanupAndExit}, nil
return &ProxyServer{}, nil
}
client, eventClient, err := createClients(config.ClientConnection, master)

View File

@@ -133,6 +133,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/client-go/util/certificate:go_default_library",
"//staging/src/k8s.io/client-go/util/connrotation:go_default_library",
"//staging/src/k8s.io/client-go/util/keyutil:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/component-base/cli/flag:go_default_library",

View File

@@ -53,6 +53,7 @@ import (
"k8s.io/client-go/tools/record"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/certificate"
"k8s.io/client-go/util/connrotation"
"k8s.io/client-go/util/keyutil"
cliflag "k8s.io/component-base/cli/flag"
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
@@ -526,6 +527,9 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan
if err != nil {
return err
}
if closeAllConns == nil {
return errors.New("closeAllConns must be a valid function other than nil")
}
kubeDeps.OnHeartbeatFailure = closeAllConns
kubeDeps.KubeClient, err = clientset.NewForConfig(clientConfig)
@@ -761,8 +765,21 @@ func buildKubeletClientConfig(s *options.KubeletServer, nodeName types.NodeName)
}
kubeClientConfigOverrides(s, clientConfig)
closeAllConns, err := updateDialer(clientConfig)
if err != nil {
return nil, nil, err
}
return clientConfig, closeAllConns, nil
}
return clientConfig, nil, nil
// updateDialer instruments a restconfig with a dialer. The returned function allows forcefully closing all active connections.
func updateDialer(clientConfig *restclient.Config) (func(), error) {
if clientConfig.Transport != nil || clientConfig.Dial != nil {
return nil, fmt.Errorf("there is already a transport or dialer configured")
}
d := connrotation.NewDialer((&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext)
clientConfig.Dial = d.DialContext
return d.CloseAll, nil
}
// buildClientCertificateManager creates a certificate manager that will use certConfig to request a client certificate

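updateDialer is the piece that makes kubeDeps.OnHeartbeatFailure effective: every apiserver connection is dialed through a connrotation.Dialer, and closeAllConns drops them all so the next request re-dials (for example through a recovered load balancer). A standalone sketch of the same pattern; the address is illustrative:

```go
package main

import (
	"context"
	"net"
	"time"

	"k8s.io/client-go/util/connrotation"
)

func main() {
	base := (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext
	d := connrotation.NewDialer(base)

	// Wherever a client transport would normally dial, use d.DialContext;
	// every connection it opens is tracked by the dialer.
	conn, err := d.DialContext(context.Background(), "tcp", "127.0.0.1:6443") // illustrative address
	if err == nil {
		defer conn.Close()
	}

	// On heartbeat failure the kubelet invokes this to drop all tracked
	// connections at once, forcing fresh dials on the next request.
	d.CloseAll()
}
```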
View File

@@ -964,10 +964,10 @@ func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
failedPodsObserved += failedPodsObservedOnNode
}
// Remove pods assigned to not existing nodes when daemonset pods are scheduled by default scheduler.
// Remove unscheduled pods assigned to not existing nodes when daemonset pods are scheduled by scheduler.
// If node doesn't exist then pods are never scheduled and can't be deleted by PodGCController.
if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) {
podsToDelete = append(podsToDelete, getPodsWithoutNode(nodeList, nodeToDaemonPods)...)
podsToDelete = append(podsToDelete, getUnscheduledPodsWithoutNode(nodeList, nodeToDaemonPods)...)
}
// Label new pods using the hash label value of the current history when creating them
@@ -1531,8 +1531,9 @@ func failedPodsBackoffKey(ds *apps.DaemonSet, nodeName string) string {
return fmt.Sprintf("%s/%d/%s", ds.UID, ds.Status.ObservedGeneration, nodeName)
}
// getPodsWithoutNode returns list of pods assigned to not existing nodes.
func getPodsWithoutNode(runningNodesList []*v1.Node, nodeToDaemonPods map[string][]*v1.Pod) []string {
// getUnscheduledPodsWithoutNode returns list of unscheduled pods assigned to not existing nodes.
// Returned pods can't be deleted by PodGCController so they should be deleted by DaemonSetController.
func getUnscheduledPodsWithoutNode(runningNodesList []*v1.Node, nodeToDaemonPods map[string][]*v1.Pod) []string {
var results []string
isNodeRunning := make(map[string]bool)
for _, node := range runningNodesList {
@@ -1541,7 +1542,9 @@ func getPodsWithoutNode(runningNodesList []*v1.Node, nodeToDaemonPods map[string
for n, pods := range nodeToDaemonPods {
if !isNodeRunning[n] {
for _, pod := range pods {
results = append(results, pod.Name)
if len(pod.Spec.NodeName) == 0 {
results = append(results, pod.Name)
}
}
}
}

View File

@@ -32,6 +32,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library",
"//staging/src/k8s.io/client-go/rest/fake:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
],
)

View File

@@ -418,9 +418,7 @@ func clean(fileName string) string {
return path.Clean(string(os.PathSeparator) + fileName)
}
func (o *CopyOptions) untarAll(reader io.Reader, destFile, prefix string) error {
entrySeq := -1
func (o *CopyOptions) untarAll(reader io.Reader, destDir, prefix string) error {
// TODO: use compression here?
tarReader := tar.NewReader(reader)
for {
@@ -431,52 +429,60 @@ func (o *CopyOptions) untarAll(reader io.Reader, destFile, prefix string) error
}
break
}
entrySeq++
mode := header.FileInfo().Mode()
// all the files will start with the prefix, which is the directory where
// All the files will start with the prefix, which is the directory where
// they were located on the pod, we need to strip down that prefix, but
// if the prefix is missing it means the tar was tempered with
// if the prefix is missing it means the tar was tampered with.
// For the case where prefix is empty we need to ensure that the path
// is not absolute, which also indicates the tar file was tampered with.
if !strings.HasPrefix(header.Name, prefix) {
return fmt.Errorf("tar contents corrupted")
}
outFileName := path.Join(destFile, clean(header.Name[len(prefix):]))
baseName := path.Dir(outFileName)
// basic file information
mode := header.FileInfo().Mode()
destFileName := path.Join(destDir, header.Name[len(prefix):])
baseName := path.Dir(destFileName)
if err := os.MkdirAll(baseName, 0755); err != nil {
return err
}
if header.FileInfo().IsDir() {
if err := os.MkdirAll(outFileName, 0755); err != nil {
if err := os.MkdirAll(destFileName, 0755); err != nil {
return err
}
continue
}
// handle coping remote file into local directory
if entrySeq == 0 && !header.FileInfo().IsDir() {
exists, err := dirExists(outFileName)
if err != nil {
return err
}
if exists {
outFileName = filepath.Join(outFileName, path.Base(clean(header.Name)))
}
// We need to ensure that the destination file is always within boundaries
// of the destination directory. This prevents any kind of path traversal
// from within tar archive.
dir, file := filepath.Split(destFileName)
evaledPath, err := filepath.EvalSymlinks(dir)
if err != nil {
return err
}
// To be thorough we verify both the actual destination and the resolved path,
// following all the links that might lead outside of the destination directory.
if !isDestRelative(destDir, destFileName) || !isDestRelative(destDir, filepath.Join(evaledPath, file)) {
fmt.Fprintf(o.IOStreams.ErrOut, "warning: link %q is pointing to %q which is outside target destination, skipping\n", destFileName, header.Linkname)
continue
}
if mode&os.ModeSymlink != 0 {
linkname := header.Linkname
// error is returned if linkname can't be made relative to destFile,
// but relative can end up being ../dir that's why we also need to
// verify if relative path is the same after Clean-ing
relative, err := filepath.Rel(destFile, linkname)
if path.IsAbs(linkname) && (err != nil || relative != stripPathShortcuts(relative)) {
fmt.Fprintf(o.IOStreams.ErrOut, "warning: link %q is pointing to %q which is outside target destination, skipping\n", outFileName, header.Linkname)
// We need to ensure that the link destination is always within boundaries
// of the destination directory. This prevents any kind of path traversal
// from within tar archive.
if !isDestRelative(destDir, linkJoin(destFileName, linkname)) {
fmt.Fprintf(o.IOStreams.ErrOut, "warning: link %q is pointing to %q which is outside target destination, skipping\n", destFileName, header.Linkname)
continue
}
if err := os.Symlink(linkname, outFileName); err != nil {
if err := os.Symlink(linkname, destFileName); err != nil {
return err
}
} else {
outFile, err := os.Create(outFileName)
outFile, err := os.Create(destFileName)
if err != nil {
return err
}
@@ -490,14 +496,32 @@ func (o *CopyOptions) untarAll(reader io.Reader, destFile, prefix string) error
}
}
if entrySeq == -1 {
//if no file was copied
errInfo := fmt.Sprintf("error: %s no such file or directory", prefix)
return errors.New(errInfo)
}
return nil
}
// linkJoin joins base and link to get the final path to be created.
// It will consider whether link is an absolute path or not when returning result.
func linkJoin(base, link string) string {
if filepath.IsAbs(link) {
return link
}
return filepath.Join(base, link)
}
// isDestRelative returns true if dest is pointing outside the base directory,
// false otherwise.
func isDestRelative(base, dest string) bool {
fullPath := dest
if !filepath.IsAbs(dest) {
fullPath = filepath.Join(base, dest)
}
relative, err := filepath.Rel(base, fullPath)
if err != nil {
return false
}
return relative == "." || relative == stripPathShortcuts(relative)
}
func getPrefix(file string) string {
// tar strips the leading '/' if it's there, so we will too
return strings.TrimLeft(file, "/")
@@ -524,15 +548,3 @@ func (o *CopyOptions) execute(options *exec.ExecOptions) error {
}
return nil
}
// dirExists checks if a path exists and is a directory.
func dirExists(path string) (bool, error) {
fi, err := os.Stat(path)
if err == nil && fi.IsDir() {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}

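This untarAll rewrite is the kubectl cp path-traversal hardening: every entry, and every symlink target, must resolve to a path inside destDir, with isDestRelative as the core predicate. A standalone sketch of that predicate with a few probes; stripPathShortcuts is approximated here by a ".." prefix check:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// isDestRelative mirrors the predicate added in the patch: dest must stay
// inside base once joined and relativized (stripPathShortcuts simplified
// to a ".." check for this sketch).
func isDestRelative(base, dest string) bool {
	full := dest
	if !filepath.IsAbs(dest) {
		full = filepath.Join(base, dest)
	}
	rel, err := filepath.Rel(base, full)
	if err != nil {
		return false
	}
	return rel == "." || !strings.HasPrefix(rel, "..")
}

func main() {
	base := "/tmp/dest"
	for _, p := range []string{"file.txt", "sub/dir/file", "../escape", "/etc/passwd"} {
		fmt.Printf("%-14q allowed=%v\n", p, isDestRelative(base, p))
	}
	// The first two stay inside /tmp/dest; the last two are rejected.
}
```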
View File

@@ -632,7 +632,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
}
klet.runtimeService = runtimeService
if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) {
if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) && kubeDeps.KubeClient != nil {
klet.runtimeClassManager = runtimeclass.NewManager(kubeDeps.KubeClient)
}

View File

@@ -89,6 +89,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/github.com/golang/mock/gomock:go_default_library",
"//vendor/github.com/google/cadvisor/fs:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",

View File

@@ -137,6 +137,7 @@ func (p *criStatsProvider) listPodStats(updateCPUNanoCoreUsage bool) ([]statsapi
if err != nil {
return nil, fmt.Errorf("failed to list all pod sandboxes: %v", err)
}
podSandboxes = removeTerminatedPods(podSandboxes)
for _, s := range podSandboxes {
podSandboxMap[s.Id] = s
}
@@ -153,7 +154,7 @@
return nil, fmt.Errorf("failed to list all container stats: %v", err)
}
containers = removeTerminatedContainer(containers)
containers = removeTerminatedContainers(containers)
// Creates container map.
containerMap := make(map[string]*runtimeapi.Container)
for _, c := range containers {
@@ -233,6 +234,7 @@ func (p *criStatsProvider) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, erro
if err != nil {
return nil, fmt.Errorf("failed to list all pod sandboxes: %v", err)
}
podSandboxes = removeTerminatedPods(podSandboxes)
for _, s := range podSandboxes {
podSandboxMap[s.Id] = s
}
@@ -245,7 +247,7 @@ func (p *criStatsProvider) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, erro
return nil, fmt.Errorf("failed to list all container stats: %v", err)
}
containers = removeTerminatedContainer(containers)
containers = removeTerminatedContainers(containers)
// Creates container map.
containerMap := make(map[string]*runtimeapi.Container)
for _, c := range containers {
@@ -690,9 +692,51 @@ func (p *criStatsProvider) cleanupOutdatedCaches() {
}
}
// removeTerminatedContainer returns the specified container but with
// the stats of the terminated containers removed.
func removeTerminatedContainer(containers []*runtimeapi.Container) []*runtimeapi.Container {
// removeTerminatedPods returns pods with terminated ones removed.
// It only removes a terminated pod when there is a running instance
// of the pod with the same name and namespace.
// This is needed because:
// 1) PodSandbox may be recreated;
// 2) Pod may be recreated with the same name and namespace.
func removeTerminatedPods(pods []*runtimeapi.PodSandbox) []*runtimeapi.PodSandbox {
podMap := make(map[statsapi.PodReference][]*runtimeapi.PodSandbox)
// Sort order by create time
sort.Slice(pods, func(i, j int) bool {
return pods[i].CreatedAt < pods[j].CreatedAt
})
for _, pod := range pods {
refID := statsapi.PodReference{
Name: pod.GetMetadata().GetName(),
Namespace: pod.GetMetadata().GetNamespace(),
// UID is intentionally left empty.
}
podMap[refID] = append(podMap[refID], pod)
}
result := make([]*runtimeapi.PodSandbox, 0)
for _, refs := range podMap {
if len(refs) == 1 {
result = append(result, refs[0])
continue
}
found := false
for i := 0; i < len(refs); i++ {
if refs[i].State == runtimeapi.PodSandboxState_SANDBOX_READY {
found = true
result = append(result, refs[i])
}
}
if !found {
result = append(result, refs[len(refs)-1])
}
}
return result
}
// removeTerminatedContainers returns containers with terminated ones removed.
// It only removes a terminated container when there is a running instance
// of the container.
func removeTerminatedContainers(containers []*runtimeapi.Container) []*runtimeapi.Container {
containerMap := make(map[containerID][]*runtimeapi.Container)
// Sort order by create time
sort.Slice(containers, func(i, j int) bool {

View File
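removeTerminatedPods is what backs the changelog note above (#77426) about terminated pod instances no longer appearing in kubelet summary stats: sandboxes are grouped by name and namespace, every ready sandbox is kept, and only if none is ready does the newest terminated one survive. A condensed sketch with simplified types:

```go
package main

import (
	"fmt"
	"sort"
)

type sandbox struct {
	name, namespace string
	createdAt       int64
	ready           bool
}

// Simplified version of the patch's removeTerminatedPods: group by
// name/namespace, keep all ready instances, otherwise keep only the newest.
func removeTerminated(pods []sandbox) []sandbox {
	sort.Slice(pods, func(i, j int) bool { return pods[i].createdAt < pods[j].createdAt })
	groups := map[string][]sandbox{}
	for _, p := range pods {
		key := p.namespace + "/" + p.name
		groups[key] = append(groups[key], p)
	}
	var result []sandbox
	for _, refs := range groups {
		found := false
		for _, p := range refs {
			if p.ready {
				found = true
				result = append(result, p)
			}
		}
		if !found {
			result = append(result, refs[len(refs)-1]) // newest terminated instance
		}
	}
	return result
}

func main() {
	pods := []sandbox{
		{"web", "default", 1, false}, // old terminated instance
		{"web", "default", 2, true},  // recreated, running
	}
	fmt.Println(removeTerminated(pods)) // only the ready instance survives
}
```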

@@ -51,7 +51,7 @@ func cadvisorInfoToCPUandMemoryStats(info *cadvisorapiv2.ContainerInfo) (*statsa
cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total
}
}
if info.Spec.HasMemory {
if info.Spec.HasMemory && cstat.Memory != nil {
pageFaults := cstat.Memory.ContainerData.Pgfault
majorPageFaults := cstat.Memory.ContainerData.Pgmajfault
memoryStats = &statsapi.MemoryStats{

View File

@@ -242,13 +242,15 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
if runningContainers {
klog.V(4).Infof(
"Pod %q has been removed from pod manager. However, it still has one or more containers in the non-exited state. Therefore, it will not be removed from volume manager.",
"Pod %q still has one or more containers in the non-exited state. Therefore, it will not be removed from desired state.",
format.Pod(volumeToMount.Pod))
continue
}
if !dswp.actualStateOfWorld.VolumeExists(volumeToMount.VolumeName) && podExists {
klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Actual state has not yet has this information skip removing volume from desired state", ""))
exists, _, _ := dswp.actualStateOfWorld.PodExistsInVolume(volumeToMount.PodName, volumeToMount.VolumeName)
if !exists && podExists {
klog.V(4).Infof(
volumeToMount.GenerateMsgDetailed(fmt.Sprintf("Actual state does not yet have this volume mounted information and pod (%q) still exists in pod manager, skip removing volume from desired state",
format.Pod(volumeToMount.Pod)), ""))
continue
}
klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Removing volume from desired state", ""))

View File

@ -55,6 +55,9 @@ type KubeProxyIPVSConfiguration struct {
// excludeCIDRs is a list of CIDRs which the ipvs proxier should not touch
// when cleaning up ipvs services.
ExcludeCIDRs []string
// strictARP configures arp_ignore and arp_announce to avoid answering ARP queries
// from the kube-ipvs0 interface
StrictARP bool
}
// KubeProxyConntrackConfiguration contains conntrack settings for

View File

@ -226,6 +226,7 @@ func autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConf
out.MinSyncPeriod = in.MinSyncPeriod
out.Scheduler = in.Scheduler
out.ExcludeCIDRs = *(*[]string)(unsafe.Pointer(&in.ExcludeCIDRs))
out.StrictARP = in.StrictARP
return nil
}
@ -239,6 +240,7 @@ func autoConvert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConf
out.MinSyncPeriod = in.MinSyncPeriod
out.Scheduler = in.Scheduler
out.ExcludeCIDRs = *(*[]string)(unsafe.Pointer(&in.ExcludeCIDRs))
out.StrictARP = in.StrictARP
return nil
}

View File

@ -17,10 +17,10 @@ limitations under the License.
package ipvs
import (
"fmt"
"sync"
"time"
"fmt"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
utilipvs "k8s.io/kubernetes/pkg/util/ipvs"
@ -164,10 +164,10 @@ func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, e
}
for _, rs := range rss {
if rsToDelete.RealServer.Equal(rs) {
// Delete RS with no connections
// For UDP, ActiveConn is always 0
// For TCP, InactiveConn are connections not in ESTABLISHED state
if rs.ActiveConn+rs.InactiveConn != 0 {
// For UDP traffic, no graceful termination, we immediately delete the RS
// (existing connections will be deleted on the next packet because sysctlExpireNoDestConn=1)
// For other protocols, don't delete until all connections have expired
if rsToDelete.VirtualServer.Protocol != "udp" && rs.ActiveConn+rs.InactiveConn != 0 {
klog.Infof("Not deleting, RS %v: %v ActiveConn, %v InactiveConn", rsToDelete.String(), rs.ActiveConn, rs.InactiveConn)
return false, nil
}
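
The condition reduces to a one-line predicate; a hedged restatement with illustrative names:

package main

import "fmt"

// okToDeleteNow restates the deletion condition above (names illustrative):
// UDP real servers are deleted immediately, since sysctlExpireNoDestConn=1
// expires their connections on the next packet; other protocols wait until
// no active or inactive connections remain.
func okToDeleteNow(protocol string, activeConn, inactiveConn uint32) bool {
	return protocol == "udp" || activeConn+inactiveConn == 0
}

func main() {
	fmt.Println(okToDeleteNow("udp", 5, 0)) // true: UDP skips graceful termination
	fmt.Println(okToDeleteNow("tcp", 0, 2)) // false: wait for connections to expire
}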

View File

@ -194,7 +194,9 @@ type Proxier struct {
syncPeriod time.Duration
minSyncPeriod time.Duration
// Values are CIDRs to exclude when cleaning up IPVS rules.
excludeCIDRs []string
excludeCIDRs []string
// Set to true to set sysctls arp_ignore and arp_announce
strictARP bool
iptables utiliptables.Interface
ipvs utilipvs.Interface
ipset utilipset.Interface
@ -285,6 +287,7 @@ func NewProxier(ipt utiliptables.Interface,
syncPeriod time.Duration,
minSyncPeriod time.Duration,
excludeCIDRs []string,
strictARP bool,
masqueradeAll bool,
masqueradeBit int,
clusterCIDR string,
@ -344,17 +347,19 @@ func NewProxier(ipt utiliptables.Interface,
}
}
// Set the arp_ignore sysctl
if val, _ := sysctl.GetSysctl(sysctlArpIgnore); val != 1 {
if err := sysctl.SetSysctl(sysctlArpIgnore, 1); err != nil {
return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlArpIgnore, err)
if strictARP {
// Set the arp_ignore sysctl
if val, _ := sysctl.GetSysctl(sysctlArpIgnore); val != 1 {
if err := sysctl.SetSysctl(sysctlArpIgnore, 1); err != nil {
return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlArpIgnore, err)
}
}
}
// Set the arp_announce sysctl
if val, _ := sysctl.GetSysctl(sysctlArpAnnounce); val != 2 {
if err := sysctl.SetSysctl(sysctlArpAnnounce, 2); err != nil {
return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlArpAnnounce, err)
// Set the arp_announce sysctl
if val, _ := sysctl.GetSysctl(sysctlArpAnnounce); val != 2 {
if err := sysctl.SetSysctl(sysctlArpAnnounce, 2); err != nil {
return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlArpAnnounce, err)
}
}
}
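
For reference, a standalone sketch of the two sysctls that strictARP gates, written directly to procfs here rather than through kube-proxy's sysctl helper; the keys are assumed to match the sysctlArpIgnore and sysctlArpAnnounce constants used above.

package main

import (
	"fmt"
	"os"
)

// setSysctl writes a procfs sysctl entry; kube-proxy itself goes through
// its sysctl helper rather than writing files directly.
func setSysctl(key, value string) error {
	return os.WriteFile("/proc/sys/"+key, []byte(value), 0644)
}

func main() {
	// arp_ignore=1: only answer ARP requests whose target IP is configured
	// on the receiving interface, so VIPs bound to kube-ipvs0 stay quiet.
	if err := setSysctl("net/ipv4/conf/all/arp_ignore", "1"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	// arp_announce=2: always use the best local source address when sending
	// ARP, avoiding advertising the kube-ipvs0 addresses.
	if err := setSysctl("net/ipv4/conf/all/arp_announce", "2"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}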
@ -1654,15 +1659,17 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer, legacyBindAddrs map[string]bool) {
for cs := range currentServices {
svc := currentServices[cs]
if proxier.isIPInExcludeCIDRs(svc.Address) {
continue
}
if _, ok := activeServices[cs]; !ok {
// This service was not processed in the latest sync loop, so before deleting it make sure its real servers have finished graceful termination
okayToDelete := true
rsList, _ := proxier.ipvs.GetRealServers(svc)
// If we still have real servers, graceful termination is not done
if len(rsList) > 0 {
okayToDelete = false
continue
}
// Applying graceful termination to all real servers
for _, rs := range rsList {
uniqueRS := GetUniqueRSName(svc, rs)
@ -1675,35 +1682,36 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre
klog.Errorf("Failed to delete destination: %v, error: %v", uniqueRS, err)
}
}
// make sure it does not fall within an excluded CIDR range.
for _, excludedCIDR := range proxier.excludeCIDRs {
// Any validation of this CIDR already should have occurred.
_, n, _ := net.ParseCIDR(excludedCIDR)
if n.Contains(svc.Address) {
okayToDelete = false
break
}
klog.V(4).Infof("Delete service %s", svc.String())
if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil {
klog.Errorf("Failed to delete service %s, error: %v", svc.String(), err)
}
if okayToDelete {
klog.V(4).Infof("Delete service %s", svc.String())
if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil {
klog.Errorf("Failed to delete service %s, error: %v", svc.String(), err)
}
addr := svc.Address.String()
if _, ok := legacyBindAddrs[addr]; ok {
klog.V(4).Infof("Unbinding address %s", addr)
if err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice); err != nil {
klog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err)
} else {
// In case we delete a multi-port service, avoid trying to unbind multiple times
delete(legacyBindAddrs, addr)
}
addr := svc.Address.String()
if _, ok := legacyBindAddrs[addr]; ok {
klog.V(4).Infof("Unbinding address %s", addr)
if err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice); err != nil {
klog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err)
} else {
// In case we delete a multi-port service, avoid trying to unbind multiple times
delete(legacyBindAddrs, addr)
}
}
}
}
}
func (proxier *Proxier) isIPInExcludeCIDRs(ip net.IP) bool {
// make sure it does not fall within an excluded CIDR range.
for _, excludedCIDR := range proxier.excludeCIDRs {
// Any validation of this CIDR already should have occurred.
_, n, _ := net.ParseCIDR(excludedCIDR)
if n.Contains(ip) {
return true
}
}
return false
}
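
A hedged standalone re-implementation with a made-up exclude list, to show the check in isolation:

package main

import (
	"fmt"
	"net"
)

// isIPInExcludeCIDRs re-implemented outside the Proxier for illustration;
// the exclude list in main is hypothetical.
func isIPInExcludeCIDRs(ip net.IP, excludeCIDRs []string) bool {
	for _, cidr := range excludeCIDRs {
		_, n, err := net.ParseCIDR(cidr)
		if err == nil && n.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	exclude := []string{"10.0.0.0/8"}
	fmt.Println(isIPInExcludeCIDRs(net.ParseIP("10.1.2.3"), exclude))    // true: skipped by cleanup
	fmt.Println(isIPInExcludeCIDRs(net.ParseIP("192.168.0.1"), exclude)) // false: eligible for cleanup
}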
func (proxier *Proxier) getLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) map[string]bool {
legacyAddrs := make(map[string]bool)
isIpv6 := utilnet.IsIPv6(proxier.nodeIP)

View File

@ -1,10 +1,4 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_test(
name = "go_default_test",
@ -20,6 +14,7 @@ go_test(
"//pkg/registry/registrytest:go_default_library",
"//pkg/securitycontext:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/apitesting:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -27,7 +22,9 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/example/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/registry/generic:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library",
@ -46,6 +43,7 @@ go_library(
"storage.go",
],
importpath = "k8s.io/kubernetes/pkg/registry/core/pod/storage",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/pod:go_default_library",
"//pkg/apis/core:go_default_library",
@ -85,4 +83,5 @@ filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -19,6 +19,7 @@ package storage
import (
"context"
"fmt"
"reflect"
"time"
"k8s.io/apimachinery/pkg/api/errors"
@ -29,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/util/dryrun"
"k8s.io/client-go/util/retry"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/policy"
@ -77,11 +79,36 @@ func (r *EvictionREST) New() runtime.Object {
return &policy.Eviction{}
}
// propagateDryRun takes the dry-run option from the request and pushes it into the eviction object.
// It returns an error if they have non-matching dry-run options.
func propagateDryRun(eviction *policy.Eviction, options *metav1.CreateOptions) (*metav1.DeleteOptions, error) {
if eviction.DeleteOptions == nil {
return &metav1.DeleteOptions{DryRun: options.DryRun}, nil
}
if len(eviction.DeleteOptions.DryRun) == 0 {
eviction.DeleteOptions.DryRun = options.DryRun
return eviction.DeleteOptions, nil
}
if len(options.DryRun) == 0 {
return eviction.DeleteOptions, nil
}
if !reflect.DeepEqual(options.DryRun, eviction.DeleteOptions.DryRun) {
return nil, fmt.Errorf("Non-matching dry-run options in request and content: %v and %v", options.DryRun, eviction.DeleteOptions.DryRun)
}
return eviction.DeleteOptions, nil
}
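
The merge rules are easiest to see by running the cases; a standalone sketch with simplified stand-ins for the metav1 options types:

package main

import (
	"fmt"
	"reflect"
)

// Simplified stand-ins for metav1.CreateOptions and metav1.DeleteOptions.
type createOpts struct{ DryRun []string }
type deleteOpts struct{ DryRun []string }

// propagate restates propagateDryRun: the request-level dry-run option is
// pushed into the delete options unless the eviction body already carries a
// conflicting non-empty value.
func propagate(existing *deleteOpts, opts createOpts) (*deleteOpts, error) {
	if existing == nil {
		return &deleteOpts{DryRun: opts.DryRun}, nil
	}
	if len(existing.DryRun) == 0 {
		existing.DryRun = opts.DryRun
		return existing, nil
	}
	if len(opts.DryRun) == 0 {
		return existing, nil
	}
	if !reflect.DeepEqual(opts.DryRun, existing.DryRun) {
		return nil, fmt.Errorf("non-matching dry-run options: %v and %v", opts.DryRun, existing.DryRun)
	}
	return existing, nil
}

func main() {
	got, _ := propagate(nil, createOpts{DryRun: []string{"All"}})
	fmt.Println(got.DryRun) // [All]: request option wins when the body has none

	_, err := propagate(&deleteOpts{DryRun: []string{"All"}}, createOpts{DryRun: []string{"None"}})
	fmt.Println(err) // non-matching dry-run options: [None] and [All]
}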
// Create attempts to create a new eviction. That is, it tries to evict a pod.
func (r *EvictionREST) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
eviction := obj.(*policy.Eviction)
obj, err := r.store.Get(ctx, eviction.Name, &metav1.GetOptions{})
deletionOptions, err := propagateDryRun(eviction, options)
if err != nil {
return nil, err
}
obj, err = r.store.Get(ctx, eviction.Name, &metav1.GetOptions{})
if err != nil {
return nil, err
}
@ -89,7 +116,7 @@ func (r *EvictionREST) Create(ctx context.Context, obj runtime.Object, createVal
// Evicting a terminal pod should result in direct deletion of pod as it already caused disruption by the time we are evicting.
// There is no need to check for pdb.
if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
_, _, err = r.store.Delete(ctx, eviction.Name, eviction.DeleteOptions)
_, _, err = r.store.Delete(ctx, eviction.Name, deletionOptions)
if err != nil {
return nil, err
}
@ -118,7 +145,7 @@ func (r *EvictionREST) Create(ctx context.Context, obj runtime.Object, createVal
// If it was false already, or if it becomes false during the course of our retries,
// raise an error marked as a 429.
if err := r.checkAndDecrement(pod.Namespace, pod.Name, pdb); err != nil {
if err := r.checkAndDecrement(pod.Namespace, pod.Name, pdb, dryrun.IsDryRun(deletionOptions.DryRun)); err != nil {
return err
}
}
@ -138,11 +165,6 @@ func (r *EvictionREST) Create(ctx context.Context, obj runtime.Object, createVal
// At this point there was either no PDB or we succeeded in decrementing
// Try the delete
deletionOptions := eviction.DeleteOptions
if deletionOptions == nil {
// default to non-nil to trigger graceful deletion
deletionOptions = &metav1.DeleteOptions{}
}
_, _, err = r.store.Delete(ctx, eviction.Name, deletionOptions)
if err != nil {
return nil, err
@ -153,7 +175,7 @@ func (r *EvictionREST) Create(ctx context.Context, obj runtime.Object, createVal
}
// checkAndDecrement checks if the provided PodDisruptionBudget allows any disruption.
func (r *EvictionREST) checkAndDecrement(namespace string, podName string, pdb policy.PodDisruptionBudget) error {
func (r *EvictionREST) checkAndDecrement(namespace string, podName string, pdb policy.PodDisruptionBudget, dryRun bool) error {
if pdb.Status.ObservedGeneration < pdb.Generation {
// TODO(mml): Add a Retry-After header. Once there are time-based
// budgets, we can sometimes compute a sensible suggested value. But
@ -179,6 +201,12 @@ func (r *EvictionREST) checkAndDecrement(namespace string, podName string, pdb p
if pdb.Status.DisruptedPods == nil {
pdb.Status.DisruptedPods = make(map[string]metav1.Time)
}
// If this is a dry-run, we don't need to go any further than that.
if dryRun {
return nil
}
// Eviction handler needs to inform the PDB controller that it is about to delete a pod
// so it should not consider it as available in calculations when updating PodDisruptions allowed.
// If the pod is not deleted within a reasonable time limit PDB controller will assume that it won't

View File

@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "14"
gitVersion = "v1.14.1-k3s.4"
gitCommit = "52f3b42401c93c36467f1fd6d294a3aba26c7def"
gitVersion = "v1.14.3-k3s.1"
gitCommit = "8343999292c55c807be4406fcaa9f047e8751ffd"
gitTreeState = "clean"
buildDate = "2019-04-15T22:13+00:00Z"
buildDate = "2019-06-12T04:56+00:00Z"
)

View File

@ -24,11 +24,16 @@ import (
"k8s.io/kubernetes/pkg/volume"
)
const (
statusSuccess = "success"
statusFailUnknown = "fail-unknown"
)
var storageOperationMetric = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "storage_operation_duration_seconds",
Help: "Storage operation duration",
Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50},
Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50, 120, 300, 600},
},
[]string{"volume_plugin", "operation_name"},
)
@ -41,6 +46,14 @@ var storageOperationErrorMetric = prometheus.NewCounterVec(
[]string{"volume_plugin", "operation_name"},
)
var storageOperationStatusMetric = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "storage_operation_status_count",
Help: "Storage operation return statuses count",
},
[]string{"volume_plugin", "operation_name", "status"},
)
func init() {
registerMetrics()
}
@ -48,6 +61,7 @@ func init() {
func registerMetrics() {
prometheus.MustRegister(storageOperationMetric)
prometheus.MustRegister(storageOperationErrorMetric)
prometheus.MustRegister(storageOperationStatusMetric)
}
// OperationCompleteHook returns a hook to call when an operation is completed
@ -56,11 +70,16 @@ func OperationCompleteHook(plugin, operationName string) func(*error) {
opComplete := func(err *error) {
timeTaken := time.Since(requestTime).Seconds()
// Create metric with operation name and plugin name
status := statusSuccess
if *err != nil {
// TODO: Establish well-known error codes to be able to distinguish
// user configuration errors from system errors.
status = statusFailUnknown
storageOperationErrorMetric.WithLabelValues(plugin, operationName).Inc()
} else {
storageOperationMetric.WithLabelValues(plugin, operationName).Observe(timeTaken)
}
storageOperationStatusMetric.WithLabelValues(plugin, operationName, status).Inc()
}
return opComplete
}
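
The intended call pattern, sketched under the assumption that the hook lives in k8s.io/kubernetes/pkg/volume/util: take the hook before the operation starts and defer it with the address of the named error result, so the deferred call observes the final status.

package main

import (
	volumeutil "k8s.io/kubernetes/pkg/volume/util" // assumed import path for the hook above
)

// mountVolume shows the call pattern: on success the hook records the
// duration histogram and a status="success" count; on failure it records
// the error counter and a status="fail-unknown" count instead.
func mountVolume() (err error) {
	opComplete := volumeutil.OperationCompleteHook("kubernetes.io/example-plugin", "volume_mount")
	defer opComplete(&err)
	// ... perform the mount ...
	return nil
}

func main() { _ = mountVolume() }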

View File

@ -468,29 +468,6 @@ func CheckRequest(quotas []corev1.ResourceQuota, a admission.Attributes, evaluat
restrictedResourcesSet.Insert(localRestrictedResourcesSet.List()...)
}
// verify that for every resource that had limited by default consumption
// enabled that there was a corresponding quota that covered its use.
// if not, we reject the request.
hasNoCoveringQuota := limitedResourceNamesSet.Difference(restrictedResourcesSet)
if len(hasNoCoveringQuota) > 0 {
return quotas, admission.NewForbidden(a, fmt.Errorf("insufficient quota to consume: %v", strings.Join(hasNoCoveringQuota.List(), ",")))
}
// verify that for every scope that had limited access enabled
// that there was a corresponding quota that covered it.
// if not, we reject the request.
scopesHasNoCoveringQuota, err := evaluator.UncoveredQuotaScopes(limitedScopes, restrictedScopes)
if err != nil {
return quotas, err
}
if len(scopesHasNoCoveringQuota) > 0 {
return quotas, fmt.Errorf("insufficient quota to match these scopes: %v", scopesHasNoCoveringQuota)
}
if len(interestingQuotaIndexes) == 0 {
return quotas, nil
}
// Usage of some resources cannot be counted in isolation. For example, when
// the resource represents a number of unique references to external
// resource. In such a case an evaluator needs to process other objects in
@ -537,6 +514,29 @@ func CheckRequest(quotas []corev1.ResourceQuota, a admission.Attributes, evaluat
return quotas, nil
}
// verify that for every resource that had limited by default consumption
// enabled that there was a corresponding quota that covered its use.
// if not, we reject the request.
hasNoCoveringQuota := limitedResourceNamesSet.Difference(restrictedResourcesSet)
if len(hasNoCoveringQuota) > 0 {
return quotas, admission.NewForbidden(a, fmt.Errorf("insufficient quota to consume: %v", strings.Join(hasNoCoveringQuota.List(), ",")))
}
// verify that for every scope that had limited access enabled
// that there was a corresponding quota that covered it.
// if not, we reject the request.
scopesHasNoCoveringQuota, err := evaluator.UncoveredQuotaScopes(limitedScopes, restrictedScopes)
if err != nil {
return quotas, err
}
if len(scopesHasNoCoveringQuota) > 0 {
return quotas, fmt.Errorf("insufficient quota to match these scopes: %v", scopesHasNoCoveringQuota)
}
if len(interestingQuotaIndexes) == 0 {
return quotas, nil
}
outQuotas, err := copyQuotas(quotas)
if err != nil {
return nil, err

View File

@ -228,6 +228,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
// blindly overwrite existing values based on precedence
if len(configAuthInfo.Token) > 0 {
mergedConfig.BearerToken = configAuthInfo.Token
mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
} else if len(configAuthInfo.TokenFile) > 0 {
tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile)
if err != nil {
@ -499,8 +500,9 @@ func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error)
if server := config.overrides.ClusterInfo.Server; len(server) > 0 {
icc.Host = server
}
if token := config.overrides.AuthInfo.Token; len(token) > 0 {
icc.BearerToken = token
if len(config.overrides.AuthInfo.Token) > 0 || len(config.overrides.AuthInfo.TokenFile) > 0 {
icc.BearerToken = config.overrides.AuthInfo.Token
icc.BearerTokenFile = config.overrides.AuthInfo.TokenFile
}
if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 {
icc.TLSClientConfig.CAFile = certificateAuthorityFile
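
A hedged usage sketch of the override path: with this change, a token override carries its token file through to the resulting rest.Config, letting client-go re-read the file when the in-memory token goes stale. The token value and file path below are hypothetical.

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	overrides := &clientcmd.ConfigOverrides{
		AuthInfo: clientcmdapi.AuthInfo{
			Token:     "initial-bearer-token",   // hypothetical token
			TokenFile: "/var/run/secrets/token", // hypothetical path
		},
	}
	cfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig()
	if err != nil {
		panic(err)
	}
	// Both fields are now populated, not just BearerToken.
	fmt.Println(cfg.BearerToken, cfg.BearerTokenFile)
}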

View File

@ -6,7 +6,7 @@ cloud.google.com/go v0.1.0-115-g3b1ae45394a234
github.com/armon/circbuf bbbad097214e2918d8543d5201d12bfd7bca254d
github.com/asaskevich/govalidator v9-26-gf9ffefc3facfbe
github.com/aws/aws-sdk-go v1.16.26
github.com/Azure/azure-sdk-for-go v21.3.0
github.com/Azure/azure-sdk-for-go v21.4.0
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/Azure/go-autorest v11.1.0
github.com/bazelbuild/bazel-gazelle 0.15.0
@ -26,7 +26,7 @@ github.com/container-storage-interface/spec v1.1.0
github.com/coreos/bbolt v1.3.1-coreos.6
github.com/coreos/etcd v3.3.10
github.com/coreos/go-oidc 065b426bd41667456c1a924468f507673629c46b
github.com/coreos/go-semver v0.2.0-9-ge214231b295a8e
github.com/coreos/go-semver v0.3.0
github.com/coreos/go-systemd v17
github.com/coreos/pkg v4
github.com/coreos/rkt v1.25.0
@ -47,7 +47,7 @@ github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
github.com/elazarl/goproxy v1.0-104-gc4fc26588b6ef8
github.com/emicklei/go-restful 2.2.0-4-gff4f55a206334e
github.com/euank/go-kmsg-parser v2.0.0
github.com/evanphx/json-patch v4.1.0-19-g5858425f75500d
github.com/evanphx/json-patch v4.2.0
github.com/exponent-io/jsonpath d6023ce2651d8eafb5c75bb0c7167536102ec9f5
github.com/fatih/camelcase f6a740d52f961c60348ebb109adde9f4635d7540
github.com/fsnotify/fsnotify v1.3.1-1-gf12c6236fe7b5c
@ -173,7 +173,7 @@ github.com/tmc/grpc-websocket-proxy 89b8d40f7ca833297db804fcb3be53a76d01c238
github.com/ugorji/go bdcc60b419d136a85cdf2e7cbcac34b3f1cd6e57
github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e
github.com/vishvananda/netns be1fbeda19366dea804f00efff2dd73a1642fdcc
github.com/vmware/govmomi v0.20.0
github.com/vmware/govmomi v0.20.1
github.com/vmware/photon-controller-go-sdk PROMOTED-488
github.com/xanzy/go-cloudstack v2.1.1-1-g1e2cbf647e57fa
github.com/xiang90/probing 0.0.1