Merge pull request #56754 from dims/remove-hacks-for-mesos

Automatic merge from submit-queue (batch tested with PRs 57127, 57011, 56754, 56601, 56483). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Remove hacks added for mesos

**What this PR does / why we need it**:

Since Mesos is no longer in the main repository, and since we have
things like dynamic kubelet configuration in progress, we should
drop these undocumented, untested, private hooks:

- `cmd/kubelet/app/server.go::CreateAPIServerClientConfig`
- `pkg/kubelet/kubelet_getters.go::GetRuntime`
- `pkg/kubelet/kubelet_pods.go::getPhase`

Also remove fields from the `Dependencies` struct that were specific to
the Mesos integration (`Builder` and `ContainerRuntimeOptions`).
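
For context, the removed fields followed the functional-option pattern: out-of-tree integrations could register functions that mutate the Kubelet at startup. A minimal, self-contained sketch of that hook shape (simplified stand-in types, not the real kubelet signatures):

```go
package main

import "fmt"

// Kubelet is a stand-in for the real pkg/kubelet type.
type Kubelet struct {
	nodeLabels map[string]string
}

// Option mirrors the shape of the removed Dependencies hooks:
// a function that may mutate the Kubelet arbitrarily at startup.
type Option func(*Kubelet)

func main() {
	// An out-of-tree integration could inject behavior like this,
	// which is exactly the kind of hard-to-trace hook being removed.
	opts := []Option{
		func(k *Kubelet) { k.nodeLabels["example/role"] = "worker" },
	}

	k := &Kubelet{nodeLabels: map[string]string{}}
	for _, opt := range opts {
		opt(k)
	}
	fmt.Println(k.nodeLabels) // map[example/role:worker]
}
```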

Also remove stale references in test/e2e and in the test owners file.


**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
Drop hacks used for the Mesos integration, which has already been removed from the main Kubernetes repository
```
Kubernetes Submit Queue 2017-12-17 06:25:56 -08:00 committed by GitHub
commit 94327c5f72
9 changed files with 19 additions and 67 deletions

cmd/kubelet/app/server.go

@@ -329,7 +329,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) {
 	var heartbeatClient v1core.CoreV1Interface
 	var externalKubeClient clientset.Interface
-	clientConfig, err := CreateAPIServerClientConfig(s)
+	clientConfig, err := createAPIServerClientConfig(s)
 	var clientCertificateManager certificate.Manager
 	if err == nil {
@@ -613,10 +613,9 @@ func createClientConfig(s *options.KubeletServer) (*restclient.Config, error) {
 	}
 }

-// CreateAPIServerClientConfig generates a client.Config from command line flags
+// createAPIServerClientConfig generates a client.Config from command line flags
 // via createClientConfig and then injects chaos into the configuration via addChaosToClientConfig.
-// This func is exported to support integration with third party kubelet extensions (e.g. kubernetes-mesos).
-func CreateAPIServerClientConfig(s *options.KubeletServer) (*restclient.Config, error) {
+func createAPIServerClientConfig(s *options.KubeletServer) (*restclient.Config, error) {
 	clientConfig, err := createClientConfig(s)
 	if err != nil {
 		return nil, err
@@ -688,15 +687,11 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal.
 	credentialprovider.SetPreferredDockercfgPath(kubeFlags.RootDirectory)
 	glog.V(2).Infof("Using root directory: %v", kubeFlags.RootDirectory)

-	builder := kubeDeps.Builder
-	if builder == nil {
-		builder = CreateAndInitKubelet
-	}
 	if kubeDeps.OSInterface == nil {
 		kubeDeps.OSInterface = kubecontainer.RealOS{}
 	}
-	k, err := builder(kubeCfg,
+	k, err := CreateAndInitKubelet(kubeCfg,
 		kubeDeps,
 		&kubeFlags.ContainerRuntimeOptions,
 		kubeFlags.ContainerRuntime,
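
The `RunKubelet` change above removes a "use the injected constructor if set, else the default" indirection that only Mesos consumed. A minimal sketch of that pattern and its simplification (hypothetical names, not the real kubelet signatures):

```go
package main

import "fmt"

type kubelet struct{ name string }

type builderFunc func() *kubelet

func defaultBuilder() *kubelet { return &kubelet{name: "default"} }

// Before: callers could swap in their own builder via a nil-checked field.
func runWithHook(builder builderFunc) *kubelet {
	if builder == nil {
		builder = defaultBuilder
	}
	return builder()
}

// After: with no out-of-tree consumer left, the default is called directly.
func run() *kubelet {
	return defaultBuilder()
}

func main() {
	fmt.Println(runWithHook(nil).name) // default
	fmt.Println(run().name)            // default
}
```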

pkg/api/endpoints/util.go

@@ -89,8 +89,7 @@ type addressKey struct {
 // any existing ready state.
 func mapAddressByPort(addr *api.EndpointAddress, port api.EndpointPort, ready bool, allAddrs map[addressKey]*api.EndpointAddress, portToAddrReadyMap map[api.EndpointPort]addressSet) *api.EndpointAddress {
 	// use addressKey to distinguish between two endpoints that are identical addresses
-	// but may have come from different hosts, for attribution. For instance, Mesos
-	// assigns pods the node IP, but the pods are distinct.
+	// but may have come from different hosts, for attribution.
 	key := addressKey{ip: addr.IP}
 	if addr.TargetRef != nil {
 		key.uid = addr.TargetRef.UID
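
The keying idea described in the surviving comment is simple: an IP alone is not unique, so the backing pod's UID is folded into the key when known. A minimal sketch with illustrative types (not the real `pkg/api/endpoints` code):

```go
package main

import "fmt"

// addressKey mirrors the keying idea: two endpoints can share an IP
// (e.g. pods assigned the node IP) yet be backed by different pods.
type addressKey struct {
	ip  string
	uid string
}

func main() {
	seen := map[addressKey]string{}

	a := addressKey{ip: "10.0.0.1", uid: "pod-a"}
	b := addressKey{ip: "10.0.0.1", uid: "pod-b"}

	seen[a] = "endpoint A"
	seen[b] = "endpoint B"

	fmt.Println(len(seen)) // 2: the UID keeps identical IPs distinct
}
```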

pkg/kubelet/kubelet.go

@@ -223,27 +223,7 @@ type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 // at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping
 // these objects while we figure out a more comprehensive dependency injection story for the Kubelet.
 type Dependencies struct {
-	// TODO(mtaufen): KubeletBuilder:
-	// Mesos currently uses this as a hook to let them make their own call to
-	// let them wrap the KubeletBootstrap that CreateAndInitKubelet returns with
-	// their own KubeletBootstrap. It's a useful hook. I need to think about what
-	// a nice home for it would be. There seems to be a trend, between this and
-	// the Options fields below, of providing hooks where you can add extra functionality
-	// to the Kubelet for your solution. Maybe we should centralize these sorts of things?
-	Builder Builder
-
-	// TODO(mtaufen): ContainerRuntimeOptions and Options:
-	// Arrays of functions that can do arbitrary things to the Kubelet and the Runtime
-	// seem like a difficult path to trace when it's time to debug something.
-	// I'm leaving these fields here for now, but there is likely an easier-to-follow
-	// way to support their intended use cases. E.g. ContainerRuntimeOptions
-	// is used by Mesos to set an environment variable in containers which has
-	// some connection to their container GC. It seems that Mesos intends to use
-	// Options to add additional node conditions that are updated as part of the
-	// Kubelet lifecycle (see https://github.com/kubernetes/kubernetes/pull/21521).
-	// We should think about providing more explicit ways of doing these things.
-	ContainerRuntimeOptions []kubecontainer.Option
-	Options []Option
+	Options []Option

 	// Injected Dependencies
 	Auth server.AuthInterface

pkg/kubelet/kubelet_getters.go

@@ -179,9 +179,8 @@ func (kl *Kubelet) GetHostname() string {
 	return kl.hostname
 }

-// GetRuntime returns the current Runtime implementation in use by the kubelet. This func
-// is exported to simplify integration with third party kubelet extensions (e.g. kubernetes-mesos).
-func (kl *Kubelet) GetRuntime() kubecontainer.Runtime {
+// getRuntime returns the current Runtime implementation in use by the kubelet.
+func (kl *Kubelet) getRuntime() kubecontainer.Runtime {
 	return kl.containerRuntime
 }

pkg/kubelet/kubelet_network.go

@@ -65,7 +65,7 @@ func (nh *networkHost) GetKubeClient() clientset.Interface {
 }

 func (nh *networkHost) GetRuntime() kubecontainer.Runtime {
-	return nh.kubelet.GetRuntime()
+	return nh.kubelet.getRuntime()
 }

 func (nh *networkHost) SupportsLegacyFeatures() bool {
@@ -88,7 +88,7 @@ type criNetworkHost struct {
 // Any network plugin invoked by a cri must implement NamespaceGetter
 // to talk directly to the runtime instead.
 func (c *criNetworkHost) GetNetNS(containerID string) (string, error) {
-	return c.kubelet.GetRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID})
+	return c.kubelet.getRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID})
 }

 // NoOpLegacyHost implements the network.LegacyHost interface for the remote
@@ -106,7 +106,7 @@ func (n *NoOpLegacyHost) GetKubeClient() clientset.Interface {
 	return nil
 }

-// GetRuntime always returns "nil" for 'NoOpLegacyHost'
+// getRuntime always returns "nil" for 'NoOpLegacyHost'
 func (n *NoOpLegacyHost) GetRuntime() kubecontainer.Runtime {
 	return nil
 }
@@ -188,7 +188,7 @@ func (kl *Kubelet) updatePodCIDR(cidr string) {
 	// kubelet -> generic runtime -> runtime shim -> network plugin
 	// docker/rkt non-cri implementations have a passthrough UpdatePodCIDR
-	if err := kl.GetRuntime().UpdatePodCIDR(cidr); err != nil {
+	if err := kl.getRuntime().UpdatePodCIDR(cidr); err != nil {
 		glog.Errorf("Failed to update pod CIDR: %v", err)
 		return
 	}
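
The `updatePodCIDR` comment above describes a pass-through chain: kubelet -> generic runtime -> runtime shim -> network plugin. A rough sketch of that delegation style (hypothetical types, not the real kubelet interfaces):

```go
package main

import "fmt"

// podCIDRUpdater stands in for the UpdatePodCIDR capability each layer exposes.
type podCIDRUpdater interface {
	UpdatePodCIDR(cidr string) error
}

// networkPlugin is the end of the chain.
type networkPlugin struct{}

func (networkPlugin) UpdatePodCIDR(cidr string) error {
	fmt.Println("plugin got CIDR:", cidr)
	return nil
}

// runtimeShim simply passes the call through to the plugin.
type runtimeShim struct{ plugin podCIDRUpdater }

func (s runtimeShim) UpdatePodCIDR(cidr string) error {
	return s.plugin.UpdatePodCIDR(cidr)
}

func main() {
	// kubelet -> runtime shim -> network plugin, as in the comment above.
	var rt podCIDRUpdater = runtimeShim{plugin: networkPlugin{}}
	if err := rt.UpdatePodCIDR("10.244.0.0/24"); err != nil {
		fmt.Println("failed to update pod CIDR:", err)
	}
}
```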

pkg/kubelet/kubelet_pods.go

@@ -814,7 +814,7 @@ func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *k
 	if runningPod != nil {
 		p = *runningPod
 	} else if status != nil {
-		p = kubecontainer.ConvertPodStatusToRunningPod(kl.GetRuntime().Type(), status)
+		p = kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), status)
 	} else {
 		return fmt.Errorf("one of the two arguments must be non-nil: runningPod, status")
 	}
@@ -1231,10 +1231,8 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, lo
 	return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr)
 }

-// GetPhase returns the phase of a pod given its container info.
-// This func is exported to simplify integration with 3rd party kubelet
-// integrations like kubernetes-mesos.
-func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
+// getPhase returns the phase of a pod given its container info.
+func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
 	initialized := 0
 	pendingInitialization := 0
 	failedInitialization := 0
@@ -1364,7 +1362,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
 	// Assume info is ready to process
 	spec := &pod.Spec
 	allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
-	s.Phase = GetPhase(spec, allStatus)
+	s.Phase = getPhase(spec, allStatus)
 	kl.probeManager.UpdatePodStatus(pod.UID, s)
 	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
 	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase))
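
As the counters at the top of `getPhase` suggest, the function works by tallying per-container outcomes and deriving a pod-level phase from the tallies. A much-simplified sketch of that style (a toy under stated assumptions, not the real algorithm, which handles restart policies and many more states):

```go
package main

import "fmt"

type phase string

const (
	podPending phase = "Pending"
	podRunning phase = "Running"
	podFailed  phase = "Failed"
)

// derivePhase is a toy version of the counting approach: classify each
// init container, then decide the pod-level phase from the tallies.
func derivePhase(initStates []string) phase {
	initialized, failed, pending := 0, 0, 0
	for _, s := range initStates {
		switch s {
		case "succeeded":
			initialized++
		case "failed":
			failed++
		default:
			pending++
		}
	}
	switch {
	case failed > 0:
		return podFailed
	case pending > 0:
		return podPending
	case initialized == len(initStates):
		return podRunning
	}
	return podPending
}

func main() {
	fmt.Println(derivePhase([]string{"succeeded", "running"})) // Pending
	fmt.Println(derivePhase([]string{"succeeded"}))            // Running
}
```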

pkg/kubelet/kubelet_pods_test.go

@@ -1845,7 +1845,7 @@ func TestPodPhaseWithRestartAlways(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses)
+		status := getPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses)
 		assert.Equal(t, test.status, status, "[test %s]", test.test)
 	}
 }
@@ -1945,7 +1945,7 @@ func TestPodPhaseWithRestartNever(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses)
+		status := getPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses)
 		assert.Equal(t, test.status, status, "[test %s]", test.test)
 	}
 }
@@ -2058,7 +2058,7 @@ func TestPodPhaseWithRestartOnFailure(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses)
+		status := getPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses)
 		assert.Equal(t, test.status, status, "[test %s]", test.test)
 	}
 }

test/e2e/framework/util.go

@@ -1105,22 +1105,6 @@ func GetContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID {
 		for _, port := range ss.Ports {
 			for _, addr := range ss.Addresses {
 				containerPort := port.Port
-				hostPort := port.Port
-
-				// use endpoint annotations to recover the container port in a Mesos setup
-				// compare contrib/mesos/pkg/service/endpoints_controller.syncService
-				key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort)
-				mesosContainerPortString := endpoints.Annotations[key]
-				if mesosContainerPortString != "" {
-					mesosContainerPort, err := strconv.Atoi(mesosContainerPortString)
-					if err != nil {
-						continue
-					}
-					containerPort = int32(mesosContainerPort)
-					Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString)
-				}
-
-				// Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
 				if _, ok := m[addr.TargetRef.UID]; !ok {
 					m[addr.TargetRef.UID] = make([]int, 0)
 				}
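
With the Mesos annotation branch gone, the helper reduces to accumulating container ports per pod UID. A self-contained sketch of that remaining accumulation (stand-in types; the real `PortsByPodUID` is keyed by Kubernetes UIDs):

```go
package main

import "fmt"

type uid string

// portsByPodUID mirrors the shape of the e2e framework's PortsByPodUID map.
type portsByPodUID map[uid][]int

func main() {
	m := portsByPodUID{}
	add := func(u uid, port int) {
		// Same init-then-append pattern as the surviving code above.
		if _, ok := m[u]; !ok {
			m[u] = make([]int, 0)
		}
		m[u] = append(m[u], port)
	}
	add("pod-a", 8080)
	add("pod-a", 9090)
	add("pod-b", 8080)
	fmt.Println(m) // map[pod-a:[8080 9090] pod-b:[8080]]
}
```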

test/test_owners.csv

@@ -248,9 +248,6 @@
 Loadbalancing: L7 GCE should create ingress with given static-ip,eparis,1,
 Loadbalancing: L7 Nginx should conform to Ingress spec,ncdc,1,network
 "Logging soak should survive logging 1KB every * seconds, for a duration of *, scaling up to * pods per node",justinsb,1,node
 "MemoryEviction when there is memory pressure should evict pods in the correct order (besteffort first, then burstable, then guaranteed)",ixdy,1,node
-Mesos applies slave attributes as labels,justinsb,1,apps
-Mesos schedules pods annotated with roles on correct slaves,tallclair,1,apps
-Mesos starts static pods on every node in the mesos cluster,lavalamp,1,apps
 MetricsGrabber should grab all metrics from API server.,gmarek,0,instrumentation
 MetricsGrabber should grab all metrics from a ControllerManager.,gmarek,0,instrumentation
 MetricsGrabber should grab all metrics from a Kubelet.,gmarek,0,instrumentation
