mirror of https://github.com/k3s-io/k3s

commit ed382ec0a0
Merge pull request #15386 from yujuhong/rename_kubelettypes

Rename imported package local name kubeletTypes to kubetypes
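Editor's note (not part of the commit): the change below is a mechanical rename of the local import alias for the kubelet types package; no import path or behavior changes. The pattern repeated in every hunk is simply:

// Before: the package is imported under the mixed-case alias kubeletTypes.
import kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"

// After: the same package is imported under the all-lowercase alias kubetypes,
// and every reference such as kubeletTypes.PodUpdate becomes kubetypes.PodUpdate.
import kubetypes "k8s.io/kubernetes/pkg/kubelet/types"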
@@ -51,7 +51,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/cadvisor"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/kubelet/dockertools"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/master"
 "k8s.io/kubernetes/pkg/tools/etcdtest"
@@ -415,7 +415,7 @@ containers:

 // Wait for the mirror pod to be created.
 podName := fmt.Sprintf("%s-localhost", desc)
-namespace := kubeletTypes.NamespaceDefault
+namespace := kubetypes.NamespaceDefault
 if err := wait.Poll(time.Second, longTestTimeout,
 podRunning(c, namespace, podName)); err != nil {
 if pods, err := c.Pods(namespace).List(labels.Everything(), fields.Everything()); err == nil {

@@ -49,7 +49,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/dockertools"
 "k8s.io/kubernetes/pkg/kubelet/network"
 "k8s.io/kubernetes/pkg/kubelet/qos"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/master/ports"
 "k8s.io/kubernetes/pkg/util"
 "k8s.io/kubernetes/pkg/util/io"
@@ -149,8 +149,8 @@ type KubeletBootstrap interface {
 StartGarbageCollection()
 ListenAndServe(address net.IP, port uint, tlsOptions *kubelet.TLSOptions, auth kubelet.AuthInterface, enableDebuggingHandlers bool)
 ListenAndServeReadOnly(address net.IP, port uint)
-Run(<-chan kubeletTypes.PodUpdate)
-RunOnce(<-chan kubeletTypes.PodUpdate) ([]kubelet.RunPodResult, error)
+Run(<-chan kubetypes.PodUpdate)
+RunOnce(<-chan kubetypes.PodUpdate) ([]kubelet.RunPodResult, error)
 }

 // create and initialize a Kubelet instance
@@ -174,9 +174,9 @@ func NewKubeletServer() *KubeletServer {
 FileCheckFrequency: 20 * time.Second,
 HealthzBindAddress: net.ParseIP("127.0.0.1"),
 HealthzPort: 10248,
-HostNetworkSources: kubeletTypes.AllSource,
-HostPIDSources: kubeletTypes.AllSource,
-HostIPCSources: kubeletTypes.AllSource,
+HostNetworkSources: kubetypes.AllSource,
+HostPIDSources: kubetypes.AllSource,
+HostIPCSources: kubetypes.AllSource,
 HTTPCheckFrequency: 20 * time.Second,
 ImageGCHighThresholdPercent: 90,
 ImageGCLowThresholdPercent: 80,
@@ -284,17 +284,17 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 // UnsecuredKubeletConfig returns a KubeletConfig suitable for being run, or an error if the server setup
 // is not valid. It will not start any background processes, and does not include authentication/authorization
 func (s *KubeletServer) UnsecuredKubeletConfig() (*KubeletConfig, error) {
-hostNetworkSources, err := kubeletTypes.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
+hostNetworkSources, err := kubetypes.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
 if err != nil {
 return nil, err
 }

-hostPIDSources, err := kubeletTypes.GetValidatedSources(strings.Split(s.HostPIDSources, ","))
+hostPIDSources, err := kubetypes.GetValidatedSources(strings.Split(s.HostPIDSources, ","))
 if err != nil {
 return nil, err
 }

-hostIPCSources, err := kubeletTypes.GetValidatedSources(strings.Split(s.HostIPCSources, ","))
+hostIPCSources, err := kubetypes.GetValidatedSources(strings.Split(s.HostIPCSources, ","))
 if err != nil {
 return nil, err
 }
@@ -765,17 +765,17 @@ func makePodSourceConfig(kc *KubeletConfig) *config.PodConfig {
 // define file config source
 if kc.ConfigFile != "" {
 glog.Infof("Adding manifest file: %v", kc.ConfigFile)
-config.NewSourceFile(kc.ConfigFile, kc.NodeName, kc.FileCheckFrequency, cfg.Channel(kubeletTypes.FileSource))
+config.NewSourceFile(kc.ConfigFile, kc.NodeName, kc.FileCheckFrequency, cfg.Channel(kubetypes.FileSource))
 }

 // define url config source
 if kc.ManifestURL != "" {
 glog.Infof("Adding manifest url %q with HTTP header %v", kc.ManifestURL, kc.ManifestURLHeader)
-config.NewSourceURL(kc.ManifestURL, kc.ManifestURLHeader, kc.NodeName, kc.HTTPCheckFrequency, cfg.Channel(kubeletTypes.HTTPSource))
+config.NewSourceURL(kc.ManifestURL, kc.ManifestURLHeader, kc.NodeName, kc.HTTPCheckFrequency, cfg.Channel(kubetypes.HTTPSource))
 }
 if kc.KubeClient != nil {
 glog.Infof("Watching apiserver")
-config.NewSourceApiserver(kc.KubeClient, kc.NodeName, cfg.Channel(kubeletTypes.ApiserverSource))
+config.NewSourceApiserver(kc.KubeClient, kc.NodeName, cfg.Channel(kubetypes.ApiserverSource))
 }
 return cfg
 }

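Illustrative sketch (not part of the diff): the UnsecuredKubeletConfig hunk above validates each source flag with GetValidatedSources under the new alias. A minimal fragment showing the same call shape, where the flag value "file,http" is a hypothetical example:

// Hypothetical flag value; the call shape matches the hunk above.
sources := "file,http"
hostNetworkSources, err := kubetypes.GetValidatedSources(strings.Split(sources, ","))
if err != nil {
	return nil, err // an unrecognized source name is rejected
}
_ = hostNetworkSources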
@@ -43,7 +43,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet"
 "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/kubelet/dockertools"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/util"
 )

@@ -265,9 +265,9 @@ func (k *KubernetesExecutor) onInitialRegistration() {
 defer close(k.initialRegComplete)

 // emit an empty update to allow the mesos "source" to be marked as seen
-k.updateChan <- kubeletTypes.PodUpdate{
+k.updateChan <- kubetypes.PodUpdate{
 Pods: []*api.Pod{},
-Op: kubeletTypes.SET,
+Op: kubetypes.SET,
 Source: k.sourcename,
 }
 }
@@ -393,8 +393,8 @@ func (k *KubernetesExecutor) handleChangedApiserverPod(pod *api.Pod) {
 oldPod.DeletionTimestamp = pod.DeletionTimestamp
 oldPod.DeletionGracePeriodSeconds = pod.DeletionGracePeriodSeconds

-update := kubeletTypes.PodUpdate{
-Op: kubeletTypes.UPDATE,
+update := kubetypes.PodUpdate{
+Op: kubetypes.UPDATE,
 Pods: []*api.Pod{oldPod},
 }
 k.updateChan <- update
@@ -566,8 +566,8 @@ func (k *KubernetesExecutor) launchTask(driver bindings.ExecutorDriver, taskId s
 k.pods[podFullName] = pod

 // send the new pod to the kubelet which will spin it up
-update := kubeletTypes.PodUpdate{
-Op: kubeletTypes.ADD,
+update := kubetypes.PodUpdate{
+Op: kubetypes.ADD,
 Pods: []*api.Pod{pod},
 }
 k.updateChan <- update
@@ -771,8 +771,8 @@ func (k *KubernetesExecutor) removePodTask(driver bindings.ExecutorDriver, tid,
 delete(k.pods, pid)

 // tell the kubelet to remove the pod
-update := kubeletTypes.PodUpdate{
-Op: kubeletTypes.REMOVE,
+update := kubetypes.PodUpdate{
+Op: kubetypes.REMOVE,
 Pods: []*api.Pod{pod},
 }
 k.updateChan <- update

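Illustrative sketch (not part of the diff): the executor hunks above all build the same kubetypes.PodUpdate value. With the new alias, the empty SET that marks a source as seen looks like this; the source name "mesos" is hypothetical (the executor itself uses k.sourcename):

update := kubetypes.PodUpdate{
	Pods:   []*api.Pod{}, // empty pod list
	Op:     kubetypes.SET, // replace everything previously sent by this source
	Source: "mesos",       // hypothetical source name
}
updateChan <- update // updateChan feeds the kubelet's pod config, as in the hunks above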
@@ -43,7 +43,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet"
 kconfig "k8s.io/kubernetes/pkg/kubelet/config"
 "k8s.io/kubernetes/pkg/kubelet/dockertools"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/watch"

@@ -67,15 +67,15 @@ func TestExecutorRegister(t *testing.T) {
 executor.Init(mockDriver)
 executor.Registered(mockDriver, nil, nil, nil)

-initialPodUpdate := kubeletTypes.PodUpdate{
+initialPodUpdate := kubetypes.PodUpdate{
 Pods: []*api.Pod{},
-Op: kubeletTypes.SET,
+Op: kubetypes.SET,
 Source: executor.sourcename,
 }
 receivedInitialPodUpdate := false
 select {
 case m := <-updates:
-update, ok := m.(kubeletTypes.PodUpdate)
+update, ok := m.(kubetypes.PodUpdate)
 if ok {
 if reflect.DeepEqual(initialPodUpdate, update) {
 receivedInitialPodUpdate = true
@@ -213,7 +213,7 @@ func TestExecutorLaunchAndKillTask(t *testing.T) {
 gotPodUpdate := false
 select {
 case m := <-updates:
-update, ok := m.(kubeletTypes.PodUpdate)
+update, ok := m.(kubetypes.PodUpdate)
 if ok && len(update.Pods) == 1 {
 gotPodUpdate = true
 }
@@ -361,7 +361,7 @@ func TestExecutorStaticPods(t *testing.T) {
 if !ok {
 return
 }
-podUpdate, ok := update.(kubeletTypes.PodUpdate)
+podUpdate, ok := update.(kubetypes.PodUpdate)
 if !ok {
 continue
 }

@@ -46,7 +46,7 @@ import (
 kconfig "k8s.io/kubernetes/pkg/kubelet/config"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/kubelet/dockertools"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/util"
 utilio "k8s.io/kubernetes/pkg/util/io"
 "k8s.io/kubernetes/pkg/util/mount"
@@ -56,7 +56,7 @@ import (
 const (
 // if we don't use this source then the kubelet will do funny, mirror things.
 // @see ConfigSourceAnnotationKey
-MESOS_CFG_SOURCE = kubeletTypes.ApiserverSource
+MESOS_CFG_SOURCE = kubetypes.ApiserverSource
 )

 type KubeletExecutorServer struct {
@@ -137,17 +137,17 @@ func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
 //cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
 //log.Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

-hostNetworkSources, err := kubeletTypes.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
+hostNetworkSources, err := kubetypes.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
 if err != nil {
 return err
 }

-hostPIDSources, err := kubeletTypes.GetValidatedSources(strings.Split(s.HostPIDSources, ","))
+hostPIDSources, err := kubetypes.GetValidatedSources(strings.Split(s.HostPIDSources, ","))
 if err != nil {
 return err
 }

-hostIPCSources, err := kubeletTypes.GetValidatedSources(strings.Split(s.HostIPCSources, ","))
+hostIPCSources, err := kubetypes.GetValidatedSources(strings.Split(s.HostIPCSources, ","))
 if err != nil {
 return err
 }
@@ -384,7 +384,7 @@ func (ks *KubeletExecutorServer) createAndInitKubelet(

 go exec.InitializeStaticPodsSource(func() {
 // Create file source only when we are called back. Otherwise, it is never marked unseen.
-fileSourceUpdates := pc.Channel(kubeletTypes.FileSource)
+fileSourceUpdates := pc.Channel(kubetypes.FileSource)

 kconfig.NewSourceFile(staticPodsConfigPath, kc.Hostname, kc.FileCheckFrequency, fileSourceUpdates)
 })
@@ -450,7 +450,7 @@ func (kl *kubeletExecutor) ListenAndServe(address net.IP, port uint, tlsOptions

 // runs the main kubelet loop, closing the kubeletFinished chan when the loop exits.
 // never returns.
-func (kl *kubeletExecutor) Run(updates <-chan kubeletTypes.PodUpdate) {
+func (kl *kubeletExecutor) Run(updates <-chan kubetypes.PodUpdate) {
 defer func() {
 close(kl.kubeletFinished)
 util.HandleCrash()
@@ -461,7 +461,7 @@ func (kl *kubeletExecutor) Run(updates <-chan kubeletTypes.PodUpdate) {

 // push updates through a closable pipe. when the executor indicates shutdown
 // via Done() we want to stop the Kubelet from processing updates.
-pipe := make(chan kubeletTypes.PodUpdate)
+pipe := make(chan kubetypes.PodUpdate)
 go func() {
 // closing pipe will cause our patched kubelet's syncLoop() to exit
 defer close(pipe)

@@ -47,7 +47,7 @@ import (
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/kubelet/container"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/tools"
 "k8s.io/kubernetes/pkg/util/sets"
@@ -897,7 +897,7 @@ func (ks *KubernetesScheduler) recoverTasks() error {
 ks.slaveHostNames.Register(slaveId, t.Offer.Host())
 }
 for _, pod := range podList.Items {
-if _, isMirrorPod := pod.Annotations[kubeletTypes.ConfigMirrorAnnotationKey]; isMirrorPod {
+if _, isMirrorPod := pod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; isMirrorPod {
 // mirrored pods are never reconciled because the scheduler isn't responsible for
 // scheduling them; they're started by the executor/kubelet upon instantiation and
 // reflected in the apiserver afterward. the scheduler has no knowledge of them.

@@ -22,7 +22,7 @@ import (
 "k8s.io/kubernetes/pkg/client/cache"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/fields"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )

 // NewSourceApiserver creates a config source that watches and pulls from the apiserver.
@@ -38,7 +38,7 @@ func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}
 for _, o := range objs {
 pods = append(pods, o.(*api.Pod))
 }
-updates <- kubeletTypes.PodUpdate{Pods: pods, Op: kubeletTypes.SET, Source: kubeletTypes.ApiserverSource}
+updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.ApiserverSource}
 }
 cache.NewReflector(lw, &api.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0).Run()
 }

@@ -21,7 +21,7 @@ import (

 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/client/cache"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/watch"
 )
@@ -67,8 +67,8 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 if !ok {
 t.Errorf("Unable to read from channel when expected")
 }
-update := got.(kubeletTypes.PodUpdate)
-expected := CreatePodUpdate(kubeletTypes.SET, kubeletTypes.ApiserverSource, pod1v1)
+update := got.(kubetypes.PodUpdate)
+expected := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod1v1)
 if !api.Semantic.DeepEqual(expected, update) {
 t.Errorf("Expected %#v; Got %#v", expected, update)
 }
@@ -79,10 +79,10 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 if !ok {
 t.Errorf("Unable to read from channel when expected")
 }
-update = got.(kubeletTypes.PodUpdate)
+update = got.(kubetypes.PodUpdate)
 // Could be sorted either of these two ways:
-expectedA := CreatePodUpdate(kubeletTypes.SET, kubeletTypes.ApiserverSource, pod1v1, pod2)
-expectedB := CreatePodUpdate(kubeletTypes.SET, kubeletTypes.ApiserverSource, pod2, pod1v1)
+expectedA := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod1v1, pod2)
+expectedB := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod2, pod1v1)

 if !api.Semantic.DeepEqual(expectedA, update) && !api.Semantic.DeepEqual(expectedB, update) {
 t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, update)
@@ -94,9 +94,9 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 if !ok {
 t.Errorf("Unable to read from channel when expected")
 }
-update = got.(kubeletTypes.PodUpdate)
-expectedA = CreatePodUpdate(kubeletTypes.SET, kubeletTypes.ApiserverSource, pod1v2, pod2)
-expectedB = CreatePodUpdate(kubeletTypes.SET, kubeletTypes.ApiserverSource, pod2, pod1v2)
+update = got.(kubetypes.PodUpdate)
+expectedA = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod1v2, pod2)
+expectedB = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod2, pod1v2)

 if !api.Semantic.DeepEqual(expectedA, update) && !api.Semantic.DeepEqual(expectedB, update) {
 t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, update)
@@ -108,8 +108,8 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 if !ok {
 t.Errorf("Unable to read from channel when expected")
 }
-update = got.(kubeletTypes.PodUpdate)
-expected = CreatePodUpdate(kubeletTypes.SET, kubeletTypes.ApiserverSource, pod2)
+update = got.(kubetypes.PodUpdate)
+expected = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod2)
 if !api.Semantic.DeepEqual(expected, update) {
 t.Errorf("Expected %#v, Got %#v", expected, update)
 }
@@ -120,8 +120,8 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 if !ok {
 t.Errorf("Unable to read from channel when expected")
 }
-update = got.(kubeletTypes.PodUpdate)
-expected = CreatePodUpdate(kubeletTypes.SET, kubeletTypes.ApiserverSource)
+update = got.(kubetypes.PodUpdate)
+expected = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource)
 if !api.Semantic.DeepEqual(expected, update) {
 t.Errorf("Expected %#v, Got %#v", expected, update)
 }
@@ -150,7 +150,7 @@ func TestNewSourceApiserver_TwoNamespacesSameName(t *testing.T) {
 if !ok {
 t.Errorf("Unable to read from channel when expected")
 }
-update := got.(kubeletTypes.PodUpdate)
+update := got.(kubetypes.PodUpdate)
 // Make sure that we get both pods. Catches bug #2294.
 if !(len(update.Pods) == 2) {
 t.Errorf("Expected %d, Got %d", 2, len(update.Pods))
@@ -162,7 +162,7 @@ func TestNewSourceApiserver_TwoNamespacesSameName(t *testing.T) {
 if !ok {
 t.Errorf("Unable to read from channel when expected")
 }
-update = got.(kubeletTypes.PodUpdate)
+update = got.(kubetypes.PodUpdate)
 if !(len(update.Pods) == 1) {
 t.Errorf("Expected %d, Got %d", 1, len(update.Pods))
 }
@@ -184,8 +184,8 @@ func TestNewSourceApiserverInitialEmptySendsEmptyPodUpdate(t *testing.T) {
 if !ok {
 t.Errorf("Unable to read from channel when expected")
 }
-update := got.(kubeletTypes.PodUpdate)
-expected := CreatePodUpdate(kubeletTypes.SET, kubeletTypes.ApiserverSource)
+update := got.(kubetypes.PodUpdate)
+expected := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource)
 if !api.Semantic.DeepEqual(expected, update) {
 t.Errorf("Expected %#v; Got %#v", expected, update)
 }

@@ -25,7 +25,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/latest"
 "k8s.io/kubernetes/pkg/api/validation"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/types"
 "k8s.io/kubernetes/pkg/util"
 utilyaml "k8s.io/kubernetes/pkg/util/yaml"
@@ -56,7 +56,7 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName string) er
 glog.V(5).Infof("Generated Name %q for UID %q from URL %s", pod.Name, pod.UID, source)

 if pod.Namespace == "" {
-pod.Namespace = kubeletTypes.NamespaceDefault
+pod.Namespace = kubetypes.NamespaceDefault
 }
 glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source)

@@ -26,7 +26,7 @@ import (
 "k8s.io/kubernetes/pkg/api/validation"
 "k8s.io/kubernetes/pkg/client/record"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 kubeletUtil "k8s.io/kubernetes/pkg/kubelet/util"
 "k8s.io/kubernetes/pkg/util/config"
 utilerrors "k8s.io/kubernetes/pkg/util/errors"
@@ -59,7 +59,7 @@ type PodConfig struct {
 mux *config.Mux

 // the channel of denormalized changes passed to listeners
-updates chan kubeletTypes.PodUpdate
+updates chan kubetypes.PodUpdate

 // contains the list of all configured sources
 sourcesLock sync.Mutex
@@ -69,7 +69,7 @@ type PodConfig struct {
 // NewPodConfig creates an object that can merge many configuration sources into a stream
 // of normalized updates to a pod configuration.
 func NewPodConfig(mode PodConfigNotificationMode, recorder record.EventRecorder) *PodConfig {
-updates := make(chan kubeletTypes.PodUpdate, 50)
+updates := make(chan kubetypes.PodUpdate, 50)
 storage := newPodStorage(updates, mode, recorder)
 podConfig := &PodConfig{
 pods: storage,
@@ -100,7 +100,7 @@ func (c *PodConfig) SeenAllSources(seenSources sets.String) bool {
 }

 // Updates returns a channel of updates to the configuration, properly denormalized.
-func (c *PodConfig) Updates() <-chan kubeletTypes.PodUpdate {
+func (c *PodConfig) Updates() <-chan kubetypes.PodUpdate {
 return c.updates
 }

@@ -122,7 +122,7 @@ type podStorage struct {
 // ensures that updates are delivered in strict order
 // on the updates channel
 updateLock sync.Mutex
-updates chan<- kubeletTypes.PodUpdate
+updates chan<- kubetypes.PodUpdate

 // contains the set of all sources that have sent at least one SET
 sourcesSeenLock sync.Mutex
@@ -135,7 +135,7 @@ type podStorage struct {
 // TODO: PodConfigNotificationMode could be handled by a listener to the updates channel
 // in the future, especially with multiple listeners.
 // TODO: allow initialization of the current state of the store with snapshotted version.
-func newPodStorage(updates chan<- kubeletTypes.PodUpdate, mode PodConfigNotificationMode, recorder record.EventRecorder) *podStorage {
+func newPodStorage(updates chan<- kubetypes.PodUpdate, mode PodConfigNotificationMode, recorder record.EventRecorder) *podStorage {
 return &podStorage{
 pods: make(map[string]map[string]*api.Pod),
 mode: mode,
@@ -172,12 +172,12 @@ func (s *podStorage) Merge(source string, change interface{}) error {
 s.updates <- *updates
 }
 if len(deletes.Pods) > 0 || len(adds.Pods) > 0 {
-s.updates <- kubeletTypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubeletTypes.SET, Source: source}
+s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubetypes.SET, Source: source}
 }

 case PodConfigNotificationSnapshot:
 if len(updates.Pods) > 0 || len(deletes.Pods) > 0 || len(adds.Pods) > 0 {
-s.updates <- kubeletTypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubeletTypes.SET, Source: source}
+s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubetypes.SET, Source: source}
 }

 case PodConfigNotificationUnknown:
@@ -189,23 +189,23 @@ func (s *podStorage) Merge(source string, change interface{}) error {
 return nil
 }

-func (s *podStorage) merge(source string, change interface{}) (adds, updates, deletes *kubeletTypes.PodUpdate) {
+func (s *podStorage) merge(source string, change interface{}) (adds, updates, deletes *kubetypes.PodUpdate) {
 s.podLock.Lock()
 defer s.podLock.Unlock()

-adds = &kubeletTypes.PodUpdate{Op: kubeletTypes.ADD, Source: source}
-updates = &kubeletTypes.PodUpdate{Op: kubeletTypes.UPDATE, Source: source}
-deletes = &kubeletTypes.PodUpdate{Op: kubeletTypes.REMOVE, Source: source}
+adds = &kubetypes.PodUpdate{Op: kubetypes.ADD, Source: source}
+updates = &kubetypes.PodUpdate{Op: kubetypes.UPDATE, Source: source}
+deletes = &kubetypes.PodUpdate{Op: kubetypes.REMOVE, Source: source}

 pods := s.pods[source]
 if pods == nil {
 pods = make(map[string]*api.Pod)
 }

-update := change.(kubeletTypes.PodUpdate)
+update := change.(kubetypes.PodUpdate)
 switch update.Op {
-case kubeletTypes.ADD, kubeletTypes.UPDATE:
-if update.Op == kubeletTypes.ADD {
+case kubetypes.ADD, kubetypes.UPDATE:
+if update.Op == kubetypes.ADD {
 glog.V(4).Infof("Adding new pods from source %s : %v", source, update.Pods)
 } else {
 glog.V(4).Infof("Updating pods from source %s : %v", source, update.Pods)
@@ -218,7 +218,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 if ref.Annotations == nil {
 ref.Annotations = make(map[string]string)
 }
-ref.Annotations[kubeletTypes.ConfigSourceAnnotationKey] = source
+ref.Annotations[kubetypes.ConfigSourceAnnotationKey] = source
 if existing, found := pods[name]; found {
 if checkAndUpdatePod(existing, ref) {
 // this is an update
@@ -234,7 +234,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 adds.Pods = append(adds.Pods, ref)
 }

-case kubeletTypes.REMOVE:
+case kubetypes.REMOVE:
 glog.V(4).Infof("Removing a pod %v", update)
 for _, value := range update.Pods {
 name := kubecontainer.GetPodFullName(value)
@@ -247,7 +247,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 // this is a no-op
 }

-case kubeletTypes.SET:
+case kubetypes.SET:
 glog.V(4).Infof("Setting pods for source %s", source)
 s.markSourceSet(source)
 // Clear the old map entries by just creating a new map
@@ -261,7 +261,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 if ref.Annotations == nil {
 ref.Annotations = make(map[string]string)
 }
-ref.Annotations[kubeletTypes.ConfigSourceAnnotationKey] = source
+ref.Annotations[kubetypes.ConfigSourceAnnotationKey] = source
 if existing, found := oldPods[name]; found {
 pods[name] = existing
 if checkAndUpdatePod(existing, ref) {
@@ -335,9 +335,9 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco

 // Annotations that the kubelet adds to the pod.
 var localAnnotations = []string{
-kubeletTypes.ConfigSourceAnnotationKey,
-kubeletTypes.ConfigMirrorAnnotationKey,
-kubeletTypes.ConfigFirstSeenAnnotationKey,
+kubetypes.ConfigSourceAnnotationKey,
+kubetypes.ConfigMirrorAnnotationKey,
+kubetypes.ConfigFirstSeenAnnotationKey,
 }

 func isLocalAnnotationKey(key string) bool {
@@ -379,7 +379,7 @@ func isAnnotationMapEqual(existingMap, candidateMap map[string]string) bool {
 // recordFirstSeenTime records the first seen time of this pod.
 func recordFirstSeenTime(pod *api.Pod) {
 glog.V(4).Infof("Receiving a new pod %q", kubeletUtil.FormatPodName(pod))
-pod.Annotations[kubeletTypes.ConfigFirstSeenAnnotationKey] = kubeletTypes.NewTimestamp().GetString()
+pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = kubetypes.NewTimestamp().GetString()
 }

 // updateAnnotations returns an Annotation map containing the api annotation map plus
@@ -420,7 +420,7 @@ func checkAndUpdatePod(existing, ref *api.Pod) bool {

 // Overwrite the first-seen time with the existing one. This is our own
 // internal annotation, there is no need to update.
-ref.Annotations[kubeletTypes.ConfigFirstSeenAnnotationKey] = existing.Annotations[kubeletTypes.ConfigFirstSeenAnnotationKey]
+ref.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = existing.Annotations[kubetypes.ConfigFirstSeenAnnotationKey]

 existing.Spec = ref.Spec
 existing.Labels = ref.Labels
@@ -434,7 +434,7 @@ func checkAndUpdatePod(existing, ref *api.Pod) bool {
 func (s *podStorage) Sync() {
 s.updateLock.Lock()
 defer s.updateLock.Unlock()
-s.updates <- kubeletTypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubeletTypes.SET, Source: kubeletTypes.AllSource}
+s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubetypes.SET, Source: kubetypes.AllSource}
 }

 // Object implements config.Accessor

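Illustrative sketch (not part of the diff), assuming the PodConfig API visible in the config.go hunks above: a caller creates the merged config, registers per-source channels, and consumes denormalized kubetypes.PodUpdate values from Updates(). The recorder variable is assumed to be an EventRecorder available in scope.

cfg := config.NewPodConfig(config.PodConfigNotificationIncremental, recorder)
fileCh := cfg.Channel(kubetypes.FileSource)     // chan<- interface{} fed by a file source
apiCh := cfg.Channel(kubetypes.ApiserverSource) // chan<- interface{} fed by the apiserver source
_, _ = fileCh, apiCh
for update := range cfg.Updates() { // <-chan kubetypes.PodUpdate
	// update.Op is one of kubetypes.ADD, UPDATE, REMOVE or SET; update.Source names the origin
	_ = update
}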
@@ -23,7 +23,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/client/record"
 "k8s.io/kubernetes/pkg/conversion"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/securitycontext"
 "k8s.io/kubernetes/pkg/types"
 )
@@ -74,11 +74,11 @@ func CreateValidPod(name, namespace string) *api.Pod {
 }
 }

-func CreatePodUpdate(op kubeletTypes.PodOperation, source string, pods ...*api.Pod) kubeletTypes.PodUpdate {
-return kubeletTypes.PodUpdate{Pods: pods, Op: op, Source: source}
+func CreatePodUpdate(op kubetypes.PodOperation, source string, pods ...*api.Pod) kubetypes.PodUpdate {
+return kubetypes.PodUpdate{Pods: pods, Op: op, Source: source}
 }

-func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubeletTypes.PodUpdate, *PodConfig) {
+func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubetypes.PodUpdate, *PodConfig) {
 eventBroadcaster := record.NewBroadcaster()
 config := NewPodConfig(mode, eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet"}))
 channel := config.Channel(TestSource)
@@ -86,7 +86,7 @@ func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{},
 return channel, ch, config
 }

-func expectPodUpdate(t *testing.T, ch <-chan kubeletTypes.PodUpdate, expected ...kubeletTypes.PodUpdate) {
+func expectPodUpdate(t *testing.T, ch <-chan kubetypes.PodUpdate, expected ...kubetypes.PodUpdate) {
 for i := range expected {
 update := <-ch
 sort.Sort(sortedPods(update.Pods))
@@ -113,7 +113,7 @@ func expectPodUpdate(t *testing.T, ch <-chan kubeletTypes.PodUpdate, expected ..
 expectNoPodUpdate(t, ch)
 }

-func expectNoPodUpdate(t *testing.T, ch <-chan kubeletTypes.PodUpdate) {
+func expectNoPodUpdate(t *testing.T, ch <-chan kubetypes.PodUpdate) {
 select {
 case update := <-ch:
 t.Errorf("Expected no update in channel, Got %#v", update)
@@ -125,63 +125,63 @@ func TestNewPodAdded(t *testing.T) {
 channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)

 // see an update
-podUpdate := CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "new"))
+podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "new")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new")))

 config.Sync()
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.SET, kubeletTypes.AllSource, CreateValidPod("foo", "new")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, kubetypes.AllSource, CreateValidPod("foo", "new")))
 }

 func TestNewPodAddedInvalidNamespace(t *testing.T) {
 channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)

 // see an update
-podUpdate := CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", ""))
+podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", ""))
 channel <- podUpdate
 config.Sync()
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.SET, kubeletTypes.AllSource))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, kubetypes.AllSource))
 }

 func TestNewPodAddedDefaultNamespace(t *testing.T) {
 channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)

 // see an update
-podUpdate := CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "default"))
+podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "default"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "default")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "default")))

 config.Sync()
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.SET, kubeletTypes.AllSource, CreateValidPod("foo", "default")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, kubetypes.AllSource, CreateValidPod("foo", "default")))
 }

 func TestNewPodAddedDifferentNamespaces(t *testing.T) {
 channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)

 // see an update
-podUpdate := CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "default"))
+podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "default"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "default")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "default")))

 // see an update in another namespace
-podUpdate = CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "new"))
+podUpdate = CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "new")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new")))

 config.Sync()
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.SET, kubeletTypes.AllSource, CreateValidPod("foo", "default"), CreateValidPod("foo", "new")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, kubetypes.AllSource, CreateValidPod("foo", "default"), CreateValidPod("foo", "new")))
 }

 func TestInvalidPodFiltered(t *testing.T) {
 channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

 // see an update
-podUpdate := CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "new"))
+podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "new")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new")))

 // add an invalid update
-podUpdate = CreatePodUpdate(kubeletTypes.UPDATE, TestSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
+podUpdate = CreatePodUpdate(kubetypes.UPDATE, TestSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
 channel <- podUpdate
 expectNoPodUpdate(t, ch)
 }
@@ -190,81 +190,81 @@ func TestNewPodAddedSnapshotAndUpdates(t *testing.T) {
 channel, ch, config := createPodConfigTester(PodConfigNotificationSnapshotAndUpdates)

 // see an set
-podUpdate := CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "new"))
+podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.SET, TestSource, CreateValidPod("foo", "new")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo", "new")))

 config.Sync()
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.SET, kubeletTypes.AllSource, CreateValidPod("foo", "new")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, kubetypes.AllSource, CreateValidPod("foo", "new")))

 // container updates are separated as UPDATE
 pod := *podUpdate.Pods[0]
 pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
-channel <- CreatePodUpdate(kubeletTypes.ADD, TestSource, &pod)
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.UPDATE, TestSource, &pod))
+channel <- CreatePodUpdate(kubetypes.ADD, TestSource, &pod)
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, &pod))
 }

 func TestNewPodAddedSnapshot(t *testing.T) {
 channel, ch, config := createPodConfigTester(PodConfigNotificationSnapshot)

 // see an set
-podUpdate := CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "new"))
+podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.SET, TestSource, CreateValidPod("foo", "new")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo", "new")))

 config.Sync()
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.SET, kubeletTypes.AllSource, CreateValidPod("foo", "new")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, kubetypes.AllSource, CreateValidPod("foo", "new")))

 // container updates are separated as UPDATE
 pod := *podUpdate.Pods[0]
 pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
-channel <- CreatePodUpdate(kubeletTypes.ADD, TestSource, &pod)
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.SET, TestSource, &pod))
+channel <- CreatePodUpdate(kubetypes.ADD, TestSource, &pod)
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, TestSource, &pod))
 }

 func TestNewPodAddedUpdatedRemoved(t *testing.T) {
 channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

 // should register an add
-podUpdate := CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "new"))
+podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "new")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new")))

 // should ignore ADDs that are identical
 expectNoPodUpdate(t, ch)

-// an kubeletTypes.ADD should be converted to kubeletTypes.UPDATE
+// an kubetypes.ADD should be converted to kubetypes.UPDATE
 pod := CreateValidPod("foo", "new")
 pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
-podUpdate = CreatePodUpdate(kubeletTypes.ADD, TestSource, pod)
+podUpdate = CreatePodUpdate(kubetypes.ADD, TestSource, pod)
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.UPDATE, TestSource, pod))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod))

-podUpdate = CreatePodUpdate(kubeletTypes.REMOVE, TestSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}})
+podUpdate = CreatePodUpdate(kubetypes.REMOVE, TestSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}})
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.REMOVE, TestSource, pod))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.REMOVE, TestSource, pod))
 }

 func TestNewPodAddedUpdatedSet(t *testing.T) {
 channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

 // should register an add
-podUpdate := CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "new"), CreateValidPod("foo2", "new"), CreateValidPod("foo3", "new"))
+podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"), CreateValidPod("foo2", "new"), CreateValidPod("foo3", "new"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo", "new"), CreateValidPod("foo2", "new"), CreateValidPod("foo3", "new")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"), CreateValidPod("foo2", "new"), CreateValidPod("foo3", "new")))

 // should ignore ADDs that are identical
 expectNoPodUpdate(t, ch)

-// should be converted to an kubeletTypes.ADD, kubeletTypes.REMOVE, and kubeletTypes.UPDATE
+// should be converted to an kubetypes.ADD, kubetypes.REMOVE, and kubetypes.UPDATE
 pod := CreateValidPod("foo2", "new")
 pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
-podUpdate = CreatePodUpdate(kubeletTypes.SET, TestSource, pod, CreateValidPod("foo3", "new"), CreateValidPod("foo4", "new"))
+podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, pod, CreateValidPod("foo3", "new"), CreateValidPod("foo4", "new"))
 channel <- podUpdate
 expectPodUpdate(t, ch,
-CreatePodUpdate(kubeletTypes.REMOVE, TestSource, CreateValidPod("foo", "new")),
-CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo4", "new")),
-CreatePodUpdate(kubeletTypes.UPDATE, TestSource, pod))
+CreatePodUpdate(kubetypes.REMOVE, TestSource, CreateValidPod("foo", "new")),
+CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo4", "new")),
+CreatePodUpdate(kubetypes.UPDATE, TestSource, pod))
 }

 func TestPodUpdateAnnotations(t *testing.T) {
@@ -279,24 +279,24 @@ func TestPodUpdateAnnotations(t *testing.T) {
 t.Fatalf("%v", err)
 }

-podUpdate := CreatePodUpdate(kubeletTypes.SET, TestSource, CreateValidPod("foo1", "new"), clone.(*api.Pod), CreateValidPod("foo3", "new"))
+podUpdate := CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo1", "new"), clone.(*api.Pod), CreateValidPod("foo3", "new"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.ADD, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new")))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new")))

 pod.Annotations["kubenetes.io/blah"] = "superblah"
-podUpdate = CreatePodUpdate(kubeletTypes.SET, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
+podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.UPDATE, TestSource, pod))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod))

 pod.Annotations["kubernetes.io/otherblah"] = "doh"
-podUpdate = CreatePodUpdate(kubeletTypes.SET, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
+podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.UPDATE, TestSource, pod))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod))

 delete(pod.Annotations, "kubernetes.io/blah")
-podUpdate = CreatePodUpdate(kubeletTypes.SET, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
+podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.UPDATE, TestSource, pod))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod))
 }

 func TestPodUpdateLables(t *testing.T) {
@@ -311,13 +311,13 @@ func TestPodUpdateLables(t *testing.T) {
 t.Fatalf("%v", err)
 }

-podUpdate := CreatePodUpdate(kubeletTypes.SET, TestSource, clone.(*api.Pod))
+podUpdate := CreatePodUpdate(kubetypes.SET, TestSource, clone.(*api.Pod))
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.ADD, TestSource, pod))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, pod))

 pod.Labels["key"] = "newValue"
-podUpdate = CreatePodUpdate(kubeletTypes.SET, TestSource, pod)
+podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, pod)
 channel <- podUpdate
-expectPodUpdate(t, ch, CreatePodUpdate(kubeletTypes.UPDATE, TestSource, pod))
+expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod))

 }

@@ -26,7 +26,7 @@ import (
 "time"

 "k8s.io/kubernetes/pkg/api"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/util"

 "github.com/golang/glog"
@@ -66,7 +66,7 @@ func (s *sourceFile) extractFromPath() error {
 return err
 }
 // Emit an update with an empty PodList to allow FileSource to be marked as seen
-s.updates <- kubeletTypes.PodUpdate{Pods: []*api.Pod{}, Op: kubeletTypes.SET, Source: kubeletTypes.FileSource}
+s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource}
 return fmt.Errorf("path does not exist, ignoring")
 }

@@ -76,14 +76,14 @@ func (s *sourceFile) extractFromPath() error {
 if err != nil {
 return err
 }
-s.updates <- kubeletTypes.PodUpdate{Pods: pods, Op: kubeletTypes.SET, Source: kubeletTypes.FileSource}
+s.updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.FileSource}

 case statInfo.Mode().IsRegular():
 pod, err := s.extractFromFile(path)
 if err != nil {
 return err
 }
-s.updates <- kubeletTypes.PodUpdate{Pods: []*api.Pod{pod}, Op: kubeletTypes.SET, Source: kubeletTypes.FileSource}
+s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{pod}, Op: kubetypes.SET, Source: kubetypes.FileSource}

 default:
 return fmt.Errorf("path is not a directory or file")

@@ -26,7 +26,7 @@ import (
 "k8s.io/kubernetes/pkg/api/testapi"
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/api/validation"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/securitycontext"
 "k8s.io/kubernetes/pkg/util"
@@ -46,8 +46,8 @@ func TestUpdateOnNonExistentFile(t *testing.T) {
 NewSourceFile("random_non_existent_path", "localhost", time.Millisecond, ch)
 select {
 case got := <-ch:
-update := got.(kubeletTypes.PodUpdate)
-expected := CreatePodUpdate(kubeletTypes.SET, kubeletTypes.FileSource)
+update := got.(kubetypes.PodUpdate)
+expected := CreatePodUpdate(kubetypes.SET, kubetypes.FileSource)
 if !api.Semantic.DeepDerivative(expected, update) {
 t.Fatalf("Expected %#v, Got %#v", expected, update)
 }
@@ -75,7 +75,7 @@ func TestReadPodsFromFile(t *testing.T) {
 var testCases = []struct {
 desc string
 pod runtime.Object
-expected kubeletTypes.PodUpdate
+expected kubetypes.PodUpdate
 }{
 {
 desc: "Simple pod",
@@ -94,7 +94,7 @@ func TestReadPodsFromFile(t *testing.T) {
 SecurityContext: &api.PodSecurityContext{},
 },
 },
-expected: CreatePodUpdate(kubeletTypes.SET, kubeletTypes.FileSource, &api.Pod{
+expected: CreatePodUpdate(kubetypes.SET, kubetypes.FileSource, &api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: "test-" + hostname,
 UID: "12345",
@@ -137,7 +137,7 @@ func TestReadPodsFromFile(t *testing.T) {
 NewSourceFile(file.Name(), hostname, time.Millisecond, ch)
 select {
 case got := <-ch:
-update := got.(kubeletTypes.PodUpdate)
+update := got.(kubetypes.PodUpdate)
 for _, pod := range update.Pods {
 if errs := validation.ValidatePod(pod); len(errs) > 0 {
 t.Errorf("%s: Invalid pod %#v, %#v", testCase.desc, pod, errs)
@@ -180,8 +180,8 @@ func TestExtractFromEmptyDir(t *testing.T) {
 t.Fatalf("Unexpected error: %v", err)
 }

-update := (<-ch).(kubeletTypes.PodUpdate)
-expected := CreatePodUpdate(kubeletTypes.SET, kubeletTypes.FileSource)
+update := (<-ch).(kubetypes.PodUpdate)
+expected := CreatePodUpdate(kubetypes.SET, kubetypes.FileSource)
 if !api.Semantic.DeepEqual(expected, update) {
 t.Errorf("Expected %#v, Got %#v", expected, update)
 }

@@ -25,7 +25,7 @@ import (
 "time"

 "k8s.io/kubernetes/pkg/api"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/util"

 "github.com/golang/glog"
@@ -95,7 +95,7 @@ func (s *sourceURL) extractFromURL() error {
 }
 if len(data) == 0 {
 // Emit an update with an empty PodList to allow HTTPSource to be marked as seen
-s.updates <- kubeletTypes.PodUpdate{Pods: []*api.Pod{}, Op: kubeletTypes.SET, Source: kubeletTypes.HTTPSource}
+s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{}, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
 return fmt.Errorf("zero-length data received from %v", s.url)
 }
 // Short circuit if the data has not changed since the last time it was read.
@@ -111,7 +111,7 @@ func (s *sourceURL) extractFromURL() error {
 // It parsed but could not be used.
 return singlePodErr
 }
-s.updates <- kubeletTypes.PodUpdate{Pods: []*api.Pod{pod}, Op: kubeletTypes.SET, Source: kubeletTypes.HTTPSource}
+s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{pod}, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
 return nil
 }

@@ -126,7 +126,7 @@ func (s *sourceURL) extractFromURL() error {
 for i := range podList.Items {
 pods = append(pods, &podList.Items[i])
 }
-s.updates <- kubeletTypes.PodUpdate{Pods: pods, Op: kubeletTypes.SET, Source: kubeletTypes.HTTPSource}
+s.updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
 return nil
 }

@@ -27,7 +27,7 @@ import (
 "k8s.io/kubernetes/pkg/api/testapi"
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/api/validation"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/util"
 "k8s.io/kubernetes/pkg/util/errors"
@@ -128,7 +128,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 var testCases = []struct {
 desc string
 pods runtime.Object
-expected kubeletTypes.PodUpdate
+expected kubetypes.PodUpdate
 }{
 {
 desc: "Single pod",
@@ -148,8 +148,8 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 SecurityContext: &api.PodSecurityContext{},
 },
 },
-expected: CreatePodUpdate(kubeletTypes.SET,
-kubeletTypes.HTTPSource,
+expected: CreatePodUpdate(kubetypes.SET,
+kubetypes.HTTPSource,
 &api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
@@ -206,15 +206,15 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 },
 },
 },
-expected: CreatePodUpdate(kubeletTypes.SET,
-kubeletTypes.HTTPSource,
+expected: CreatePodUpdate(kubetypes.SET,
+kubetypes.HTTPSource,
 &api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
 Name: "foo" + "-" + hostname,
 Namespace: "default",

-SelfLink: getSelfLink("foo-"+hostname, kubeletTypes.NamespaceDefault),
+SelfLink: getSelfLink("foo-"+hostname, kubetypes.NamespaceDefault),
 },
 Spec: api.PodSpec{
 NodeName: hostname,
@@ -237,7 +237,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 Name: "bar" + "-" + hostname,
 Namespace: "default",

-SelfLink: getSelfLink("bar-"+hostname, kubeletTypes.NamespaceDefault),
+SelfLink: getSelfLink("bar-"+hostname, kubetypes.NamespaceDefault),
 },
 Spec: api.PodSpec{
 NodeName: hostname,
@@ -279,7 +279,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 t.Errorf("%s: Unexpected error: %v", testCase.desc, err)
 continue
 }
-update := (<-ch).(kubeletTypes.PodUpdate)
+update := (<-ch).(kubetypes.PodUpdate)

 if !api.Semantic.DeepEqual(testCase.expected, update) {
 t.Errorf("%s: Expected: %#v, Got: %#v", testCase.desc, testCase.expected, update)
@@ -325,7 +325,7 @@ func TestURLWithHeader(t *testing.T) {
 if err := c.extractFromURL(); err != nil {
 t.Fatalf("Unexpected error extracting from URL: %v", err)
 }
-update := (<-ch).(kubeletTypes.PodUpdate)
+update := (<-ch).(kubetypes.PodUpdate)

 headerVal := fakeHandler.RequestReceived.Header["Metadata-Flavor"]
 if len(headerVal) != 1 || headerVal[0] != "Google" {

@@ -21,7 +21,7 @@ import (

 docker "github.com/fsouza/go-dockerclient"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )

 // This file contains helper functions to convert docker API types to runtime
@@ -38,7 +38,7 @@ func toRuntimeContainer(c *docker.APIContainers) (*kubecontainer.Container, erro
 return nil, err
 }
 return &kubecontainer.Container{
-ID: kubeletTypes.DockerID(c.ID).ContainerID(),
+ID: kubetypes.DockerID(c.ID).ContainerID(),
 Name: dockerName.ContainerName,
 Image: c.Image,
 Hash: hash,

@@ -32,7 +32,7 @@ import (
 "k8s.io/kubernetes/pkg/credentialprovider"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/kubelet/leaky"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/types"
 "k8s.io/kubernetes/pkg/util"
 utilerrors "k8s.io/kubernetes/pkg/util/errors"
@@ -212,7 +212,7 @@ func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {
 }

 // DockerContainers is a map of containers
-type DockerContainers map[kubeletTypes.DockerID]*docker.APIContainers
+type DockerContainers map[kubetypes.DockerID]*docker.APIContainers

 func (c DockerContainers) FindPodContainer(podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {
 for _, dockerContainer := range c {
@@ -368,7 +368,7 @@ func GetKubeletDockerContainers(client DockerInterface, allContainers bool) (Doc
 glog.V(3).Infof("Docker Container: %s is not managed by kubelet.", container.Names[0])
 continue
 }
-result[kubeletTypes.DockerID(container.ID)] = container
+result[kubetypes.DockerID(container.ID)] = container
 }
 return result, nil
 }

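Illustrative sketch (not part of the diff): the dockertools hunks also move the DockerID helper to the new alias; the test below relies on the resulting container ID exposing the raw docker ID string:

containerID := kubetypes.DockerID("1234").ContainerID() // wraps the raw docker ID
_ = containerID.ID                                      // "1234", as used in TestDockerContainerCommand below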
@@ -34,7 +34,7 @@ import (
 "k8s.io/kubernetes/pkg/credentialprovider"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/kubelet/network"
-kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/types"
 "k8s.io/kubernetes/pkg/util"
 )
@@ -172,7 +172,7 @@ func TestExecSupportNotExists(t *testing.T) {

 func TestDockerContainerCommand(t *testing.T) {
 runner := &DockerManager{}
-containerID := kubeletTypes.DockerID("1234").ContainerID()
+containerID := kubetypes.DockerID("1234").ContainerID()
 command := []string{"ls"}
 cmd, _ := runner.getRunInContainerCommand(containerID, command)
 if cmd.Dir != "/var/lib/docker/execdriver/native/"+containerID.ID {
@@ -561,12 +561,12 @@ func TestFindContainersByPod(t *testing.T) {
 Namespace: "ns",
 Containers: []*kubecontainer.Container{
 {
-ID: kubeletTypes.DockerID("foobar").ContainerID(),
+ID: kubetypes.DockerID("foobar").ContainerID(),
 Name: "foobar",
 Hash: 0x1234,
 },
 {
-ID: kubeletTypes.DockerID("baz").ContainerID(),
+ID: kubetypes.DockerID("baz").ContainerID(),
 Name: "baz",
 Hash: 0x1234,
 },
@@ -578,7 +578,7 @@ func TestFindContainersByPod(t *testing.T) {
 Namespace: "ns",
 Containers: []*kubecontainer.Container{
 {
-ID: kubeletTypes.DockerID("barbar").ContainerID(),
+ID: kubetypes.DockerID("barbar").ContainerID(),
 Name: "barbar",
 Hash: 0x1234,
 },
@@ -619,17 +619,17 @@ func TestFindContainersByPod(t *testing.T) {
 Namespace: "ns",
 Containers: []*kubecontainer.Container{
 {
-ID: kubeletTypes.DockerID("foobar").ContainerID(),
+ID: kubetypes.DockerID("foobar").ContainerID(),
 Name: "foobar",
 Hash: 0x1234,
 },
 {
-ID: kubeletTypes.DockerID("barfoo").ContainerID(),
+ID: kubetypes.DockerID("barfoo").ContainerID(),
 Name: "barfoo",
 Hash: 0x1234,
 },
 {
-ID: kubeletTypes.DockerID("baz").ContainerID(),
+ID: kubetypes.DockerID("baz").ContainerID(),
 Name: "baz",
 Hash: 0x1234,
 },
@@ -641,7 +641,7 @@ func TestFindContainersByPod(t *testing.T) {
 Namespace: "ns",
 Containers: []*kubecontainer.Container{
 {
-ID: kubeletTypes.DockerID("barbar").ContainerID(),
+ID: kubetypes.DockerID("barbar").ContainerID(),
 Name: "barbar",
 Hash: 0x1234,
 },
@@ -653,7 +653,7 @@ func TestFindContainersByPod(t *testing.T) {
 Namespace: "ns",
 Containers: []*kubecontainer.Container{
 {
-ID: kubeletTypes.DockerID("bazbaz").ContainerID(),
+ID: kubetypes.DockerID("bazbaz").ContainerID(),
 Name: "bazbaz",
 Hash: 0x1234,
 },

@ -22,7 +22,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/kubelet/prober"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/procfs"

@ -41,7 +41,7 @@ func NewFakeDockerManager(
osInterface kubecontainer.OSInterface,
networkPlugin network.NetworkPlugin,
generator kubecontainer.RunContainerOptionsGenerator,
httpClient kubeletTypes.HttpGetter, imageBackOff *util.Backoff) *DockerManager {
httpClient kubetypes.HttpGetter, imageBackOff *util.Backoff) *DockerManager {
fakeOOMAdjuster := oom.NewFakeOOMAdjuster()
fakeProcFs := procfs.NewFakeProcFs()
@ -46,7 +46,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/network/hairpin"
"k8s.io/kubernetes/pkg/kubelet/prober"
"k8s.io/kubernetes/pkg/kubelet/qos"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/kubernetes/pkg/types"

@ -157,7 +157,7 @@ func NewDockerManager(
osInterface kubecontainer.OSInterface,
networkPlugin network.NetworkPlugin,
generator kubecontainer.RunContainerOptionsGenerator,
httpClient kubeletTypes.HttpGetter,
httpClient kubetypes.HttpGetter,
execHandler ExecHandler,
oomAdjuster *oom.OOMAdjuster,
procFs procfs.ProcFsInterface,

@ -343,7 +343,7 @@ func (dm *DockerManager) inspectContainer(dockerID, containerName, tPath string,
}
// override the above if a network plugin exists
if dm.networkPlugin.Name() != network.DefaultPluginName {
netStatus, err := dm.networkPlugin.Status(pod.Namespace, pod.Name, kubeletTypes.DockerID(dockerID))
netStatus, err := dm.networkPlugin.Status(pod.Namespace, pod.Name, kubetypes.DockerID(dockerID))
if err != nil {
glog.Errorf("NetworkPlugin %s failed on the status hook for pod '%s' - %v", dm.networkPlugin.Name(), pod.Name, err)
} else if netStatus != nil {

@ -573,7 +573,7 @@ func (dm *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) {
// Sort the container statuses since clients of this interface expect the list
// of containers in a pod to behave like the output of `docker list`, which has a
// deterministic order.
sort.Sort(kubeletTypes.SortedContainerStatuses(podStatus.ContainerStatuses))
sort.Sort(kubetypes.SortedContainerStatuses(podStatus.ContainerStatuses))
return &podStatus, nil
}

@ -808,7 +808,7 @@ func (dm *DockerManager) runContainer(
if ref != nil {
dm.recorder.Eventf(ref, "Started", "Started with docker id %v", util.ShortenString(dockerContainer.ID, 12))
}
return kubeletTypes.DockerID(dockerContainer.ID).ContainerID(), nil
return kubetypes.DockerID(dockerContainer.ID).ContainerID(), nil
}
func setEntrypointAndCommand(container *api.Container, opts *kubecontainer.RunContainerOptions, dockerOpts *docker.CreateContainerOptions) {
@ -1296,7 +1296,7 @@ func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) err
}
wg.Wait()
if networkContainer != nil {
if err := dm.networkPlugin.TearDownPod(runningPod.Namespace, runningPod.Name, kubeletTypes.DockerID(networkContainer.ID.ID)); err != nil {
if err := dm.networkPlugin.TearDownPod(runningPod.Namespace, runningPod.Name, kubetypes.DockerID(networkContainer.ID.ID)); err != nil {
glog.Errorf("Failed tearing down the infra container: %v", err)
errs <- err
}

@ -1585,7 +1585,7 @@ func appendToFile(filePath, stringToAppend string) error {
}
// createPodInfraContainer starts the pod infra container for a pod. Returns the docker container ID of the newly created container.
func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubeletTypes.DockerID, error) {
func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubetypes.DockerID, error) {
start := time.Now()
defer func() {
metrics.ContainerManagerLatency.WithLabelValues("createPodInfraContainer").Observe(metrics.SinceInMicroseconds(start))

@ -1626,7 +1626,7 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubeletTypes.Doc
return "", err
}
return kubeletTypes.DockerID(id.ID), nil
return kubetypes.DockerID(id.ID), nil
}
// TODO(vmarmol): This will soon be made non-public when its only use is internal.

@ -1643,9 +1643,9 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubeletTypes.Doc
type empty struct{}
type PodContainerChangesSpec struct {
StartInfraContainer bool
InfraContainerId kubeletTypes.DockerID
InfraContainerId kubetypes.DockerID
ContainersToStart map[int]empty
ContainersToKeep map[kubeletTypes.DockerID]int
ContainersToKeep map[kubetypes.DockerID]int
}
func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus) (PodContainerChangesSpec, error) {
@ -1659,10 +1659,10 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
glog.V(4).Infof("Syncing Pod %+v, podFullName: %q, uid: %q", pod, podFullName, uid)
containersToStart := make(map[int]empty)
containersToKeep := make(map[kubeletTypes.DockerID]int)
containersToKeep := make(map[kubetypes.DockerID]int)
var err error
var podInfraContainerID kubeletTypes.DockerID
var podInfraContainerID kubetypes.DockerID
var changed bool
podInfraContainer := runningPod.FindContainerByName(PodInfraContainerName)
if podInfraContainer != nil {

@ -1681,7 +1681,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
} else {
glog.V(4).Infof("Pod infra container looks good, keep it %q", podFullName)
createPodInfraContainer = false
podInfraContainerID = kubeletTypes.DockerID(podInfraContainer.ID.ID)
podInfraContainerID = kubetypes.DockerID(podInfraContainer.ID.ID)
containersToKeep[podInfraContainerID] = -1
}

@ -1700,7 +1700,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
continue
}
containerID := kubeletTypes.DockerID(c.ID.ID)
containerID := kubetypes.DockerID(c.ID.ID)
hash := c.Hash
glog.V(3).Infof("pod %q container %q exists as %v", podFullName, container.Name, containerID)

@ -1750,7 +1750,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
// If Infra container is the last running one, we don't want to keep it.
if !createPodInfraContainer && len(containersToStart) == 0 && len(containersToKeep) == 1 {
containersToKeep = make(map[kubeletTypes.DockerID]int)
containersToKeep = make(map[kubetypes.DockerID]int)
}
return PodContainerChangesSpec{

@ -1804,7 +1804,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod
} else {
// Otherwise kill any containers in this pod which are not specified as ones to keep.
for _, container := range runningPod.Containers {
_, keep := containerChanges.ContainersToKeep[kubeletTypes.DockerID(container.ID.ID)]
_, keep := containerChanges.ContainersToKeep[kubetypes.DockerID(container.ID.ID)]
if !keep {
glog.V(3).Infof("Killing unwanted container %+v", container)
// attempt to find the appropriate container policy
@ -19,7 +19,7 @@ package kubelet
import (
"k8s.io/kubernetes/pkg/api"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
)

@ -31,12 +31,12 @@ type fakePodWorkers struct {
t TestingInterface
}
func (f *fakePodWorkers) UpdatePod(pod *api.Pod, mirrorPod *api.Pod, updateType kubeletTypes.SyncPodType, updateComplete func()) {
func (f *fakePodWorkers) UpdatePod(pod *api.Pod, mirrorPod *api.Pod, updateType kubetypes.SyncPodType, updateComplete func()) {
pods, err := f.runtimeCache.GetPods()
if err != nil {
f.t.Errorf("Unexpected error: %v", err)
}
if err := f.syncPodFn(pod, mirrorPod, kubecontainer.Pods(pods).FindPodByID(pod.UID), kubeletTypes.SyncPodUpdate); err != nil {
if err := f.syncPodFn(pod, mirrorPod, kubecontainer.Pods(pods).FindPodByID(pod.UID), kubetypes.SyncPodUpdate); err != nil {
f.t.Errorf("Unexpected error: %v", err)
}
}
@ -55,7 +55,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/prober"
"k8s.io/kubernetes/pkg/kubelet/rkt"
"k8s.io/kubernetes/pkg/kubelet/status"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
kubeletUtil "k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/probe"

@ -462,7 +462,7 @@ type Kubelet struct {
// Optional, defaults to simple Docker implementation
runner kubecontainer.ContainerCommandRunner
// Optional, client for http requests, defaults to empty client
httpClient kubeletTypes.HttpGetter
httpClient kubetypes.HttpGetter
// cAdvisor used for container information.
cadvisor cadvisor.Interface

@ -777,7 +777,7 @@ func (kl *Kubelet) StartGarbageCollection() {
}
// Run starts the kubelet reacting to config updates
func (kl *Kubelet) Run(updates <-chan kubeletTypes.PodUpdate) {
func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
if kl.logServer == nil {
kl.logServer = http.StripPrefix("/logs/", http.FileServer(http.Dir("/var/log/")))
}

@ -1248,15 +1248,15 @@ func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error {
return nil
}
func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecontainer.Pod, updateType kubeletTypes.SyncPodType) error {
func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecontainer.Pod, updateType kubetypes.SyncPodType) error {
podFullName := kubecontainer.GetPodFullName(pod)
uid := pod.UID
start := time.Now()
var firstSeenTime time.Time
if firstSeenTimeStr, ok := pod.Annotations[kubeletTypes.ConfigFirstSeenAnnotationKey]; !ok {
if firstSeenTimeStr, ok := pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey]; !ok {
glog.V(3).Infof("First seen time not recorded for pod %q", pod.UID)
} else {
firstSeenTime = kubeletTypes.ConvertToTimestamp(firstSeenTimeStr).Get()
firstSeenTime = kubetypes.ConvertToTimestamp(firstSeenTimeStr).Get()
}
// Before returning, regenerate status and store it in the cache.

@ -1332,7 +1332,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
// it's OK to pretend like the kubelet started them after it restarted.
var podStatus api.PodStatus
if updateType == kubeletTypes.SyncPodCreate {
if updateType == kubetypes.SyncPodCreate {
// This is the first time we are syncing the pod. Record the latency
// since kubelet first saw the pod if firstSeenTime is set.
if !firstSeenTime.IsZero() {
@ -1902,7 +1902,7 @@ func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, str
// any new change seen, will run a sync against desired state and running state. If
// no changes are seen to the configuration, will synchronize the last known desired
// state every sync-frequency seconds. Never returns.
func (kl *Kubelet) syncLoop(updates <-chan kubeletTypes.PodUpdate, handler SyncHandler) {
func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHandler) {
glog.Info("Starting kubelet main sync loop.")
var housekeepingTimestamp time.Time
for {

@ -1944,7 +1944,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubeletTypes.PodUpdate, handler SyncH
}
}
func (kl *Kubelet) syncLoopIteration(updates <-chan kubeletTypes.PodUpdate, handler SyncHandler) bool {
func (kl *Kubelet) syncLoopIteration(updates <-chan kubetypes.PodUpdate, handler SyncHandler) bool {
kl.syncLoopMonitor.Store(time.Now())
select {
case u, open := <-updates:

@ -1954,16 +1954,16 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan kubeletTypes.PodUpdate, hand
}
kl.addSource(u.Source)
switch u.Op {
case kubeletTypes.ADD:
case kubetypes.ADD:
glog.V(2).Infof("SyncLoop (ADD): %q", kubeletUtil.FormatPodNames(u.Pods))
handler.HandlePodAdditions(u.Pods)
case kubeletTypes.UPDATE:
case kubetypes.UPDATE:
glog.V(2).Infof("SyncLoop (UPDATE): %q", kubeletUtil.FormatPodNames(u.Pods))
handler.HandlePodUpdates(u.Pods)
case kubeletTypes.REMOVE:
case kubetypes.REMOVE:
glog.V(2).Infof("SyncLoop (REMOVE): %q", kubeletUtil.FormatPodNames(u.Pods))
handler.HandlePodDeletions(u.Pods)
case kubeletTypes.SET:
case kubetypes.SET:
// TODO: Do we want to support this?
glog.Errorf("Kubelet does not support snapshot update")
}
@ -1976,7 +1976,7 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan kubeletTypes.PodUpdate, hand
return true
}
func (kl *Kubelet) dispatchWork(pod *api.Pod, syncType kubeletTypes.SyncPodType, mirrorPod *api.Pod, start time.Time) {
func (kl *Kubelet) dispatchWork(pod *api.Pod, syncType kubetypes.SyncPodType, mirrorPod *api.Pod, start time.Time) {
if kl.podIsTerminated(pod) {
return
}

@ -1985,7 +1985,7 @@ func (kl *Kubelet) dispatchWork(pod *api.Pod, syncType kubeletTypes.SyncPodType,
metrics.PodWorkerLatency.WithLabelValues(syncType.String()).Observe(metrics.SinceInMicroseconds(start))
})
// Note the number of containers for new pods.
if syncType == kubeletTypes.SyncPodCreate {
if syncType == kubetypes.SyncPodCreate {
metrics.ContainersPerPodCount.Observe(float64(len(pod.Spec.Containers)))
}
}

@ -1996,7 +1996,7 @@ func (kl *Kubelet) handleMirrorPod(mirrorPod *api.Pod, start time.Time) {
// corresponding static pod. Send update to the pod worker if the static
// pod exists.
if pod, ok := kl.podManager.GetPodByMirrorPod(mirrorPod); ok {
kl.dispatchWork(pod, kubeletTypes.SyncPodUpdate, mirrorPod, start)
kl.dispatchWork(pod, kubetypes.SyncPodUpdate, mirrorPod, start)
}
}

@ -2021,7 +2021,7 @@ func (kl *Kubelet) HandlePodAdditions(pods []*api.Pod) {
continue
}
mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
kl.dispatchWork(pod, kubeletTypes.SyncPodCreate, mirrorPod, start)
kl.dispatchWork(pod, kubetypes.SyncPodCreate, mirrorPod, start)
kl.probeManager.AddPod(pod)
}
}

@ -2037,7 +2037,7 @@ func (kl *Kubelet) HandlePodUpdates(pods []*api.Pod) {
// TODO: Evaluate if we need to validate and reject updates.
mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
kl.dispatchWork(pod, kubeletTypes.SyncPodUpdate, mirrorPod, start)
kl.dispatchWork(pod, kubetypes.SyncPodUpdate, mirrorPod, start)
}
}

@ -2062,7 +2062,7 @@ func (kl *Kubelet) HandlePodSyncs(pods []*api.Pod) {
start := time.Now()
for _, pod := range pods {
mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
kl.dispatchWork(pod, kubeletTypes.SyncPodSync, mirrorPod, start)
kl.dispatchWork(pod, kubetypes.SyncPodSync, mirrorPod, start)
}
}
@ -47,7 +47,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/kubelet/prober"
"k8s.io/kubernetes/pkg/kubelet/status"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"

@ -313,7 +313,7 @@ func TestKubeletDirsCompat(t *testing.T) {
}
}
var emptyPodUIDs map[types.UID]kubeletTypes.SyncPodType
var emptyPodUIDs map[types.UID]kubetypes.SyncPodType
func TestSyncLoopTimeUpdate(t *testing.T) {
testKubelet := newTestKubelet(t)

@ -325,12 +325,12 @@ func TestSyncLoopTimeUpdate(t *testing.T) {
t.Errorf("Unexpected sync loop time: %s, expected 0", loopTime1)
}
kubelet.syncLoopIteration(make(chan kubeletTypes.PodUpdate), kubelet)
kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet)
loopTime2 := kubelet.LatestLoopEntryTime()
if loopTime2.IsZero() {
t.Errorf("Unexpected sync loop time: 0, expected non-zero value.")
}
kubelet.syncLoopIteration(make(chan kubeletTypes.PodUpdate), kubelet)
kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet)
loopTime3 := kubelet.LatestLoopEntryTime()
if !loopTime3.After(loopTime1) {
t.Errorf("Sync Loop Time was not updated correctly. Second update timestamp should be greater than first update timestamp")

@ -347,7 +347,7 @@ func TestSyncLoopAbort(t *testing.T) {
// the channel close
kubelet.resyncInterval = time.Second * 30
ch := make(chan kubeletTypes.PodUpdate)
ch := make(chan kubetypes.PodUpdate)
close(ch)
// sanity check (also prevent this test from hanging in the next step)
@ -2659,7 +2659,7 @@ func TestUpdateNodeStatusError(t *testing.T) {
}
func TestCreateMirrorPod(t *testing.T) {
for _, updateType := range []kubeletTypes.SyncPodType{kubeletTypes.SyncPodCreate, kubeletTypes.SyncPodUpdate} {
for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient

@ -2669,7 +2669,7 @@ func TestCreateMirrorPod(t *testing.T) {
Name: "bar",
Namespace: "foo",
Annotations: map[string]string{
kubeletTypes.ConfigSourceAnnotationKey: "file",
kubetypes.ConfigSourceAnnotationKey: "file",
},
},
}

@ -2702,7 +2702,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
Name: "foo",
Namespace: "ns",
Annotations: map[string]string{
kubeletTypes.ConfigSourceAnnotationKey: "file",
kubetypes.ConfigSourceAnnotationKey: "file",
},
},
Spec: api.PodSpec{

@ -2718,8 +2718,8 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
Name: "foo",
Namespace: "ns",
Annotations: map[string]string{
kubeletTypes.ConfigSourceAnnotationKey: "api",
kubeletTypes.ConfigMirrorAnnotationKey: "mirror",
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
Spec: api.PodSpec{

@ -2731,7 +2731,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
pods := []*api.Pod{pod, mirrorPod}
kl.podManager.SetPods(pods)
err := kl.syncPod(pod, mirrorPod, container.Pod{}, kubeletTypes.SyncPodUpdate)
err := kl.syncPod(pod, mirrorPod, container.Pod{}, kubetypes.SyncPodUpdate)
if err != nil {
t.Errorf("unexpected error: %v", err)
}

@ -2756,8 +2756,8 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
Name: "pod1",
Namespace: "ns",
Annotations: map[string]string{
kubeletTypes.ConfigSourceAnnotationKey: "api",
kubeletTypes.ConfigMirrorAnnotationKey: "mirror",
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
},

@ -2767,8 +2767,8 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
Name: "pod2",
Namespace: "ns",
Annotations: map[string]string{
kubeletTypes.ConfigSourceAnnotationKey: "api",
kubeletTypes.ConfigMirrorAnnotationKey: "mirror",
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
},
@ -2799,7 +2799,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
Name: "qux",
Namespace: "ns",
Annotations: map[string]string{
kubeletTypes.ConfigSourceAnnotationKey: "file",
kubetypes.ConfigSourceAnnotationKey: "file",
},
},
Spec: api.PodSpec{

@ -2814,8 +2814,8 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
Name: "qux",
Namespace: "ns",
Annotations: map[string]string{
kubeletTypes.ConfigSourceAnnotationKey: "api",
kubeletTypes.ConfigMirrorAnnotationKey: "mirror",
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
Spec: api.PodSpec{

@ -2881,7 +2881,7 @@ func TestDoNotCacheStatusForStaticPods(t *testing.T) {
Name: "staticFoo",
Namespace: "new",
Annotations: map[string]string{
kubeletTypes.ConfigSourceAnnotationKey: "file",
kubetypes.ConfigSourceAnnotationKey: "file",
},
},
Spec: api.PodSpec{

@ -2906,7 +2906,7 @@ func TestHostNetworkAllowed(t *testing.T) {
capabilities.SetForTests(capabilities.Capabilities{
PrivilegedSources: capabilities.PrivilegedSources{
HostNetworkSources: []string{kubeletTypes.ApiserverSource, kubeletTypes.FileSource},
HostNetworkSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource},
},
})
pod := &api.Pod{

@ -2915,7 +2915,7 @@ func TestHostNetworkAllowed(t *testing.T) {
Name: "foo",
Namespace: "new",
Annotations: map[string]string{
kubeletTypes.ConfigSourceAnnotationKey: kubeletTypes.FileSource,
kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource,
},
},
Spec: api.PodSpec{

@ -2928,7 +2928,7 @@ func TestHostNetworkAllowed(t *testing.T) {
},
}
kubelet.podManager.SetPods([]*api.Pod{pod})
err := kubelet.syncPod(pod, nil, container.Pod{}, kubeletTypes.SyncPodUpdate)
err := kubelet.syncPod(pod, nil, container.Pod{}, kubetypes.SyncPodUpdate)
if err != nil {
t.Errorf("expected pod infra creation to succeed: %v", err)
}
@ -2949,7 +2949,7 @@ func TestHostNetworkDisallowed(t *testing.T) {
Name: "foo",
Namespace: "new",
Annotations: map[string]string{
kubeletTypes.ConfigSourceAnnotationKey: kubeletTypes.FileSource,
kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource,
},
},
Spec: api.PodSpec{

@ -2961,7 +2961,7 @@ func TestHostNetworkDisallowed(t *testing.T) {
},
},
}
err := kubelet.syncPod(pod, nil, container.Pod{}, kubeletTypes.SyncPodUpdate)
err := kubelet.syncPod(pod, nil, container.Pod{}, kubetypes.SyncPodUpdate)
if err == nil {
t.Errorf("expected pod infra creation to fail")
}

@ -2988,7 +2988,7 @@ func TestPrivilegeContainerAllowed(t *testing.T) {
},
}
kubelet.podManager.SetPods([]*api.Pod{pod})
err := kubelet.syncPod(pod, nil, container.Pod{}, kubeletTypes.SyncPodUpdate)
err := kubelet.syncPod(pod, nil, container.Pod{}, kubetypes.SyncPodUpdate)
if err != nil {
t.Errorf("expected pod infra creation to succeed: %v", err)
}

@ -3014,7 +3014,7 @@ func TestPrivilegeContainerDisallowed(t *testing.T) {
},
},
}
err := kubelet.syncPod(pod, nil, container.Pod{}, kubeletTypes.SyncPodUpdate)
err := kubelet.syncPod(pod, nil, container.Pod{}, kubetypes.SyncPodUpdate)
if err == nil {
t.Errorf("expected pod infra creation to fail")
}
@ -24,12 +24,12 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/util"
)
type HandlerRunner struct {
httpGetter kubeletTypes.HttpGetter
httpGetter kubetypes.HttpGetter
commandRunner kubecontainer.ContainerCommandRunner
containerManager podStatusProvider
}

@ -38,7 +38,7 @@ type podStatusProvider interface {
GetPodStatus(pod *api.Pod) (*api.PodStatus, error)
}
func NewHandlerRunner(httpGetter kubeletTypes.HttpGetter, commandRunner kubecontainer.ContainerCommandRunner, containerManager podStatusProvider) kubecontainer.HandlerRunner {
func NewHandlerRunner(httpGetter kubetypes.HttpGetter, commandRunner kubecontainer.ContainerCommandRunner, containerManager podStatusProvider) kubecontainer.HandlerRunner {
return &HandlerRunner{
httpGetter: httpGetter,
commandRunner: commandRunner,
@ -23,7 +23,7 @@ import (
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
// Mirror client is used to create/delete a mirror pod.

@ -55,7 +55,7 @@ func (mc *basicMirrorClient) CreateMirrorPod(pod *api.Pod) error {
for k, v := range pod.Annotations {
copyPod.Annotations[k] = v
}
copyPod.Annotations[kubeletTypes.ConfigMirrorAnnotationKey] = kubeletTypes.MirrorType
copyPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = kubetypes.MirrorType
_, err := mc.apiserverClient.Pods(copyPod.Namespace).Create(&copyPod)
return err

@ -81,7 +81,7 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error {
// Helper functions.
func getPodSource(pod *api.Pod) (string, error) {
if pod.Annotations != nil {
if source, ok := pod.Annotations[kubeletTypes.ConfigSourceAnnotationKey]; ok {
if source, ok := pod.Annotations[kubetypes.ConfigSourceAnnotationKey]; ok {
return source, nil
}
}

@ -90,13 +90,13 @@ func getPodSource(pod *api.Pod) (string, error) {
func isStaticPod(pod *api.Pod) bool {
source, err := getPodSource(pod)
return err == nil && source != kubeletTypes.ApiserverSource
return err == nil && source != kubetypes.ApiserverSource
}
func isMirrorPod(pod *api.Pod) bool {
if value, ok := pod.Annotations[kubeletTypes.ConfigMirrorAnnotationKey]; !ok {
if value, ok := pod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; !ok {
return false
} else {
return value == kubeletTypes.MirrorType
return value == kubetypes.MirrorType
}
}
@ -28,7 +28,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/network"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
const (

@ -102,7 +102,7 @@ func (plugin *cniNetworkPlugin) Name() string {
return CNIPluginName
}
func (plugin *cniNetworkPlugin) SetUpPod(namespace string, name string, id kubeletTypes.DockerID) error {
func (plugin *cniNetworkPlugin) SetUpPod(namespace string, name string, id kubetypes.DockerID) error {
runtime, ok := plugin.host.GetRuntime().(*dockertools.DockerManager)
if !ok {
return fmt.Errorf("CNI execution called on non-docker runtime")

@ -121,7 +121,7 @@ func (plugin *cniNetworkPlugin) SetUpPod(namespace string, name string, id kubel
return err
}
func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id kubeletTypes.DockerID) error {
func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id kubetypes.DockerID) error {
runtime, ok := plugin.host.GetRuntime().(*dockertools.DockerManager)
if !ok {
return fmt.Errorf("CNI execution called on non-docker runtime")

@ -136,7 +136,7 @@ func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id ku
// TODO: Use the addToNetwork function to obtain the IP of the Pod. That will assume idempotent ADD call to the plugin.
// Also fix the runtime's call to Status function to be done only in the case that the IP is lost, no need to do periodic calls
func (plugin *cniNetworkPlugin) Status(namespace string, name string, id kubeletTypes.DockerID) (*network.PodNetworkStatus, error) {
func (plugin *cniNetworkPlugin) Status(namespace string, name string, id kubetypes.DockerID) (*network.PodNetworkStatus, error) {
runtime, ok := plugin.host.GetRuntime().(*dockertools.DockerManager)
if !ok {
return nil, fmt.Errorf("CNI execution called on non-docker runtime")
@ -67,7 +67,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/kubelet/network"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
utilexec "k8s.io/kubernetes/pkg/util/exec"
)

@ -132,19 +132,19 @@ func (plugin *execNetworkPlugin) validate() error {
return nil
}
func (plugin *execNetworkPlugin) SetUpPod(namespace string, name string, id kubeletTypes.DockerID) error {
func (plugin *execNetworkPlugin) SetUpPod(namespace string, name string, id kubetypes.DockerID) error {
out, err := utilexec.New().Command(plugin.getExecutable(), setUpCmd, namespace, name, string(id)).CombinedOutput()
glog.V(5).Infof("SetUpPod 'exec' network plugin output: %s, %v", string(out), err)
return err
}
func (plugin *execNetworkPlugin) TearDownPod(namespace string, name string, id kubeletTypes.DockerID) error {
func (plugin *execNetworkPlugin) TearDownPod(namespace string, name string, id kubetypes.DockerID) error {
out, err := utilexec.New().Command(plugin.getExecutable(), tearDownCmd, namespace, name, string(id)).CombinedOutput()
glog.V(5).Infof("TearDownPod 'exec' network plugin output: %s, %v", string(out), err)
return err
}
func (plugin *execNetworkPlugin) Status(namespace string, name string, id kubeletTypes.DockerID) (*network.PodNetworkStatus, error) {
func (plugin *execNetworkPlugin) Status(namespace string, name string, id kubetypes.DockerID) (*network.PodNetworkStatus, error) {
out, err := utilexec.New().Command(plugin.getExecutable(), statusCmd, namespace, name, string(id)).CombinedOutput()
glog.V(5).Infof("Status 'exec' network plugin output: %s, %v", string(out), err)
if err != nil {
|
@ -26,7 +26,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/util/errors"
|
||||
"k8s.io/kubernetes/pkg/util/validation"
|
||||
)
|
||||
|
@ -46,13 +46,13 @@ type NetworkPlugin interface {
|
|||
// SetUpPod is the method called after the infra container of
|
||||
// the pod has been created but before the other containers of the
|
||||
// pod are launched.
|
||||
SetUpPod(namespace string, name string, podInfraContainerID kubeletTypes.DockerID) error
|
||||
SetUpPod(namespace string, name string, podInfraContainerID kubetypes.DockerID) error
|
||||
|
||||
// TearDownPod is the method called before a pod's infra container will be deleted
|
||||
TearDownPod(namespace string, name string, podInfraContainerID kubeletTypes.DockerID) error
|
||||
TearDownPod(namespace string, name string, podInfraContainerID kubetypes.DockerID) error
|
||||
|
||||
// Status is the method called to obtain the ipv4 or ipv6 addresses of the container
|
||||
Status(namespace string, name string, podInfraContainerID kubeletTypes.DockerID) (*PodNetworkStatus, error)
|
||||
Status(namespace string, name string, podInfraContainerID kubetypes.DockerID) (*PodNetworkStatus, error)
|
||||
}
|
||||
|
||||
// PodNetworkStatus stores the network status of a pod (currently just the primary IP address)
|
||||
|
@ -134,14 +134,14 @@ func (plugin *noopNetworkPlugin) Name() string {
|
|||
return DefaultPluginName
|
||||
}
|
||||
|
||||
func (plugin *noopNetworkPlugin) SetUpPod(namespace string, name string, id kubeletTypes.DockerID) error {
|
||||
func (plugin *noopNetworkPlugin) SetUpPod(namespace string, name string, id kubetypes.DockerID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (plugin *noopNetworkPlugin) TearDownPod(namespace string, name string, id kubeletTypes.DockerID) error {
|
||||
func (plugin *noopNetworkPlugin) TearDownPod(namespace string, name string, id kubetypes.DockerID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (plugin *noopNetworkPlugin) Status(namespace string, name string, id kubeletTypes.DockerID) (*PodNetworkStatus, error) {
|
||||
func (plugin *noopNetworkPlugin) Status(namespace string, name string, id kubetypes.DockerID) (*PodNetworkStatus, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
)
|
||||
|
||||
// Stub out mirror client for testing purpose.
|
||||
|
@ -41,8 +41,8 @@ func TestGetSetPods(t *testing.T) {
|
|||
Name: "bar",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{
|
||||
kubeletTypes.ConfigSourceAnnotationKey: "api",
|
||||
kubeletTypes.ConfigMirrorAnnotationKey: "mirror",
|
||||
kubetypes.ConfigSourceAnnotationKey: "api",
|
||||
kubetypes.ConfigMirrorAnnotationKey: "mirror",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -51,7 +51,7 @@ func TestGetSetPods(t *testing.T) {
|
|||
UID: "123456789",
|
||||
Name: "bar",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{kubeletTypes.ConfigSourceAnnotationKey: "file"},
|
||||
Annotations: map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"},
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -61,7 +61,7 @@ func TestGetSetPods(t *testing.T) {
|
|||
UID: "999999999",
|
||||
Name: "taco",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{kubeletTypes.ConfigSourceAnnotationKey: "api"},
|
||||
Annotations: map[string]string{kubetypes.ConfigSourceAnnotationKey: "api"},
|
||||
},
|
||||
},
|
||||
staticPod,
|
||||
|
|
|
@ -24,19 +24,19 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
)
|
||||
|
||||
// PodWorkers is an abstract interface for testability.
|
||||
type PodWorkers interface {
|
||||
UpdatePod(pod *api.Pod, mirrorPod *api.Pod, updateType kubeletTypes.SyncPodType, updateComplete func())
|
||||
UpdatePod(pod *api.Pod, mirrorPod *api.Pod, updateType kubetypes.SyncPodType, updateComplete func())
|
||||
ForgetNonExistingPodWorkers(desiredPods map[types.UID]empty)
|
||||
ForgetWorker(uid types.UID)
|
||||
}
|
||||
|
||||
type syncPodFnType func(*api.Pod, *api.Pod, kubecontainer.Pod, kubeletTypes.SyncPodType) error
|
||||
type syncPodFnType func(*api.Pod, *api.Pod, kubecontainer.Pod, kubetypes.SyncPodType) error
|
||||
|
||||
type podWorkers struct {
|
||||
// Protects all per worker fields.
|
||||
|
@ -75,7 +75,7 @@ type workUpdate struct {
|
|||
updateCompleteFn func()
|
||||
|
||||
// A string describing the type of this update, eg: create
|
||||
updateType kubeletTypes.SyncPodType
|
||||
updateType kubetypes.SyncPodType
|
||||
}
|
||||
|
||||
func newPodWorkers(runtimeCache kubecontainer.RuntimeCache, syncPodFn syncPodFnType,
|
||||
|
@ -122,7 +122,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
|
|||
}
|
||||
|
||||
// Apply the new setting to the specified pod. updateComplete is called when the update is completed.
|
||||
func (p *podWorkers) UpdatePod(pod *api.Pod, mirrorPod *api.Pod, updateType kubeletTypes.SyncPodType, updateComplete func()) {
|
||||
func (p *podWorkers) UpdatePod(pod *api.Pod, mirrorPod *api.Pod, updateType kubetypes.SyncPodType, updateComplete func()) {
|
||||
uid := pod.UID
|
||||
var podUpdates chan workUpdate
|
||||
var exists bool
|
||||
|
|
|
@ -30,7 +30,7 @@ import (
|
|||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
)
|
||||
|
||||
|
@ -57,7 +57,7 @@ func createPodWorkers() (*podWorkers, map[types.UID][]string) {
|
|||
fakeRuntimeCache := createFakeRuntimeCache(fakeRecorder)
|
||||
podWorkers := newPodWorkers(
|
||||
fakeRuntimeCache,
|
||||
func(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecontainer.Pod, updateType kubeletTypes.SyncPodType) error {
|
||||
func(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecontainer.Pod, updateType kubetypes.SyncPodType) error {
|
||||
func() {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
@ -94,7 +94,7 @@ func TestUpdatePod(t *testing.T) {
|
|||
numPods := 20
|
||||
for i := 0; i < numPods; i++ {
|
||||
for j := i; j < numPods; j++ {
|
||||
podWorkers.UpdatePod(newPod(string(j), string(i)), nil, kubeletTypes.SyncPodCreate, func() {})
|
||||
podWorkers.UpdatePod(newPod(string(j), string(i)), nil, kubetypes.SyncPodCreate, func() {})
|
||||
}
|
||||
}
|
||||
drainWorkers(podWorkers, numPods)
|
||||
|
@ -127,7 +127,7 @@ func TestForgetNonExistingPodWorkers(t *testing.T) {
|
|||
|
||||
numPods := 20
|
||||
for i := 0; i < numPods; i++ {
|
||||
podWorkers.UpdatePod(newPod(string(i), "name"), nil, kubeletTypes.SyncPodUpdate, func() {})
|
||||
podWorkers.UpdatePod(newPod(string(i), "name"), nil, kubetypes.SyncPodUpdate, func() {})
|
||||
}
|
||||
drainWorkers(podWorkers, numPods)
|
||||
|
||||
|
@ -163,12 +163,12 @@ type simpleFakeKubelet struct {
|
|||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
func (kl *simpleFakeKubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecontainer.Pod, updateType kubeletTypes.SyncPodType) error {
|
||||
func (kl *simpleFakeKubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecontainer.Pod, updateType kubetypes.SyncPodType) error {
|
||||
kl.pod, kl.mirrorPod, kl.runningPod = pod, mirrorPod, runningPod
|
||||
return nil
|
||||
}
|
||||
|
||||
func (kl *simpleFakeKubelet) syncPodWithWaitGroup(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecontainer.Pod, updateType kubeletTypes.SyncPodType) error {
|
||||
func (kl *simpleFakeKubelet) syncPodWithWaitGroup(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecontainer.Pod, updateType kubetypes.SyncPodType) error {
|
||||
kl.pod, kl.mirrorPod, kl.runningPod = pod, mirrorPod, runningPod
|
||||
kl.wg.Done()
|
||||
return nil
|
||||
|
@ -354,8 +354,8 @@ func TestFakePodWorkers(t *testing.T) {
|
|||
kubeletForRealWorkers.wg.Add(1)
|
||||
|
||||
fakeDocker.ContainerList = tt.containerList
|
||||
realPodWorkers.UpdatePod(tt.pod, tt.mirrorPod, kubeletTypes.SyncPodUpdate, func() {})
|
||||
fakePodWorkers.UpdatePod(tt.pod, tt.mirrorPod, kubeletTypes.SyncPodUpdate, func() {})
|
||||
realPodWorkers.UpdatePod(tt.pod, tt.mirrorPod, kubetypes.SyncPodUpdate, func() {})
|
||||
fakePodWorkers.UpdatePod(tt.pod, tt.mirrorPod, kubetypes.SyncPodUpdate, func() {})
|
||||
|
||||
kubeletForRealWorkers.wg.Wait()
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/kubelet/container"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
const (

@ -40,7 +40,7 @@ type RunPodResult struct {
}
// RunOnce polls from one configuration update and run the associated pods.
func (kl *Kubelet) RunOnce(updates <-chan kubeletTypes.PodUpdate) ([]RunPodResult, error) {
func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult, error) {
select {
case u := <-updates:
glog.Infof("processing manifest with %d pods", len(u.Pods))

@ -110,7 +110,7 @@ func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error {
glog.Infof("pod %q containers not running: syncing", pod.Name)
// We don't create mirror pods in this mode; pass a dummy boolean value
// to sycnPod.
if err = kl.syncPod(pod, nil, p, kubeletTypes.SyncPodUpdate); err != nil {
if err = kl.syncPod(pod, nil, p, kubetypes.SyncPodUpdate); err != nil {
return fmt.Errorf("error syncing pod: %v", err)
}
if retry >= RunOnceMaxRetries {
@ -40,7 +40,7 @@ import (
"k8s.io/kubernetes/pkg/auth/user"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/httpstream"
"k8s.io/kubernetes/pkg/util/httpstream/spdy"

@ -211,7 +211,7 @@ func readResp(resp *http.Response) (string, error) {
// A helper function to return the correct pod name.
func getPodName(name, namespace string) string {
if namespace == "" {
namespace = kubeletTypes.NamespaceDefault
namespace = kubetypes.NamespaceDefault
}
return name + "_" + namespace
}
@ -27,7 +27,7 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
client "k8s.io/kubernetes/pkg/client/unversioned"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
kubeletUtil "k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"

@ -85,8 +85,8 @@ func NewManager(kubeClient client.Interface) Manager {
// isStatusEqual returns true if the given pod statuses are equal, false otherwise.
// This method sorts container statuses so order does not affect equality.
func isStatusEqual(oldStatus, status *api.PodStatus) bool {
sort.Sort(kubeletTypes.SortedContainerStatuses(status.ContainerStatuses))
sort.Sort(kubeletTypes.SortedContainerStatuses(oldStatus.ContainerStatuses))
sort.Sort(kubetypes.SortedContainerStatuses(status.ContainerStatuses))
sort.Sort(kubetypes.SortedContainerStatuses(oldStatus.ContainerStatuses))
// TODO: More sophisticated equality checking.
return reflect.DeepEqual(status, oldStatus)