/*
Copyright 2014 Google Inc. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
|
|
package kubelet
|
|
|
|
|
|
|
|
import (
|
|
|
|
"reflect"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
|
|
|
|
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
|
2015-03-23 17:14:30 +00:00
|
|
|
kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
|
2015-03-20 16:37:08 +00:00
|
|
|
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
|
|
|
|
"github.com/golang/glog"
|
|
|
|
)
|
|
|
|
|
|
|
|
// podStatusSyncRequest pairs a pod with the status that should be written to
// the apiserver for it. Sent on statusManager.podStatusChannel by
// SetPodStatus and consumed by SyncBatch.
type podStatusSyncRequest struct {
	pod    *api.Pod
	status api.PodStatus
}
|
|
|
|
|
|
|
|
// Updates pod statuses in apiserver. Writes only when new status has changed.
// All methods are thread-safe.
type statusManager struct {
	// kubeClient issues the PodStatus updates to the apiserver.
	kubeClient client.Interface
	// Map from pod full name to sync status of the corresponding pod.
	// podStatusesLock guards podStatuses only; it is not held while talking
	// to the apiserver.
	podStatusesLock sync.RWMutex
	podStatuses     map[string]api.PodStatus
	// podStatusChannel carries statuses that still need to be pushed to the
	// apiserver; buffered (see newStatusManager) so producers rarely block.
	podStatusChannel chan podStatusSyncRequest
}
|
|
|
|
|
|
|
|
func newStatusManager(kubeClient client.Interface) *statusManager {
|
|
|
|
return &statusManager{
|
|
|
|
kubeClient: kubeClient,
|
|
|
|
podStatuses: make(map[string]api.PodStatus),
|
|
|
|
podStatusChannel: make(chan podStatusSyncRequest, 1000), // Buffer up to 1000 statuses
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start launches the background goroutine that drains podStatusChannel and
// writes statuses to the apiserver.
func (s *statusManager) Start() {
	// We can run SyncBatch() often because it will block until we have some updates to send.
	go util.Forever(s.SyncBatch, 0)
}
|
|
|
|
|
|
|
|
func (s *statusManager) GetPodStatus(podFullName string) (api.PodStatus, bool) {
|
|
|
|
s.podStatusesLock.RLock()
|
|
|
|
defer s.podStatusesLock.RUnlock()
|
|
|
|
status, ok := s.podStatuses[podFullName]
|
|
|
|
return status, ok
|
|
|
|
}
|
|
|
|
|
2015-03-24 23:52:38 +00:00
|
|
|
func (s *statusManager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
|
|
|
|
podFullName := kubecontainer.GetPodFullName(pod)
|
2015-03-20 16:37:08 +00:00
|
|
|
s.podStatusesLock.Lock()
|
|
|
|
defer s.podStatusesLock.Unlock()
|
|
|
|
oldStatus, found := s.podStatuses[podFullName]
|
|
|
|
if !found || !reflect.DeepEqual(oldStatus, status) {
|
|
|
|
s.podStatuses[podFullName] = status
|
2015-03-24 23:52:38 +00:00
|
|
|
s.podStatusChannel <- podStatusSyncRequest{pod, status}
|
2015-03-20 16:37:08 +00:00
|
|
|
} else {
|
|
|
|
glog.V(3).Infof("Ignoring same pod status for %s - old: %s new: %s", podFullName, oldStatus, status)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *statusManager) DeletePodStatus(podFullName string) {
|
|
|
|
s.podStatusesLock.Lock()
|
|
|
|
defer s.podStatusesLock.Unlock()
|
|
|
|
delete(s.podStatuses, podFullName)
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(filipg): It'd be cleaner if we can do this without signal from user.
|
|
|
|
func (s *statusManager) RemoveOrphanedStatuses(podFullNames map[string]bool) {
|
|
|
|
s.podStatusesLock.Lock()
|
|
|
|
defer s.podStatusesLock.Unlock()
|
|
|
|
for key := range s.podStatuses {
|
|
|
|
if _, ok := podFullNames[key]; !ok {
|
|
|
|
glog.V(5).Infof("Removing %q from status map.", key)
|
|
|
|
delete(s.podStatuses, key)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// SyncBatch syncs pods statuses with the apiserver. It will loop until channel
|
|
|
|
// s.podStatusChannel is empty for at least 1s.
|
|
|
|
func (s *statusManager) SyncBatch() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case syncRequest := <-s.podStatusChannel:
|
2015-03-24 23:52:38 +00:00
|
|
|
pod := syncRequest.pod
|
|
|
|
podFullName := kubecontainer.GetPodFullName(pod)
|
2015-03-20 16:37:08 +00:00
|
|
|
status := syncRequest.status
|
|
|
|
glog.V(3).Infof("Syncing status for %s", podFullName)
|
2015-03-24 23:52:38 +00:00
|
|
|
_, err := s.kubeClient.Pods(pod.Namespace).UpdateStatus(pod.Name, &status)
|
2015-03-20 16:37:08 +00:00
|
|
|
if err != nil {
|
|
|
|
// We failed to update status. In order to make sure we retry next time
|
|
|
|
// we delete cached value. This may result in an additional update, but
|
|
|
|
// this is ok.
|
|
|
|
s.DeletePodStatus(podFullName)
|
2015-03-24 23:52:38 +00:00
|
|
|
glog.Warningf("Error updating status for pod %q: %v", podFullName, err)
|
2015-03-20 16:37:08 +00:00
|
|
|
} else {
|
2015-03-24 23:52:38 +00:00
|
|
|
glog.V(3).Infof("Status for pod %q updated successfully", podFullName)
|
2015-03-20 16:37:08 +00:00
|
|
|
}
|
|
|
|
case <-time.After(1 * time.Second):
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|