mirror of https://github.com/k3s-io/k3s
Merge pull request #78262 from MrHohn/svc-finalizer-cleanup2

Add Service Load Balancer finalizer support

Tag: k3s-v1.15.3
Commit: bc32307314
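This merge brings Service load balancer finalizer protection into the fork: the service controller now attaches the finalizer service.kubernetes.io/load-balancer-cleanup to Services of type LoadBalancer (behind the alpha ServiceLoadBalancerFinalizer feature gate), so the API object cannot be fully deleted until the cloud load balancer behind it has been torn down. A condensed, illustrative sketch of the flow, using only names introduced by the diffs below (error handling elided, so this is a paraphrase rather than the literal patched code):

// Deletion no longer removes the object outright; it only sets
// DeletionTimestamp, which the controller observes via its update handler.
if !wantsLoadBalancer(service) || needsCleanup(service) {
	// Tear down the cloud resource first, then release the finalizer so
	// the API server can finish deleting the Service.
	_ = s.balancer.EnsureLoadBalancerDeleted(context.TODO(), s.clusterName, service)
	_ = s.removeFinalizer(service)
} else {
	// Attach the finalizer before provisioning, so a crash mid-create
	// still leaves a cleanup marker behind.
	_ = s.addFinalizer(service)
	_, _ = s.ensureLoadBalancer(service)
}
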
@@ -19,6 +19,7 @@ go_library(
         "//pkg/controller:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/util/metrics:go_default_library",
+        "//pkg/util/slice:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -36,6 +37,7 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
         "//staging/src/k8s.io/cloud-provider:go_default_library",
+        "//staging/src/k8s.io/cloud-provider/service/helpers:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -50,16 +52,20 @@ go_test(
     deps = [
         "//pkg/api/testapi:go_default_library",
         "//pkg/controller:go_default_library",
+        "//pkg/features:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//staging/src/k8s.io/client-go/testing:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/cloud-provider/fake:go_default_library",
+        "//staging/src/k8s.io/cloud-provider/service/helpers:go_default_library",
+        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
     ],
 )
@@ -39,11 +39,13 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	cloudprovider "k8s.io/cloud-provider"
+	servicehelper "k8s.io/cloud-provider/service/helpers"
 	"k8s.io/klog"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/controller"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/util/metrics"
+	"k8s.io/kubernetes/pkg/util/slice"
 )
 
 const (
@@ -135,15 +137,28 @@ func New(
 
 	serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
 		cache.ResourceEventHandlerFuncs{
-			AddFunc: s.enqueueService,
-			UpdateFunc: func(old, cur interface{}) {
-				oldSvc, ok1 := old.(*v1.Service)
-				curSvc, ok2 := cur.(*v1.Service)
-				if ok1 && ok2 && s.needsUpdate(oldSvc, curSvc) {
+			AddFunc: func(cur interface{}) {
+				svc, ok := cur.(*v1.Service)
+				if ok && (wantsLoadBalancer(svc) || needsCleanup(svc)) {
 					s.enqueueService(cur)
 				}
 			},
-			DeleteFunc: s.enqueueService,
+			UpdateFunc: func(old, cur interface{}) {
+				oldSvc, ok1 := old.(*v1.Service)
+				curSvc, ok2 := cur.(*v1.Service)
+				if ok1 && ok2 && (s.needsUpdate(oldSvc, curSvc) || needsCleanup(curSvc)) {
+					s.enqueueService(cur)
+				}
+			},
+			DeleteFunc: func(old interface{}) {
+				if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ServiceLoadBalancerFinalizer) {
+					// No need to handle deletion event if finalizer feature gate is
+					// enabled. Because the deletion would be handled by the update
+					// path when the deletion timestamp is added.
+					return
+				}
+				s.enqueueService(old)
+			},
 		},
 		serviceSyncPeriod,
 	)
@@ -160,7 +175,7 @@ func New(
 func (s *ServiceController) enqueueService(obj interface{}) {
 	key, err := controller.KeyFunc(obj)
 	if err != nil {
-		klog.Errorf("Couldn't get key for object %#v: %v", obj, err)
+		runtime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", obj, err))
 		return
 	}
 	s.queue.Add(key)
@@ -235,95 +250,112 @@ func (s *ServiceController) init() error {
 	return nil
 }
 
-// processServiceUpdate operates loadbalancers for the incoming service accordingly.
+// processServiceCreateOrUpdate operates loadbalancers for the incoming service accordingly.
 // Returns an error if processing the service update failed.
-func (s *ServiceController) processServiceUpdate(cachedService *cachedService, service *v1.Service, key string) error {
-	if cachedService.state != nil {
-		if cachedService.state.UID != service.UID {
-			err := s.processLoadBalancerDelete(cachedService, key)
-			if err != nil {
-				return err
-			}
+func (s *ServiceController) processServiceCreateOrUpdate(service *v1.Service, key string) error {
+	// TODO(@MrHohn): Remove the cache once we get rid of the non-finalizer deletion
+	// path. Ref https://github.com/kubernetes/enhancements/issues/980.
+	cachedService := s.cache.getOrCreate(key)
+	if cachedService.state != nil && cachedService.state.UID != service.UID {
+		// This happens only when a service is deleted and re-created
+		// in a short period, which is only possible when it doesn't
+		// contain finalizer.
+		if err := s.processLoadBalancerDelete(cachedService.state, key); err != nil {
+			return err
 		}
 	}
-	// cache the service, we need the info for service deletion
+	// Always cache the service, we need the info for service deletion in case
+	// when load balancer cleanup is not handled via finalizer.
 	cachedService.state = service
-	err := s.syncLoadBalancerIfNeeded(key, service)
+	op, err := s.syncLoadBalancerIfNeeded(service, key)
 	if err != nil {
-		eventType := "CreatingLoadBalancerFailed"
-		message := "Error creating load balancer (will retry): "
-		if !wantsLoadBalancer(service) {
-			eventType = "CleanupLoadBalancerFailed"
-			message = "Error cleaning up load balancer (will retry): "
-		}
-		message += err.Error()
-		s.eventRecorder.Event(service, v1.EventTypeWarning, eventType, message)
+		s.eventRecorder.Eventf(service, v1.EventTypeWarning, "SyncLoadBalancerFailed", "Error syncing load balancer: %v", err)
 		return err
 	}
-	// Always update the cache upon success.
-	// NOTE: Since we update the cached service if and only if we successfully
-	// processed it, a cached service being nil implies that it hasn't yet
-	// been successfully processed.
-	s.cache.set(key, cachedService)
+	if op == deleteLoadBalancer {
+		// Only delete the cache upon successful load balancer deletion.
+		s.cache.delete(key)
+	}
 
 	return nil
 }
 
+type loadBalancerOperation int
+
+const (
+	deleteLoadBalancer loadBalancerOperation = iota
+	ensureLoadBalancer
+)
+
 // syncLoadBalancerIfNeeded ensures that service's status is synced up with loadbalancer
 // i.e. creates loadbalancer for service if requested and deletes loadbalancer if the service
 // doesn't want a loadbalancer no more. Returns whatever error occurred.
-func (s *ServiceController) syncLoadBalancerIfNeeded(key string, service *v1.Service) error {
+func (s *ServiceController) syncLoadBalancerIfNeeded(service *v1.Service, key string) (loadBalancerOperation, error) {
 	// Note: It is safe to just call EnsureLoadBalancer. But, on some clouds that requires a delete & create,
 	// which may involve service interruption. Also, we would like user-friendly events.
 
 	// Save the state so we can avoid a write if it doesn't change
-	previousState := v1helper.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
-	var newState *v1.LoadBalancerStatus
+	previousStatus := v1helper.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
+	var newStatus *v1.LoadBalancerStatus
+	var op loadBalancerOperation
 	var err error
 
-	if !wantsLoadBalancer(service) {
+	if !wantsLoadBalancer(service) || needsCleanup(service) {
+		// Delete the load balancer if service no longer wants one, or if service needs cleanup.
+		op = deleteLoadBalancer
+		newStatus = &v1.LoadBalancerStatus{}
 		_, exists, err := s.balancer.GetLoadBalancer(context.TODO(), s.clusterName, service)
 		if err != nil {
-			return fmt.Errorf("error getting LB for service %s: %v", key, err)
+			return op, fmt.Errorf("failed to check if load balancer exists before cleanup: %v", err)
 		}
 		if exists {
-			klog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key)
+			klog.V(2).Infof("Deleting existing load balancer for service %s", key)
 			s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
 			if err := s.balancer.EnsureLoadBalancerDeleted(context.TODO(), s.clusterName, service); err != nil {
-				return err
+				return op, fmt.Errorf("failed to delete load balancer: %v", err)
 			}
-			s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
 		}
-
-		newState = &v1.LoadBalancerStatus{}
+		// Always try to remove finalizer when load balancer is deleted.
+		// It will be a no-op if finalizer does not exist.
+		// Note this also clears up finalizer if the cluster is downgraded
+		// from a version that attaches finalizer to a version that doesn't.
+		if err := s.removeFinalizer(service); err != nil {
+			return op, fmt.Errorf("failed to remove load balancer cleanup finalizer: %v", err)
+		}
+		s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
 	} else {
-		klog.V(2).Infof("Ensuring LB for service %s", key)
-
-		// TODO: We could do a dry-run here if wanted to avoid the spurious cloud-calls & events when we restart
-
+		// Create or update the load balancer if service wants one.
+		op = ensureLoadBalancer
+		klog.V(2).Infof("Ensuring load balancer for service %s", key)
 		s.eventRecorder.Event(service, v1.EventTypeNormal, "EnsuringLoadBalancer", "Ensuring load balancer")
-		newState, err = s.ensureLoadBalancer(service)
+		if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ServiceLoadBalancerFinalizer) {
+			// Always try to add finalizer prior to load balancer creation.
+			// It will be a no-op if finalizer already exists.
+			// Note this also retrospectively puts on finalizer if the cluster
+			// is upgraded from a version that doesn't attach finalizer to a
+			// version that does.
+			if err := s.addFinalizer(service); err != nil {
+				return op, fmt.Errorf("failed to add load balancer cleanup finalizer: %v", err)
+			}
+		}
+		newStatus, err = s.ensureLoadBalancer(service)
 		if err != nil {
-			return fmt.Errorf("failed to ensure load balancer for service %s: %v", key, err)
+			return op, fmt.Errorf("failed to ensure load balancer: %v", err)
 		}
 		s.eventRecorder.Event(service, v1.EventTypeNormal, "EnsuredLoadBalancer", "Ensured load balancer")
 	}
 
-	// If there are any changes to the status then patch the service.
-	if !v1helper.LoadBalancerStatusEqual(previousState, newState) {
-		// Make a copy so we don't mutate the shared informer cache
-		updated := service.DeepCopy()
-		updated.Status.LoadBalancer = *newState
-
-		if _, err := patch(s.kubeClient.CoreV1(), service, updated); err != nil {
-			return fmt.Errorf("failed to patch status for service %s: %v", key, err)
+	if err := s.patchStatus(service, previousStatus, newStatus); err != nil {
+		// Only retry error that isn't not found:
+		// - Not found error mostly happens when service disappears right after
+		//   we remove the finalizer.
+		// - We can't patch status on non-exist service anyway.
+		if !errors.IsNotFound(err) {
+			return op, fmt.Errorf("failed to update load balancer status: %v", err)
 		}
-		klog.V(4).Infof("Successfully patched status for service %s", key)
-	} else {
-		klog.V(4).Infof("Not persisting unchanged LoadBalancerStatus for service %s to registry.", key)
 	}
 
-	return nil
+	return op, nil
 }
 
 func (s *ServiceController) ensureLoadBalancer(service *v1.Service) (*v1.LoadBalancerStatus, error) {
@@ -334,7 +366,7 @@ func (s *ServiceController) ensureLoadBalancer(service *v1.Service) (*v1.LoadBal
 
 	// If there are no available nodes for LoadBalancer service, make a EventTypeWarning event for it.
 	if len(nodes) == 0 {
-		s.eventRecorder.Eventf(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer service %s/%s", service.Namespace, service.Name)
+		s.eventRecorder.Event(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer")
 	}
 
 	// - Only one protocol supported per service
@@ -407,6 +439,12 @@ func (s *serviceCache) delete(serviceName string) {
 	delete(s.serviceMap, serviceName)
 }
 
+// needsCleanup checks if load balancer needs to be cleaned up as indicated by finalizer.
+func needsCleanup(service *v1.Service) bool {
+	return service.ObjectMeta.DeletionTimestamp != nil && servicehelper.HasLBFinalizer(service)
+}
+
+// needsUpdate checks if load balancer needs to be updated due to change in attributes.
 func (s *ServiceController) needsUpdate(oldService *v1.Service, newService *v1.Service) bool {
 	if !wantsLoadBalancer(oldService) && !wantsLoadBalancer(newService) {
 		return false
@@ -592,7 +630,7 @@ func getNodeConditionPredicate() corelisters.NodeConditionPredicate {
 func (s *ServiceController) nodeSyncLoop() {
 	newHosts, err := s.nodeLister.ListWithPredicate(getNodeConditionPredicate())
 	if err != nil {
-		klog.Errorf("Failed to retrieve current set of nodes from node lister: %v", err)
+		runtime.HandleError(fmt.Errorf("Failed to retrieve current set of nodes from node lister: %v", err))
 		return
 	}
 	if nodeSlicesEqualForLB(newHosts, s.knownHosts) {
@@ -602,7 +640,7 @@ func (s *ServiceController) nodeSyncLoop() {
 		return
 	}
 
-	klog.Infof("Detected change in list of current cluster nodes. New node set: %v",
+	klog.V(2).Infof("Detected change in list of current cluster nodes. New node set: %v",
 		nodeNames(newHosts))
 
 	// Try updating all services, and save the ones that fail to try again next
@@ -610,7 +648,7 @@ func (s *ServiceController) nodeSyncLoop() {
 	s.servicesToUpdate = s.cache.allServices()
 	numServices := len(s.servicesToUpdate)
 	s.servicesToUpdate = s.updateLoadBalancerHosts(s.servicesToUpdate, newHosts)
-	klog.Infof("Successfully updated %d out of %d load balancers to direct traffic to the updated set of nodes",
+	klog.V(2).Infof("Successfully updated %d out of %d load balancers to direct traffic to the updated set of nodes",
 		numServices-len(s.servicesToUpdate), numServices)
 
 	s.knownHosts = newHosts
@@ -626,7 +664,7 @@ func (s *ServiceController) updateLoadBalancerHosts(services []*v1.Service, host
 				return
 			}
 			if err := s.lockedUpdateLoadBalancerHosts(service, hosts); err != nil {
-				klog.Errorf("External error while updating load balancer: %v.", err)
+				runtime.HandleError(fmt.Errorf("failed to update load balancer hosts for service %s/%s: %v", service.Namespace, service.Name, err))
 				servicesToRetry = append(servicesToRetry, service)
 			}
 		}()
@@ -646,7 +684,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *v1.Service, h
 	if err == nil {
 		// If there are no available nodes for LoadBalancer service, make a EventTypeWarning event for it.
 		if len(hosts) == 0 {
-			s.eventRecorder.Eventf(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer service %s/%s", service.Namespace, service.Name)
+			s.eventRecorder.Event(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer")
 		} else {
 			s.eventRecorder.Event(service, v1.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts")
 		}
@@ -655,12 +693,12 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *v1.Service, h
 
 	// It's only an actual error if the load balancer still exists.
 	if _, exists, err := s.balancer.GetLoadBalancer(context.TODO(), s.clusterName, service); err != nil {
-		klog.Errorf("External error while checking if load balancer %q exists: name, %v", s.balancer.GetLoadBalancerName(context.TODO(), s.clusterName, service), err)
+		runtime.HandleError(fmt.Errorf("failed to check if load balancer exists for service %s/%s: %v", service.Namespace, service.Name, err))
 	} else if !exists {
 		return nil
 	}
 
-	s.eventRecorder.Eventf(service, v1.EventTypeWarning, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", nodeNames(hosts), err)
+	s.eventRecorder.Eventf(service, v1.EventTypeWarning, "UpdateLoadBalancerFailed", "Error updating load balancer with new hosts %v: %v", nodeNames(hosts), err)
 	return err
 }
 
@@ -677,7 +715,6 @@ func loadBalancerIPsAreEqual(oldService, newService *v1.Service) bool {
 // invoked concurrently with the same key.
 func (s *ServiceController) syncService(key string) error {
 	startTime := time.Now()
-	var cachedService *cachedService
 	defer func() {
 		klog.V(4).Infof("Finished syncing service %q (%v)", key, time.Since(startTime))
 	}()
@@ -692,44 +729,88 @@ func (s *ServiceController) syncService(key string) error {
 	switch {
 	case errors.IsNotFound(err):
 		// service absence in store means watcher caught the deletion, ensure LB info is cleaned
-		klog.Infof("Service has been deleted %v. Attempting to cleanup load balancer resources", key)
 		err = s.processServiceDeletion(key)
 	case err != nil:
-		klog.Infof("Unable to retrieve service %v from store: %v", key, err)
+		runtime.HandleError(fmt.Errorf("Unable to retrieve service %v from store: %v", key, err))
 	default:
-		cachedService = s.cache.getOrCreate(key)
-		err = s.processServiceUpdate(cachedService, service, key)
+		err = s.processServiceCreateOrUpdate(service, key)
 	}
 
 	return err
 }
 
-// Returns an error if processing the service deletion failed, along with a time.Duration
-// indicating whether processing should be retried; zero means no-retry; otherwise
-// we should retry after that Duration.
 func (s *ServiceController) processServiceDeletion(key string) error {
 	cachedService, ok := s.cache.get(key)
 	if !ok {
-		klog.Errorf("service %s not in cache even though the watcher thought it was. Ignoring the deletion", key)
+		// Cache does not contains the key means:
+		// - We didn't create a Load Balancer for the deleted service at all.
+		// - We already deleted the Load Balancer that was created for the service.
+		// In both cases we have nothing left to do.
 		return nil
 	}
-	return s.processLoadBalancerDelete(cachedService, key)
+	klog.V(2).Infof("Service %v has been deleted. Attempting to cleanup load balancer resources", key)
+	if err := s.processLoadBalancerDelete(cachedService.state, key); err != nil {
+		return err
+	}
+	s.cache.delete(key)
+	return nil
 }
 
-func (s *ServiceController) processLoadBalancerDelete(cachedService *cachedService, key string) error {
-	service := cachedService.state
+func (s *ServiceController) processLoadBalancerDelete(service *v1.Service, key string) error {
 	// delete load balancer info only if the service type is LoadBalancer
 	if !wantsLoadBalancer(service) {
 		return nil
 	}
 	s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
-	err := s.balancer.EnsureLoadBalancerDeleted(context.TODO(), s.clusterName, service)
-	if err != nil {
-		s.eventRecorder.Eventf(service, v1.EventTypeWarning, "DeletingLoadBalancerFailed", "Error deleting load balancer (will retry): %v", err)
+	if err := s.balancer.EnsureLoadBalancerDeleted(context.TODO(), s.clusterName, service); err != nil {
+		s.eventRecorder.Eventf(service, v1.EventTypeWarning, "DeleteLoadBalancerFailed", "Error deleting load balancer: %v", err)
 		return err
 	}
 	s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
-	s.cache.delete(key)
 
 	return nil
 }
+
+// addFinalizer patches the service to add finalizer.
+func (s *ServiceController) addFinalizer(service *v1.Service) error {
+	if servicehelper.HasLBFinalizer(service) {
+		return nil
+	}
+
+	// Make a copy so we don't mutate the shared informer cache.
+	updated := service.DeepCopy()
+	updated.ObjectMeta.Finalizers = append(updated.ObjectMeta.Finalizers, servicehelper.LoadBalancerCleanupFinalizer)
+
+	klog.V(2).Infof("Adding finalizer to service %s/%s", updated.Namespace, updated.Name)
+	_, err := patch(s.kubeClient.CoreV1(), service, updated)
+	return err
+}
+
+// removeFinalizer patches the service to remove finalizer.
+func (s *ServiceController) removeFinalizer(service *v1.Service) error {
+	if !servicehelper.HasLBFinalizer(service) {
+		return nil
+	}
+
+	// Make a copy so we don't mutate the shared informer cache.
+	updated := service.DeepCopy()
+	updated.ObjectMeta.Finalizers = slice.RemoveString(updated.ObjectMeta.Finalizers, servicehelper.LoadBalancerCleanupFinalizer, nil)
+
+	klog.V(2).Infof("Removing finalizer from service %s/%s", updated.Namespace, updated.Name)
+	_, err := patch(s.kubeClient.CoreV1(), service, updated)
+	return err
+}
+
+// patchStatus patches the service with the given LoadBalancerStatus.
+func (s *ServiceController) patchStatus(service *v1.Service, previousStatus, newStatus *v1.LoadBalancerStatus) error {
+	if v1helper.LoadBalancerStatusEqual(previousStatus, newStatus) {
+		return nil
+	}
+
+	// Make a copy so we don't mutate the shared informer cache.
+	updated := service.DeepCopy()
+	updated.Status.LoadBalancer = *newStatus
+
+	klog.V(2).Infof("Patching status for service %s/%s", updated.Namespace, updated.Name)
+	_, err := patch(s.kubeClient.CoreV1(), service, updated)
+	return err
+}
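One design point worth noting in the controller change above (service_controller.go in upstream kubernetes): syncLoadBalancerIfNeeded now reports which operation it performed, and the caller uses that tag, not the error, to decide whether the local cache entry can be dropped. Condensed from processServiceCreateOrUpdate in the diff, as a sketch rather than a verbatim excerpt:

op, err := s.syncLoadBalancerIfNeeded(service, key)
if err != nil {
	// The worker re-queues the key with backoff; the cache entry is kept
	// so a failed delete can be retried on the next pass.
	return err
}
if op == deleteLoadBalancer {
	// Only a successful delete may drop the entry.
	s.cache.delete(key)
}
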
@@ -22,19 +22,24 @@ import (
 	"reflect"
 	"strings"
 	"testing"
+	"time"
 
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/record"
 	fakecloud "k8s.io/cloud-provider/fake"
+	servicehelper "k8s.io/cloud-provider/service/helpers"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/pkg/features"
 )
 
 const region = "us-central"
@@ -82,13 +87,22 @@ func newController() (*ServiceController, *fakecloud.Cloud, *fake.Clientset) {
 	return controller, cloud, client
 }
 
-func TestCreateExternalLoadBalancer(t *testing.T) {
-	table := []struct {
-		service             *v1.Service
-		expectErr           bool
-		expectCreateAttempt bool
+// TODO(@MrHohn): Verify the end state when below issue is resolved:
+// https://github.com/kubernetes/client-go/issues/607
+func TestSyncLoadBalancerIfNeeded(t *testing.T) {
+	testCases := []struct {
+		desc                 string
+		enableFeatureGate    bool
+		service              *v1.Service
+		lbExists             bool
+		expectOp             loadBalancerOperation
+		expectCreateAttempt  bool
+		expectDeleteAttempt  bool
+		expectPatchStatus    bool
+		expectPatchFinalizer bool
 	}{
 		{
+			desc: "service doesn't want LB",
 			service: &v1.Service{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "no-external-balancer",
@@ -98,10 +112,34 @@ func TestSyncLoadBalancerIfNeeded(t *testing.T) {
 					Type: v1.ServiceTypeClusterIP,
 				},
 			},
-			expectErr:           false,
-			expectCreateAttempt: false,
+			expectOp:          deleteLoadBalancer,
+			expectPatchStatus: false,
 		},
 		{
+			desc: "service no longer wants LB",
+			service: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "no-external-balancer",
+					Namespace: "default",
+				},
+				Spec: v1.ServiceSpec{
+					Type: v1.ServiceTypeClusterIP,
+				},
+				Status: v1.ServiceStatus{
+					LoadBalancer: v1.LoadBalancerStatus{
+						Ingress: []v1.LoadBalancerIngress{
+							{IP: "8.8.8.8"},
+						},
+					},
+				},
+			},
+			lbExists:            true,
+			expectOp:            deleteLoadBalancer,
+			expectDeleteAttempt: true,
+			expectPatchStatus:   true,
+		},
+		{
+			desc: "udp service that wants LB",
 			service: &v1.Service{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "udp-service",
@@ -116,10 +154,12 @@ func TestSyncLoadBalancerIfNeeded(t *testing.T) {
 					Type: v1.ServiceTypeLoadBalancer,
 				},
 			},
-			expectErr:           false,
+			expectOp:            ensureLoadBalancer,
 			expectCreateAttempt: true,
+			expectPatchStatus:   true,
 		},
 		{
+			desc: "tcp service that wants LB",
 			service: &v1.Service{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "basic-service1",
@@ -134,10 +174,12 @@ func TestSyncLoadBalancerIfNeeded(t *testing.T) {
 					Type: v1.ServiceTypeLoadBalancer,
 				},
 			},
-			expectErr:           false,
+			expectOp:            ensureLoadBalancer,
 			expectCreateAttempt: true,
+			expectPatchStatus:   true,
 		},
 		{
+			desc: "sctp service that wants LB",
 			service: &v1.Service{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "sctp-service",
@@ -152,61 +194,206 @@ func TestSyncLoadBalancerIfNeeded(t *testing.T) {
 					Type: v1.ServiceTypeLoadBalancer,
 				},
 			},
-			expectErr:           false,
+			expectOp:            ensureLoadBalancer,
 			expectCreateAttempt: true,
+			expectPatchStatus:   true,
+		},
+		// Finalizer test cases below.
+		{
+			desc: "service with finalizer that no longer wants LB",
+			service: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "no-external-balancer",
+					Namespace:  "default",
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
+				},
+				Spec: v1.ServiceSpec{
+					Type: v1.ServiceTypeClusterIP,
+				},
+				Status: v1.ServiceStatus{
+					LoadBalancer: v1.LoadBalancerStatus{
+						Ingress: []v1.LoadBalancerIngress{
+							{IP: "8.8.8.8"},
+						},
+					},
+				},
+			},
+			lbExists:             true,
+			expectOp:             deleteLoadBalancer,
+			expectDeleteAttempt:  true,
+			expectPatchStatus:    true,
+			expectPatchFinalizer: true,
+		},
+		{
+			desc: "service that needs cleanup",
+			service: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "basic-service1",
+					Namespace: "default",
+					SelfLink:  testapi.Default.SelfLink("services", "basic-service1"),
+					DeletionTimestamp: &metav1.Time{
+						Time: time.Now(),
+					},
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
+				},
+				Spec: v1.ServiceSpec{
+					Ports: []v1.ServicePort{{
+						Port:     80,
+						Protocol: v1.ProtocolTCP,
+					}},
+					Type: v1.ServiceTypeLoadBalancer,
+				},
+				Status: v1.ServiceStatus{
+					LoadBalancer: v1.LoadBalancerStatus{
+						Ingress: []v1.LoadBalancerIngress{
+							{IP: "8.8.8.8"},
+						},
+					},
+				},
+			},
+			lbExists:             true,
+			expectOp:             deleteLoadBalancer,
+			expectDeleteAttempt:  true,
+			expectPatchStatus:    true,
+			expectPatchFinalizer: true,
+		},
+		{
+			desc:              "service without finalizer that wants LB",
+			enableFeatureGate: true,
+			service: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "basic-service1",
+					Namespace: "default",
+					SelfLink:  testapi.Default.SelfLink("services", "basic-service1"),
+				},
+				Spec: v1.ServiceSpec{
+					Ports: []v1.ServicePort{{
+						Port:     80,
+						Protocol: v1.ProtocolTCP,
+					}},
+					Type: v1.ServiceTypeLoadBalancer,
+				},
+			},
+			expectOp:             ensureLoadBalancer,
+			expectCreateAttempt:  true,
+			expectPatchStatus:    true,
+			expectPatchFinalizer: true,
+		},
+		{
+			desc:              "service with finalizer that wants LB",
+			enableFeatureGate: true,
+			service: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "basic-service1",
+					Namespace:  "default",
+					SelfLink:   testapi.Default.SelfLink("services", "basic-service1"),
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
+				},
+				Spec: v1.ServiceSpec{
+					Ports: []v1.ServicePort{{
+						Port:     80,
+						Protocol: v1.ProtocolTCP,
+					}},
+					Type: v1.ServiceTypeLoadBalancer,
+				},
+			},
+			expectOp:             ensureLoadBalancer,
+			expectCreateAttempt:  true,
+			expectPatchStatus:    true,
+			expectPatchFinalizer: false,
 		},
 	}
 
-	for _, item := range table {
-		controller, cloud, client := newController()
-		key := fmt.Sprintf("%s/%s", item.service.Namespace, item.service.Name)
-		if _, err := client.CoreV1().Services(item.service.Namespace).Create(item.service); err != nil {
-			t.Errorf("Failed to prepare service %s for testing: %v", key, err)
-			continue
-		}
-		client.ClearActions()
-		err := controller.syncLoadBalancerIfNeeded(key, item.service)
-		if !item.expectErr && err != nil {
-			t.Errorf("unexpected error: %v", err)
-		} else if item.expectErr && err == nil {
-			t.Errorf("expected error creating %v, got nil", item.service)
-		}
-		actions := client.Actions()
-		if !item.expectCreateAttempt {
-			if len(cloud.Calls) > 0 {
-				t.Errorf("unexpected cloud provider calls: %v", cloud.Calls)
-			}
-			if len(actions) > 0 {
-				t.Errorf("unexpected client actions: %v", actions)
-			}
-		} else {
-			var balancer *fakecloud.Balancer
-			for k := range cloud.Balancers {
-				if balancer == nil {
-					b := cloud.Balancers[k]
-					balancer = &b
-				} else {
-					t.Errorf("expected one load balancer to be created, got %v", cloud.Balancers)
-					break
-				}
-			}
-			if balancer == nil {
-				t.Errorf("expected one load balancer to be created, got none")
-			} else if balancer.Name != controller.loadBalancerName(item.service) ||
-				balancer.Region != region ||
-				balancer.Ports[0].Port != item.service.Spec.Ports[0].Port {
-				t.Errorf("created load balancer has incorrect parameters: %v", balancer)
-			}
-			actionFound := false
-			for _, action := range actions {
-				if action.GetVerb() == "patch" && action.GetResource().Resource == "services" {
-					actionFound = true
-				}
-			}
-			if !actionFound {
-				t.Errorf("expected patch service to be sent to client, got these actions instead: %v", actions)
-			}
-		}
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ServiceLoadBalancerFinalizer, tc.enableFeatureGate)()
+
+			controller, cloud, client := newController()
+			cloud.Exists = tc.lbExists
+			key := fmt.Sprintf("%s/%s", tc.service.Namespace, tc.service.Name)
+			if _, err := client.CoreV1().Services(tc.service.Namespace).Create(tc.service); err != nil {
+				t.Fatalf("Failed to prepare service %s for testing: %v", key, err)
+			}
+			client.ClearActions()
+
+			op, err := controller.syncLoadBalancerIfNeeded(tc.service, key)
+			if err != nil {
+				t.Errorf("Got error: %v, want nil", err)
+			}
+			if op != tc.expectOp {
+				t.Errorf("Got operation %v, want %v", op, tc.expectOp)
+			}
+			// Capture actions from test so it won't be messed up.
+			actions := client.Actions()
+
+			if !tc.expectCreateAttempt && !tc.expectDeleteAttempt {
+				if len(cloud.Calls) > 0 {
+					t.Errorf("Unexpected cloud provider calls: %v", cloud.Calls)
+				}
+				if len(actions) > 0 {
+					t.Errorf("Unexpected client actions: %v", actions)
+				}
+				return
+			}
+
+			if tc.expectCreateAttempt {
+				createCallFound := false
+				for _, call := range cloud.Calls {
+					if call == "create" {
+						createCallFound = true
+					}
+				}
+				if !createCallFound {
+					t.Errorf("Got no create call for load balancer, expected one")
+				}
+				// TODO(@MrHohn): Clean up the awkward pattern here.
+				var balancer *fakecloud.Balancer
+				for k := range cloud.Balancers {
+					if balancer == nil {
+						b := cloud.Balancers[k]
+						balancer = &b
+					} else {
+						t.Errorf("Got load balancer %v, expected one to be created", cloud.Balancers)
+						break
+					}
+				}
+				if balancer == nil {
+					t.Errorf("Got no load balancer, expected one to be created")
+				} else if balancer.Name != controller.loadBalancerName(tc.service) ||
+					balancer.Region != region ||
+					balancer.Ports[0].Port != tc.service.Spec.Ports[0].Port {
+					t.Errorf("Created load balancer has incorrect parameters: %v", balancer)
+				}
+			}
+			if tc.expectDeleteAttempt {
+				deleteCallFound := false
+				for _, call := range cloud.Calls {
+					if call == "delete" {
+						deleteCallFound = true
+					}
+				}
+				if !deleteCallFound {
+					t.Errorf("Got no delete call for load balancer, expected one")
+				}
+			}
+
+			expectNumPatches := 0
+			if tc.expectPatchStatus {
+				expectNumPatches++
+			}
+			if tc.expectPatchFinalizer {
+				expectNumPatches++
+			}
+			numPatches := 0
+			for _, action := range actions {
+				if action.Matches("patch", "services") {
+					numPatches++
+				}
+			}
+			if numPatches != expectNumPatches {
+				t.Errorf("Expected %d patches, got %d instead. Actions: %v", numPatches, expectNumPatches, actions)
+			}
+		})
 	}
 }
@@ -339,7 +526,7 @@ func TestGetNodeConditionPredicate(t *testing.T) {
 	}
 }
 
-func TestProcessServiceUpdate(t *testing.T) {
+func TestProcessServiceCreateOrUpdate(t *testing.T) {
 	controller, _, client := newController()
 
 	//A pair of old and new loadbalancer IP address
@@ -420,19 +607,18 @@ func TestProcessServiceCreateOrUpdate(t *testing.T) {
 		if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil {
 			t.Fatalf("Failed to prepare service %s for testing: %v", tc.key, err)
 		}
-		svcCache := controller.cache.getOrCreate(tc.key)
-		obtErr := controller.processServiceUpdate(svcCache, newSvc, tc.key)
+		obtErr := controller.processServiceCreateOrUpdate(newSvc, tc.key)
 		if err := tc.expectedFn(newSvc, obtErr); err != nil {
-			t.Errorf("%v processServiceUpdate() %v", tc.testName, err)
+			t.Errorf("%v processServiceCreateOrUpdate() %v", tc.testName, err)
 		}
 	}
 
 }
 
-// TestConflictWhenProcessServiceUpdate tests if processServiceUpdate will
+// TestConflictWhenProcessServiceCreateOrUpdate tests if processServiceCreateOrUpdate will
 // retry creating the load balancer when the update operation returns a conflict
 // error.
-func TestConflictWhenProcessServiceUpdate(t *testing.T) {
+func TestConflictWhenProcessServiceCreateOrUpdate(t *testing.T) {
 	svcName := "conflict-lb"
 	svc := newService(svcName, types.UID("123"), v1.ServiceTypeLoadBalancer)
 	controller, _, client := newController()
@@ -441,23 +627,22 @@ func TestConflictWhenProcessServiceCreateOrUpdate(t *testing.T) {
 		return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), svcName, errors.New("Object changed"))
 	})
 
-	svcCache := controller.cache.getOrCreate(svcName)
-	if err := controller.processServiceUpdate(svcCache, svc, svcName); err == nil {
-		t.Fatalf("controller.processServiceUpdate() = nil, want error")
+	if err := controller.processServiceCreateOrUpdate(svc, svcName); err == nil {
+		t.Fatalf("controller.processServiceCreateOrUpdate() = nil, want error")
 	}
 
-	retryMsg := "Error creating load balancer (will retry)"
+	errMsg := "Error syncing load balancer"
 	if gotEvent := func() bool {
 		events := controller.eventRecorder.(*record.FakeRecorder).Events
 		for len(events) > 0 {
 			e := <-events
-			if strings.Contains(e, retryMsg) {
+			if strings.Contains(e, errMsg) {
 				return true
 			}
 		}
 		return false
 	}(); !gotEvent {
-		t.Errorf("controller.processServiceUpdate() = can't find retry creating lb event, want event contains %q", retryMsg)
+		t.Errorf("controller.processServiceCreateOrUpdate() = can't find sync error event, want event contains %q", errMsg)
 	}
 }
@@ -620,7 +805,62 @@ func TestProcessServiceDeletion(t *testing.T) {
 
 }
 
-func TestDoesExternalLoadBalancerNeedsUpdate(t *testing.T) {
+func TestNeedsCleanup(t *testing.T) {
+	testCases := []struct {
+		desc               string
+		svc                *v1.Service
+		expectNeedsCleanup bool
+	}{
+		{
+			desc:               "service without finalizer without timestamp",
+			svc:                &v1.Service{},
+			expectNeedsCleanup: false,
+		},
+		{
+			desc: "service without finalizer with timestamp",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					DeletionTimestamp: &metav1.Time{
+						Time: time.Now(),
+					},
+				},
+			},
+			expectNeedsCleanup: false,
+		},
+		{
+			desc: "service with finalizer without timestamp",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
+				},
+			},
+			expectNeedsCleanup: false,
+		},
+		{
+			desc: "service with finalizer with timestamp",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					DeletionTimestamp: &metav1.Time{
+						Time: time.Now(),
+					},
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer, "unrelated"},
+				},
+			},
+			expectNeedsCleanup: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			if gotNeedsCleanup := needsCleanup(tc.svc); gotNeedsCleanup != tc.expectNeedsCleanup {
+				t.Errorf("needsCleanup() = %t, want %t", gotNeedsCleanup, tc.expectNeedsCleanup)
+			}
+		})
+	}
+}
+
+func TestNeedsUpdate(t *testing.T) {
 
 	var oldSvc, newSvc *v1.Service
 
@@ -861,3 +1101,211 @@ func TestNodeSlicesEqualForLB(t *testing.T) {
 		t.Errorf("nodeSlicesEqualForLB() Expected=false Obtained=true")
 	}
 }
+
+// TODO(@MrHohn): Verify the end state when below issue is resolved:
+// https://github.com/kubernetes/client-go/issues/607
+func TestAddFinalizer(t *testing.T) {
+	testCases := []struct {
+		desc        string
+		svc         *v1.Service
+		expectPatch bool
+	}{
+		{
+			desc: "no-op add finalizer",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test-patch-finalizer",
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
+				},
+			},
+			expectPatch: false,
+		},
+		{
+			desc: "add finalizer",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-patch-finalizer",
+				},
+			},
+			expectPatch: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			c := fake.NewSimpleClientset()
+			s := &ServiceController{
+				kubeClient: c,
+			}
+			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil {
+				t.Fatalf("Failed to prepare service for testing: %v", err)
+			}
+			if err := s.addFinalizer(tc.svc); err != nil {
+				t.Fatalf("addFinalizer() = %v, want nil", err)
+			}
+			patchActionFound := false
+			for _, action := range c.Actions() {
+				if action.Matches("patch", "services") {
+					patchActionFound = true
+				}
+			}
+			if patchActionFound != tc.expectPatch {
+				t.Errorf("Got patchActionFound = %t, want %t", patchActionFound, tc.expectPatch)
+			}
+		})
+	}
+}
+
+// TODO(@MrHohn): Verify the end state when below issue is resolved:
+// https://github.com/kubernetes/client-go/issues/607
+func TestRemoveFinalizer(t *testing.T) {
+	testCases := []struct {
+		desc        string
+		svc         *v1.Service
+		expectPatch bool
+	}{
+		{
+			desc: "no-op remove finalizer",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-patch-finalizer",
+				},
+			},
+			expectPatch: false,
+		},
+		{
+			desc: "remove finalizer",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test-patch-finalizer",
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
+				},
+			},
+			expectPatch: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			c := fake.NewSimpleClientset()
+			s := &ServiceController{
+				kubeClient: c,
+			}
+			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil {
+				t.Fatalf("Failed to prepare service for testing: %v", err)
+			}
+			if err := s.removeFinalizer(tc.svc); err != nil {
+				t.Fatalf("removeFinalizer() = %v, want nil", err)
+			}
+			patchActionFound := false
+			for _, action := range c.Actions() {
+				if action.Matches("patch", "services") {
+					patchActionFound = true
+				}
+			}
+			if patchActionFound != tc.expectPatch {
+				t.Errorf("Got patchActionFound = %t, want %t", patchActionFound, tc.expectPatch)
+			}
+		})
+	}
+}
+
+// TODO(@MrHohn): Verify the end state when below issue is resolved:
+// https://github.com/kubernetes/client-go/issues/607
+func TestPatchStatus(t *testing.T) {
+	testCases := []struct {
+		desc        string
+		svc         *v1.Service
+		newStatus   *v1.LoadBalancerStatus
+		expectPatch bool
+	}{
+		{
+			desc: "no-op add status",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-patch-status",
+				},
+				Status: v1.ServiceStatus{
+					LoadBalancer: v1.LoadBalancerStatus{
+						Ingress: []v1.LoadBalancerIngress{
+							{IP: "8.8.8.8"},
+						},
+					},
+				},
+			},
+			newStatus: &v1.LoadBalancerStatus{
+				Ingress: []v1.LoadBalancerIngress{
+					{IP: "8.8.8.8"},
+				},
+			},
+			expectPatch: false,
+		},
+		{
+			desc: "add status",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-patch-status",
+				},
+				Status: v1.ServiceStatus{},
+			},
+			newStatus: &v1.LoadBalancerStatus{
+				Ingress: []v1.LoadBalancerIngress{
+					{IP: "8.8.8.8"},
+				},
+			},
+			expectPatch: true,
+		},
+		{
+			desc: "no-op clear status",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-patch-status",
+				},
+				Status: v1.ServiceStatus{},
+			},
+			newStatus:   &v1.LoadBalancerStatus{},
+			expectPatch: false,
+		},
+		{
+			desc: "clear status",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-patch-status",
+				},
+				Status: v1.ServiceStatus{
+					LoadBalancer: v1.LoadBalancerStatus{
+						Ingress: []v1.LoadBalancerIngress{
+							{IP: "8.8.8.8"},
+						},
+					},
+				},
+			},
+			newStatus:   &v1.LoadBalancerStatus{},
+			expectPatch: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			c := fake.NewSimpleClientset()
+			s := &ServiceController{
+				kubeClient: c,
+			}
+			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil {
+				t.Fatalf("Failed to prepare service for testing: %v", err)
+			}
+			if err := s.patchStatus(tc.svc, &tc.svc.Status.LoadBalancer, tc.newStatus); err != nil {
+				t.Fatalf("patchStatus() = %v, want nil", err)
+			}
+			patchActionFound := false
+			for _, action := range c.Actions() {
+				if action.Matches("patch", "services") {
+					patchActionFound = true
+				}
+			}
+			if patchActionFound != tc.expectPatch {
+				t.Errorf("Got patchActionFound = %t, want %t", patchActionFound, tc.expectPatch)
+			}
+		})
+	}
+}
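The per-case gate flipping in the rewritten tests follows the standard featuregatetesting pattern: the helper sets the gate and returns a restore function, so deferring the returned call keeps each subtest isolated. As used in TestSyncLoadBalancerIfNeeded above (shown here with a literal true for a standalone reading):

// Flip the gate for the duration of one test case; the returned func
// restores the previous value when the deferred call runs.
defer featuregatetesting.SetFeatureGateDuringTest(
	t, utilfeature.DefaultFeatureGate,
	features.ServiceLoadBalancerFinalizer, true,
)()
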
@@ -454,6 +454,12 @@ const (
 	// Enables the regional PD feature on GCE.
 	deprecatedGCERegionalPersistentDisk featuregate.Feature = "GCERegionalPersistentDisk"
 
+	// owner: @MrHohn
+	// alpha: v1.15
+	//
+	// Enables Finalizer Protection for Service LoadBalancers.
+	ServiceLoadBalancerFinalizer featuregate.Feature = "ServiceLoadBalancerFinalizer"
+
 	// owner: @RobertKrawitz
 	// alpha: v1.15
 	//
@@ -548,6 +554,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	TTLAfterFinished:             {Default: false, PreRelease: featuregate.Alpha},
 	KubeletPodResources:          {Default: true, PreRelease: featuregate.Beta},
 	WindowsGMSA:                  {Default: false, PreRelease: featuregate.Alpha},
+	ServiceLoadBalancerFinalizer: {Default: false, PreRelease: featuregate.Alpha},
 	LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
 	NonPreemptingPriority:        {Default: false, PreRelease: featuregate.Alpha},
 	VolumePVCDataSource:          {Default: false, PreRelease: featuregate.Alpha},
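The gate is alpha and off by default in v1.15, so the finalizer paths stay dormant unless an operator opts in (typically via the --feature-gates flag on kube-controller-manager, e.g. --feature-gates=ServiceLoadBalancerFinalizer=true). In code, the controller consults the gate with the same check used throughout the diffs above:

if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ServiceLoadBalancerFinalizer) {
	// Finalizer-aware path: add the finalizer before provisioning and
	// handle deletion through the update handler.
}
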
@@ -32,6 +32,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/utils/net:go_default_library",
     ],
 )
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"strings"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	utilnet "k8s.io/utils/net"
 )
 
@@ -31,6 +31,11 @@ in order for in-tree cloud providers to not depend on internal packages.
 
 const (
 	defaultLoadBalancerSourceRanges = "0.0.0.0/0"
+
+	// LoadBalancerCleanupFinalizer is the finalizer added to load balancer
+	// services to ensure the Service resource is not fully deleted until
+	// the correlating load balancer resources are deleted.
+	LoadBalancerCleanupFinalizer = "service.kubernetes.io/load-balancer-cleanup"
 )
 
 // IsAllowAll checks whether the utilnet.IPNet allows traffic from 0.0.0.0/0
@@ -100,3 +105,13 @@ func NeedsHealthCheck(service *v1.Service) bool {
 	}
 	return RequestsOnlyLocalTraffic(service)
 }
+
+// HasLBFinalizer checks if service contains LoadBalancerCleanupFinalizer.
+func HasLBFinalizer(service *v1.Service) bool {
+	for _, finalizer := range service.ObjectMeta.Finalizers {
+		if finalizer == LoadBalancerCleanupFinalizer {
+			return true
+		}
+	}
+	return false
+}
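Because the finalizer name and the HasLBFinalizer helper now live in the shared k8s.io/cloud-provider/service/helpers staging package (see the BUILD changes above), out-of-tree consumers can make the same check without importing kubernetes internals. A minimal hypothetical caller, shown only to illustrate the exported API:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	servicehelper "k8s.io/cloud-provider/service/helpers"
)

func main() {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
		},
	}
	// Prints "true": the cleanup finalizer is present, so the API server
	// will hold the object until the service controller removes it.
	fmt.Println(servicehelper.HasLBFinalizer(svc))
}
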
@@ -20,7 +20,8 @@ import (
 	"strings"
 	"testing"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilnet "k8s.io/utils/net"
 )
 
@@ -219,3 +220,52 @@ func TestNeedsHealthCheck(t *testing.T) {
 		},
 	})
 }
+
+func TestHasLBFinalizer(t *testing.T) {
+	testCases := []struct {
+		desc         string
+		svc          *v1.Service
+		hasFinalizer bool
+	}{
+		{
+			desc:         "service without finalizer",
+			svc:          &v1.Service{},
+			hasFinalizer: false,
+		},
+		{
+			desc: "service with unrelated finalizer",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Finalizers: []string{"unrelated"},
+				},
+			},
+			hasFinalizer: false,
+		},
+		{
+			desc: "service with one finalizer",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Finalizers: []string{LoadBalancerCleanupFinalizer},
+				},
+			},
+			hasFinalizer: true,
+		},
+		{
+			desc: "service with multiple finalizers",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Finalizers: []string{LoadBalancerCleanupFinalizer, "unrelated"},
+				},
+			},
+			hasFinalizer: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			if hasFinalizer := HasLBFinalizer(tc.svc); hasFinalizer != tc.hasFinalizer {
+				t.Errorf("HasLBFinalizer() = %t, want %t", hasFinalizer, tc.hasFinalizer)
+			}
+		})
+	}
+}