Merge pull request #67986 from andyzhangx/azure-lb-mixed-proto
Automatic merge from submit-queue (batch tested with PRs 67986, 68210, 67817). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

add mixed protocol support for azure load balancer

**What this PR does / why we need it**:
If a user specifies `service.beta.kubernetes.io/azure-load-balancer-mixed-protocols: "true"`, the Azure cloud provider will create both TCP and UDP load balancer rules. For more details, refer to https://github.com/kubernetes/kubernetes/issues/66887

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #66887

**Special notes for your reviewer**:
The original `reconcileLoadBalancer` func was too big, so part of its implementation has been moved into a standalone func, `reconcileLoadBalancerRule`.

Example service config:
```
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.beta.kubernetes.io/azure-load-balancer-mixed-protocols: "true"
  name: web
  namespace: default
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: web
  sessionAffinity: None
  type: LoadBalancer
```

**Release note**:
```
add mixed protocol support for azure load balancer
```

/kind feature
/sig azure
/assign @feiskyer @khenidak
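The hunks below add the annotation constant and factor the per-port probe/rule construction into `reconcileLoadBalancerRule`; the actual duplication of rules across both protocols happens elsewhere in the provider and is not part of the hunks shown here. As a rough illustration only, here is a minimal, self-contained Go sketch of the idea, using a simplified placeholder type instead of the Azure SDK's `network.LoadBalancingRule` and a hypothetical helper name (`expandMixedProtocols`):

```go
package main

import "fmt"

// lbRule is a simplified stand-in for the Azure SDK's network.LoadBalancingRule,
// carrying only the fields needed to illustrate the mixed-protocols behaviour.
type lbRule struct {
	Name     string
	Protocol string // "Tcp" or "Udp"
	Port     int32
}

// expandMixedProtocols is a hypothetical helper: when the mixed-protocols
// annotation is "true", every TCP rule gets an otherwise identical UDP twin
// (and vice versa), so the same frontend port is reachable over both
// transport protocols.
func expandMixedProtocols(rules []lbRule, annotations map[string]string) []lbRule {
	if annotations["service.beta.kubernetes.io/azure-load-balancer-mixed-protocols"] != "true" {
		return rules
	}
	expanded := make([]lbRule, 0, 2*len(rules))
	for _, r := range rules {
		expanded = append(expanded, r)
		twin := r
		switch r.Protocol {
		case "Tcp":
			twin.Protocol = "Udp"
		case "Udp":
			twin.Protocol = "Tcp"
		default:
			continue // other protocols are left untouched in this sketch
		}
		twin.Name = r.Name + "-" + twin.Protocol
		expanded = append(expanded, twin)
	}
	return expanded
}

func main() {
	rules := []lbRule{{Name: "web-80", Protocol: "Tcp", Port: 80}}
	annotations := map[string]string{
		"service.beta.kubernetes.io/azure-load-balancer-mixed-protocols": "true",
	}
	for _, r := range expandMixedProtocols(rules, annotations) {
		fmt.Printf("%s %s %d\n", r.Name, r.Protocol, r.Port)
	}
}
```

With the annotation set, the single TCP rule for port 80 in the example Service above would yield an additional, otherwise identical UDP rule on the same frontend IP.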
commit 771bb86fa1
@@ -79,6 +79,10 @@ const (
	// ServiceAnnotationLoadBalancerIdleTimeout is the annotation used on the service
	// to specify the idle timeout for connections on the load balancer in minutes.
	ServiceAnnotationLoadBalancerIdleTimeout = "service.beta.kubernetes.io/azure-load-balancer-tcp-idle-timeout"

	// ServiceAnnotationLoadBalancerMixedProtocols is the annotation used on the service
	// to create both TCP and UDP protocols when creating load balancer rules.
	ServiceAnnotationLoadBalancerMixedProtocols = "service.beta.kubernetes.io/azure-load-balancer-mixed-protocols"
)

var (
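The new constant is consumed by reading the Service's annotations. A minimal sketch of that lookup (not the provider's actual helper), assuming the usual client-go `v1.Service` type and treating any value other than a parsable `true` as disabled:

```go
package main

import (
	"fmt"
	"strconv"

	v1 "k8s.io/api/core/v1"
)

// Mirrors the constant added in the hunk above.
const serviceAnnotationLoadBalancerMixedProtocols = "service.beta.kubernetes.io/azure-load-balancer-mixed-protocols"

// wantsMixedProtocols is a hypothetical helper: it reports whether the Service
// asks for both TCP and UDP load-balancing rules. Missing or unparsable
// annotation values default to false.
func wantsMixedProtocols(svc *v1.Service) bool {
	value, ok := svc.Annotations[serviceAnnotationLoadBalancerMixedProtocols]
	if !ok {
		return false
	}
	enabled, err := strconv.ParseBool(value)
	return err == nil && enabled
}

func main() {
	svc := &v1.Service{}
	svc.Annotations = map[string]string{
		serviceAnnotationLoadBalancerMixedProtocols: "true",
	}
	fmt.Println(wantsMixedProtocols(svc)) // true
}
```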
@@ -712,97 +716,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
	}

	// update probes/rules
	var ports []v1.ServicePort
	if wantLb {
		ports = service.Spec.Ports
	} else {
		ports = []v1.ServicePort{}
	}

	var expectedProbes []network.Probe
	var expectedRules []network.LoadBalancingRule
	for _, port := range ports {
		lbRuleName := az.getLoadBalancerRuleName(service, port, subnet(service))

		transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
		if err != nil {
			return nil, err
		}

		if serviceapi.NeedsHealthCheck(service) {
			if port.Protocol == v1.ProtocolUDP {
				// ERROR: this isn't supported
				// health check (aka source ip preservation) is not
				// compatible with UDP (it uses an HTTP check)
				return nil, fmt.Errorf("services requiring health checks are incompatible with UDP ports")
			}

			if port.Protocol == v1.ProtocolSCTP {
				// ERROR: this isn't supported
				// health check (aka source ip preservation) is not
				// compatible with SCTP (it uses an HTTP check)
				return nil, fmt.Errorf("services requiring health checks are incompatible with SCTP ports")
			}

			podPresencePath, podPresencePort := serviceapi.GetServiceHealthCheckPathPort(service)

			expectedProbes = append(expectedProbes, network.Probe{
				Name: &lbRuleName,
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					RequestPath:       to.StringPtr(podPresencePath),
					Protocol:          network.ProbeProtocolHTTP,
					Port:              to.Int32Ptr(podPresencePort),
					IntervalInSeconds: to.Int32Ptr(5),
					NumberOfProbes:    to.Int32Ptr(2),
				},
			})
		} else if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP {
			// we only add the expected probe if we're doing TCP
			expectedProbes = append(expectedProbes, network.Probe{
				Name: &lbRuleName,
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					Protocol:          *probeProto,
					Port:              to.Int32Ptr(port.NodePort),
					IntervalInSeconds: to.Int32Ptr(5),
					NumberOfProbes:    to.Int32Ptr(2),
				},
			})
		}

		loadDistribution := network.Default
		if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
			loadDistribution = network.SourceIP
		}

		expectedRule := network.LoadBalancingRule{
			Name: &lbRuleName,
			LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
				Protocol: *transportProto,
				FrontendIPConfiguration: &network.SubResource{
					ID: to.StringPtr(lbFrontendIPConfigID),
				},
				BackendAddressPool: &network.SubResource{
					ID: to.StringPtr(lbBackendPoolID),
				},
				LoadDistribution: loadDistribution,
				FrontendPort:     to.Int32Ptr(port.Port),
				BackendPort:      to.Int32Ptr(port.Port),
				EnableFloatingIP: to.BoolPtr(true),
			},
		}
		if port.Protocol == v1.ProtocolTCP {
			expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout
		}

		// we didn't construct the probe objects for UDP or SCTP because they're not used/needed/allowed
		if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP {
			expectedRule.Probe = &network.SubResource{
				ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)),
			}
		}

		expectedRules = append(expectedRules, expectedRule)
	}
	expectedProbes, expectedRules, err := az.reconcileLoadBalancerRule(service, wantLb, lbFrontendIPConfigID, lbBackendPoolID, lbName, lbIdleTimeout)

	// remove unwanted probes
	dirtyProbes := false
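The helper `getProtocolsFromKubernetesProtocol` used above is not part of this diff. As a rough, self-contained sketch of the kind of mapping it performs (simplified string types stand in for the Azure SDK's transport, security-rule, and probe protocol enums, and the real helper also accepts SCTP, which the code above handles separately by skipping the probe):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// Simplified stand-ins for the Azure SDK enums returned by the real helper.
type (
	transportProtocol    string
	securityRuleProtocol string
	probeProtocol        string
)

// protocolsFromKubernetesProtocol sketches the translation from a Kubernetes
// Service protocol into the protocol used for the load-balancing rule, the
// matching network security group rule, and the health probe. UDP has no
// usable probe protocol, so it is returned empty in this sketch.
func protocolsFromKubernetesProtocol(p v1.Protocol) (transportProtocol, securityRuleProtocol, probeProtocol, error) {
	switch p {
	case v1.ProtocolTCP:
		return "Tcp", "Tcp", "Tcp", nil
	case v1.ProtocolUDP:
		return "Udp", "Udp", "", nil
	default:
		return "", "", "", fmt.Errorf("unsupported protocol %q in this sketch", p)
	}
}

func main() {
	transport, security, probe, err := protocolsFromKubernetesProtocol(v1.ProtocolTCP)
	fmt.Println(transport, security, probe, err) // Tcp Tcp Tcp <nil>
}
```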
@@ -948,6 +862,97 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
	return lb, nil
}

func (az *Cloud) reconcileLoadBalancerRule(
	service *v1.Service,
	wantLb bool,
	lbFrontendIPConfigID string,
	lbBackendPoolID string,
	lbName string,
	lbIdleTimeout *int32) ([]network.Probe, []network.LoadBalancingRule, error) {

	var ports []v1.ServicePort
	if wantLb {
		ports = service.Spec.Ports
	} else {
		ports = []v1.ServicePort{}
	}

	var expectedProbes []network.Probe
	var expectedRules []network.LoadBalancingRule
	for _, port := range ports {
		lbRuleName := az.getLoadBalancerRuleName(service, port, subnet(service))

		glog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName)

		transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
		if err != nil {
			return expectedProbes, expectedRules, err
		}

		if serviceapi.NeedsHealthCheck(service) {
			podPresencePath, podPresencePort := serviceapi.GetServiceHealthCheckPathPort(service)

			expectedProbes = append(expectedProbes, network.Probe{
				Name: &lbRuleName,
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					RequestPath:       to.StringPtr(podPresencePath),
					Protocol:          network.ProbeProtocolHTTP,
					Port:              to.Int32Ptr(podPresencePort),
					IntervalInSeconds: to.Int32Ptr(5),
					NumberOfProbes:    to.Int32Ptr(2),
				},
			})
		} else if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP {
			// we only add the expected probe if we're doing TCP
			expectedProbes = append(expectedProbes, network.Probe{
				Name: &lbRuleName,
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					Protocol:          *probeProto,
					Port:              to.Int32Ptr(port.NodePort),
					IntervalInSeconds: to.Int32Ptr(5),
					NumberOfProbes:    to.Int32Ptr(2),
				},
			})
		}

		loadDistribution := network.Default
		if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
			loadDistribution = network.SourceIP
		}

		expectedRule := network.LoadBalancingRule{
			Name: &lbRuleName,
			LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
				Protocol: *transportProto,
				FrontendIPConfiguration: &network.SubResource{
					ID: to.StringPtr(lbFrontendIPConfigID),
				},
				BackendAddressPool: &network.SubResource{
					ID: to.StringPtr(lbBackendPoolID),
				},
				LoadDistribution: loadDistribution,
				FrontendPort:     to.Int32Ptr(port.Port),
				BackendPort:      to.Int32Ptr(port.Port),
				EnableFloatingIP: to.BoolPtr(true),
			},
		}
		if port.Protocol == v1.ProtocolTCP {
			expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout
		}

		// we didn't construct the probe objects for UDP or SCTP because they're not used/needed/allowed
		if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP {
			expectedRule.Probe = &network.SubResource{
				ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)),
			}
		}

		expectedRules = append(expectedRules, expectedRule)
	}

	return expectedProbes, expectedRules, nil
}

// This reconciles the Network Security Group similar to how the LB is reconciled.
// This entails adding required, missing SecurityRules and removing stale rules.
func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error) {