Merge pull request #47095 from MrHohn/fix-e2e-firewall-multizone

Automatic merge from submit-queue (batch tested with PRs 47922, 47195, 47241, 47095, 47401)

Fix firewall e2e test for multizone CIs

Fixes #46894.

- Get node/master tags from config instead of querying the instance.
- Don't assume instances always run in the default zone.

/assign @freehan

**Release note**:

```release-note
NONE
```
commit 7ecb6cc238
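In short: the firewall e2e test previously discovered network tags by querying a GCE instance, and that lookup implicitly used the default zone, so it could fail on multizone CI clusters. After this change the tags come from test configuration. A minimal before/after sketch, using only names that appear in the diff below:

```go
// Before: read tags from a live instance. GetInstanceTags queried GCE in
// cloudConfig.Zone, which can fail when the node runs in another zone.
nodeTags := framework.GetInstanceTags(cloudConfig, nodesNames[0])
lbFw := framework.ConstructFirewallForLBService(svc, nodeTags.Items)

// After: take the tag from configuration (populated by --node-tag), so no
// per-instance, zone-sensitive lookup is needed.
lbFw := framework.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)
```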
--- a/hack/ginkgo-e2e.sh
+++ b/hack/ginkgo-e2e.sh
@@ -150,6 +150,8 @@ export PATH=$(dirname "${e2e_test}"):"${PATH}"
   --node-instance-group="${NODE_INSTANCE_GROUP:-}" \
   --prefix="${KUBE_GCE_INSTANCE_PREFIX:-e2e}" \
   --network="${KUBE_GCE_NETWORK:-${KUBE_GKE_NETWORK:-e2e}}" \
+  --node-tag="${NODE_TAG:-}" \
+  --master-tag="${MASTER_TAG:-}" \
   --federated-kube-context="${FEDERATION_KUBE_CONTEXT:-e2e-federation}" \
   ${KUBE_CONTAINER_RUNTIME:+"--container-runtime=${KUBE_CONTAINER_RUNTIME}"} \
   ${MASTER_OS_DISTRIBUTION:+"--master-os-distro=${MASTER_OS_DISTRIBUTION}"} \
--- a/hack/verify-flags/known-flags.txt
+++ b/hack/verify-flags/known-flags.txt
@@ -452,6 +452,7 @@ manifest-url-header
 masquerade-all
 master-os-distro
 master-service-namespace
+master-tag
 max-concurrency
 max-connection-bytes-per-sec
 maximum-dead-containers
@@ -512,6 +513,7 @@ node-schedulable-timeout
 node-startup-grace-period
 node-status-update-frequency
 node-sync-period
+node-tag
 no-headers
 no-headers
 non-masquerade-cidr
--- a/test/e2e/firewall.go
+++ b/test/e2e/firewall.go
@@ -24,6 +24,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
+	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -59,6 +60,8 @@ var _ = framework.KubeDescribe("Firewall rule", func() {
 		framework.Logf("Got cluster ID: %v", clusterID)

 		jig := framework.NewServiceTestJig(cs, serviceName)
+		nodeList := jig.GetNodes(framework.MaxNodesForEndpointsTests)
+		Expect(nodeList).NotTo(BeNil())
 		nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests)
 		if len(nodesNames) <= 0 {
 			framework.Failf("Expect at least 1 node, got: %v", nodesNames)
@@ -84,14 +87,13 @@ var _ = framework.KubeDescribe("Firewall rule", func() {
 		svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP

 		By("Checking if service's firewall rule is correct")
-		nodeTags := framework.GetInstanceTags(cloudConfig, nodesNames[0])
-		lbFw := framework.ConstructFirewallForLBService(svc, nodeTags.Items)
+		lbFw := framework.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)
 		fw, err := gceCloud.GetFirewall(lbFw.Name)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(framework.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred())

 		By("Checking if service's nodes health check firewall rule is correct")
-		nodesHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, nodeTags.Items, true)
+		nodesHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true)
 		fw, err = gceCloud.GetFirewall(nodesHCFw.Name)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(framework.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
@@ -107,7 +109,7 @@ var _ = framework.KubeDescribe("Firewall rule", func() {
 		Expect(err).NotTo(HaveOccurred())

 		By("Waiting for the correct local traffic health check firewall rule to be created")
-		localHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, nodeTags.Items, false)
+		localHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)
 		fw, err = framework.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(framework.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
@@ -132,11 +134,17 @@ var _ = framework.KubeDescribe("Firewall rule", func() {
 		// that's much harder to do in the current e2e framework.
 		By(fmt.Sprintf("Removing tags from one of the nodes: %v", nodesNames[0]))
 		nodesSet.Delete(nodesNames[0])
-		removedTags := framework.SetInstanceTags(cloudConfig, nodesNames[0], []string{})
+		// Instance could run in a different zone in multi-zone test. Figure out which zone
+		// it is in before proceeding.
+		zone := cloudConfig.Zone
+		if zoneInLabel, ok := nodeList.Items[0].Labels[kubeletapis.LabelZoneFailureDomain]; ok {
+			zone = zoneInLabel
+		}
+		removedTags := framework.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})
 		defer func() {
 			By("Adding tags back to the node and wait till the traffic is recovered")
 			nodesSet.Insert(nodesNames[0])
-			framework.SetInstanceTags(cloudConfig, nodesNames[0], removedTags)
+			framework.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
 			// Make sure traffic is recovered before exit
 			Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
 		}()
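The zone handling added above is the core of the multizone fix: prefer the zone recorded in the node's failure-domain label, and only fall back to the configured default. A standalone sketch of that fallback; `pickZone` is a hypothetical helper (not part of this change), with import paths assumed to match the vintage of the files in this diff:

```go
package framework

import (
	"k8s.io/kubernetes/pkg/api/v1"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

// pickZone returns the zone a node's instance actually lives in: the
// failure-domain zone label when the node carries one, else defaultZone.
func pickZone(node *v1.Node, defaultZone string) string {
	if zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]; ok {
		return zone
	}
	return defaultZone
}
```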
@@ -146,19 +154,13 @@ var _ = framework.KubeDescribe("Firewall rule", func() {
 	})

 	It("should have correct firewall rules for e2e cluster", func() {
-		By("Gathering firewall related information")
-		masterTags := framework.GetInstanceTags(cloudConfig, cloudConfig.MasterName)
-		Expect(len(masterTags.Items)).Should(Equal(1))
-
 		nodes := framework.GetReadySchedulableNodesOrDie(cs)
 		if len(nodes.Items) <= 0 {
 			framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items))
 		}
-		nodeTags := framework.GetInstanceTags(cloudConfig, nodes.Items[0].Name)
-		Expect(len(nodeTags.Items)).Should(Equal(1))

 		By("Checking if e2e firewall rules are correct")
-		for _, expFw := range framework.GetE2eFirewalls(cloudConfig.MasterName, masterTags.Items[0], nodeTags.Items[0], cloudConfig.Network) {
+		for _, expFw := range framework.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network) {
 			fw, err := gceCloud.GetFirewall(expFw.Name)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(framework.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
--- a/test/e2e/framework/firewall_util.go
+++ b/test/e2e/framework/firewall_util.go
@@ -51,13 +51,13 @@ func MakeFirewallNameForLBService(name string) string {
 }

 // ConstructFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
-func ConstructFirewallForLBService(svc *v1.Service, nodesTags []string) *compute.Firewall {
+func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Firewall {
 	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
 		Failf("can not construct firewall rule for non-loadbalancer type service")
 	}
 	fw := compute.Firewall{}
 	fw.Name = MakeFirewallNameForLBService(cloudprovider.GetLoadBalancerName(svc))
-	fw.TargetTags = nodesTags
+	fw.TargetTags = []string{nodeTag}
 	if svc.Spec.LoadBalancerSourceRanges == nil {
 		fw.SourceRanges = []string{"0.0.0.0/0"}
 	} else {
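With the new signature a caller passes the single configured tag and the helper wraps it into GCE's TargetTags list. Usage, mirroring the firewall test earlier in this diff (svc, gceCloud, cloudConfig, and the gomega matchers are assumed in scope):

```go
// Build the expected firewall rule from the configured node tag, then fetch
// the real rule from GCE and verify the two match.
lbFw := framework.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)
fw, err := gceCloud.GetFirewall(lbFw.Name)
Expect(err).NotTo(HaveOccurred())
Expect(framework.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
```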
@@ -77,13 +77,13 @@ func MakeHealthCheckFirewallNameForLBService(clusterID, name string, isNodesHeal
 }

 // ConstructHealthCheckFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
-func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodesTags []string, isNodesHealthCheck bool) *compute.Firewall {
+func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodeTag string, isNodesHealthCheck bool) *compute.Firewall {
 	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
 		Failf("can not construct firewall rule for non-loadbalancer type service")
 	}
 	fw := compute.Firewall{}
 	fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.GetLoadBalancerName(svc), isNodesHealthCheck)
-	fw.TargetTags = nodesTags
+	fw.TargetTags = []string{nodeTag}
 	fw.SourceRanges = gcecloud.LoadBalancerSrcRanges()
 	healthCheckPort := gcecloud.GetNodesHealthCheckPort()
 	if !isNodesHealthCheck {
@@ -98,14 +98,6 @@ func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service,
 	return &fw
 }

-// GetNodeTags gets tags from one of the Kubernetes nodes
-func GetNodeTags(c clientset.Interface, cloudConfig CloudConfig) *compute.Tags {
-	nodes := GetReadySchedulableNodesOrDie(c)
-	Expect(len(nodes.Items) > 0).Should(BeTrue())
-	nodeTags := GetInstanceTags(cloudConfig, nodes.Items[0].Name)
-	return nodeTags
-}
-
 // GetInstanceTags gets tags from GCE instance with given name.
 func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags {
 	gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
@@ -118,12 +110,12 @@ func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags
 }

 // SetInstanceTags sets tags on GCE instance with given name.
-func SetInstanceTags(cloudConfig CloudConfig, instanceName string, tags []string) []string {
+func SetInstanceTags(cloudConfig CloudConfig, instanceName, zone string, tags []string) []string {
 	gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
 	// Re-get instance everytime because we need the latest fingerprint for updating metadata
 	resTags := GetInstanceTags(cloudConfig, instanceName)
 	_, err := gceCloud.GetComputeService().Instances.SetTags(
-		cloudConfig.ProjectID, cloudConfig.Zone, instanceName,
+		cloudConfig.ProjectID, zone, instanceName,
 		&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
 	if err != nil {
 		Failf("failed to set instance tags: %v", err)
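Since SetInstanceTags now takes the zone explicitly, callers are responsible for resolving it first. A sketch of the firewall test's remove-and-restore pattern under the new signature (nodeName and zone are assumed resolved as in the test above):

```go
// Strip all tags from the instance in its actual zone. SetInstanceTags
// returns the tags previously on the instance, so they can be restored.
removedTags := framework.SetInstanceTags(cloudConfig, nodeName, zone, []string{})
defer func() {
	// Put the original tags back on exit so later tests see an unmodified node.
	framework.SetInstanceTags(cloudConfig, nodeName, zone, removedTags)
}()
```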
--- a/test/e2e/framework/ingress_utils.go
+++ b/test/e2e/framework/ingress_utils.go
@@ -974,14 +974,13 @@ func (j *IngressTestJig) GetIngressNodePorts() []string {
 }

 // ConstructFirewallForIngress returns the expected GCE firewall rule for the ingress resource
-func (j *IngressTestJig) ConstructFirewallForIngress(gceController *GCEIngressController) *compute.Firewall {
-	nodeTags := GetNodeTags(j.Client, gceController.Cloud)
+func (j *IngressTestJig) ConstructFirewallForIngress(gceController *GCEIngressController, nodeTag string) *compute.Firewall {
 	nodePorts := j.GetIngressNodePorts()

 	fw := compute.Firewall{}
 	fw.Name = gceController.GetFirewallRuleName()
 	fw.SourceRanges = gcecloud.LoadBalancerSrcRanges()
-	fw.TargetTags = nodeTags.Items
+	fw.TargetTags = []string{nodeTag}
 	fw.Allowed = []*compute.FirewallAllowed{
 		{
 			IPProtocol: "tcp",
--- a/test/e2e/framework/test_context.go
+++ b/test/e2e/framework/test_context.go
@@ -143,6 +143,8 @@ type CloudConfig struct {
 	ClusterTag string
 	Network    string
 	ConfigFile string // for azure and openstack
+	NodeTag    string
+	MasterTag  string

 	Provider cloudprovider.Interface
 }
@@ -210,6 +212,8 @@ func RegisterClusterFlags() {
 	flag.StringVar(&cloudConfig.NodeInstanceGroup, "node-instance-group", "", "Name of the managed instance group for nodes. Valid only for gce, gke or aws. If there is more than one group: comma separated list of groups.")
 	flag.StringVar(&cloudConfig.Network, "network", "e2e", "The cloud provider network for this e2e cluster.")
 	flag.IntVar(&cloudConfig.NumNodes, "num-nodes", -1, "Number of nodes in the cluster")
+	flag.StringVar(&cloudConfig.NodeTag, "node-tag", "", "Network tags used on node instances. Valid only for gce, gke")
+	flag.StringVar(&cloudConfig.MasterTag, "master-tag", "", "Network tags used on master instances. Valid only for gce, gke")

 	flag.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.")
 	flag.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure.")
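Tests read these values back through the shared test context, as the ingress test below does. A minimal consumption sketch; the Skipf guard is illustrative, not part of this change:

```go
// Read the configured tags inside a test. Only GCE/GKE jobs pass
// --node-tag/--master-tag, so skip the assertions elsewhere.
cloudConfig := framework.TestContext.CloudConfig
if cloudConfig.NodeTag == "" || cloudConfig.MasterTag == "" {
	framework.Skipf("--node-tag/--master-tag not set; skipping firewall assertions")
}
```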
--- a/test/e2e/ingress.go
+++ b/test/e2e/ingress.go
@@ -36,6 +36,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
 		ns               string
 		jig              *framework.IngressTestJig
 		conformanceTests []framework.IngressConformanceTests
+		cloudConfig      framework.CloudConfig
 	)
 	f := framework.NewDefaultFramework("ingress")

@@ -43,6 +44,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
 		f.BeforeEach()
 		jig = framework.NewIngressTestJig(f.ClientSet)
 		ns = f.Namespace.Name
+		cloudConfig = framework.TestContext.CloudConfig

 		// this test wants powerful permissions. Since the namespace names are unique, we can leave this
 		// lying around so we don't have to race any caches
@@ -122,7 +124,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {

 		By("should have correct firewall rule for ingress")
 		fw := gceController.GetFirewallRule()
-		expFw := jig.ConstructFirewallForIngress(gceController)
+		expFw := jig.ConstructFirewallForIngress(gceController, cloudConfig.NodeTag)
 		// Passed the last argument as `true` to verify the backend ports is a subset
 		// of the allowed ports in firewall rule, given there may be other existing
 		// ingress resources and backends we are not aware of.