e2e: block all master addresses

This way we can be sure that the kubelet can't communicate with the
master, even if it falls back to the internal/external IP (which seems to
be the case with DNS).

Issue #56787
pull/58/head
Justin SB 2018-11-05 22:19:05 -05:00
parent 5656ac754d
commit 0400871df9
No known key found for this signature in database
GPG Key ID: 8DEC5C8217494E37
5 changed files with 56 additions and 29 deletions

View File

@ -198,10 +198,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host, err := framework.GetNodeExternalIP(&node)
framework.ExpectNoError(err)
master := framework.GetMasterAddress(c)
masterAddresses := framework.GetAllMasterAddresses(c)
defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
framework.UnblockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.UnblockNetwork(host, masterAddress)
}
if CurrentGinkgoTestDescription().Failed {
return
@ -214,7 +216,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
}
}()
framework.BlockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.BlockNetwork(host, masterAddress)
}
By("Expect to observe node and pod status change from Ready to NotReady after network partition")
expectNodeReadiness(false, newNode)
@ -576,10 +580,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host, err := framework.GetNodeExternalIP(&node)
framework.ExpectNoError(err)
master := framework.GetMasterAddress(c)
masterAddresses := framework.GetAllMasterAddresses(c)
defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
framework.UnblockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.UnblockNetwork(host, masterAddress)
}
if CurrentGinkgoTestDescription().Failed {
return
@ -589,7 +595,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
expectNodeReadiness(true, newNode)
}()
framework.BlockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.BlockNetwork(host, masterAddress)
}
By("Expect to observe node and pod status change from Ready to NotReady after network partition")
expectNodeReadiness(false, newNode)

View File

@ -952,7 +952,7 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
if err != nil {
Failf("Error getting node external ip : %v", err)
}
master := GetMasterAddress(c)
masterAddresses := GetAllMasterAddresses(c)
By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
defer func() {
// This code will execute even if setting the iptables rule failed.
@ -960,14 +960,18 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
// had been inserted. (yes, we could look at the error code and ssh error
// separately, but I prefer to stay on the safe side).
By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
UnblockNetwork(host, master)
for _, masterAddress := range masterAddresses {
UnblockNetwork(host, masterAddress)
}
}()
Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
if !WaitForNodeToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
BlockNetwork(host, master)
for _, masterAddress := range masterAddresses {
BlockNetwork(host, masterAddress)
}
Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
if !WaitForNodeToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {

View File

@ -4934,19 +4934,28 @@ func getMaster(c clientset.Interface) Address {
return master
}
// GetMasterAddress returns the hostname/external IP/internal IP as appropriate for e2e tests on a particular provider
// which is the address of the interface used for communication with the kubelet.
func GetMasterAddress(c clientset.Interface) string {
// GetAllMasterAddresses returns all IP addresses on which the kubelet can reach the master.
// It may return internal and external IPs, even if we expect for
// e.g. internal IPs to be used (issue #56787), so that we can be
// sure to block the master fully during tests.
func GetAllMasterAddresses(c clientset.Interface) []string {
master := getMaster(c)
var ips sets.String
switch TestContext.Provider {
case "gce", "gke":
return master.externalIP
if master.externalIP != "" {
ips.Insert(master.externalIP)
}
if master.internalIP != "" {
ips.Insert(master.internalIP)
}
case "aws":
return awsMasterIP
ips.Insert(awsMasterIP)
default:
Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider)
}
return ""
return ips.List()
}
// GetNodeExternalIP returns node external IP concatenated with port 22 for ssh

View File

@ -173,16 +173,18 @@ var _ = SIGDescribe("Firewall rule", func() {
By("Checking well known ports on master and nodes are not exposed externally")
nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
Expect(len(nodeAddrs)).NotTo(BeZero())
masterAddr := framework.GetMasterAddress(cs)
flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.InsecureKubeControllerManagerPort, gce.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, gce.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, gce.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, gce.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, gce.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
masterAddresses := framework.GetAllMasterAddresses(cs)
for _, masterAddr := range masterAddresses {
flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.InsecureKubeControllerManagerPort, gce.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, gce.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, gce.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, gce.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, gce.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
}
})
})

View File

@ -128,12 +128,14 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
// host, err = framework.GetNodeInternalIP(&node)
// }
framework.ExpectNoError(err)
master := framework.GetMasterAddress(cs)
masterAddresses := framework.GetAllMasterAddresses(cs)
taint := newUnreachableNoExecuteTaint()
defer func() {
By(fmt.Sprintf("Unblocking traffic from node %s to the master", node.Name))
framework.UnblockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.UnblockNetwork(host, masterAddress)
}
if CurrentGinkgoTestDescription().Failed {
framework.Failf("Current e2e test has failed, so return from here.")
@ -147,7 +149,9 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
framework.ExpectNoError(err)
}()
framework.BlockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.BlockNetwork(host, masterAddress)
}
By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName))
if !framework.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) {