mirror of https://github.com/k3s-io/k3s
configure cluster domain via flags
parent 5c39f7b58c
commit 4c8a65ac01
@@ -161,6 +161,7 @@ export PATH=$(dirname "${e2e_test}"):"${PATH}"
   --master-tag="${MASTER_TAG:-}" \
   --cluster-monitoring-mode="${KUBE_ENABLE_CLUSTER_MONITORING:-standalone}" \
   --prometheus-monitoring="${KUBE_ENABLE_PROMETHEUS_MONITORING:-false}" \
+  --dns-domain="${KUBE_DNS_DOMAIN:-cluster.local}" \
   ${KUBE_CONTAINER_RUNTIME:+"--container-runtime=${KUBE_CONTAINER_RUNTIME}"} \
   ${MASTER_OS_DISTRIBUTION:+"--master-os-distro=${MASTER_OS_DISTRIBUTION}"} \
   ${NODE_OS_DISTRIBUTION:+"--node-os-distro=${NODE_OS_DISTRIBUTION}"} \
@@ -145,6 +145,9 @@ type TestContextType struct {
 
 	// Indicates what path the kubernetes-anywhere is installed on
 	KubernetesAnywherePath string
+
+	// The DNS Domain of the cluster.
+	ClusterDNSDomain string
 }
 
 // NodeTestContextType is part of TestContextType, it is shared by all node e2e test.
@@ -248,6 +251,7 @@ func RegisterClusterFlags() {
 	flag.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, ubuntu, gci, coreos, or custom).")
 	flag.StringVar(&TestContext.ClusterMonitoringMode, "cluster-monitoring-mode", "standalone", "The monitoring solution that is used in the cluster.")
 	flag.BoolVar(&TestContext.EnablePrometheusMonitoring, "prometheus-monitoring", false, "Separate Prometheus monitoring deployed in cluster.")
+	flag.StringVar(&TestContext.ClusterDNSDomain, "dns-domain", "cluster.local", "The DNS Domain of the cluster.")
 
 	// TODO: Flags per provider? Rename gce-project/gce-zone?
 	cloudConfig := &TestContext.CloudConfig
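The flag plumbing above is the whole mechanism: the test runner forwards KUBE_DNS_DOMAIN as --dns-domain, RegisterClusterFlags parses it into TestContext.ClusterDNSDomain, and every test reads it from there instead of hardcoding cluster.local. A minimal standalone sketch of that pattern (illustrative names, not the framework's real types):

    package main

    import (
        "flag"
        "fmt"
    )

    // testContext stands in for the framework's TestContextType: flags are
    // parsed once at startup and the suite reads the resolved values here.
    type testContext struct {
        ClusterDNSDomain string
    }

    var ctx testContext

    func main() {
        // Defaulting to "cluster.local" keeps existing jobs unchanged when
        // the flag is not passed.
        flag.StringVar(&ctx.ClusterDNSDomain, "dns-domain", "cluster.local",
            "The DNS Domain of the cluster.")
        flag.Parse()

        // Call sites derive the suffix from the parsed flag instead of a literal.
        fmt.Printf("kubernetes.default.svc.%s\n", ctx.ClusterDNSDomain)
    }

Run with -dns-domain=example.org and the printed name becomes kubernetes.default.svc.example.org.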
@@ -47,17 +47,17 @@ var _ = SIGDescribe("DNS", func() {
 		namesToResolve := []string{
 			"kubernetes.default",
 			"kubernetes.default.svc",
-			"kubernetes.default.svc.cluster.local",
+			fmt.Sprintf("kubernetes.default.svc.%s", framework.TestContext.ClusterDNSDomain),
 		}
 		// Added due to #8512. This is critical for GCE and GKE deployments.
 		if framework.ProviderIs("gce", "gke") {
 			namesToResolve = append(namesToResolve, "google.com")
 			namesToResolve = append(namesToResolve, "metadata")
 		}
-		hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", dnsTestPodHostName, dnsTestServiceName, f.Namespace.Name)
+		hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", dnsTestPodHostName, dnsTestServiceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
 		hostEntries := []string{hostFQDN, dnsTestPodHostName}
-		wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name)
-		jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name)
+		wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
+		jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
 		By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
 		By("Running these commands on jessie: " + jessieProbeCmd + "\n")
 
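All of the substitutions in this file follow the standard in-cluster DNS schema, so only the trailing domain changes. A hedged helper sketch of the two record shapes exercised above (hypothetical helper names and example values, not framework code):

    package main

    import "fmt"

    // svcFQDN builds the record name for a Service:
    // <service>.<namespace>.svc.<cluster-domain>
    func svcFQDN(service, namespace, domain string) string {
        return fmt.Sprintf("%s.%s.svc.%s", service, namespace, domain)
    }

    // podInSvcFQDN builds the per-hostname record for a Pod behind a
    // headless Service: <hostname>.<service>.<namespace>.svc.<cluster-domain>
    func podInSvcFQDN(hostname, service, namespace, domain string) string {
        return fmt.Sprintf("%s.%s.%s.svc.%s", hostname, service, namespace, domain)
    }

    func main() {
        fmt.Println(svcFQDN("kubernetes", "default", "cluster.local"))
        fmt.Println(podInSvcFQDN("dns-querier-1", "dns-test-service", "e2e-tests-dns-x", "example.org"))
    }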
@@ -91,6 +91,7 @@ var _ = SIGDescribe("DNS", func() {
 		regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
 		regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService)
+		Expect(err).NotTo(HaveOccurred(), "failed to create regular service: %s", regularServiceName)
 
 		defer func() {
 			By("deleting the test service")
 			defer GinkgoRecover()
@@ -108,8 +109,8 @@ var _ = SIGDescribe("DNS", func() {
 			fmt.Sprintf("_http._tcp.%s.%s.svc", regularService.Name, f.Namespace.Name),
 		}
 
-		wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name)
-		jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name)
+		wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
+		jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
 		By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
 		By("Running these commands on jessie: " + jessieProbeCmd + "\n")
 
@@ -132,17 +133,18 @@ var _ = SIGDescribe("DNS", func() {
 		headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
 		_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
+		Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", serviceName)
 
 		defer func() {
 			By("deleting the test headless service")
 			defer GinkgoRecover()
 			f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
 		}()
 
-		hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", podHostname, serviceName, f.Namespace.Name)
+		hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
 		hostNames := []string{hostFQDN, podHostname}
 		namesToResolve := []string{hostFQDN}
-		wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostNames, "", "wheezy", f.Namespace.Name)
-		jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostNames, "", "jessie", f.Namespace.Name)
+		wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostNames, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
+		jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostNames, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
 		By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
 		By("Running these commands on jessie: " + jessieProbeCmd + "\n")
 
@@ -163,13 +165,13 @@ var _ = SIGDescribe("DNS", func() {
 		externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil)
 		_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService)
 		Expect(err).NotTo(HaveOccurred(), "failed to create ExternalName service: %s", serviceName)
 
 		defer func() {
 			By("deleting the test externalName service")
 			defer GinkgoRecover()
 			f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil)
 		}()
 
-		hostFQDN := fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, f.Namespace.Name)
+		hostFQDN := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
 		wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
 		jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
 		By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
@@ -444,7 +444,7 @@ func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd, podHostName, servic
 	return dnsPod
 }
 
-func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookupIP string, fileNamePrefix, namespace string) (string, []string) {
+func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookupIP string, fileNamePrefix, namespace, dnsDomain string) (string, []string) {
 	fileNames := make([]string, 0, len(namesToResolve)*2)
 	probeCmd := "for i in `seq 1 600`; do "
 	for _, name := range namesToResolve {
@@ -471,7 +471,7 @@ func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookup
 
 	podARecByUDPFileName := fmt.Sprintf("%s_udp@PodARecord", fileNamePrefix)
 	podARecByTCPFileName := fmt.Sprintf("%s_tcp@PodARecord", fileNamePrefix)
-	probeCmd += fmt.Sprintf(`podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".%s.pod.cluster.local"}');`, namespace)
+	probeCmd += fmt.Sprintf(`podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".%s.pod.%s"}');`, namespace, dnsDomain)
 	probeCmd += fmt.Sprintf(`check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/%s;`, podARecByUDPFileName)
 	probeCmd += fmt.Sprintf(`check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/%s;`, podARecByTCPFileName)
 	fileNames = append(fileNames, podARecByUDPFileName)
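The pod A record probed here is derived from the pod's own IP: the awk pipeline rewrites a.b.c.d into a-b-c-d and appends <namespace>.pod.<cluster-domain>. A standalone sketch of the same transformation in Go (the test itself does this in-container with hostname -i and awk; names and values are illustrative):

    package main

    import (
        "fmt"
        "strings"
    )

    // podARecord maps a pod IP such as 10.244.1.7 in namespace "ns" to
    // 10-244-1-7.ns.pod.<dnsDomain>, mirroring the shell snippet above.
    func podARecord(podIP, namespace, dnsDomain string) string {
        dashed := strings.ReplaceAll(podIP, ".", "-")
        return fmt.Sprintf("%s.%s.pod.%s", dashed, namespace, dnsDomain)
    }

    func main() {
        fmt.Println(podARecord("10.244.1.7", "e2e-tests-dns-x", "cluster.local"))
        // Output: 10-244-1-7.e2e-tests-dns-x.pod.cluster.local
    }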
@@ -60,52 +60,52 @@ func (t *dnsFederationsConfigMapTest) run() {
 	originalConfigMapData := t.fetchDNSConfigMapData()
 	defer t.restoreDNSConfigMap(originalConfigMapData)
 
-	t.validate()
+	t.validate(framework.TestContext.ClusterDNSDomain)
 
 	if t.name == "coredns" {
 		t.labels = []string{"abc", "ghi"}
 		valid1 := map[string]string{
-			"Corefile": `.:53 {
-        kubernetes cluster.local in-addr.arpa ip6.arpa {
+			"Corefile": fmt.Sprintf(`.:53 {
+        kubernetes %v in-addr.arpa ip6.arpa {
           pods insecure
           upstream
           fallthrough in-addr.arpa ip6.arpa
        }
-       federation cluster.local {
+       federation %v {
          abc def.com
        }
        proxy . /etc/resolv.conf
-    }`}
+    }`, framework.TestContext.ClusterDNSDomain, framework.TestContext.ClusterDNSDomain)}
 		valid1m := map[string]string{t.labels[0]: "def.com"}
 
 		valid2 := map[string]string{
-			"Corefile": `:53 {
-        kubernetes cluster.local in-addr.arpa ip6.arpa {
+			"Corefile": fmt.Sprintf(`:53 {
+        kubernetes %v in-addr.arpa ip6.arpa {
           pods insecure
           upstream
          fallthrough in-addr.arpa ip6.arpa
        }
-       federation cluster.local {
+       federation %v {
          ghi xyz.com
        }
        proxy . /etc/resolv.conf
-    }`}
+    }`, framework.TestContext.ClusterDNSDomain, framework.TestContext.ClusterDNSDomain)}
 		valid2m := map[string]string{t.labels[1]: "xyz.com"}
 
 		By("default -> valid1")
 		t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
 		t.deleteCoreDNSPods()
-		t.validate()
+		t.validate(framework.TestContext.ClusterDNSDomain)
 
 		By("valid1 -> valid2")
 		t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
 		t.deleteCoreDNSPods()
-		t.validate()
+		t.validate(framework.TestContext.ClusterDNSDomain)
 
 		By("valid2 -> default")
 		t.setConfigMap(&v1.ConfigMap{Data: originalConfigMapData}, nil, false)
 		t.deleteCoreDNSPods()
-		t.validate()
+		t.validate(framework.TestContext.ClusterDNSDomain)
 
 		t.restoreDNSConfigMap(originalConfigMapData)
 
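Because the Corefile is now rendered with fmt.Sprintf, both the kubernetes and federation stanzas track whatever domain the suite was started with. A self-contained sketch of that rendering (the template mirrors valid1 above; the domain value is an example):

    package main

    import "fmt"

    const corefile = `.:53 {
        kubernetes %v in-addr.arpa ip6.arpa {
           pods insecure
           upstream
           fallthrough in-addr.arpa ip6.arpa
        }
        federation %v {
           abc def.com
        }
        proxy . /etc/resolv.conf
    }`

    func main() {
        // With --dns-domain=example.org both %v placeholders pick up the
        // custom domain, just as the test's fmt.Sprintf calls do.
        domain := "example.org"
        fmt.Println(fmt.Sprintf(corefile, domain, domain))
    }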
@@ -119,39 +119,39 @@ func (t *dnsFederationsConfigMapTest) run() {
 
 		By("empty -> valid1")
 		t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
-		t.validate()
+		t.validate(framework.TestContext.ClusterDNSDomain)
 
 		By("valid1 -> valid2")
 		t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
-		t.validate()
+		t.validate(framework.TestContext.ClusterDNSDomain)
 
 		By("valid2 -> invalid")
 		t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
-		t.validate()
+		t.validate(framework.TestContext.ClusterDNSDomain)
 
 		By("invalid -> valid1")
 		t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
-		t.validate()
+		t.validate(framework.TestContext.ClusterDNSDomain)
 
 		By("valid1 -> deleted")
 		t.deleteConfigMap()
-		t.validate()
+		t.validate(framework.TestContext.ClusterDNSDomain)
 
 		By("deleted -> invalid")
 		t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
-		t.validate()
+		t.validate(framework.TestContext.ClusterDNSDomain)
 	}
 }
 
-func (t *dnsFederationsConfigMapTest) validate() {
+func (t *dnsFederationsConfigMapTest) validate(dnsDomain string) {
 	federations := t.fedMap
 
 	if len(federations) == 0 {
 		By(fmt.Sprintf("Validating federation labels %v do not exist", t.labels))
 
 		for _, label := range t.labels {
-			var federationDNS = fmt.Sprintf("e2e-dns-configmap.%s.%s.svc.cluster.local.",
-				t.f.Namespace.Name, label)
+			var federationDNS = fmt.Sprintf("e2e-dns-configmap.%s.%s.svc.%s.",
+				t.f.Namespace.Name, label, framework.TestContext.ClusterDNSDomain)
 			predicate := func(actual []string) bool {
 				return len(actual) == 0
 			}
@@ -159,10 +159,10 @@ func (t *dnsFederationsConfigMapTest) validate() {
 		}
 	} else {
 		for label := range federations {
-			var federationDNS = fmt.Sprintf("%s.%s.%s.svc.cluster.local.",
-				t.utilService.ObjectMeta.Name, t.f.Namespace.Name, label)
-			var localDNS = fmt.Sprintf("%s.%s.svc.cluster.local.",
-				t.utilService.ObjectMeta.Name, t.f.Namespace.Name)
+			var federationDNS = fmt.Sprintf("%s.%s.%s.svc.%s.",
+				t.utilService.ObjectMeta.Name, t.f.Namespace.Name, label, framework.TestContext.ClusterDNSDomain)
+			var localDNS = fmt.Sprintf("%s.%s.svc.%s.",
+				t.utilService.ObjectMeta.Name, t.f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
 			if t.name == "coredns" {
 				localDNS = t.utilService.Spec.ClusterIP
 			}
@@ -228,7 +228,7 @@ func (t *dnsNameserverTest) run(isIPv6 bool) {
 	if t.name == "coredns" {
 		t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
 			"Corefile": fmt.Sprintf(`.:53 {
-        kubernetes cluster.local in-addr.arpa ip6.arpa {
+        kubernetes %v in-addr.arpa ip6.arpa {
           pods insecure
           upstream
           fallthrough in-addr.arpa ip6.arpa
@@ -237,7 +237,7 @@ func (t *dnsNameserverTest) run(isIPv6 bool) {
        }
        acme.local:53 {
          proxy . %v
-      }`, t.dnsServerPod.Status.PodIP, t.dnsServerPod.Status.PodIP),
+      }`, framework.TestContext.ClusterDNSDomain, t.dnsServerPod.Status.PodIP, t.dnsServerPod.Status.PodIP),
 		}})
 
 		t.deleteCoreDNSPods()
@@ -325,13 +325,13 @@ func (t *dnsPtrFwdTest) run(isIPv6 bool) {
 	if t.name == "coredns" {
 		t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
 			"Corefile": fmt.Sprintf(`.:53 {
-        kubernetes cluster.local in-addr.arpa ip6.arpa {
+        kubernetes %v in-addr.arpa ip6.arpa {
           pods insecure
           upstream
           fallthrough in-addr.arpa ip6.arpa
        }
        proxy . %v
-    }`, t.dnsServerPod.Status.PodIP),
+    }`, framework.TestContext.ClusterDNSDomain, t.dnsServerPod.Status.PodIP),
 		}})
 
 		t.deleteCoreDNSPods()
@@ -415,7 +415,7 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
 
 	if isIPv6 {
 		t.checkDNSRecordFrom(
-			fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, f.Namespace.Name),
+			fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain),
 			func(actual []string) bool {
 				return len(actual) >= 1 && actual[0] == googleDnsHostname+"."
 			},
@@ -423,7 +423,7 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
 			moreForeverTestTimeout)
 	} else {
 		t.checkDNSRecordFrom(
-			fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, f.Namespace.Name),
+			fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain),
 			func(actual []string) bool {
 				return len(actual) >= 1 && actual[0] == googleDnsHostname+"."
 			},
@@ -434,13 +434,13 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
 	if t.name == "coredns" {
 		t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
 			"Corefile": fmt.Sprintf(`.:53 {
-        kubernetes cluster.local in-addr.arpa ip6.arpa {
+        kubernetes %v in-addr.arpa ip6.arpa {
           pods insecure
           upstream
           fallthrough in-addr.arpa ip6.arpa
        }
        proxy . %v
-    }`, t.dnsServerPod.Status.PodIP),
+    }`, framework.TestContext.ClusterDNSDomain, t.dnsServerPod.Status.PodIP),
 		}})
 
 		t.deleteCoreDNSPods()
@@ -451,7 +451,7 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
 	}
 	if isIPv6 {
 		t.checkDNSRecordFrom(
-			fmt.Sprintf("%s.%s.svc.cluster.local", serviceNameLocal, f.Namespace.Name),
+			fmt.Sprintf("%s.%s.svc.%s", serviceNameLocal, f.Namespace.Name, framework.TestContext.ClusterDNSDomain),
 			func(actual []string) bool {
 				return len(actual) >= 1 && actual[0] == fooHostname+"." && actual[1] == "2001:db8::29"
 			},
@@ -459,7 +459,7 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
 			moreForeverTestTimeout)
 	} else {
 		t.checkDNSRecordFrom(
-			fmt.Sprintf("%s.%s.svc.cluster.local", serviceNameLocal, f.Namespace.Name),
+			fmt.Sprintf("%s.%s.svc.%s", serviceNameLocal, f.Namespace.Name, framework.TestContext.ClusterDNSDomain),
 			func(actual []string) bool {
 				return len(actual) == 2 && actual[0] == fooHostname+"." && actual[1] == "192.0.2.123"
 			},
@@ -83,7 +83,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
 			s := services[i]
 			svc, err := f.ClientSet.CoreV1().Services(s.Namespace).Get(s.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
-			qname := fmt.Sprintf("%v.%v.svc.cluster.local", s.Name, s.Namespace)
+			qname := fmt.Sprintf("%v.%v.svc.%v", s.Name, s.Namespace, framework.TestContext.ClusterDNSDomain)
 			framework.Logf("Querying %v expecting %v", qname, svc.Spec.ClusterIP)
 			dnsTest.checkDNSRecordFrom(
 				qname,
@@ -135,7 +135,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
 		_, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDns}, "ok", dnsReadyTimeout)
 		Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec")
 
-		updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, "dns-backend.development.svc.cluster.local", fmt.Sprintf("dns-backend.%s.svc.cluster.local", namespaces[0].Name))
+		updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, fmt.Sprintf("dns-backend.development.svc.%s", framework.TestContext.ClusterDNSDomain), fmt.Sprintf("dns-backend.%s.svc.%s", namespaces[0].Name, framework.TestContext.ClusterDNSDomain))
 
 		// create a pod in each namespace
 		for _, ns := range namespaces {
@@ -1274,7 +1274,7 @@ var _ = SIGDescribe("Services", func() {
 		By("Verifying pods for RC " + t.Name)
 		framework.ExpectNoError(framework.VerifyPods(t.Client, t.Namespace, t.Name, false, 1))
 
-		svcName := fmt.Sprintf("%v.%v.svc.cluster.local", serviceName, f.Namespace.Name)
+		svcName := fmt.Sprintf("%v.%v.svc.%v", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
 		By("Waiting for endpoints of Service with DNS name " + svcName)
 
 		execPodName := framework.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-", nil)
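For comparison, outside the framework the same assertion reduces to resolving the service FQDN against the cluster resolver. A hedged standard-library sketch (this is not the framework's exec-pod-based lookup; it only succeeds where the cluster zone is resolvable, e.g. from inside a pod, and the names are example values):

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // <service>.<namespace>.svc.<domain>, built the same way as svcName above.
        svcName := fmt.Sprintf("%v.%v.svc.%v", "my-service", "default", "cluster.local")
        addrs, err := net.LookupHost(svcName)
        if err != nil {
            fmt.Println("lookup failed:", err)
            return
        }
        fmt.Println("resolved", svcName, "->", addrs)
    }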