Merge pull request #26913 from bprashanth/petset_e2e
Automatic merge from submit-queue

Improve petset e2es. Mainly:

* scale petset to 0 and wait for the volume provisioner to delete pvs, so we don't leak them
* bake binaries into init[0] and cp into destination, instead of wgetting at runtime
* dump verbose debug output on failure

https://github.com/kubernetes/test-infra/pull/115, https://github.com/kubernetes/kubernetes/issues/26824
commit abc06c9d73
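The first bullet is the substance of the change: dynamically provisioned PVs are cluster-scoped, so deleting a petset (or its namespace) while pods still hold claims can strand volumes. A rough manual version of the check that the new `deleteAllPetSets` helper in the diff below automates, as a sketch only (the namespace name is a made-up example of a per-test namespace):

```
# Sketch: eyeball for leaked volumes after a petset e2e run.
# e2e-tests-petset-abcde is a hypothetical per-test namespace name.
kubectl get pvc --namespace=e2e-tests-petset-abcde   # claims created for the pets
kubectl get pv                                       # cluster-scoped; these should be
                                                     # reclaimed once the claims are gone
```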
@@ -37,6 +37,7 @@ import (
     "k8s.io/kubernetes/pkg/controller/petset"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/runtime"
+    "k8s.io/kubernetes/pkg/util/sets"
     "k8s.io/kubernetes/pkg/util/wait"
     utilyaml "k8s.io/kubernetes/pkg/util/yaml"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -49,9 +50,15 @@ const (
     zookeeperManifestPath   = "test/e2e/testing-manifests/petset/zookeeper"
     mysqlGaleraManifestPath = "test/e2e/testing-manifests/petset/mysql-galera"
     redisManifestPath       = "test/e2e/testing-manifests/petset/redis"
+    // Should the test restart petset clusters?
+    // TODO: enable when we've productionized bringup of pets in this e2e.
+    restartCluster = false
 )

-var _ = framework.KubeDescribe("PetSet", func() {
+// Time: 25m, slow by design.
+// GCE Quota requirements: 3 pds, one per pet manifest declared above.
+// GCE Api requirements: nodes and master need storage r/w permissions.
+var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() {
     f := framework.NewDefaultFramework("petset")
     var ns string
     var c *client.Client
@@ -81,13 +88,16 @@ var _ = framework.KubeDescribe("PetSet", func() {
         Expect(err).NotTo(HaveOccurred())
     })

+    AfterEach(func() {
+        if CurrentGinkgoTestDescription().Failed {
+            dumpDebugInfo(c, ns)
+        }
+        framework.Logf("Deleting all petset in ns %v", ns)
+        deleteAllPetSets(c, ns)
+    })
+
     It("should provide basic identity [Feature:PetSet]", func() {
         By("creating petset " + psName + " in namespace " + ns)
-        defer func() {
-            err := c.Apps().PetSets(ns).Delete(psName, nil)
-            Expect(err).NotTo(HaveOccurred())
-        }()
-
         petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
         podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
         ps := newPetSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
@@ -99,7 +109,7 @@ var _ = framework.KubeDescribe("PetSet", func() {
         By("Saturating pet set " + ps.Name)
         pst.saturate(ps)

-        cmd := "echo $(hostname) > /data/hostname"
+        cmd := "echo $(hostname) > /data/hostname; sync;"
         By("Running " + cmd + " in all pets")
         pst.execInPets(ps, cmd)

@@ -114,10 +124,6 @@ var _ = framework.KubeDescribe("PetSet", func() {

     It("should handle healthy pet restarts during scale [Feature:PetSet]", func() {
         By("creating petset " + psName + " in namespace " + ns)
-        defer func() {
-            err := c.Apps().PetSets(ns).Delete(psName, nil)
-            Expect(err).NotTo(HaveOccurred())
-        }()

         petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
         podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
@@ -126,6 +132,7 @@ var _ = framework.KubeDescribe("PetSet", func() {
         Expect(err).NotTo(HaveOccurred())

         pst := petSetTester{c: c}
+
         pst.waitForRunning(1, ps)

         By("Marking pet at index 0 as healthy.")
@@ -152,17 +159,17 @@ var _ = framework.KubeDescribe("PetSet", func() {
         })
     })

-    framework.KubeDescribe("Deploy clustered applications", func() {
+    framework.KubeDescribe("Deploy clustered applications [Slow] [Feature:PetSet]", func() {
         BeforeEach(func() {
             framework.SkipUnlessProviderIs("gce")
         })

         AfterEach(func() {
-            // TODO: delete pvs
-            if !CurrentGinkgoTestDescription().Failed {
-                return
+            if CurrentGinkgoTestDescription().Failed {
+                dumpDebugInfo(c, ns)
             }
-            dumpDebugInfo(c, ns)
+            framework.Logf("Deleting all petset in ns %v", ns)
+            deleteAllPetSets(c, ns)
         })

         It("should creating a working zookeeper cluster [Feature:PetSet]", func() {
@@ -174,9 +181,11 @@ var _ = framework.KubeDescribe("PetSet", func() {
             By("Creating foo:bar in member with index 0")
             pet.write(0, map[string]string{"foo": "bar"})

-            By("Restarting pet set " + ps.Name)
-            pst.restart(ps)
-            pst.waitForRunning(ps.Spec.Replicas, ps)
+            if restartCluster {
+                By("Restarting pet set " + ps.Name)
+                pst.restart(ps)
+                pst.waitForRunning(ps.Spec.Replicas, ps)
+            }

             By("Reading value under foo from member with index 2")
             if v := pet.read(2, "foo"); v != "bar" {
@@ -193,9 +202,11 @@ var _ = framework.KubeDescribe("PetSet", func() {
             By("Creating foo:bar in member with index 0")
             pet.write(0, map[string]string{"foo": "bar"})

-            By("Restarting pet set " + ps.Name)
-            pst.restart(ps)
-            pst.waitForRunning(ps.Spec.Replicas, ps)
+            if restartCluster {
+                By("Restarting pet set " + ps.Name)
+                pst.restart(ps)
+                pst.waitForRunning(ps.Spec.Replicas, ps)
+            }

             By("Reading value under foo from member with index 2")
             if v := pet.read(2, "foo"); v != "bar" {
@@ -212,9 +223,11 @@ var _ = framework.KubeDescribe("PetSet", func() {
             By("Creating foo:bar in member with index 0")
             pet.write(0, map[string]string{"foo": "bar"})

-            By("Restarting pet set " + ps.Name)
-            pst.restart(ps)
-            pst.waitForRunning(ps.Spec.Replicas, ps)
+            if restartCluster {
+                By("Restarting pet set " + ps.Name)
+                pst.restart(ps)
+                pst.waitForRunning(ps.Spec.Replicas, ps)
+            }

             By("Reading value under foo from member with index 2")
             if v := pet.read(2, "foo"); v != "bar" {
@@ -381,6 +394,7 @@ func petSetFromManifest(fileName, ns string) *apps.PetSet {
     return &ps
 }

+// petSetTester has all methods required to test a single petset.
 type petSetTester struct {
     c *client.Client
 }
@@ -429,30 +443,36 @@ func (p *petSetTester) deletePetAtIndex(index int, ps *apps.PetSet) {
     }
 }

-func (p *petSetTester) restart(ps *apps.PetSet) {
+func (p *petSetTester) scale(ps *apps.PetSet, count int) error {
     name := ps.Name
     ns := ps.Namespace
-    oldReplicas := ps.Spec.Replicas
-    p.update(ns, name, func(ps *apps.PetSet) { ps.Spec.Replicas = 0 })
+    p.update(ns, name, func(ps *apps.PetSet) { ps.Spec.Replicas = count })

     var petList *api.PodList
     pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
         petList = p.getPodList(ps)
-        if len(petList.Items) == 0 {
+        if len(petList.Items) == count {
             return true, nil
         }
         return false, nil
     })
     if pollErr != nil {
-        ts := []string{}
+        unhealthy := []string{}
         for _, pet := range petList.Items {
-            if pet.DeletionTimestamp != nil {
-                ts = append(ts, fmt.Sprintf("%v", pet.DeletionTimestamp.Time))
+            delTs, phase, readiness := pet.DeletionTimestamp, pet.Status.Phase, api.IsPodReady(&pet)
+            if delTs != nil || phase != api.PodRunning || !readiness {
+                unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", pet.Name, delTs, phase, readiness))
             }
         }
-        framework.Failf("Failed to scale petset down to 0, %d remaining pods with deletion timestamps: %v", len(petList.Items), ts)
+        return fmt.Errorf("Failed to scale petset to %d in %v. Remaining pods:\n%v", count, petsetTimeout, unhealthy)
     }
-    p.update(ns, name, func(ps *apps.PetSet) { ps.Spec.Replicas = oldReplicas })
+    return nil
+}
+
+func (p *petSetTester) restart(ps *apps.PetSet) {
+    oldReplicas := ps.Spec.Replicas
+    ExpectNoError(p.scale(ps, 0))
+    p.update(ps.Namespace, ps.Name, func(ps *apps.PetSet) { ps.Spec.Replicas = oldReplicas })
 }

 func (p *petSetTester) update(ns, name string, update func(ps *apps.PetSet)) {
@@ -542,6 +562,74 @@ func (p *petSetTester) setHealthy(ps *apps.PetSet) {
     }
 }

+func deleteAllPetSets(c *client.Client, ns string) {
+    pst := &petSetTester{c: c}
+    psList, err := c.Apps().PetSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
+    ExpectNoError(err)
+
+    // Scale down each petset, then delete it completely.
+    // Deleting a pvc without doing this will leak volumes, #25101.
+    errList := []string{}
+    for _, ps := range psList.Items {
+        framework.Logf("Scaling petset %v to 0", ps.Name)
+        if err := pst.scale(&ps, 0); err != nil {
+            errList = append(errList, fmt.Sprintf("%v", err))
+        }
+        framework.Logf("Deleting petset %v", ps.Name)
+        if err := c.Apps().PetSets(ps.Namespace).Delete(ps.Name, nil); err != nil {
+            errList = append(errList, fmt.Sprintf("%v", err))
+        }
+    }
+
+    // pvs are global, so we need to wait for the exact ones bound to the petset pvcs.
+    pvNames := sets.NewString()
+    // TODO: Don't assume all pvcs in the ns belong to a petset
+    pvcPollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
+        pvcList, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
+        if err != nil {
+            framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
+            return false, nil
+        }
+        for _, pvc := range pvcList.Items {
+            pvNames.Insert(pvc.Spec.VolumeName)
+            // TODO: Double check that there are no pods referencing the pvc
+            framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
+            if err := c.PersistentVolumeClaims(ns).Delete(pvc.Name); err != nil {
+                return false, nil
+            }
+        }
+        return true, nil
+    })
+    if pvcPollErr != nil {
+        errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion."))
+    }
+
+    pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
+        pvList, err := c.PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()})
+        if err != nil {
+            framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
+            return false, nil
+        }
+        waitingFor := []string{}
+        for _, pv := range pvList.Items {
+            if pvNames.Has(pv.Name) {
+                waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status))
+            }
+        }
+        if len(waitingFor) == 0 {
+            return true, nil
+        }
+        framework.Logf("Still waiting for pvs of petset to disappear:\n%v", strings.Join(waitingFor, "\n"))
+        return false, nil
+    })
+    if pollErr != nil {
+        errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
+    }
+    if len(errList) != 0 {
+        ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
+    }
+}
+
 func ExpectNoError(err error) {
     Expect(err).NotTo(HaveOccurred())
 }
@@ -57,6 +57,7 @@ spec:
           }
       ]'
     spec:
+      terminationGracePeriodSeconds: 0
      containers:
      - name: mysql
        image: gcr.io/google_containers/mysql-galera:e2e
@@ -73,10 +74,12 @@ spec:
        - --defaults-file=/etc/mysql/my-galera.cnf
        - --user=root
        readinessProbe:
-          # TODO: If docker exec is buggy just use nc mysql-id 3306 as a ping.
-          httpGet:
-            path: /healthz
-            port: 8080
+          # TODO: If docker exec is buggy just use gcr.io/google_containers/mysql-healthz:1.0
+          exec:
+            command:
+            - sh
+            - -c
+            - "mysql -u root -e 'show databases;'"
          initialDelaySeconds: 15
          timeoutSeconds: 5
          successThreshold: 2
@@ -85,11 +88,6 @@ spec:
          mountPath: /var/lib/
        - name: config
          mountPath: /etc/mysql
-      - name: healthz
-        image: gcr.io/google_containers/mysql-healthz:1.0
-        args:
-        - --port=8080
-        - --verbose=true
      volumes:
      - name: config
        emptyDir: {}
@@ -104,4 +102,4 @@ spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
-          storage: 10Gi
+          storage: 1Gi
@@ -14,9 +14,9 @@ spec:
        pod.alpha.kubernetes.io/init-containers: '[
            {
                "name": "install",
-                "image": "gcr.io/google_containers/redis-install:0.1",
+                "image": "gcr.io/google_containers/redis-install-3.2.0:e2e",
                "imagePullPolicy": "Always",
-                "args": ["--version=3.2.0", "--install-into=/opt", "--work-dir=/work-dir"],
+                "args": ["--install-into=/opt", "--work-dir=/work-dir"],
                "volumeMounts": [
                    {
                        "name": "opt",
@@ -57,6 +57,7 @@ spec:
          }
      ]'
    spec:
+      terminationGracePeriodSeconds: 0
      containers:
      - name: redis
        image: debian:jessie
@@ -94,4 +95,4 @@ spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
-          storage: 10Gi
+          storage: 1Gi
@@ -14,9 +14,9 @@ spec:
        pod.alpha.kubernetes.io/init-containers: '[
            {
                "name": "install",
-                "image": "gcr.io/google_containers/zookeeper-install:0.1",
+                "image": "gcr.io/google_containers/zookeeper-install-3.5.0-alpha:e2e",
                "imagePullPolicy": "Always",
-                "args": ["--version=3.5.0-alpha", "--install-into=/opt", "--work-dir=/work-dir"],
+                "args": ["--install-into=/opt", "--work-dir=/work-dir"],
                "volumeMounts": [
                    {
                        "name": "opt",
@@ -47,7 +47,7 @@ spec:
                "volumeMounts": [
                    {
                        "name": "opt",
-                        "mountPath": "/opt/"
+                        "mountPath": "/opt"
                    },
                    {
                        "name": "workdir",
@@ -61,6 +61,7 @@ spec:
          }
      ]'
    spec:
+      terminationGracePeriodSeconds: 0
      containers:
      - name: zk
        image: java:openjdk-8-jre
@@ -85,7 +86,10 @@ spec:
        - name: datadir
          mountPath: /tmp/zookeeper
        - name: opt
-          mountPath: /opt/
+          mountPath: /opt
+        # Mount the work-dir just for debugging
+        - name: workdir
+          mountPath: /work-dir
      volumes:
      - name: opt
        emptyDir: {}
@@ -100,4 +104,4 @@ spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
-          storage: 10Gi
+          storage: 1Gi
@@ -0,0 +1,39 @@
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: get rid of bash dependency and switch to plain busybox.
+# The tar in busybox also doesn't seem to understand compression.
+FROM debian:jessie
+MAINTAINER Prashanth.B <beeps@google.com>
+
+# TODO: just use standard redis when there is one for 3.2.0.
+RUN apt-get update && apt-get install -y wget make gcc
+
+# See README.md
+RUN wget -qO /redis-3.2.0.tar.gz http://download.redis.io/releases/redis-3.2.0.tar.gz && \
+    tar -xzf /redis-3.2.0.tar.gz -C /tmp/ && rm /redis-3.2.0.tar.gz
+
+# Clean out existing deps before installation
+# see https://github.com/antirez/redis/issues/722
+RUN cd /tmp/redis-3.2.0 && make distclean && mkdir -p /redis && \
+    make install INSTALL_BIN=/redis && \
+    mv /tmp/redis-3.2.0/redis.conf /redis/redis.conf && \
+    rm -rf /tmp/redis-3.2.0
+
+ADD on-start.sh /
+# See contrib/pets/peer-finder for details
+RUN wget -qO /peer-finder https://storage.googleapis.com/kubernetes-release/pets/peer-finder
+ADD install.sh /
+RUN chmod -c 755 /install.sh /on-start.sh /peer-finder
+Entrypoint ["/install.sh"]
@@ -0,0 +1,27 @@
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+all: push
+
+TAG = e2e
+PREFIX = gcr.io/google_containers/redis-install-3.2.0
+
+container:
+	docker build -t $(PREFIX):$(TAG) .
+
+push: container
+	gcloud docker push $(PREFIX):$(TAG)
+
+clean:
+	docker rmi $(PREFIX):$(TAG)
@@ -0,0 +1,12 @@
+# Redis petset e2e tester
+
+The image in this directory is the init container for contrib/pets/redis, with one difference: it bakes a specific version of redis into the base image so we get deterministic test results without having to depend on a redis download server. Discussing the tradeoffs of either approach (download the version at runtime, or maintain an image per version) is outside the scope of this document.
+
+You can execute the image locally via:
+```
+$ docker run -it gcr.io/google_containers/redis-install-3.2.0:e2e --cmd --install-into=/opt --work-dir=/work-dir
+```
+To share the installation with other containers mount the appropriate volumes as `--install-into` and `--work-dir`, where `install-into` is the directory to install redis into, and `work-dir` is the directory to install the user/admin supplied on-{start,change} hook scripts.
+
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/test/images/pets/redis/README.md?pixel)]()
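As a concrete follow-on to the README's note about sharing the installation, a sketch of running the image with host directories mounted at the two volume paths; the `/tmp` host paths are invented for illustration and are not part of the commit:

```
# Sketch only: mount host dirs at the volume paths, then inspect the results.
docker run -it \
  -v /tmp/opt:/opt -v /tmp/work-dir:/work-dir \
  gcr.io/google_containers/redis-install-3.2.0:e2e \
  --install-into=/opt --work-dir=/work-dir
ls /tmp/opt/redis    # redis binaries and redis.conf left by install.sh
ls /tmp/work-dir     # peer-finder and the on-start.sh hook
```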
@@ -0,0 +1,51 @@
+#! /bin/bash
+
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This volume is assumed to exist and is shared with parent of the init
+# container. It contains the redis installation.
+INSTALL_VOLUME="/opt"
+
+# This volume is assumed to exist and is shared with the peer-finder
+# init container. It contains on-start/change configuration scripts.
+WORK_DIR="/work-dir"
+
+VERSION="3.2.0"
+
+for i in "$@"
+do
+case $i in
+    -i=*|--install-into=*)
+    INSTALL_VOLUME="${i#*=}"
+    shift
+    ;;
+    -w=*|--work-dir=*)
+    WORK_DIR="${i#*=}"
+    shift
+    ;;
+    *)
+    # unknown option
+    ;;
+esac
+done
+
+echo installing config scripts into "${WORK_DIR}"
+mkdir -p "${WORK_DIR}"
+cp /on-start.sh "${WORK_DIR}"/
+cp /peer-finder "${WORK_DIR}"/
+
+echo installing redis-"${VERSION}" into "${INSTALL_VOLUME}"
+mkdir -p "${INSTALL_VOLUME}"
+mv /redis "${INSTALL_VOLUME}"/redis
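A small note on the flag parsing in install.sh above: `${i#*=}` strips the shortest prefix matching `*=`, leaving just the flag's value. A minimal sketch of the same expansion:

```
# ${i#*=} removes everything up to and including the first '=':
i="--install-into=/opt"
echo "${i#*=}"    # prints: /opt
```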
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+CFG=/opt/redis/redis.conf
+HOSTNAME=$(hostname)
+DATADIR="/data"
+# Port on which redis listens for connections.
+PORT=6379
+
+# Ping everyone but ourself to see if there's a master. Only one pet starts at
+# a time, so if we don't see a master we can assume the position is ours.
+while read -ra LINE; do
+  if [[ "${LINE}" == *"${HOSTNAME}"* ]]; then
+    sed -i -e "s|^bind.*$|bind ${LINE}|" ${CFG}
+  elif [ "$(/opt/redis/redis-cli -h $LINE info | grep role | sed 's,\r$,,')" = "role:master" ]; then
+    # TODO: More restrictive regex?
+    sed -i -e "s|^.*slaveof.*$|slaveof ${LINE} ${PORT}|" ${CFG}
+  fi
+done
+
+# Set the data directory for append only log and snapshot files. This should
+# be a persistent volume for consistency.
+sed -i -e "s|^.*dir .*$|dir ${DATADIR}|" ${CFG}
+
+# The append only log is written for every SET operation. Without this setting,
+# redis just snapshots periodically which is only safe for a cache. This will
+# produce an appendonly.aof file in the configured data dir.
+sed -i -e "s|^appendonly .*$|appendonly yes|" ${CFG}
+
+# Every write triggers an fsync. Recommended default is "everysec", which
+# is only safe for AP applications.
+sed -i -e "s|^appendfsync .*$|appendfsync always|" ${CFG}
+
+
@@ -0,0 +1,32 @@
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: get rid of bash dependency and switch to plain busybox.
+# The tar in busybox also doesn't seem to understand compression.
+FROM debian:jessie
+MAINTAINER Prashanth.B <beeps@google.com>
+
+RUN apt-get update && apt-get install -y wget netcat
+
+ADD on-start.sh /
+# See contrib/pets/peer-finder for details
+RUN wget -qO /peer-finder https://storage.googleapis.com/kubernetes-release/pets/peer-finder
+
+# See README.md
+RUN wget -q -O /zookeeper-3.5.0-alpha.tar.gz http://apache.mirrors.pair.com/zookeeper/zookeeper-3.5.0-alpha/zookeeper-3.5.0-alpha.tar.gz && \
+    tar -xzf /zookeeper-3.5.0-alpha.tar.gz -C /tmp/ && mv /tmp/zookeeper-3.5.0-alpha /zookeeper && rm /zookeeper-3.5.0-alpha.tar.gz
+
+ADD install.sh /
+RUN chmod -c 755 /install.sh /on-start.sh /peer-finder
+Entrypoint ["/install.sh"]
@@ -0,0 +1,27 @@
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+all: push
+
+TAG = e2e
+PREFIX = gcr.io/google_containers/zookeeper-install-3.5.0-alpha
+
+container:
+	docker build -t $(PREFIX):$(TAG) .
+
+push: container
+	gcloud docker push $(PREFIX):$(TAG)
+
+clean:
+	docker rmi $(PREFIX):$(TAG)
@@ -0,0 +1,12 @@
+# Zookeeper petset e2e tester
+
+The image in this directory is the init container for contrib/pets/zookeeper, with one difference: it bakes a specific version of zookeeper into the base image so we get deterministic test results without having to depend on a zookeeper download server. Discussing the tradeoffs of either approach (download the version at runtime, or maintain an image per version) is outside the scope of this document.
+
+You can execute the image locally via:
+```
+$ docker run -it gcr.io/google_containers/zookeeper-install-3.5.0-alpha:e2e --cmd --install-into=/opt --work-dir=/work-dir
+```
+To share the installation with other containers mount the appropriate volumes as `--install-into` and `--work-dir`, where `install-into` is the directory to install zookeeper into, and `work-dir` is the directory to install the user/admin supplied on-{start,change} hook scripts.
+
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/test/images/pets/zookeeper/README.md?pixel)]()
@@ -0,0 +1,70 @@
+#! /bin/bash
+
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This volume is assumed to exist and is shared with parent of the init
+# container. It contains the zookeeper installation.
+INSTALL_VOLUME="/opt"
+
+# This volume is assumed to exist and is shared with the peer-finder
+# init container. It contains on-start/change configuration scripts.
+WORKDIR_VOLUME="/work-dir"
+
+# As of April 2016, 3.4.8 is the latest stable, but versions 3.5.0 onward
+# allow dynamic reconfiguration.
+VERSION="3.5.0-alpha"
+
+for i in "$@"
+do
+case $i in
+    -i=*|--install-into=*)
+    INSTALL_VOLUME="${i#*=}"
+    shift
+    ;;
+    -w=*|--work-dir=*)
+    WORKDIR_VOLUME="${i#*=}"
+    shift
+    ;;
+    *)
+    # unknown option
+    ;;
+esac
+done
+
+echo installing config scripts into "${WORKDIR_VOLUME}"
+mkdir -p "${WORKDIR_VOLUME}"
+cp /on-start.sh "${WORKDIR_VOLUME}"/
+cp /peer-finder "${WORKDIR_VOLUME}"/
+
+echo installing zookeeper-"${VERSION}" into "${INSTALL_VOLUME}"
+mkdir -p "${INSTALL_VOLUME}"
+mv /zookeeper "${INSTALL_VOLUME}"/zookeeper
+cp "${INSTALL_VOLUME}"/zookeeper/conf/zoo_sample.cfg "${INSTALL_VOLUME}"/zookeeper/conf/zoo.cfg
+
+# TODO: Should dynamic config be tied to the version?
+IFS="." read -ra RELEASE <<< "${VERSION}"
+if [ $(expr "${RELEASE[1]}") -gt 4 ]; then
+  echo zookeeper-"${VERSION}" supports dynamic reconfiguration, enabling it
+  echo "standaloneEnabled=false" >> "${INSTALL_VOLUME}"/zookeeper/conf/zoo.cfg
+  echo "dynamicConfigFile="${INSTALL_VOLUME}"/zookeeper/conf/zoo.cfg.dynamic" >> "${INSTALL_VOLUME}"/zookeeper/conf/zoo.cfg
+fi
+
+# TODO: This is a hack, netcat is convenient to have in the zookeeper container
+# I want to avoid using a custom zookeeper image just for this. So copy it.
+NC=$(which nc)
+if [ "${NC}" != "" ]; then
+  echo copying nc into "${INSTALL_VOLUME}"
+  cp "${NC}" "${INSTALL_VOLUME}"
+fi
@@ -0,0 +1,106 @@
+#! /bin/bash
+
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+# This script configures zookeeper cluster membership for versions of zookeeper
+# >= 3.5.0. It should not be used with the on-change.sh script in this example.
+# As of April 2016, 3.4.8 is the latest stable.
+
+# Both /opt and /tmp/zookeeper are assumed to be volumes shared with the parent.
+# The format of each line in the dynamic config file is:
+# server.<1 based index>=<server-dns-name>:<peer port>:<election port>[:role];[<client port address>:]<client port>
+# <1 based index> is the server index that matches the id in datadir/myid
+# <peer port> is the port on which peers communicate to agree on updates
+# <election port> is the port used for leader election
+# [:role] can be set to observer, participant by default
+# <client port address> is optional and defaults to 0.0.0.0
+# <client port> is the port on which the server accepts client connections
+
+CFG=/opt/zookeeper/conf/zoo.cfg.dynamic
+CFG_BAK=/opt/zookeeper/conf/zoo.cfg.bak
+MY_ID_FILE=/tmp/zookeeper/myid
+HOSTNAME=$(hostname)
+
+while read -ra LINE; do
+  PEERS=("${PEERS[@]}" $LINE)
+done
+
+# Don't add the first member as an observer
+if [ ${#PEERS[@]} -eq 1 ]; then
+  # We need to write our index in this list of servers into MY_ID_FILE.
+  # Note that this may not always coincide with the hostname id.
+  echo 1 > "${MY_ID_FILE}"
+  echo "server.1=${PEERS[0]}:2888:3888;2181" > "${CFG}"
+  # TODO: zkServer-initialize is the safe way to handle changes to datadir
+  # because simply starting will create a new datadir, BUT if the user changed
+  # pod template they might end up with 2 datadirs and brief split brain.
+  exit
+fi
+
+# Every subsequent member is added as an observer and promoted to a participant
+echo "" > "${CFG_BAK}"
+i=0
+LEADER=$HOSTNAME
+for peer in "${PEERS[@]}"; do
+  let i=i+1
+  if [[ "${peer}" == *"${HOSTNAME}"* ]]; then
+    MY_ID=$i
+    MY_NAME=${peer}
+    echo $i > "${MY_ID_FILE}"
+    echo "server.${i}=${peer}:2888:3888:observer;2181" >> "${CFG_BAK}"
+  else
+    if [[ $(echo srvr | /opt/nc "${peer}" 2181 | grep Mode) = "Mode: leader" ]]; then
+      LEADER="${peer}"
+    fi
+    echo "server.${i}=${peer}:2888:3888:participant;2181" >> "${CFG_BAK}"
+  fi
+done
+
+# zookeeper won't start without myid anyway.
+# This means our hostname wasn't in the peer list.
+if [ ! -f "${MY_ID_FILE}" ]; then
+  exit 1
+fi
+
+# Once the dynamic config file is written it shouldn't be modified, so the final
+# reconfigure needs to happen through the "reconfig" command.
+cp ${CFG_BAK} ${CFG}
+
+# TODO: zkServer-initialize is the safe way to handle changes to datadir
+# because simply starting will create a new datadir, BUT if the user changed
+# pod template they might end up with 2 datadirs and brief split brain.
+/opt/zookeeper/bin/zkServer.sh start
+
+# TODO: We shouldn't need to specify the address of the master as long as
+# there's quorum. According to the docs the new server is just not allowed to
+# vote, it's still allowed to propose config changes, and it knows the
+# existing members of the ensemble from *its* config.
+ADD_SERVER="server.$MY_ID=$MY_NAME:2888:3888:participant;0.0.0.0:2181"
+/opt/zookeeper/bin/zkCli.sh reconfig -s "${LEADER}":2181 -add "${ADD_SERVER}"
+
+# Prove that we've actually joined the running cluster
+ITERATION=0
+until $(echo config | /opt/nc localhost 2181 | grep "${ADD_SERVER}" > /dev/null); do
+  echo $ITERATION] waiting for updated config to sync back to localhost
+  sleep 1
+  let ITERATION=ITERATION+1
+  if [ $ITERATION -eq 20 ]; then
+    exit 1
+  fi
+done
+
+/opt/zookeeper/bin/zkServer.sh stop
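To make the dynamic-config format described at the top of on-start.sh concrete, here is a hypothetical `zoo.cfg.dynamic` as the script would assemble it on the third pet of a three-member ensemble. The hostnames are invented for illustration; only the line format comes from the script:

```
# Hypothetical /opt/zookeeper/conf/zoo.cfg.dynamic on pet index 3:
#   server.1=zk-0.zk.e2e.svc.cluster.local:2888:3888:participant;2181
#   server.2=zk-1.zk.e2e.svc.cluster.local:2888:3888:participant;2181
#   server.3=zk-2.zk.e2e.svc.cluster.local:2888:3888:observer;2181
# The third pet then starts zookeeper as an observer and promotes itself:
/opt/zookeeper/bin/zkCli.sh reconfig -s "${LEADER}":2181 \
  -add "server.3=zk-2.zk.e2e.svc.cluster.local:2888:3888:participant;0.0.0.0:2181"
```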