petset E2Es

Prashanth Balasubramanian 2016-05-08 16:18:15 -07:00
parent 8bfaec4b59
commit d57575762b
7 changed files with 709 additions and 82 deletions

test/e2e/petset.go

@@ -18,7 +18,10 @@ package e2e
import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	inf "gopkg.in/inf.v0"
@@ -32,123 +35,383 @@ import (
	"k8s.io/kubernetes/pkg/apis/apps"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/controller/petset"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/util/wait"
	utilyaml "k8s.io/kubernetes/pkg/util/yaml"
	"k8s.io/kubernetes/test/e2e/framework"
)

const (
	petsetPoll = 10 * time.Second
	// Some pets install base packages via wget
	petsetTimeout = 10 * time.Minute

	zookeeperManifestPath   = "test/e2e/testing-manifests/petset/zookeeper"
	mysqlGaleraManifestPath = "test/e2e/testing-manifests/petset/mysql-galera"
	redisManifestPath       = "test/e2e/testing-manifests/petset/redis"
)

var _ = framework.KubeDescribe("PetSet", func() {
f := framework.NewDefaultFramework("petset")
psName := "pet"
labels := map[string]string{
"foo": "bar",
"baz": "blah",
}
headlessSvcName := "test"
var ns string
var c *client.Client
BeforeEach(func() {
// PetSet is in alpha, so it's disabled on all platforms except GCE.
// In theory, tests that restart pets should pass on any platform with a
// dynamic volume provisioner.
framework.SkipUnlessProviderIs("gce")
var err error
c, err = framework.LoadClient()
Expect(err).NotTo(HaveOccurred())
ns = f.Namespace.Name
By("creating service " + headlessSvcName + " in namespace " + ns)
headlessService := createServiceSpec(headlessSvcName, true, labels)
_, err = c.Services(ns).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
})
It("provide basic identity [Feature:PetSet]", func() {
By("creating petset " + psName + " in namespace " + ns)
defer func() {
err := c.Apps().PetSets(ns).Delete(psName, nil)
framework.KubeDescribe("Basic PetSet functionality", func() {
psName := "pet"
labels := map[string]string{
"foo": "bar",
"baz": "blah",
}
headlessSvcName := "test"
BeforeEach(func() {
By("creating service " + headlessSvcName + " in namespace " + ns)
headlessService := createServiceSpec(headlessSvcName, true, labels)
_, err := c.Services(ns).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
}()
})
petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := newPetSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
_, err := c.Apps().PetSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())
It("should provide basic identity [Feature:PetSet]", func() {
By("creating petset " + psName + " in namespace " + ns)
defer func() {
err := c.Apps().PetSets(ns).Delete(psName, nil)
Expect(err).NotTo(HaveOccurred())
}()
pt := petTester{c: c}
petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := newPetSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
_, err := c.Apps().PetSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())
By("Saturating pet set " + ps.Name)
pt.saturate(ps)
pst := petSetTester{c: c}
cmd := "echo $(hostname) > /data/hostname"
By("Running " + cmd + " in all pets")
pt.execInPets(ps, cmd)
By("Saturating pet set " + ps.Name)
pst.saturate(ps)
By("Restarting pet set " + ps.Name)
pt.restart(ps)
pt.saturate(ps)
cmd := "echo $(hostname) > /data/hostname"
By("Running " + cmd + " in all pets")
pst.execInPets(ps, cmd)
cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi"
By("Running " + cmd + " in all pets")
pt.execInPets(ps, cmd)
By("Restarting pet set " + ps.Name)
pst.restart(ps)
pst.saturate(ps)
cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi"
By("Running " + cmd + " in all pets")
pst.execInPets(ps, cmd)
})
It("should handle healthy pet restarts during scale [Feature:PetSet]", func() {
By("creating petset " + psName + " in namespace " + ns)
defer func() {
err := c.Apps().PetSets(ns).Delete(psName, nil)
Expect(err).NotTo(HaveOccurred())
}()
petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := newPetSet(psName, ns, headlessSvcName, 2, petMounts, podMounts, labels)
_, err := c.Apps().PetSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())
pst := petSetTester{c: c}
pst.waitForRunning(1, ps)
By("Marking pet at index 0 as healthy.")
pst.setHealthy(ps)
By("Waiting for pet at index 1 to enter running.")
pst.waitForRunning(2, ps)
// Now we have 1 healthy and 1 unhealthy pet. Deleting the healthy pet should *not*
// create a new pet till the remaining pet becomes healthy, which won't happen till
// we set the healthy bit.
By("Deleting healthy pet at index 0.")
pst.deletePetAtIndex(0, ps)
By("Confirming pet at index 0 is not recreated.")
pst.confirmPetCount(1, ps, 10*time.Second)
By("Deleting unhealthy pet at index 1.")
pst.deletePetAtIndex(1, ps)
By("Confirming all pets in petset are created.")
pst.saturate(ps)
})
})
It("should handle healthy pet restarts during scale [Feature:PetSet]", func() {
By("creating petset " + psName + " in namespace " + ns)
defer func() {
err := c.Apps().PetSets(ns).Delete(psName, nil)
Expect(err).NotTo(HaveOccurred())
}()
framework.KubeDescribe("Deploy clustered applications", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
})
petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := newPetSet(psName, ns, headlessSvcName, 2, petMounts, podMounts, labels)
_, err := c.Apps().PetSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())
AfterEach(func() {
// TODO: delete pvs
if !CurrentGinkgoTestDescription().Failed {
return
}
dumpDebugInfo(c, ns)
})
pt := petTester{c: c}
pt.waitForRunning(1, ps)
It("should creating a working zookeeper cluster [Feature:PetSet]", func() {
pst := &petSetTester{c: c}
pet := &zookeeperTester{tester: pst}
By("Deploying " + pet.name())
ps := pet.deploy(ns)
By("Marking pet at index 0 as healthy.")
pt.setHealthy(ps)
By("Creating foo:bar in member with index 0")
pet.write(0, map[string]string{"foo": "bar"})
By("Waiting for pet at index 1 to enter running.")
pt.waitForRunning(2, ps)
By("Restarting pet set " + ps.Name)
pst.restart(ps)
pst.waitForRunning(ps.Spec.Replicas, ps)
// Now we have 1 healthy and 1 unhealthy pet. Deleting the healthy pet should *not*
// create a new pet till the remaining pet becomes healthy, which won't happen till
// we set the healthy bit.
By("Reading value under foo from member with index 2")
if v := pet.read(2, "foo"); v != "bar" {
framework.Failf("Read unexpected value %v, expected bar under key foo", v)
}
})
By("Deleting healthy pet at index 0.")
pt.deletePetAtIndex(0, ps)
It("should creating a working redis cluster [Feature:PetSet]", func() {
pst := &petSetTester{c: c}
pet := &redisTester{tester: pst}
By("Deploying " + pet.name())
ps := pet.deploy(ns)
By("Confirming pet at index 0 is not recreated.")
pt.confirmPetCount(1, ps, 10*time.Second)
By("Creating foo:bar in member with index 0")
pet.write(0, map[string]string{"foo": "bar"})
By("Deleting unhealthy pet at index 1.")
pt.deletePetAtIndex(1, ps)
By("Restarting pet set " + ps.Name)
pst.restart(ps)
pst.waitForRunning(ps.Spec.Replicas, ps)
By("Confirming all pets in petset are created.")
pt.saturate(ps)
By("Reading value under foo from member with index 2")
if v := pet.read(2, "foo"); v != "bar" {
framework.Failf("Read unexpected value %v, expected bar under key foo", v)
}
})
It("should creating a working mysql cluster [Feature:PetSet]", func() {
pst := &petSetTester{c: c}
pet := &mysqlGaleraTester{tester: pst}
By("Deploying " + pet.name())
ps := pet.deploy(ns)
By("Creating foo:bar in member with index 0")
pet.write(0, map[string]string{"foo": "bar"})
By("Restarting pet set " + ps.Name)
pst.restart(ps)
pst.waitForRunning(ps.Spec.Replicas, ps)
By("Reading value under foo from member with index 2")
if v := pet.read(2, "foo"); v != "bar" {
framework.Failf("Read unexpected value %v, expected bar under key foo", v)
}
})
})
})
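
// dumpDebugInfo runs kubectl describe and dumps the last 100 log lines for
// every pod in the given namespace.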
func dumpDebugInfo(c *client.Client, ns string) {
	pl, _ := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
	for _, p := range pl.Items {
		desc, _ := framework.RunKubectl("describe", "po", p.Name, fmt.Sprintf("--namespace=%v", ns))
		framework.Logf("\nOutput of kubectl describe %v:\n%v", p.Name, desc)

		l, _ := framework.RunKubectl("logs", p.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100")
		framework.Logf("\nLast 100 log lines of %v:\n%v", p.Name, l)
	}
}
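
// kubectlExecWithRetries runs the given kubectl command up to 3 times,
// returning the first successful output and failing the test if every
// attempt errors.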
func kubectlExecWithRetries(args ...string) (out string) {
	var err error
	for i := 0; i < 3; i++ {
		if out, err = framework.RunKubectl(args...); err == nil {
			return
		}
		framework.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out)
	}
	framework.Failf("Failed to execute \"%v\" with retries: %v", args, err)
	return
}
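
// petTester is implemented by each clustered application under test
// (zookeeper, mysql galera, redis); the tests use it to deploy the app as a
// petset and to read and write values on specific members.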
type petTester interface {
	deploy(ns string) *apps.PetSet
	write(petIndex int, kv map[string]string)
	read(petIndex int, key string) string
	name() string
}

type zookeeperTester struct {
	ps     *apps.PetSet
	tester *petSetTester
}

func (z *zookeeperTester) name() string {
	return "zookeeper"
}

func (z *zookeeperTester) deploy(ns string) *apps.PetSet {
	z.ps = z.tester.createPetSet(zookeeperManifestPath, ns)
	return z.ps
}

func (z *zookeeperTester) write(petIndex int, kv map[string]string) {
	name := fmt.Sprintf("%v-%d", z.ps.Name, petIndex)
	ns := fmt.Sprintf("--namespace=%v", z.ps.Namespace)
	for k, v := range kv {
		cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v)
		framework.Logf(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd))
	}
}

func (z *zookeeperTester) read(petIndex int, key string) string {
	name := fmt.Sprintf("%v-%d", z.ps.Name, petIndex)
	ns := fmt.Sprintf("--namespace=%v", z.ps.Namespace)
	cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh get /%v", key)
	return lastLine(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd))
}

type mysqlGaleraTester struct {
	ps     *apps.PetSet
	tester *petSetTester
}

func (m *mysqlGaleraTester) name() string {
	return "mysql: galera"
}

func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string {
	cmd = fmt.Sprintf("/usr/bin/mysql -u root -B -e '%v'", cmd)
	// TODO: Find a readiness probe for mysql that guarantees writes will
	// succeed and ditch retries. Current probe only reads, so there's a window
	// for a race.
	return kubectlExecWithRetries(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
}

func (m *mysqlGaleraTester) deploy(ns string) *apps.PetSet {
	m.ps = m.tester.createPetSet(mysqlGaleraManifestPath, ns)

	framework.Logf("Deployed petset %v, initializing database", m.ps.Name)
	for _, cmd := range []string{
		"create database petset;",
		"use petset; create table pet (k varchar(20), v varchar(20));",
	} {
		framework.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ps.Name)))
	}
	return m.ps
}

func (m *mysqlGaleraTester) write(petIndex int, kv map[string]string) {
	name := fmt.Sprintf("%v-%d", m.ps.Name, petIndex)
	for k, v := range kv {
		cmd := fmt.Sprintf("use petset; insert into pet (k, v) values (\"%v\", \"%v\");", k, v)
		framework.Logf(m.mysqlExec(cmd, m.ps.Namespace, name))
	}
}

func (m *mysqlGaleraTester) read(petIndex int, key string) string {
	name := fmt.Sprintf("%v-%d", m.ps.Name, petIndex)
	return lastLine(m.mysqlExec(fmt.Sprintf("use petset; select v from pet where k=\"%v\";", key), m.ps.Namespace, name))
}

type redisTester struct {
	ps     *apps.PetSet
	tester *petSetTester
}

func (m *redisTester) name() string {
	return "redis: master/slave"
}

func (m *redisTester) redisExec(cmd, ns, podName string) string {
	cmd = fmt.Sprintf("/opt/redis/redis-cli -h %v %v", podName, cmd)
	return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
}

func (m *redisTester) deploy(ns string) *apps.PetSet {
	m.ps = m.tester.createPetSet(redisManifestPath, ns)
	return m.ps
}

func (m *redisTester) write(petIndex int, kv map[string]string) {
	name := fmt.Sprintf("%v-%d", m.ps.Name, petIndex)
	for k, v := range kv {
		framework.Logf(m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ps.Namespace, name))
	}
}

func (m *redisTester) read(petIndex int, key string) string {
	name := fmt.Sprintf("%v-%d", m.ps.Name, petIndex)
	return lastLine(m.redisExec(fmt.Sprintf("GET %v", key), m.ps.Namespace, name))
}
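
// lastLine returns the last line of the given output; the testers use it to
// pull the value of interest out of multi-line CLI output.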
func lastLine(out string) string {
	outLines := strings.Split(strings.Trim(out, "\n"), "\n")
	return outLines[len(outLines)-1]
}
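
// petSetFromManifest unmarshals a PetSet from the given manifest file,
// overrides its namespace with ns, and defaults the selector to the template
// labels when the manifest doesn't set one.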
func petSetFromManifest(fileName, ns string) *apps.PetSet {
	var ps apps.PetSet
	framework.Logf("Parsing petset from %v", fileName)
	data, err := ioutil.ReadFile(fileName)
	Expect(err).NotTo(HaveOccurred())
	json, err := utilyaml.ToJSON(data)
	Expect(err).NotTo(HaveOccurred())

	Expect(runtime.DecodeInto(api.Codecs.UniversalDecoder(), json, &ps)).NotTo(HaveOccurred())
	ps.Namespace = ns
	if ps.Spec.Selector == nil {
		ps.Spec.Selector = &unversioned.LabelSelector{
			MatchLabels: ps.Spec.Template.Labels,
		}
	}
	return &ps
}
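
// petSetTester wraps the client with helpers for creating petsets from
// manifests and driving them through saturation, restarts, scaling, and pet
// deletion.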
type petSetTester struct {
	c *client.Client
}

func (p *petSetTester) createPetSet(manifestPath, ns string) *apps.PetSet {
	mkpath := func(file string) string {
		return filepath.Join(framework.TestContext.RepoRoot, manifestPath, file)
	}
	ps := petSetFromManifest(mkpath("petset.yaml"), ns)

	framework.Logf(fmt.Sprintf("creating " + ps.Name + " service"))
	framework.RunKubectlOrDie("create", "-f", mkpath("service.yaml"), fmt.Sprintf("--namespace=%v", ns))

	framework.Logf(fmt.Sprintf("creating petset %v/%v with %d replicas and selector %+v", ps.Namespace, ps.Name, ps.Spec.Replicas, ps.Spec.Selector))
	framework.RunKubectlOrDie("create", "-f", mkpath("petset.yaml"), fmt.Sprintf("--namespace=%v", ns))

	p.waitForRunning(ps.Spec.Replicas, ps)
	return ps
}

func (p *petSetTester) execInPets(ps *apps.PetSet, cmd string) {
	podList := p.getPodList(ps)
	for _, pet := range podList.Items {
		stdout, err := framework.RunHostCmd(pet.Namespace, pet.Name, cmd)
		ExpectNoError(err)
		framework.Logf("stdout of %v on %v: %v", cmd, pet.Name, stdout)
	}
}

func (p *petSetTester) saturate(ps *apps.PetSet) {
	// TODO: Watch events and check that creation timestamps don't overlap
	for i := 0; i < ps.Spec.Replicas; i++ {
		framework.Logf("Waiting for pet at index " + fmt.Sprintf("%v", i+1) + " to enter Running")
@@ -158,7 +421,7 @@ func (p *petTester) saturate(ps *apps.PetSet) {
	}
}

func (p *petSetTester) deletePetAtIndex(index int, ps *apps.PetSet) {
	// TODO: we won't use "-index" as the name strategy forever,
	// pull the name out from an identity mapper.
	name := fmt.Sprintf("%v-%v", ps.Name, index)
@@ -168,7 +431,7 @@ func (p *petTester) deletePetAtIndex(index int, ps *apps.PetSet) {
	}
}

func (p *petSetTester) restart(ps *apps.PetSet) {
	name := ps.Name
	ns := ps.Namespace
	oldReplicas := ps.Spec.Replicas
@@ -194,7 +457,7 @@ func (p *petTester) restart(ps *apps.PetSet) {
	p.update(ns, name, func(ps *apps.PetSet) { ps.Spec.Replicas = oldReplicas })
}

func (p *petSetTester) update(ns, name string, update func(ps *apps.PetSet)) {
	for i := 0; i < 3; i++ {
		ps, err := p.c.Apps().PetSets(ns).Get(name)
		if err != nil {
@@ -212,7 +475,7 @@ func (p *petTester) update(ns, name string, update func(ps *apps.PetSet)) {
	framework.Failf("too many retries draining petset %q", name)
}

func (p *petSetTester) getPodList(ps *apps.PetSet) *api.PodList {
	selector, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector)
	ExpectNoError(err)
	podList, err := p.c.Pods(ps.Namespace).List(api.ListOptions{LabelSelector: selector})
@@ -220,11 +483,7 @@ func (p *petTester) getPodList(ps *apps.PetSet) *api.PodList {
	return podList
}

func (p *petSetTester) confirmPetCount(count int, ps *apps.PetSet, timeout time.Duration) {
	start := time.Now()
	deadline := start.Add(timeout)
	for t := time.Now(); t.Before(deadline); t = time.Now() {
@@ -238,7 +497,7 @@ func (p *petTester) confirmPetCount(count int, ps *apps.PetSet, timeout time.Duration) {
	}
}

func (p *petSetTester) waitForRunning(numPets int, ps *apps.PetSet) {
	pollErr := wait.PollImmediate(petsetPoll, petsetTimeout,
		func() (bool, error) {
			podList := p.getPodList(ps)
@@ -247,11 +506,12 @@ func (p *petTester) waitForRunning(numPets int, ps *apps.PetSet) {
				return false, nil
			}
			if len(podList.Items) > numPets {
				return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
			}
			for _, p := range podList.Items {
				isReady := api.IsPodReady(&p)
				if p.Status.Phase != api.PodRunning || !isReady {
					framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, api.PodRunning, p.Status.Phase, isReady)
					return false, nil
				}
			}
@@ -262,7 +522,7 @@ func (p *petTester) waitForRunning(numPets int, ps *apps.PetSet) {
	}
}

func (p *petSetTester) setHealthy(ps *apps.PetSet) {
	podList := p.getPodList(ps)
	markedHealthyPod := ""
	for _, pod := range podList.Items {
@@ -284,6 +544,10 @@ func (p *petTester) setHealthy(ps *apps.PetSet) {
	}
}
func ExpectNoError(err error) {
	Expect(err).NotTo(HaveOccurred())
}
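
// isInitialized reports whether the pod's PetSetInitAnnotation is set to a
// true value.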
func isInitialized(pod api.Pod) bool {
	initialized, ok := pod.Annotations[petset.PetSetInitAnnotation]
	if !ok {

test/e2e/testing-manifests/petset/mysql-galera/petset.yaml

@@ -0,0 +1,107 @@
apiVersion: apps/v1alpha1
kind: PetSet
metadata:
  name: mysql
spec:
  serviceName: "galera"
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
        pod.alpha.kubernetes.io/init-containers: '[
          {
            "name": "install",
            "image": "gcr.io/google_containers/galera-install:0.1",
            "imagePullPolicy": "Always",
            "args": ["--work-dir=/work-dir"],
            "volumeMounts": [
              {
                "name": "workdir",
                "mountPath": "/work-dir"
              },
              {
                "name": "config",
                "mountPath": "/etc/mysql"
              }
            ]
          },
          {
            "name": "bootstrap",
            "image": "debian:jessie",
            "command": ["/work-dir/peer-finder"],
            "args": ["-on-start=\"/work-dir/on-start.sh\"", "-service=galera"],
            "env": [
              {
                "name": "POD_NAMESPACE",
                "valueFrom": {
                  "fieldRef": {
                    "apiVersion": "v1",
                    "fieldPath": "metadata.namespace"
                  }
                }
              }
            ],
            "volumeMounts": [
              {
                "name": "workdir",
                "mountPath": "/work-dir"
              },
              {
                "name": "config",
                "mountPath": "/etc/mysql"
              }
            ]
          }
        ]'
    spec:
      containers:
      - name: mysql
        image: gcr.io/google_containers/mysql-galera:e2e
        ports:
        - containerPort: 3306
          name: mysql
        - containerPort: 4444
          name: sst
        - containerPort: 4567
          name: replication
        - containerPort: 4568
          name: ist
        args:
        - --defaults-file=/etc/mysql/my-galera.cnf
        - --user=root
        readinessProbe:
          # TODO: If docker exec is buggy just use nc mysql-id 3306 as a ping.
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 15
          timeoutSeconds: 5
          successThreshold: 2
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/
        - name: config
          mountPath: /etc/mysql
      - name: healthz
        image: gcr.io/google_containers/mysql-healthz:1.0
        args:
        - --port=8080
        - --verbose=true
      volumes:
      - name: config
        emptyDir: {}
      - name: workdir
        emptyDir: {}
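  # Each pet gets its own PVC from this template; the alpha storage-class
  # annotation below asks the dynamic volume provisioner for the disk.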
  volumeClaimTemplates:
  - metadata:
      name: datadir
      annotations:
        volume.alpha.kubernetes.io/storage-class: anything
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Gi

test/e2e/testing-manifests/petset/mysql-galera/service.yaml

@@ -0,0 +1,18 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
  name: galera
  labels:
    app: mysql
spec:
  ports:
  - port: 3306
    name: mysql
  # *.galera.default.svc.cluster.local
  clusterIP: None
  selector:
    app: mysql

test/e2e/testing-manifests/petset/redis/petset.yaml

@@ -0,0 +1,97 @@
apiVersion: apps/v1alpha1
kind: PetSet
metadata:
  name: rd
spec:
  serviceName: "redis"
  replicas: 3
  template:
    metadata:
      labels:
        app: redis
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
        pod.alpha.kubernetes.io/init-containers: '[
          {
            "name": "install",
            "image": "gcr.io/google_containers/redis-install:0.1",
            "imagePullPolicy": "Always",
            "args": ["--version=3.2.0", "--install-into=/opt", "--work-dir=/work-dir"],
            "volumeMounts": [
              {
                "name": "opt",
                "mountPath": "/opt"
              },
              {
                "name": "workdir",
                "mountPath": "/work-dir"
              }
            ]
          },
          {
            "name": "bootstrap",
            "image": "debian:jessie",
            "command": ["/work-dir/peer-finder"],
            "args": ["-on-start=\"/work-dir/on-start.sh\"", "-service=redis"],
            "env": [
              {
                "name": "POD_NAMESPACE",
                "valueFrom": {
                  "fieldRef": {
                    "apiVersion": "v1",
                    "fieldPath": "metadata.namespace"
                  }
                }
              }
            ],
            "volumeMounts": [
              {
                "name": "opt",
                "mountPath": "/opt"
              },
              {
                "name": "workdir",
                "mountPath": "/work-dir"
              }
            ]
          }
        ]'
    spec:
      containers:
      - name: redis
        image: debian:jessie
        ports:
        - containerPort: 6379
          name: peer
        command:
        - /opt/redis/redis-server
        args:
        - /opt/redis/redis.conf
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "/opt/redis/redis-cli -h $(hostname) ping"
          initialDelaySeconds: 15
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /data
        - name: opt
          mountPath: /opt
      volumes:
      - name: opt
        emptyDir: {}
      - name: workdir
        emptyDir: {}
  volumeClaimTemplates:
  - metadata:
      name: datadir
      annotations:
        volume.alpha.kubernetes.io/storage-class: anything
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Gi

test/e2e/testing-manifests/petset/redis/service.yaml

@@ -0,0 +1,18 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
  name: redis
  labels:
    app: redis
spec:
  ports:
  - port: 6379
    name: peer
  # *.redis.default.svc.cluster.local
  clusterIP: None
  selector:
    app: redis

test/e2e/testing-manifests/petset/zookeeper/petset.yaml

@@ -0,0 +1,103 @@
apiVersion: apps/v1alpha1
kind: PetSet
metadata:
  name: zoo
spec:
  serviceName: "zk"
  replicas: 3
  template:
    metadata:
      labels:
        app: zk
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
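        # Alpha init containers: "install" fetches zookeeper into /opt, then
        # "bootstrap" runs peer-finder against the zk headless service so each
        # pet can find its peers before the server starts.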
        pod.alpha.kubernetes.io/init-containers: '[
          {
            "name": "install",
            "image": "gcr.io/google_containers/zookeeper-install:0.1",
            "imagePullPolicy": "Always",
            "args": ["--version=3.5.0-alpha", "--install-into=/opt", "--work-dir=/work-dir"],
            "volumeMounts": [
              {
                "name": "opt",
                "mountPath": "/opt/"
              },
              {
                "name": "workdir",
                "mountPath": "/work-dir"
              }
            ]
          },
          {
            "name": "bootstrap",
            "image": "java:openjdk-8-jre",
            "command": ["/work-dir/peer-finder"],
            "args": ["-on-start=\"/work-dir/on-start.sh\"", "-service=zk"],
            "env": [
              {
                "name": "POD_NAMESPACE",
                "valueFrom": {
                  "fieldRef": {
                    "apiVersion": "v1",
                    "fieldPath": "metadata.namespace"
                  }
                }
              }
            ],
            "volumeMounts": [
              {
                "name": "opt",
                "mountPath": "/opt/"
              },
              {
                "name": "workdir",
                "mountPath": "/work-dir"
              },
              {
                "name": "datadir",
                "mountPath": "/tmp/zookeeper"
              }
            ]
          }
        ]'
    spec:
      containers:
      - name: zk
        image: java:openjdk-8-jre
        ports:
        - containerPort: 2888
          name: peer
        - containerPort: 3888
          name: leader-election
        command:
        - /opt/zookeeper/bin/zkServer.sh
        args:
        - start-foreground
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "/opt/zookeeper/bin/zkCli.sh ls /"
          initialDelaySeconds: 15
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /tmp/zookeeper
        - name: opt
          mountPath: /opt/
      volumes:
      - name: opt
        emptyDir: {}
      - name: workdir
        emptyDir: {}
  volumeClaimTemplates:
  - metadata:
      name: datadir
      annotations:
        volume.alpha.kubernetes.io/storage-class: anything
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Gi

test/e2e/testing-manifests/petset/zookeeper/service.yaml

@@ -0,0 +1,20 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
  name: zk
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: peer
  - port: 3888
    name: leader-election
  # *.zk.default.svc.cluster.local
  clusterIP: None
  selector:
    app: zk