- updates to docker

- updating Cassandra to 3.7
- added pet set example
- adding pet set for Cassandra e2e tests
- changed the service, as we do not want a LB service when running C*
- updated docs

fixing headers and adding exception for run.sh

adding documentation, thank god for reflog

Did not mean to commit that as the README ... fixing

fixing problems in README

fixing more problems in README

more README tweaks

munge updates

updating examples_test for PetSet in Cassandra examples

updating petset to use a better security context
pull/6/head
chrislovecnm 2016-07-16 19:08:11 +00:00
parent 63905492f0
commit 07f751956d
21 changed files with 1049 additions and 254 deletions

View File

@ -0,0 +1,34 @@
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
<!-- BEGIN STRIP_FOR_RELEASE -->
<img src="http://kubernetes.io/kubernetes/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/kubernetes/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/kubernetes/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/kubernetes/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/kubernetes/img/warning.png" alt="WARNING"
width="25" height="25">
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.
<strong>
Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
This file has moved to: http://kubernetes.io/docs/user-guide/petset/
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/petsets.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

View File

@ -29,6 +29,8 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/apis/apps"
appsvalidation "k8s.io/kubernetes/pkg/apis/apps/validation"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/extensions"
expvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation"
@ -128,6 +130,11 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) {
t.Namespace = api.NamespaceDefault
}
errors = expvalidation.ValidateDaemonSet(t)
case *apps.PetSet:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = appsvalidation.ValidatePetSet(t)
default:
errors = field.ErrorList{}
errors = append(errors, field.InternalError(field.NewPath(""), fmt.Errorf("no validation defined for %#v", obj)))
@ -214,6 +221,7 @@ func TestExampleObjectSchemas(t *testing.T) {
"cassandra-daemonset": &extensions.DaemonSet{},
"cassandra-controller": &api.ReplicationController{},
"cassandra-service": &api.Service{},
"cassandra-petset": &apps.PetSet{},
},
"../examples/cluster-dns": {
"dns-backend-rc": &api.ReplicationController{},

View File

@ -40,12 +40,15 @@ Documentation for other releases can be found at
- [Prerequisites](#prerequisites)
- [Cassandra Docker](#cassandra-docker)
- [tl;dr Quickstart](#tldr-quickstart)
- [Step 1: Create a Cassandra Service](#step-1-create-a-cassandra-service)
- [Step 2: Use a Replication Controller to create Cassandra node pods](#step-2-use-a-replication-controller-to-create-cassandra-node-pods)
- [Step 3: Scale up the Cassandra cluster](#step-3-scale-up-the-cassandra-cluster)
- [Step 4: Delete the Replication Controller](#step-4-delete-the-replication-controller)
- [Step 5: Use a DaemonSet instead of a Replication Controller](#step-5-use-a-daemonset-instead-of-a-replication-controller)
- [Step 6: Resource Cleanup](#step-6-resource-cleanup)
- [Step 1: Create a Cassandra Headless Service](#step-1-create-a-cassandra-headless-service)
- [Step 2: Use a Pet Set to create a Cassandra Ring](#step-2-use-a-pet-set-to-create-a-cassandra-ring)
- [Step 3: Validate and Modify The Cassandra Pet Set](#step-3-validate-and-modify-the-cassandra-pet-set)
- [Step 4: Delete the Cassandra Pet Set](#step-4-delete-the-cassandra-pet-set)
- [Step 5: Use a Replication Controller to create Cassandra node pods](#step-5-use-a-replication-controller-to-create-cassandra-node-pods)
- [Step 6: Scale up the Cassandra cluster](#step-6-scale-up-the-cassandra-cluster)
- [Step 7: Delete the Replication Controller](#step-7-delete-the-replication-controller)
- [Step 8: Use a DaemonSet instead of a Replication Controller](#step-8-use-a-daemonset-instead-of-a-replication-controller)
- [Step 9: Resource Cleanup](#step-9-resource-cleanup)
- [Seed Provider Source](#seed-provider-source)
The following document describes the development of a _cloud native_
@ -61,6 +64,7 @@ This example also uses some of the core components of Kubernetes:
- [_Pods_](../../../docs/user-guide/pods.md)
- [ _Services_](../../../docs/user-guide/services.md)
- [_Replication Controllers_](../../../docs/user-guide/replication-controller.md)
- [_Pet Sets_](../../../docs/user-guide/petsets.md)
- [_Daemon Sets_](../../../docs/admin/daemons.md)
## Prerequisites
@ -77,11 +81,10 @@ computer.
## Cassandra Docker
The pods use the [```gcr.io/google-samples/cassandra:v9```](image/Dockerfile)
The pods use the [```gcr.io/google-samples/cassandra:v11```](image/Dockerfile)
image from Google's [container registry](https://cloud.google.com/container-registry/docs/).
The image is based on `debian:jessie` and includes OpenJDK 8. This image
includes a standard Cassandra installation from the Apache Debian repo. Through the use
of environment variables you are able to change values that are inserted into the `cassandra.yaml`.
includes a standard Cassandra installation from the Apache Debian repo. Through the use of environment variables you are able to change values that are inserted into the `cassandra.yaml`.
| ENV VAR | DEFAULT VALUE |
| ------------- |:-------------: |
@ -89,40 +92,36 @@ of environment variables you are able to change values that are inserted into th
| CASSANDRA_NUM_TOKENS | 32 |
| CASSANDRA_RPC_ADDRESS | 0.0.0.0 |
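For a quick local sanity check of these overrides you can run the image directly
under Docker. This is only a sketch: it assumes a local Docker daemon, and the
override value below is an arbitrary example, not a recommendation.
```console
# a throwaway single node, overriding one of the defaults from the table above
$ docker run --rm -e CASSANDRA_NUM_TOKENS=64 gcr.io/google-samples/cassandra:v11
```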
### Custom Seed Provider
A custom [`SeedProvider`](https://svn.apache.org/repos/asf/cassandra/trunk/src/java/org/apache/cassandra/locator/SeedProvider.java)
is included for running Cassandra on top of Kubernetes. In Cassandra, a
`SeedProvider` bootstraps the gossip protocol that Cassandra uses to find other
Cassandra nodes. Seed addresses are hosts deemed as contact points. Cassandra
instances use the seed list to find each other and learn the topology of the
ring. The [`KubernetesSeedProvider`](java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java)
discovers Cassandra seed IP addresses via the Kubernetes API; those Cassandra
instances are defined within the Cassandra Service.
Refer to the custom seed provider [README](java/README.md) for further
`KubernetesSeedProvider` configurations. For this example you should not need
to customize the Seed Provider configurations.
See the [image](image/) directory of this example for specifics on
how the container docker image was built and what it contains.
You may also note that we are setting some Cassandra parameters (`MAX_HEAP_SIZE`
and `HEAP_NEWSIZE`), and adding information about the
[namespace](../../../docs/user-guide/namespaces.md).
We also tell Kubernetes that the container exposes
both the `CQL` and `Thrift` API ports. Finally, we tell the cluster
manager that we need 0.1 cpu (0.1 core).
## tl;dr Quickstart
If you want to jump straight to the commands we will run,
here are the steps:
```sh
# create a service to track all cassandra nodes
#
# Pet Set
#
# create a service to track all cassandra petset nodes
kubectl create -f examples/storage/cassandra/cassandra-service.yaml
# create a petset
kubectl create -f examples/storage/cassandra/cassandra-petset.yaml
# validate the Cassandra cluster. Substitute the name of one of your pods.
kubectl exec -ti cassandra-0 -- nodetool status
# cleanup
grace=$(kubectl get po cassandra-0 --template '{{.spec.terminationGracePeriodSeconds}}') \
&& kubectl delete petset,po -l app=cassandra \
&& echo "Sleeping $grace" \
&& sleep $grace \
&& kubectl delete pvc -l app=cassandra
#
# Resource Controller Example
#
# create a replication controller to replicate cassandra nodes
kubectl create -f examples/storage/cassandra/cassandra-controller.yaml
@ -135,7 +134,10 @@ kubectl scale rc cassandra --replicas=4
# delete the replication controller
kubectl delete rc cassandra
# then, create a daemonset to place a cassandra node on each kubernetes node
#
# Create a daemonset to place a cassandra node on each kubernetes node
#
kubectl create -f examples/storage/cassandra/cassandra-daemonset.yaml --validate=false
# resource cleanup
@ -143,17 +145,15 @@ kubectl delete service -l app=cassandra
kubectl delete daemonset cassandra
```
## Step 1: Create a Cassandra Service
## Step 1: Create a Cassandra Headless Service
A Kubernetes _[Service](../../../docs/user-guide/services.md)_ describes a set of
[_Pods_](../../../docs/user-guide/pods.md) that perform the same task. In
Kubernetes, the atomic unit of an application is a Pod: one or more containers
that _must_ be scheduled onto the same host.
An important use for a Service is to create a load balancer which
distributes traffic across members of the set of Pods. But a Service can also
be used as a standing query which makes a dynamically changing set of Pods
available via the Kubernetes API. We'll show that in this example.
The Service is used for DNS lookups between Cassandra Pods, and Cassandra clients
within the Kubernetes Cluster.
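Because the Service is headless (note the `clusterIP: None` field below), a DNS
lookup of the service name returns the addresses of the Cassandra Pods themselves
rather than a single load-balanced virtual IP. Once Pods are running in a later
step, you can verify this from inside the cluster. This is a sketch, and assumes
`getent` is present in the image, which holds for Debian-based images:
```console
$ kubectl exec cassandra-0 -- getent hosts cassandra
```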
Here is the service description:
@ -167,6 +167,7 @@ metadata:
app: cassandra
name: cassandra
spec:
clusterIP: None
ports:
- port: 9042
selector:
@ -176,25 +177,277 @@ spec:
[Download example](cassandra-service.yaml?raw=true)
<!-- END MUNGE: EXAMPLE cassandra-service.yaml -->
An important thing to note here is the `selector`. It is a query over labels,
that identifies the set of Pods contained by this Service. In this case the
selector is `app=cassandra`. If there are any pods with that label, they will be
selected for membership in this service. We'll see that in action shortly.
Create the service for the Pet Set:
Create the Cassandra service as follows:
```console
$ kubectl create -f examples/storage/cassandra/cassandra-service.yaml
```
The following command shows whether the service has been created.
## Step 2: Use a Replication Controller to create Cassandra node pods
```console
$ kubectl get svc cassandra
```
As we noted above, in Kubernetes, the atomic unit of an application is a
[_Pod_](../../../docs/user-guide/pods.md).
A Pod is one or more containers that _must_ be scheduled onto
the same host. All containers in a pod share a network namespace, and may
optionally share mounted volumes.
The response should look like:
```console
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
cassandra None <none> 9042/TCP 45s
```
If an error is returned, the service creation failed.
## Step 2: Use a Pet Set to create a Cassandra Ring
Pet Set is a new feature, added as an <i>Alpha</i> component in Kubernetes
1.3. Deploying stateful distributed applications, like Cassandra, within a clustered
environment can be challenging. We implemented Pet Set to greatly simplify this
process. Multiple Pet Set features are used within this example, but a full
discussion of them is out of the scope of this document. [Please refer to the Pet Set documentation.](../../../docs/user-guide/petsets.md)
The Pet Set manifest included below creates a Cassandra ring that consists
of three pods.
<!-- BEGIN MUNGE: EXAMPLE cassandra-petset.yaml -->
```yaml
apiVersion: "apps/v1alpha1"
kind: PetSet
metadata:
name: cassandra
spec:
serviceName: cassandra
replicas: 3
template:
metadata:
annotations:
pod.alpha.kubernetes.io/initialized: "true"
labels:
app: cassandra
spec:
containers:
- name: cassandra
image: gcr.io/google-samples/cassandra:v11
imagePullPolicy: Always
ports:
- containerPort: 7000
name: intra-node
- containerPort: 7001
name: tls-intra-node
- containerPort: 7199
name: jmx
- containerPort: 9042
name: cql
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: "1"
memory: 1Gi
securityContext:
capabilities:
add:
- IPC_LOCK
env:
- name: MAX_HEAP_SIZE
value: 512M
- name: HEAP_NEWSIZE
value: 100M
- name: CASSANDRA_SEEDS
value: "cassandra-0.cassandra.default.svc.cluster.local"
- name: CASSANDRA_CLUSTER_NAME
value: "K8Demo"
- name: CASSANDRA_DC
value: "DC1-K8Demo"
- name: CASSANDRA_RACK
value: "Rack1-K8Demo"
- name: CASSANDRA_AUTO_BOOTSTRAP
value: "false"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
readinessProbe:
exec:
command:
- /bin/bash
- -c
- /ready-probe.sh
initialDelaySeconds: 15
timeoutSeconds: 5
# These volume mounts are persistent. They are like inline claims,
# but not exactly because the names need to match exactly one of
# the pet volumes.
volumeMounts:
- name: cassandra-data
mountPath: /cassandra_data
# These are converted to volume claims by the controller
# and mounted at the paths mentioned above.
# do not use these in production until ssd GCEPersistentDisk or other ssd pd
volumeClaimTemplates:
- metadata:
name: cassandra-data
annotations:
volume.alpha.kubernetes.io/storage-class: anything
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi
```
[Download example](cassandra-petset.yaml?raw=true)
<!-- END MUNGE: EXAMPLE cassandra-petset.yaml -->
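A detail worth calling out in the manifest above is the `CASSANDRA_SEEDS` value.
Every pod in a Pet Set gets a stable DNS name of the form
`<pod-name>.<service-name>.<namespace>.svc.cluster.local`, backed by the headless
service from Step 1, so the first pet, `cassandra-0`, can act as the gossip seed
for the rest of the ring. Once the pods from Step 3 are running you can check
that the name resolves; this is a sketch that assumes `getent` is available in
the image:
```console
$ kubectl exec cassandra-1 -- getent hosts cassandra-0.cassandra.default.svc.cluster.local
```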
Create the Cassandra Pet Set as follows:
```console
$ kubectl create -f examples/storage/cassandra/cassandra-petset.yaml
```
## Step 3: Validate and Modify The Cassandra Pet Set
Deploying this Pet Set shows off two of the new features that Pet Set provides:
1. The pod names are known
2. The pods deploy in incremental order
First validate that the Pet Set has deployed by running the `kubectl` command below.
```console
$ kubectl get petset cassandra
```
The command should respond with something like:
```console
NAME DESIRED CURRENT AGE
cassandra 3 3 13s
```
Next watch the Cassandra pods deploy, one after another. The Pet Set resource
deploys pods sequentially: 1, 2, 3, and so on. If you execute the following
command before all of the pods have deployed, you are able to see the ordered creation.
```console
$ kubectl get pods -l="app=cassandra"
NAME READY STATUS RESTARTS AGE
cassandra-0 1/1 Running 0 1m
cassandra-1 0/1 ContainerCreating 0 8s
```
The above example shows two of the three pods in the Cassandra Pet Set deployed.
Once all of the pods are deployed, the same command will respond with the full
Pet Set.
```console
$ kubectl get pods -l="app=cassandra"
NAME READY STATUS RESTARTS AGE
cassandra-0 1/1 Running 0 10m
cassandra-1 1/1 Running 0 9m
cassandra-2 1/1 Running 0 8m
```
Running the Cassandra utility `nodetool` will display the status of the ring.
```console
$ kubectl exec cassandra-0 -- nodetool status
Datacenter: DC1-K8Demo
======================
Status=Up/Down
|/ State=Normal/Leaving/Joining/Moving
-- Address Load Tokens Owns (effective) Host ID Rack
UN 10.4.2.4 65.26 KiB 32 63.7% a9d27f81-6783-461d-8583-87de2589133e Rack1-K8Demo
UN 10.4.0.4 102.04 KiB 32 66.7% 5559a58c-8b03-47ad-bc32-c621708dc2e4 Rack1-K8Demo
UN 10.4.1.4 83.06 KiB 32 69.6% 9dce943c-581d-4c0e-9543-f519969cc805 Rack1-K8Demo
```
You can also run `cqlsh` to describe the keyspaces in the cluster.
```console
$ kubectl exec cassandra-0 -- cqlsh -e 'desc keyspaces'
system_traces system_schema system_auth system system_distributed
```
In order to increase or decrease the size of the Cassandra Pet Set, you must use
`kubectl edit`. You can find more information about the edit command in the [documentation](../../../docs/user-guide/kubectl/kubectl_edit.md).
Use the following command to edit the Pet Set.
```console
$ kubectl edit petset cassandra
```
This will open an editor in your terminal. The line you want to change is
`replicas`. The example does not contain the entire contents of the terminal window, and
the last line of the example below is the `replicas` line that you want to change.
```console
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: apps/v1alpha1
kind: PetSet
metadata:
creationTimestamp: 2016-08-13T18:40:58Z
generation: 1
labels:
app: cassandra
name: cassandra
namespace: default
resourceVersion: "323"
selfLink: /apis/apps/v1alpha1/namespaces/default/petsets/cassandra
uid: 7a219483-6185-11e6-a910-42010a8a0fc0
spec:
replicas: 3
```
Modify the manifest as follows, and save it.
```console
spec:
replicas: 4
```
The Pet Set will now contain four Pets.
```console
$ kubectl get petset cassandra
```
The command should respond with something like:
```console
NAME DESIRED CURRENT AGE
cassandra 4 4 36m
```
In the Alpha release of Kubernetes 1.3, the Pet Set resource does not support `kubectl scale`
functionality the way a Deployment, ReplicaSet, Replication Controller, or Job does.
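If you prefer a non-interactive alternative to `kubectl edit`, a JSON merge patch
may also work. Treat this as an untested sketch: Pet Set is Alpha in 1.3, and
`kubectl patch` support for it is not guaranteed.
```console
$ kubectl patch petset cassandra -p '{"spec":{"replicas":4}}'
```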
## Step 4: Delete the Cassandra Pet Set
There are some limitations with the Alpha release of Pet Set in 1.3. From the [documentation](../../../docs/user-guide/petsets.md):
"Deleting the Pet Set will not delete any pets. You will either have to manually scale it down to 0 pets first, or delete the pets yourself.
Deleting and/or scaling a Pet Set down will not delete the volumes associated with the Pet Set. This is done to ensure safety first, your data is more valuable than an auto purge of all related Pet Set resources. Deleting the Persistent Volume Claims will result in a deletion of the associated volumes."
Use the following commands to delete the Pet Set.
```console
$ grace=$(kubectl get po cassandra-0 --template '{{.spec.terminationGracePeriodSeconds}}') \
&& kubectl delete petset,po -l app=cassandra \
&& echo "Sleeping $grace" \
&& sleep $grace \
&& kubectl delete pvc -l app=cassandra
```
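The chain above reads the pod's termination grace period, deletes the Pet Set and
its pods, waits out the grace period so Cassandra can shut down cleanly, and only
then deletes the Persistent Volume Claims. You can confirm the claims are gone;
the command should report that no resources were found:
```console
$ kubectl get pvc -l app=cassandra
```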
## Step 5: Use a Replication Controller to create Cassandra node pods
A Kubernetes
_[Replication Controller](../../../docs/user-guide/replication-controller.md)_
@ -242,6 +495,8 @@ spec:
value: 512M
- name: HEAP_NEWSIZE
value: 100M
- name: CASSANDRA_SEED_PROVIDER
value: "io.k8s.cassandra.KubernetesSeedProvider"
- name: POD_NAMESPACE
valueFrom:
fieldRef:
@ -250,7 +505,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
image: gcr.io/google-samples/cassandra:v9
image: gcr.io/google-samples/cassandra:v11
name: cassandra
ports:
- containerPort: 7000
@ -261,9 +516,6 @@ spec:
name: jmx
- containerPort: 9042
name: cql
# If you need it it is going away in C* 4.0
#- containerPort: 9160
# name: thrift
volumeMounts:
- mountPath: /cassandra_data
name: data
@ -288,8 +540,6 @@ by the Service."
The `replicas` attribute specifies the desired number of replicas, in this
case 2 initially. We'll scale up to more shortly.
Create the Replication Controller:
```console
@ -304,7 +554,7 @@ You can list the new controller:
$ kubectl get rc -o wide
NAME DESIRED CURRENT AGE CONTAINER(S) IMAGE(S) SELECTOR
cassandra 2 2 11s cassandra gcr.io/google-samples/cassandra:v9 app=cassandra
cassandra 2 2 11s cassandra gcr.io/google-samples/cassandra:v11 app=cassandra
```
@ -374,7 +624,7 @@ UN 10.244.3.3 51.28 KB 256 100.0% dafe3154-1d67-42e1-ac1d-78e
```
## Step 3: Scale up the Cassandra cluster
## Step 6: Scale up the Cassandra cluster
Now let's scale our Cassandra cluster to 4 pods. We do this by telling the
Replication Controller that we now want 4 replicas.
@ -416,7 +666,7 @@ UN 10.244.0.5 68.2 KB 256 53.4% 72ca27e2-c72c-402a-9313-1e4
```
## Step 4: Delete the Replication Controller
## Step 7: Delete the Replication Controller
Before you start Step 8, __delete the replication controller__ you created above:
@ -426,7 +676,7 @@ $ kubectl delete rc cassandra
```
## Step 5: Use a DaemonSet instead of a Replication Controller
## Step 8: Use a DaemonSet instead of a Replication Controller
In Kubernetes, a [_Daemon Set_](../../../docs/admin/daemons.md) can distribute pods
onto Kubernetes nodes, one-to-one. Like a _ReplicationController_, it has a
@ -471,6 +721,8 @@ spec:
value: 512M
- name: HEAP_NEWSIZE
value: 100M
- name: CASSANDRA_SEED_PROVIDER
value: "io.k8s.cassandra.KubernetesSeedProvider"
- name: POD_NAMESPACE
valueFrom:
fieldRef:
@ -479,7 +731,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
image: gcr.io/google-samples/cassandra:v9
image: gcr.io/google-samples/cassandra:v11
name: cassandra
ports:
- containerPort: 7000
@ -583,7 +835,7 @@ If we didn't delete the RC first, the two resources would conflict with
respect to how many pods they wanted to have running. If we wanted, we could support running
both together by using additional labels and selectors.
## Step 6: Resource Cleanup
## Step 9: Resource Cleanup
When you are ready to take down your resources, do the following:
@ -594,6 +846,32 @@ $ kubectl delete daemonset cassandra
```
### Custom Seed Provider
A custom [`SeedProvider`](https://svn.apache.org/repos/asf/cassandra/trunk/src/java/org/apache/cassandra/locator/SeedProvider.java)
is included for running Cassandra on top of Kubernetes. The custom seed provider is
only needed when you deploy Cassandra via a replication controller or a daemon set.
In Cassandra, a `SeedProvider` bootstraps the gossip protocol that Cassandra uses to find other
Cassandra nodes. Seed addresses are hosts deemed as contact points. Cassandra
instances use the seed list to find each other and learn the topology of the
ring. The [`KubernetesSeedProvider`](java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java)
discovers Cassandra seed IP addresses via the Kubernetes API; those Cassandra
instances are defined within the Cassandra Service.
Refer to the custom seed provider [README](java/README.md) for further
`KubernetesSeedProvider` configurations. For this example you should not need
to customize the Seed Provider configurations.
See the [image](image/) directory of this example for specifics on
how the container image was built and what it contains.
You may also note that we are setting some Cassandra parameters (`MAX_HEAP_SIZE`
and `HEAP_NEWSIZE`), and adding information about the
[namespace](../../../docs/user-guide/namespaces.md).
We also tell Kubernetes that the container exposes
both the `CQL` and `Thrift` API ports. Finally, we tell the cluster
manager that we need 0.1 cpu (0.1 core).
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/cassandra/README.md?pixel)]()

View File

@ -28,6 +28,8 @@ spec:
value: 512M
- name: HEAP_NEWSIZE
value: 100M
- name: CASSANDRA_SEED_PROVIDER
value: "io.k8s.cassandra.KubernetesSeedProvider"
- name: POD_NAMESPACE
valueFrom:
fieldRef:
@ -36,7 +38,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
image: gcr.io/google-samples/cassandra:v9
image: gcr.io/google-samples/cassandra:v11
name: cassandra
ports:
- containerPort: 7000
@ -47,9 +49,6 @@ spec:
name: jmx
- containerPort: 9042
name: cql
# If you need it it is going away in C* 4.0
#- containerPort: 9160
# name: thrift
volumeMounts:
- mountPath: /cassandra_data
name: data

View File

@ -21,6 +21,8 @@ spec:
value: 512M
- name: HEAP_NEWSIZE
value: 100M
- name: CASSANDRA_SEED_PROVIDER
value: "io.k8s.cassandra.KubernetesSeedProvider"
- name: POD_NAMESPACE
valueFrom:
fieldRef:
@ -29,7 +31,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
image: gcr.io/google-samples/cassandra:v9
image: gcr.io/google-samples/cassandra:v11
name: cassandra
ports:
- containerPort: 7000

View File

@ -0,0 +1,84 @@
apiVersion: "apps/v1alpha1"
kind: PetSet
metadata:
name: cassandra
spec:
serviceName: cassandra
replicas: 3
template:
metadata:
annotations:
pod.alpha.kubernetes.io/initialized: "true"
labels:
app: cassandra
spec:
containers:
- name: cassandra
image: gcr.io/google-samples/cassandra:v11
imagePullPolicy: Always
ports:
- containerPort: 7000
name: intra-node
- containerPort: 7001
name: tls-intra-node
- containerPort: 7199
name: jmx
- containerPort: 9042
name: cql
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: "1"
memory: 1Gi
securityContext:
capabilities:
add:
- IPC_LOCK
env:
- name: MAX_HEAP_SIZE
value: 512M
- name: HEAP_NEWSIZE
value: 100M
- name: CASSANDRA_SEEDS
value: "cassandra-0.cassandra.default.svc.cluster.local"
- name: CASSANDRA_CLUSTER_NAME
value: "K8Demo"
- name: CASSANDRA_DC
value: "DC1-K8Demo"
- name: CASSANDRA_RACK
value: "Rack1-K8Demo"
- name: CASSANDRA_AUTO_BOOTSTRAP
value: "false"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
readinessProbe:
exec:
command:
- /bin/bash
- -c
- /ready-probe.sh
initialDelaySeconds: 15
timeoutSeconds: 5
# These volume mounts are persistent. They are like inline claims,
# but not exactly because the names need to match exactly one of
# the pet volumes.
volumeMounts:
- name: cassandra-data
mountPath: /cassandra_data
# These are converted to volume claims by the controller
# and mounted at the paths mentioned above.
# do not use these in production until ssd GCEPersistentDisk or other ssd pd
volumeClaimTemplates:
- metadata:
name: cassandra-data
annotations:
volume.alpha.kubernetes.io/storage-class: anything
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi

View File

@ -5,6 +5,7 @@ metadata:
app: cassandra
name: cassandra
spec:
clusterIP: None
ports:
- port: 9042
selector:

View File

@ -14,28 +14,36 @@
FROM google/debian:jessie
COPY cassandra.list /etc/apt/sources.list.d/cassandra.list
COPY run.sh /run.sh
COPY cassandra.yaml /cassandra.yaml
COPY logback.xml /logback.xml
COPY kubernetes-cassandra.jar /kubernetes-cassandra.jar
ADD files /
RUN gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D && \
gpg --export --armor F758CE318D77295D | apt-key add - && \
gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 && \
gpg --export --armor 2B5C1B00 | apt-key add - && \
gpg --keyserver pgp.mit.edu --recv-keys 0353B12C && \
gpg --export --armor 0353B12C | apt-key add - && \
apt-get update && \
apt-get -qq -y install procps cassandra openjdk-8-jre-headless && \
chmod a+rx /run.sh && \
mkdir -p /cassandra_data/data && \
mv /logback.xml /etc/cassandra/ && \
mv /cassandra.yaml /etc/cassandra/ && \
chown -R cassandra: /etc/cassandra /cassandra_data /run.sh \
/kubernetes-cassandra.jar && \
chmod o+w -R /etc/cassandra /cassandra_data && \
rm -rf \
ENV DI_VERSION="1.1.1" DI_SHA="dec8167091671df0dd3748a8938102479db5fffc"
RUN mv /java.list /etc/apt/sources.list.d/java.list \
&& apt-get update \
&& apt-get -qq -y install --no-install-recommends procps openjdk-8-jre-headless libjemalloc1 curl \
localepurge \
&& curl -L https://github.com/Yelp/dumb-init/releases/download/v${DI_VERSION}/dumb-init_${DI_VERSION}_amd64 > /sbin/dumb-init \
&& echo "$DI_SHA /sbin/dumb-init" | sha1sum -c - \
&& mv /cassandra.list /etc/apt/sources.list.d/cassandra.list \
&& gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D \
&& gpg --export --armor F758CE318D77295D | apt-key add - \
&& gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 \
&& gpg --export --armor 2B5C1B00 | apt-key add - \
&& gpg --keyserver pgp.mit.edu --recv-keys 0353B12C \
&& gpg --export --armor 0353B12C | apt-key add - \
&& apt-get update \
&& apt-get -qq -y install --no-install-recommends curl cassandra localepurge \
&& chmod a+rx /run.sh /sbin/dumb-init /ready-probe.sh \
&& mkdir -p /cassandra_data/data \
&& mv /logback.xml /cassandra.yaml /etc/cassandra/ \
# Not able to run as cassandra until https://github.com/kubernetes/kubernetes/issues/2630 is resolved
# && chown -R cassandra: /etc/cassandra /cassandra_data /run.sh /kubernetes-cassandra.jar \
# && chmod o+w -R /etc/cassandra /cassandra_data \
&& apt-get -y purge curl localepurge \
&& apt-get clean \
&& rm -rf \
doc \
man \
info \
@ -54,7 +62,7 @@ RUN gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D && \
/tmp/*
VOLUME ["/cassandra_data/data"]
VOLUME ["/cassandra_data"]
# 7000: intra-node communication
# 7001: TLS intra-node communication
@ -63,6 +71,8 @@ VOLUME ["/cassandra_data/data"]
# 9160: thrift service not included because it is going away
EXPOSE 7000 7001 7199 9042
USER cassandra
# Not able to do this until https://github.com/kubernetes/kubernetes/issues/2630 is resolved
# if you are using attached storage
# USER cassandra
CMD /run.sh
CMD ["/sbin/dumb-init", "/bin/bash", "/run.sh"]

View File

@ -14,19 +14,21 @@
# build the cassandra image.
VERSION=v9
VERSION=v11
PROJECT_ID=google_samples
PROJECT=gcr.io/${PROJECT_ID}
all: build
kubernetes-cassandra.jar: ../java/* ../java/src/main/java/io/k8s/cassandra/*.java
cd ../java && mvn package
mv ../java/target/kubernetes-cassandra*.jar kubernetes-cassandra.jar
cd ../java && mvn clean && mvn package
mv ../java/target/kubernetes-cassandra*.jar files/kubernetes-cassandra.jar
cd ../java && mvn clean
build: kubernetes-cassandra.jar
docker build -t gcr.io/google_samples/cassandra:${VERSION} .
docker build -t ${PROJECT}/cassandra:${VERSION} .
push: build
gcloud docker push gcr.io/google_samples/cassandra:${VERSION}
gcloud docker push ${PROJECT}/cassandra:${VERSION}
.PHONY: all build push

View File

@ -1,5 +0,0 @@
deb http://www.apache.org/dist/cassandra/debian 35x main
deb-src http://www.apache.org/dist/cassandra/debian 35x main
# for jre8
deb http://http.debian.net/debian jessie-backports main

View File

@ -0,0 +1,2 @@
deb http://www.apache.org/dist/cassandra/debian 37x main
deb-src http://www.apache.org/dist/cassandra/debian 37x main

View File

@ -22,7 +22,18 @@ cluster_name: 'Test Cluster'
#
# If you already have a cluster with 1 token per node, and wish to migrate to
# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
num_tokens: 32
num_tokens: 256
# Triggers automatic allocation of num_tokens tokens for this node. The allocation
# algorithm attempts to choose tokens in a way that optimizes replicated load over
# the nodes in the datacenter for the replication strategy used by the specified
# keyspace.
#
# The load assigned to each node will be close to proportional to its number of
# vnodes.
#
# Only supported with the Murmur3Partitioner.
# allocate_tokens_for_keyspace: KEYSPACE
# initial_token allows you to specify tokens manually. While you can use # it with
# vnodes (num_tokens > 1, above) -- in which case you should provide a
@ -31,25 +42,49 @@ num_tokens: 32
# initial_token:
# See http://wiki.apache.org/cassandra/HintedHandoff
# May either be "true" or "false" to enable globally, or contain a list
# of data centers to enable per-datacenter.
# hinted_handoff_enabled: DC1,DC2
# May either be "true" or "false" to enable globally
hinted_handoff_enabled: true
# When hinted_handoff_enabled is true, a black list of data centers that will not
# perform hinted handoff
# hinted_handoff_disabled_datacenters:
# - DC1
# - DC2
# this defines the maximum amount of time a dead host will have hints
# generated. After it has been dead this long, new hints for it will not be
# created until it has been seen alive and gone down again.
max_hint_window_in_ms: 10800000 # 3 hours
# Maximum throttle in KBs per second, per delivery thread. This will be
# reduced proportionally to the number of nodes in the cluster. (If there
# are two nodes in the cluster, each delivery thread will use the maximum
# rate; if there are three, each will throttle to half of the maximum,
# since we expect two nodes to be delivering hints simultaneously.)
hinted_handoff_throttle_in_kb: 1024
# Number of threads with which to deliver hints;
# Consider increasing this number when you have multi-dc deployments, since
# cross-dc handoff tends to be slower
max_hints_delivery_threads: 2
# Directory where Cassandra should store hints.
# If not set, the default directory is $CASSANDRA_HOME/data/hints.
# hints_directory: /var/lib/cassandra/hints
# How often hints should be flushed from the internal buffers to disk.
# Will *not* trigger fsync.
hints_flush_period_in_ms: 10000
# Maximum size for a single hints file, in megabytes.
max_hints_file_size_in_mb: 128
# Compression to apply to the hint files. If omitted, hints files
# will be written uncompressed. LZ4, Snappy, and Deflate compressors
# are supported.
#hints_compression:
# - class_name: LZ4Compressor
# parameters:
# -
# Maximum throttle in KBs per second, total. This will be
# reduced proportionally to the number of nodes in the cluster.
batchlog_replay_throttle_in_kb: 1024
@ -62,6 +97,7 @@ batchlog_replay_throttle_in_kb: 1024
# - PasswordAuthenticator relies on username/password pairs to authenticate
# users. It keeps usernames and hashed passwords in system_auth.credentials table.
# Please increase system_auth keyspace replication factor if you use this authenticator.
# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
authenticator: AllowAllAuthenticator
# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
@ -73,12 +109,66 @@ authenticator: AllowAllAuthenticator
# increase system_auth keyspace replication factor if you use this authorizer.
authorizer: AllowAllAuthorizer
# Part of the Authentication & Authorization backend, implementing IRoleManager; used
# to maintain grants and memberships between roles.
# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager,
# which stores role information in the system_auth keyspace. Most functions of the
# IRoleManager require an authenticated login, so unless the configured IAuthenticator
# actually implements authentication, most of this functionality will be unavailable.
#
# - CassandraRoleManager stores role data in the system_auth keyspace. Please
# increase system_auth keyspace replication factor if you use this role manager.
role_manager: CassandraRoleManager
# Validity period for roles cache (fetching granted roles can be an expensive
# operation depending on the role manager, CassandraRoleManager is one example)
# Granted roles are cached for authenticated sessions in AuthenticatedUser and
# after the period specified here, become eligible for (async) reload.
# Defaults to 2000, set to 0 to disable caching entirely.
# Will be disabled automatically for AllowAllAuthenticator.
roles_validity_in_ms: 2000
# Refresh interval for roles cache (if enabled).
# After this interval, cache entries become eligible for refresh. Upon next
# access, an async reload is scheduled and the old value returned until it
# completes. If roles_validity_in_ms is non-zero, then this must be
# also.
# Defaults to the same value as roles_validity_in_ms.
# roles_update_interval_in_ms: 2000
# Validity period for permissions cache (fetching permissions can be an
# expensive operation depending on the authorizer, CassandraAuthorizer is
# one example). Defaults to 2000, set to 0 to disable.
# Will be disabled automatically for AllowAllAuthorizer.
permissions_validity_in_ms: 2000
# Refresh interval for permissions cache (if enabled).
# After this interval, cache entries become eligible for refresh. Upon next
# access, an async reload is scheduled and the old value returned until it
# completes. If permissions_validity_in_ms is non-zero, then this must be
# also.
# Defaults to the same value as permissions_validity_in_ms.
# permissions_update_interval_in_ms: 2000
# Validity period for credentials cache. This cache is tightly coupled to
# the provided PasswordAuthenticator implementation of IAuthenticator. If
# another IAuthenticator implementation is configured, this cache will not
# be automatically used and so the following settings will have no effect.
# Please note, credentials are cached in their encrypted form, so while
# activating this cache may reduce the number of queries made to the
# underlying table, it may not bring a significant reduction in the
# latency of individual authentication attempts.
# Defaults to 2000, set to 0 to disable credentials caching.
credentials_validity_in_ms: 2000
# Refresh interval for credentials cache (if enabled).
# After this interval, cache entries become eligible for refresh. Upon next
# access, an async reload is scheduled and the old value returned until it
# completes. If credentials_validity_in_ms is non-zero, then this must be
# also.
# Defaults to the same value as credentials_validity_in_ms.
# credentials_update_interval_in_ms: 2000
# The partitioner is responsible for distributing groups of rows (by
# partition key) across nodes in the cluster. You should leave this
# alone for new clusters. The partitioner can NOT be changed without
@ -104,11 +194,12 @@ data_file_directories:
commitlog_directory: /cassandra_data/commitlog
# policy for data disk failures:
# die: shut down gossip and Thrift and kill the JVM for any fs errors or
# die: shut down gossip and client transports and kill the JVM for any fs errors or
# single-sstable errors, so the node can be replaced.
# stop_paranoid: shut down gossip and Thrift even for single-sstable errors.
# stop: shut down gossip and Thrift, leaving the node effectively dead, but
# can still be inspected via JMX.
# stop_paranoid: shut down gossip and client transports even for single-sstable errors,
# kill the JVM for errors during startup.
# stop: shut down gossip and client transports, leaving the node effectively dead, but
# can still be inspected via JMX, kill the JVM for errors during startup.
# best_effort: stop using the failed disk and respond to requests based on
# remaining available sstables. This means you WILL see obsolete
# data at CL.ONE!
@ -153,15 +244,25 @@ key_cache_save_period: 14400
# Disabled by default, meaning all keys are going to be saved
# key_cache_keys_to_save: 100
# Row cache implementation class name.
# Available implementations:
# org.apache.cassandra.cache.OHCProvider Fully off-heap row cache implementation (default).
# org.apache.cassandra.cache.SerializingCacheProvider This is the row cache implementation available
# in previous releases of Cassandra.
# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
# Maximum size of the row cache in memory.
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
# Please note that OHC cache implementation requires some additional off-heap memory to manage
# the map structures and some in-flight memory during operations before/after cache entries can be
# accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
# Do not specify more memory than the system can afford in the worst usual situation and leave some
# headroom for OS block level cache. Never allow your system to swap.
#
# Default value is 0, to disable row caching.
row_cache_size_in_mb: 16
row_cache_size_in_mb: 0
# Duration in seconds after which Cassandra should
# save the row cache. Caches are saved to saved_caches_directory as specified
# in this configuration file.
# Duration in seconds after which Cassandra should save the row cache.
# Caches are saved to saved_caches_directory as specified in this configuration file.
#
# Saved caches greatly improve cold-start speeds, and is relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
@ -170,8 +271,8 @@ row_cache_size_in_mb: 16
# Default is 0 to disable saving the row cache.
row_cache_save_period: 0
# Number of keys from the row cache to save
# Disabled by default, meaning all keys are going to be saved
# Number of keys from the row cache to save.
# Specify 0 (which is the default), meaning all keys are going to be saved
# row_cache_keys_to_save: 100
# Maximum size of the counter cache in memory.
@ -200,41 +301,27 @@ counter_cache_save_period: 7200
# Disabled by default, meaning all keys are going to be saved
# counter_cache_keys_to_save: 100
# The off-heap memory allocator. Affects storage engine metadata as
# well as caches. Experiments show that JEMAlloc saves some memory
# than the native GCC allocator (i.e., JEMalloc is more
# fragmentation-resistant).
#
# Supported values are: NativeAllocator, JEMallocAllocator
#
# If you intend to use JEMallocAllocator you have to install JEMalloc as library and
# modify cassandra-env.sh as directed in the file.
#
# Defaults to NativeAllocator
# memory_allocator: NativeAllocator
# saved caches
# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
saved_caches_directory: /cassandra_data/saved_caches
# commitlog_sync may be either "periodic" or "batch."
#
# When in batch mode, Cassandra won't ack writes until the commit log
# has been fsynced to disk. It will wait up to
# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
# performing the sync.
# has been fsynced to disk. It will wait
# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
# This window should be kept short because the writer threads will
# be unable to do extra work while waiting. (You may need to increase
# concurrent_writes for the same reason.)
#
# commitlog_sync: batch
# commitlog_sync_batch_window_in_ms: 1.0
# commitlog_sync_batch_window_in_ms: 2
#
# the other option is "periodic" where writes may be acked immediately
# and the CommitLog is simply synced every commitlog_sync_period_in_ms
# milliseconds. commitlog_periodic_queue_size allows 1024*(CPU cores) pending
# entries on the commitlog queue by default. If you are writing very large
# blobs, you should reduce that; 16*cores works reasonably well for 1MB blobs.
# It should be at least as large as the concurrent_writes setting.
# milliseconds.
commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000
# commitlog_periodic_queue_size:
# The size of the individual commitlog file segments. A commitlog
# segment may be archived, deleted, or recycled once all the data
@ -245,8 +332,22 @@ commitlog_sync_period_in_ms: 10000
# archiving commitlog segments (see commitlog_archiving.properties),
# then you probably want a finer granularity of archiving; 8 or 16 MB
# is reasonable.
# Max mutation size is also configurable via max_mutation_size_in_kb setting in
# cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024.
#
# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
# be set to at least twice the size of max_mutation_size_in_kb / 1024
#
commitlog_segment_size_in_mb: 32
# Compression to apply to the commit log. If omitted, the commit log
# will be written uncompressed. LZ4, Snappy, and Deflate compressors
# are supported.
#commitlog_compression:
# - class_name: LZ4Compressor
# parameters:
# -
# any class that implements the SeedProvider interface and has a
# constructor that takes a Map<String, String> of parameters will do.
seed_provider:
@ -254,11 +355,12 @@ seed_provider:
# Cassandra nodes use this list of hosts to find each other and learn
# the topology of the ring. You must change this if you are running
# multiple nodes!
- class_name: io.k8s.cassandra.KubernetesSeedProvider
#- class_name: io.k8s.cassandra.KubernetesSeedProvider
- class_name: SEED_PROVIDER
parameters:
# seeds is actually a comma-delimited list of addresses.
# Ex: "<ip1>,<ip2>,<ip3>"
- seeds: 127.0.0.1
- seeds: "127.0.0.1"
# For workloads with more data than can fit in memory, Cassandra's
# bottleneck will be reads that need to fetch data from
@ -275,10 +377,27 @@ concurrent_reads: 32
concurrent_writes: 32
concurrent_counter_writes: 32
# Total memory to use for sstable-reading buffers. Defaults to
# the smaller of 1/4 of heap or 512MB.
# For materialized view writes, as there is a read involved, so this should
# be limited by the less of concurrent reads or concurrent writes.
concurrent_materialized_view_writes: 32
# Maximum memory to use for pooling sstable buffers. Defaults to the smaller
# of 1/4 of heap or 512MB. This pool is allocated off-heap, so is in addition
# to the memory allocated for heap. Memory is only allocated as needed.
# file_cache_size_in_mb: 512
# Flag indicating whether to allocate on or off heap when the sstable buffer
# pool is exhausted, that is when it has exceeded the maximum memory
# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.
# buffer_pool_use_heap_if_exhausted: true
# The strategy for optimizing disk read
# Possible values are:
# ssd (for solid state disks, the default)
# spinning (for spinning disks)
# disk_optimization_strategy: ssd
# Total permitted memory to use for memtables. Cassandra will stop
# accepting writes when the limit is exceeded until a flush completes,
# and will trigger a flush based on memtable_cleanup_threshold
@ -287,7 +406,7 @@ concurrent_counter_writes: 32
# memtable_offheap_space_in_mb: 2048
# Ratio of occupied non-flushing memtable size to total permitted size
# that will trigger a flush of the largest memtable. Lager mct will
# that will trigger a flush of the largest memtable. Larger mct will
# mean larger flushes and hence less compaction, but also less concurrent
# flush activity which can make it difficult to keep your disks fed
# under heavy write load.
@ -299,29 +418,29 @@ concurrent_counter_writes: 32
# Options are:
# heap_buffers: on heap nio buffers
# offheap_buffers: off heap (direct) nio buffers
# offheap_objects: native memory, eliminating nio buffer heap overhead
memtable_allocation_type: offheap_objects
# offheap_objects: off heap objects
memtable_allocation_type: heap_buffers
# Total space to use for commitlogs. Since commitlog segments are
# mmapped, and hence use up address space, the default size is 32
# on 32-bit JVMs, and 8192 on 64-bit JVMs.
# Total space to use for commit logs on disk.
#
# If space gets above this value, Cassandra will flush every dirty CF
# in the oldest segment and remove it. So a small total commitlog space
# will tend to cause more flush activity on less-active columnfamilies.
#
# The default value is the smaller of 8192, and 1/4 of the total space
# of the commitlog volume.
#
# If space gets above this value (it will round up to the next nearest
# segment multiple), Cassandra will flush every dirty CF in the oldest
# segment and remove it. So a small total commitlog space will tend
# to cause more flush activity on less-active columnfamilies.
# commitlog_total_space_in_mb: 8192
# This sets the amount of memtable flush writer threads. These will
# be blocked by disk io, and each one will hold a memtable in memory
# while blocked.
#
# memtable_flush_writers defaults to the smaller of (number of disks,
# number of cores), with a minimum of 2 and a maximum of 8.
# memtable_flush_writers defaults to one per data_file_directory.
#
# If your data directories are backed by SSD, you should increase this
# to the number of cores.
#memtable_flush_writers: 8
# If your data directories are backed by SSD, you can increase this, but
# avoid having memtable_flush_writers * data_file_directories > number of cores
#memtable_flush_writers: 1
# A fixed memory pool size in MB for for SSTable index summaries. If left
# empty, this will default to 5% of the heap size. If the memory usage of
@ -346,10 +465,12 @@ trickle_fsync: false
trickle_fsync_interval_in_kb: 10240
# TCP port, for commands and data
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
storage_port: 7000
# SSL port, for encrypted communication. Unused unless enabled in
# encryption_options
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
ssl_storage_port: 7001
# Address or interface to bind to and tell other Cassandra nodes to connect to.
@ -364,13 +485,27 @@ ssl_storage_port: 7001
# address associated with the hostname (it might not be).
#
# Setting listen_address to 0.0.0.0 is always wrong.
listen_address: 127.0.0.1
#
# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
listen_address: localhost
# listen_interface: eth0
# listen_interface_prefer_ipv6: false
# Address to broadcast to other Cassandra nodes
# Leaving this blank will set it to the same value as listen_address
# broadcast_address: 1.2.3.4
# When using multiple physical network interfaces, set this
# to true to listen on broadcast_address in addition to
# the listen_address, allowing nodes to communicate in both
# interfaces.
# Ignore this property if the network configuration automatically
# routes between the public and private networks such as EC2.
# listen_on_broadcast_address: false
# Internode authentication backend, implementing IInternodeAuthenticator;
# used to allow/disallow connections from peer nodes.
# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
@ -380,7 +515,16 @@ listen_address: 127.0.0.1
# same as the rpc_address. The port however is different and specified below.
start_native_transport: true
# port for the CQL native transport to listen for clients on
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
native_transport_port: 9042
# Enabling native transport encryption in client_encryption_options allows you to either use
# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
# standard native_transport_port.
# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
# for native_transport_port. Setting native_transport_port_ssl to a different value
# from native_transport_port will use encryption for native_transport_port_ssl while
# keeping native_transport_port unencrypted.
# native_transport_port_ssl: 9142
# The maximum threads for handling requests when the native transport is used.
# This is similar to rpc_max_threads though the default differs slightly (and
# there is no native_transport_min_threads, idle threads will always be stopped
@ -391,8 +535,16 @@ native_transport_port: 9042
# be rejected as invalid. The default is 256MB.
# native_transport_max_frame_size_in_mb: 256
# The maximum number of concurrent client connections.
# The default is -1, which means unlimited.
# native_transport_max_concurrent_connections: -1
# The maximum number of concurrent client connections per source ip.
# The default is -1, which means unlimited.
# native_transport_max_concurrent_connections_per_ip: -1
# Whether to start the thrift rpc server.
start_rpc: true
start_rpc: false
# The address or interface to bind the Thrift RPC service and native transport
# server to.
@ -405,8 +557,16 @@ start_rpc: true
#
# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
# set broadcast_rpc_address to a value other than 0.0.0.0.
rpc_address: 127.0.0.1
#
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
#
# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
rpc_address: localhost
# rpc_interface: eth1
# rpc_interface_prefer_ipv6: false
# port for Thrift to listen for clients on
rpc_port: 9160
@ -476,7 +636,7 @@ thrift_framed_transport_size_in_mb: 15
# flushed or streamed locally in a backups/ subdirectory of the
# keyspace data. Removing these links is the operator's
# responsibility.
incremental_backups: true
incremental_backups: false
# Whether or not to take a snapshot before each compaction. Be
# careful using this option, since Cassandra won't clean up the
@ -518,6 +678,9 @@ column_index_size_in_kb: 64
# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
batch_size_warn_threshold_in_kb: 5
# Fail any batch exceeding this value. 50kb (10x warn threshold) by default.
batch_size_fail_threshold_in_kb: 50
# Number of simultaneous compactions to allow, NOT including
# validation "compactions" for anti-entropy repair. Simultaneous
# compactions can help preserve read performance in a mixed read/write
@ -532,7 +695,6 @@ batch_size_warn_threshold_in_kb: 5
#
# If your data directories are backed by SSD, you should increase this
# to the number of cores.
# TODO: set this based on env??
#concurrent_compactors: 1
# Throttles compaction to the given total throughput across the entire
@ -543,6 +705,9 @@ batch_size_warn_threshold_in_kb: 5
# of compaction, including validation compaction.
compaction_throughput_mb_per_sec: 16
# Log a warning when compacting partitions larger than this value
compaction_large_partition_warning_threshold_mb: 100
# When compacting, the replacement sstable(s) can be opened before they
# are completely written, and used in place of the prior sstables for
# any range that has been written. This helps to smoothly transfer reads
@ -560,7 +725,8 @@ sstable_preemptive_open_interval_in_mb: 50
# this setting allows users to throttle inter dc stream throughput in addition
# to throttling all network stream traffic as configured with
# stream_throughput_outbound_megabits_per_sec
# inter_dc_stream_throughput_outbound_megabits_per_sec:
# When unset, the default is 200 Mbps or 25 MB/s
# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
# How long the coordinator should wait for read operations to complete
read_request_timeout_in_ms: 5000
@ -590,12 +756,11 @@ request_timeout_in_ms: 10000
# and the times are synchronized between the nodes.
cross_node_timeout: false
# Enable socket timeout for streaming operation.
# When a timeout occurs during streaming, streaming is retried from the start
# of the current file. This _can_ involve re-streaming an important amount of
# data, so you should avoid setting the value too low.
# Default value is 0, which never timeout streams.
# streaming_socket_timeout_in_ms: 0
# Set socket timeout for streaming operation.
# The stream session is failed if no data is received by any of the
# participants within that period.
# Default value is 3600000, which means streams timeout after an hour.
# streaming_socket_timeout_in_ms: 3600000
# phi value that must be reached for a host to be marked down.
# most users should never need to adjust this.
@ -615,6 +780,9 @@ cross_node_timeout: false
# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
# ARE PLACED.
#
# IF THE RACK A REPLICA IS PLACED IN CHANGES AFTER THE REPLICA HAS BEEN
# ADDED TO A RING, THE NODE MUST BE DECOMMISSIONED AND REBOOTSTRAPPED.
#
# Out of the box, Cassandra provides
# - SimpleSnitch:
# Treats Strategy order as proximity. This can improve cache
@ -679,9 +847,7 @@ dynamic_snitch_badness_threshold: 0.1
# client requests to a node with a separate queue for each
# request_scheduler_id. The scheduler is further customized by
# request_scheduler_options as described below.
request_scheduler: org.apache.cassandra.scheduler.RoundRobinScheduler
request_scheduler_id: keyspace
request_scheduler: org.apache.cassandra.scheduler.NoScheduler
# Scheduler Options vary based on the type of scheduler
# NoScheduler - Has no options
@ -741,6 +907,8 @@ server_encryption_options:
# enable or disable client/server encryption.
client_encryption_options:
enabled: false
# If enabled and optional is set to true encrypted and unencrypted connections are handled.
optional: false
keystore: conf/.keystore
keystore_password: cassandra
# require_client_auth: false
@ -766,9 +934,57 @@ internode_compression: all
# latency if you block for cross-datacenter responses.
inter_dc_tcp_nodelay: false
disk_access_mode: mmap
row_cache_class_name: org.apache.cassandra.cache.OHCProvider
# TTL for different trace types used during logging of the repair process.
tracetype_query_ttl: 86400
tracetype_repair_ttl: 604800
# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
# Adjust the threshold based on your application throughput requirement
# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
gc_warn_threshold_in_ms: 1000
# UDFs (user defined functions) are disabled by default.
# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
enable_user_defined_functions: false
# Enables scripted UDFs (JavaScript UDFs).
# Java UDFs are always enabled, if enable_user_defined_functions is true.
# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
# This option has no effect, if enable_user_defined_functions is false.
enable_scripted_user_defined_functions: false
# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
# Lowering this value on Windows can provide much tighter latency and better throughput, however
# some virtualized environments may see a negative performance impact from changing this setting
# below their system default. The sysinternals 'clockres' tool can confirm your system's default
# setting.
windows_timer_interval: 1
# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
# the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys
# can still (and should!) be in the keystore and will be used on decrypt operations
# (to handle the case of key rotation).
#
# It is strongly recommended to download and install Java Cryptography Extension (JCE)
# Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
#
# Currently, only the following file types are supported for transparent data encryption, although
# more are coming in future cassandra releases: commitlog, hints
transparent_data_encryption_options:
enabled: false
chunk_length_kb: 64
cipher: AES/CBC/PKCS5Padding
key_alias: testing:1
# CBC IV length for AES needs to be 16 bytes (which is also the default size)
# iv_length: 16
key_provider:
- class_name: org.apache.cassandra.security.JKSKeyProvider
parameters:
- keystore: conf/.keystore
keystore_password: cassandra
store_type: JCEKS
key_password: cassandra
# Not till 3.5
#enable_user_defined_functions: true
#enable_scripted_user_defined_functions: true
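# A sketch, not part of the example: the JCEKS keystore referenced by
# transparent_data_encryption_options above could be generated with keytool,
# matching the alias, store type, and passwords in the defaults:
#   keytool -genseckey -alias 'testing:1' -keyalg AES -keysize 128 \
#     -storetype JCEKS -keystore conf/.keystore \
#     -storepass cassandra -keypass cassandra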

View File

@ -0,0 +1,2 @@
# for jre8
deb http://http.debian.net/debian jessie-backports main
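# With these backports enabled, the image can install a Java 8 runtime, e.g.:
#   apt-get update && apt-get install -y -t jessie-backports openjdk-8-jre-headless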

View File

@ -0,0 +1,27 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [[ $(nodetool status | grep "$POD_IP") == *"UN"* ]]; then
  if [[ $DEBUG ]]; then
    echo "UN";
  fi
  exit 0;
else
  if [[ $DEBUG ]]; then
    echo "Not Up";
  fi
  exit 1;
fi
fi
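# A quick manual check (assuming the script ships in the image as
# /ready-probe.sh, the same path a readinessProbe exec would use):
#   kubectl exec cassandra-0 -- /ready-probe.sh && echo ready || echo not-ready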

View File

@ -0,0 +1,125 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
CONF_DIR=/etc/cassandra
CFG=$CONF_DIR/cassandra.yaml
# if no seeds were passed in (i.e. we are not running as a PetSet),
# fall back to this pod's own hostname
if [ -z "$CASSANDRA_SEEDS" ]; then
HOSTNAME=$(hostname -f)
fi
# The following vars map to their counterparts in $CFG,
# e.g. CASSANDRA_RPC_ADDRESS -> rpc_address
CASSANDRA_RPC_ADDRESS="${CASSANDRA_RPC_ADDRESS:-0.0.0.0}"
CASSANDRA_NUM_TOKENS="${CASSANDRA_NUM_TOKENS:-32}"
CASSANDRA_CLUSTER_NAME="${CASSANDRA_CLUSTER_NAME:-Test Cluster}"
CASSANDRA_LISTEN_ADDRESS=${POD_IP:-$HOSTNAME}
CASSANDRA_BROADCAST_ADDRESS=${POD_IP:-$HOSTNAME}
CASSANDRA_BROADCAST_RPC_ADDRESS=${POD_IP:-$HOSTNAME}
CASSANDRA_DISK_OPTIMIZATION_STRATEGY="${CASSANDRA_DISK_OPTIMIZATION_STRATEGY:-ssd}"
CASSANDRA_MIGRATION_WAIT="${CASSANDRA_MIGRATION_WAIT:-1}"
CASSANDRA_ENDPOINT_SNITCH="${CASSANDRA_ENDPOINT_SNITCH:-SimpleSnitch}"
CASSANDRA_DC="${CASSANDRA_DC}"
CASSANDRA_RACK="${CASSANDRA_RACK}"
CASSANDRA_RING_DELAY="${CASSANDRA_RING_DELAY:-30000}"
CASSANDRA_AUTO_BOOTSTRAP="${CASSANDRA_AUTO_BOOTSTRAP:-true}"
CASSANDRA_SEEDS="${CASSANDRA_SEEDS:-false}"
CASSANDRA_SEED_PROVIDER="${CASSANDRA_SEED_PROVIDER:-org.apache.cassandra.locator.SimpleSeedProvider}"
# Turn off JMX auth
CASSANDRA_OPEN_JMX="${CASSANDRA_OPEN_JMX:-false}"
# send GC to STDOUT
CASSANDRA_GC_STDOUT="${CASSANDRA_GC_STDOUT:-false}"
# if DC and RACK are set, use GossipingPropertyFileSnitch
if [[ $CASSANDRA_DC && $CASSANDRA_RACK ]]; then
echo "dc=$CASSANDRA_DC" > $CONF_DIR/cassandra-rackdc.properties
echo "rack=$CASSANDRA_RACK" >> $CONF_DIR/cassandra-rackdc.properties
CASSANDRA_ENDPOINT_SNITCH="GossipingPropertyFileSnitch"
fi
# TODO what else needs to be modified
for yaml in \
broadcast_address \
broadcast_rpc_address \
cluster_name \
listen_address \
num_tokens \
rpc_address \
disk_optimization_strategy \
endpoint_snitch \
; do
var="CASSANDRA_${yaml^^}"
val="${!var}"
if [ "$val" ]; then
sed -ri 's/^(# )?('"$yaml"':).*/\2 '"$val"'/' "$CFG"
fi
done
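# For example, with yaml=cluster_name: ${yaml^^} uppercases it, so var is
# CASSANDRA_CLUSTER_NAME; ${!var} then reads that variable indirectly; and
# the sed rewrites the matching line in $CFG, commented out or not:
#   sed -ri 's/^(# )?(cluster_name:).*/\2 Test Cluster/' "$CFG"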
echo "auto_bootstrap: ${CASSANDRA_AUTO_BOOTSTRAP}" >> $CFG
# if no seeds were given, seed the node with its own IP. Only the first
# pod needs this; subsequent pods get their seeds from the seed provider
if [[ $CASSANDRA_SEEDS == 'false' ]]; then
sed -ri 's/- seeds:.*/- seeds: "'"$POD_IP"'"/' $CFG
else # seeds were passed in, so use them (the PetSet case)
sed -ri 's/- seeds:.*/- seeds: "'"$CASSANDRA_SEEDS"'"/' $CFG
fi
sed -ri 's/- class_name: SEED_PROVIDER/- class_name: '"$CASSANDRA_SEED_PROVIDER"'/' $CFG
# send gc to stdout
if [[ $CASSANDRA_GC_STDOUT == 'true' ]]; then
sed -ri 's/ -Xloggc:\/var\/log\/cassandra\/gc\.log//' $CONF_DIR/cassandra-env.sh
fi
# enable RMI and JMX to work on one port
echo "JVM_OPTS=\"\$JVM_OPTS -Djava.rmi.server.hostname=$POD_IP\"" >> $CONF_DIR/cassandra-env.sh
# tune the migration task wait to avoid WARNING messages from the Migration Service
echo "-Dcassandra.migration_task_wait_in_seconds=${CASSANDRA_MIGRATION_WAIT}" >> $CONF_DIR/jvm.options
echo "-Dcassandra.ring_delay_ms=${CASSANDRA_RING_DELAY}" >> $CONF_DIR/jvm.options
if [[ $CASSANDRA_OPEN_JMX == 'true' ]]; then
export LOCAL_JMX=no
sed -ri 's/ -Dcom\.sun\.management\.jmxremote\.authenticate=true/ -Dcom\.sun\.management\.jmxremote\.authenticate=false/' $CONF_DIR/cassandra-env.sh
sed -ri 's/ -Dcom\.sun\.management\.jmxremote\.password\.file=\/etc\/cassandra\/jmxremote\.password//' $CONF_DIR/cassandra-env.sh
fi
echo Starting Cassandra on ${CASSANDRA_LISTEN_ADDRESS}
echo CASSANDRA_RPC_ADDRESS ${CASSANDRA_RPC_ADDRESS}
echo CASSANDRA_NUM_TOKENS ${CASSANDRA_NUM_TOKENS}
echo CASSANDRA_CLUSTER_NAME ${CASSANDRA_CLUSTER_NAME}
echo CASSANDRA_LISTEN_ADDRESS ${CASSANDRA_LISTEN_ADDRESS}
echo CASSANDRA_BROADCAST_ADDRESS ${CASSANDRA_BROADCAST_ADDRESS}
echo CASSANDRA_BROADCAST_RPC_ADDRESS ${CASSANDRA_BROADCAST_RPC_ADDRESS}
echo CASSANDRA_DISK_OPTIMIZATION_STRATEGY ${CASSANDRA_DISK_OPTIMIZATION_STRATEGY}
echo CASSANDRA_MIGRATION_WAIT ${CASSANDRA_MIGRATION_WAIT}
echo CASSANDRA_ENDPOINT_SNITCH ${CASSANDRA_ENDPOINT_SNITCH}
echo CASSANDRA_DC ${CASSANDRA_DC}
echo CASSANDRA_RACK ${CASSANDRA_RACK}
echo CASSANDRA_RING_DELAY ${CASSANDRA_RING_DELAY}
echo CASSANDRA_AUTO_BOOTSTRAP ${CASSANDRA_AUTO_BOOTSTRAP}
echo CASSANDRA_SEEDS ${CASSANDRA_SEEDS}
echo CASSANDRA_SEED_PROVIDER ${CASSANDRA_SEED_PROVIDER}
export CLASSPATH=/kubernetes-cassandra.jar
cassandra -R -f
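# Example (a sketch; image name and tag are placeholders, not the real build
# artifact): the image can be run standalone by supplying the env vars the
# script reads, e.g.:
#   docker run -e POD_IP=10.0.0.6 -e CASSANDRA_SEEDS=10.0.0.5 \
#     -e CASSANDRA_CLUSTER_NAME=demo <cassandra-image>:<tag>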

View File

@ -1,67 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
CFG=/etc/cassandra/cassandra.yaml
CASSANDRA_RPC_ADDRESS="${CASSANDRA_RPC_ADDRESS:-0.0.0.0}"
CASSANDRA_NUM_TOKENS="${CASSANDRA_NUM_TOKENS:-32}"
CASSANDRA_CLUSTER_NAME="${CASSANDRA_CLUSTER_NAME:='Test Cluster'}"
CASSANDRA_LISTEN_ADDRESS=${POD_IP}
CASSANDRA_BROADCAST_ADDRESS=${POD_IP}
CASSANDRA_BROADCAST_RPC_ADDRESS=${POD_IP}
# TODO what else needs to be modified
for yaml in \
broadcast_address \
broadcast_rpc_address \
cluster_name \
listen_address \
num_tokens \
rpc_address \
; do
var="CASSANDRA_${yaml^^}"
val="${!var}"
if [ "$val" ]; then
sed -ri 's/^(# )?('"$yaml"':).*/\2 '"$val"'/' "$CFG"
fi
done
# Eventual do snitch $DC && $RACK?
#if [[ $SNITCH ]]; then
# sed -i -e "s/endpoint_snitch: SimpleSnitch/endpoint_snitch: $SNITCH/" $CONFIG/cassandra.yaml
#fi
#if [[ $DC && $RACK ]]; then
# echo "dc=$DC" > $CONFIG/cassandra-rackdc.properties
# echo "rack=$RACK" >> $CONFIG/cassandra-rackdc.properties
#fi
#
# see if this is needed
#echo "JVM_OPTS=\"\$JVM_OPTS -Djava.rmi.server.hostname=$IP\"" >> $CASSANDRA_CONFIG/cassandra-env.sh
#
# FIXME create README for these args
echo "Starting Cassandra on $POD_IP"
echo CASSANDRA_RPC_ADDRESS ${CASSANDRA_RPC_ADDRESS}
echo CASSANDRA_NUM_TOKENS ${CASSANDRA_NUM_TOKENS}
echo CASSANDRA_CLUSTER_NAME ${CASSANDRA_CLUSTER_NAME}
echo CASSANDRA_LISTEN_ADDRESS ${POD_IP}
echo CASSANDRA_BROADCAST_ADDRESS ${POD_IP}
echo CASSANDRA_BROADCAST_RPC_ADDRESS ${POD_IP}
export CLASSPATH=/kubernetes-cassandra.jar
cassandra -f

View File

@ -17,7 +17,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>io.k8s.cassandra</groupId>
<artifactId>kubernetes-cassandra</artifactId>
<version>1.0.1</version>
<version>1.0.2</version>
<build>
<plugins>
<plugin>
@ -33,6 +33,7 @@
<properties>
<logback.version>1.1.3</logback.version>
<cassandra.version>3.7</cassandra.version>
</properties>
<dependencies>
@ -85,7 +86,7 @@
<dependency>
<groupId>org.apache.cassandra</groupId>
<artifactId>cassandra-all</artifactId>
<version>3.5</version>
<version>${cassandra.version}</version>
<scope>provided</scope>
</dependency>
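<!-- A sketch of the build step: the jar that run.sh puts on the classpath
     (/kubernetes-cassandra.jar) is built from this pom, e.g. with
     "mvn -B clean package", which should yield
     target/kubernetes-cassandra-1.0.2.jar. -->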

View File

@ -44,6 +44,7 @@ cluster/saltbase/salt/etcd/etcd.manifest: "value": "{{ storage_backend }}
cluster/saltbase/salt/etcd/etcd.manifest:{% set storage_backend = pillar.get('storage_backend', 'etcd2') -%}
cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = params + " " + feature_gates -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers + ":6443" -%}
@ -69,7 +70,7 @@ cluster/vagrant/provision-utils.sh: node_ip: '$(echo "$MASTER_IP" | sed -e "s/'
cluster/vagrant/provision-utils.sh: runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
cluster/vsphere/templates/salt-minion.sh: hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}')
examples/cluster-dns/images/frontend/client.py: service_address = socket.gethostbyname(hostname)
examples/storage/cassandra/image/run.sh: cluster_name \
examples/storage/cassandra/image/files/run.sh: cluster_name \
examples/storage/vitess/env.sh: node_ip=$(get_node_ip)
federation/cluster/common.sh: local cert_dir="${kube_temp}/easy-rsa-master/easyrsa3"
federation/deploy/config.json.sample: "cloud_provider": "gce",

View File

@ -21,6 +21,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
@ -28,6 +29,7 @@ import (
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@ -225,22 +227,95 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
Expect(err).NotTo(HaveOccurred())
forEachPod("app", "cassandra", func(pod api.Pod) {
framework.Logf("Verifying pod %v ", pod.Name)
_, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Listening for thrift clients", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
_, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Handshaking version", serverStartTimeout)
// TODO: is there a better way to verify startup, e.g. a readiness probe?
_, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Starting listening for CQL clients", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
By("Finding each node in the nodetool status lines")
forEachPod("app", "cassandra", func(pod api.Pod) {
output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
if !strings.Contains(output, pod.Status.PodIP) {
framework.Failf("Pod ip %s not found in nodetool status", pod.Status.PodIP)
matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
if !matched {
framework.Failf("Cassandra pod ip %s is not reporting Up and Normal 'UN' via nodetool status", pod.Status.PodIP)
}
})
})
})
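// For reference, the manual equivalent of the "UN" check above; the pod
// name and IP are placeholders, not test fixtures:
//   kubectl exec <cassandra-pod> -- nodetool status | grep "UN.*<pod-ip>"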
framework.KubeDescribe("CassandraPetSet", func() {
It("should create petset", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/cassandra", file)
}
serviceYaml := mkpath("cassandra-service.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
// rewrite the DNS suffix to match the test's dynamically generated namespace
input, err := ioutil.ReadFile(mkpath("cassandra-petset.yaml"))
Expect(err).NotTo(HaveOccurred())
output := strings.Replace(string(input), "cassandra-0.cassandra.default.svc.cluster.local", "cassandra-0.cassandra."+ns+".svc.cluster.local", -1)
petSetYaml := "/tmp/cassandra-petset.yaml"
err = ioutil.WriteFile(petSetYaml, []byte(output), 0644)
Expect(err).NotTo(HaveOccurred())
By("Starting the cassandra service")
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
framework.Logf("wait for service")
err = framework.WaitForService(c, ns, "cassandra", true, framework.Poll, framework.ServiceRespondingTimeout)
Expect(err).NotTo(HaveOccurred())
// Create a PetSet with n pets in it. Each pet will then be verified.
By("Creating a Cassandra PetSet")
framework.RunKubectlOrDie("create", "-f", petSetYaml, nsFlag)
petsetPoll := 30 * time.Second
petsetTimeout := 10 * time.Minute
// TODO - parse this number out of the yaml
numPets := 3
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
err = wait.PollImmediate(petsetPoll, petsetTimeout,
func() (bool, error) {
podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: label})
if err != nil {
return false, fmt.Errorf("Unable to get list of pods in petset %s", label)
}
ExpectNoError(err)
if len(podList.Items) < numPets {
framework.Logf("Found %d pets, waiting for %d", len(podList.Items), numPets)
return false, nil
}
if len(podList.Items) > numPets {
return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
}
for _, p := range podList.Items {
isReady := api.IsPodReady(&p)
if p.Status.Phase != api.PodRunning || !isReady {
framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, api.PodRunning, p.Status.Phase, isReady)
return false, nil
}
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
By("Finding each node in the nodetool status lines")
forEachPod("app", "cassandra", func(pod api.Pod) {
output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
if !matched {
framework.Failf("Cassandra pod ip %s is not reporting Up and Normal 'UN' via nodetool status", pod.Status.PodIP)
}
})
// reuse deleteAllPetSets from the petset e2e, since cleaning up the PVCs by hand is a pain
deleteAllPetSets(c, ns)
})
})
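// For reference, a manual walk-through of this test (a sketch: paths are
// relative to the repo root, and pod names assume the PetSet ordinal scheme):
//   kubectl create -f examples/storage/cassandra/cassandra-service.yaml
//   kubectl create -f examples/storage/cassandra/cassandra-petset.yaml
//   kubectl get pods -l app=cassandra   # wait for 3 Running/Ready pets
//   kubectl exec cassandra-0 -- nodetool status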
framework.KubeDescribe("Storm", func() {
It("should create and stop Zookeeper, Nimbus and Storm worker servers", func() {
mkpath := func(file string) string {