Merge pull request #7661 from brendandburns/release-0.16

Release 0.16.1
Zach Loafman 2015-05-01 16:41:28 -07:00
commit ce3b134df8
6 changed files with 40 additions and 18 deletions


@@ -303,5 +303,5 @@ function kube-down() {
echo "... in kube-down()" >&2
detect-project >&2
"${GCLOUD}" alpha container clusters delete --project="${PROJECT}" \
--zone="${ZONE}" "${CLUSTER_NAME}"
--zone="${ZONE}" "${CLUSTER_NAME}" --quiet
}


@@ -66,4 +66,4 @@ kubectl
* [kubectl update](kubectl_update.md) - Update a resource by filename or stdin.
* [kubectl version](kubectl_version.md) - Print the client and server version information.
###### Auto generated by spf13/cobra at 2015-04-28 03:34:57.18016485 +0000 UTC
###### Auto generated by spf13/cobra at 2015-05-01 22:27:18.37385 +0000 UTC


@@ -139,12 +139,16 @@ for version in "${kube_api_versions[@]}"; do
labels_field=".metadata.labels"
service_selector_field=".spec.selector"
rc_replicas_field=".spec.replicas"
rc_status_replicas_field=".status.replicas"
rc_container_image_field=".spec.template.spec.containers"
port_field="(index .spec.ports 0).port"
if [ "${version}" = "v1beta1" ] || [ "${version}" = "v1beta2" ]; then
id_field=".id"
labels_field=".labels"
service_selector_field=".selector"
rc_replicas_field=".desiredState.replicas"
rc_status_replicas_field=".currentState.replicas"
rc_container_image_field=".desiredState.podTemplate.desiredState.manifest.containers"
port_field=".port"
fi
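
The per-version field variables exist because the test asserts on kubectl's template output, which evaluates a Go text/template path against whatever wire format the API version under test returns: the same logical field lives at ".spec.replicas" in the newer versions but at ".desiredState.replicas" in v1beta1/v1beta2. A minimal, self-contained sketch of that idea (the maps below are simplified stand-ins, not real API objects):

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	// Simplified stand-ins for one replication controller as serialized by two API versions.
	newStyle := map[string]interface{}{"spec": map[string]interface{}{"replicas": 3}}
	oldStyle := map[string]interface{}{"desiredState": map[string]interface{}{"replicas": 3}}

	// The template path is chosen per version, mirroring rc_replicas_field in the test.
	cases := []struct {
		path string
		obj  interface{}
	}{
		{"{{.spec.replicas}}", newStyle},
		{"{{.desiredState.replicas}}", oldStyle},
	}
	for _, c := range cases {
		t := template.Must(template.New("field").Parse(c.path))
		_ = t.Execute(os.Stdout, c.obj) // both cases print 3
		fmt.Println()
	}
}
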
@@ -541,6 +545,14 @@ __EOF__
kubectl delete pod valid-pod "${kube_flags[@]}"
kubectl delete service frontend{,-2,-3} "${kube_flags[@]}"
### Perform a rolling update with --image
# Pre-condition: status.Replicas is 3, otherwise the rc manager could update it and interfere with the rolling update
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl rolling-update frontend --image=kubernetes/pause --update-period=10ns --poll-interval=10ms "${kube_flags[@]}"
# Post-condition: current image IS kubernetes/pause
kube::test::get_object_assert 'rc frontend' '{{range \$c:=$rc_container_image_field}} {{\$c.image}} {{end}}' ' +kubernetes/pause +'
### Delete replication controller with id
# Pre-condition: frontend replication controller is running
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
@@ -647,7 +659,7 @@ __EOF__
#####################
# Resource aliasing #
#####################
#####################
kube::log::status "Testing resource aliasing"
kubectl create -f examples/cassandra/cassandra.yaml "${kube_flags[@]}"


@@ -154,6 +154,9 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg
return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename)
}
}
// If the --image option is specified, we need to create a new rc with at least one different selector
// than the old rc. This selector is the hash of the rc, which will differ because the new rc has a
// different image.
if len(image) != 0 {
var err error
// load the old RC into the "new" RC
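
The new comment is the core of the --image path: the replacement controller must select a disjoint set of pods, and the value that makes its selector differ is a hash of the controller itself. A rough, self-contained sketch of that idea (plain structs, with the hash function chosen purely for illustration; the real hashObject encodes the object with the client codec before hashing):

package main

import (
	"encoding/json"
	"fmt"
	"hash/adler32"
)

// rcSpec is a toy stand-in for a ReplicationController spec.
type rcSpec struct {
	Name  string `json:"name"`
	Image string `json:"image"`
}

// hashSpec plays the role of hashObject: serialize the object, then hash the bytes,
// so any change to the spec (such as the image) yields a different value.
func hashSpec(s rcSpec) string {
	data, _ := json.Marshal(s)
	return fmt.Sprintf("%x", adler32.Checksum(data))
}

func main() {
	oldRc := rcSpec{Name: "frontend", Image: "frontend:v1"}
	newRc := oldRc
	newRc.Image = "kubernetes/pause" // the value passed via --image

	// "deployment" stands in for the deploymentKey label used by rolling-update;
	// the differing hash values make the two controllers' selectors disjoint.
	fmt.Println("old selector:", map[string]string{"deployment": hashSpec(oldRc)})
	fmt.Println("new selector:", map[string]string{"deployment": hashSpec(newRc)})
}
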
@@ -190,7 +193,7 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg
newRc.ResourceVersion = ""
if _, found := oldRc.Spec.Selector[deploymentKey]; !found {
if err := addDeploymentKeyToReplicationController(oldRc, client, deploymentKey, cmdNamespace, out); err != nil {
if oldRc, err = addDeploymentKeyToReplicationController(oldRc, client, deploymentKey, cmdNamespace, out); err != nil {
return err
}
}
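
The change from discarding the helper's result to reassigning oldRc matters because the helper now hands back the controller as the server last stored it. One plausible reading of the bug being fixed: each write bumps the object's resource version, so a later write issued with the stale in-memory copy is rejected as a conflict. A toy model of that behavior (not the real client, just a map-backed store enforcing the same rule):

package main

import (
	"errors"
	"fmt"
)

type controller struct {
	Name            string
	ResourceVersion int
	Labels          map[string]string
}

// store mimics a server that enforces optimistic concurrency on updates.
type store struct{ current controller }

func (s *store) Update(c controller) (controller, error) {
	if c.ResourceVersion != s.current.ResourceVersion {
		return controller{}, errors.New("conflict: stale resource version")
	}
	c.ResourceVersion++
	s.current = c
	return s.current, nil
}

func main() {
	s := &store{current: controller{Name: "frontend", ResourceVersion: 1}}
	rc := s.current

	// First write, as in the template-label update: keep the returned copy.
	rc.Labels = map[string]string{"deployment": "2d8f1a"}
	rc, _ = s.Update(rc)

	// Second write, as in the selector update: it succeeds only because rc now
	// carries the resource version the first write produced.
	rc.Labels["name"] = "frontend"
	if _, err := s.Update(rc); err != nil {
		fmt.Println("second update failed:", err)
		return
	}
	fmt.Println("both updates applied; final version:", s.current.ResourceVersion)
}
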
@@ -203,6 +206,9 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg
updater := kubectl.NewRollingUpdater(newRc.Namespace, kubectl.NewRollingUpdaterClient(client))
// To successfully pull off a rolling update the new and old rc have to differ
// by at least one selector. Every new pod should have the selector and every
// old pod should not have the selector.
var hasLabel bool
for key, oldValue := range oldRc.Spec.Selector {
if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
@@ -265,25 +271,25 @@ func hashObject(obj runtime.Object, codec runtime.Codec) (string, error) {
const MaxRetries = 3
func addDeploymentKeyToReplicationController(oldRc *api.ReplicationController, client *client.Client, deploymentKey, namespace string, out io.Writer) error {
func addDeploymentKeyToReplicationController(oldRc *api.ReplicationController, client *client.Client, deploymentKey, namespace string, out io.Writer) (*api.ReplicationController, error) {
oldHash, err := hashObject(oldRc, client.Codec)
if err != nil {
return err
return nil, err
}
// First, update the template label. This ensures that any newly created pods will have the new label
if oldRc.Spec.Template.Labels == nil {
oldRc.Spec.Template.Labels = map[string]string{}
}
oldRc.Spec.Template.Labels[deploymentKey] = oldHash
if _, err := client.ReplicationControllers(namespace).Update(oldRc); err != nil {
return err
if oldRc, err = client.ReplicationControllers(namespace).Update(oldRc); err != nil {
return nil, err
}
// Update all labels to include the new hash, so they are correctly adopted
// Update all pods managed by the rc to have the new hash label, so they are correctly adopted
// TODO: extract the code from the label command and re-use it here.
podList, err := client.Pods(namespace).List(labels.SelectorFromSet(oldRc.Spec.Selector), fields.Everything())
if err != nil {
return err
return nil, err
}
for ix := range podList.Items {
pod := &podList.Items[ix]
@@ -307,7 +313,7 @@ func addDeploymentKeyToReplicationController(oldRc *api.ReplicationController, c
}
}
if err != nil {
return err
return nil, err
}
}
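
MaxRetries and the loop over podList.Items cover the re-adoption step: every pod the old selector matches gets the hash label, and because another writer can touch a pod between read and write, each per-pod update is presumably retried up to MaxRetries times before giving up. A hedged sketch of that retry pattern (the flaky updatePod function and all names here are invented for illustration, not the real client call):

package main

import (
	"errors"
	"fmt"
)

const maxRetries = 3

// labelPod applies the deployment hash to one pod's labels, retrying a bounded
// number of times when the write fails (for example on a version conflict).
func labelPod(labels map[string]string, key, hash string, update func(map[string]string) error) error {
	labels[key] = hash
	var err error
	for i := 0; i < maxRetries; i++ {
		if err = update(labels); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	// updatePod simulates a write that conflicts once and then succeeds.
	calls := 0
	updatePod := func(map[string]string) error {
		calls++
		if calls == 1 {
			return errors.New("conflict")
		}
		return nil
	}

	labels := map[string]string{"name": "frontend"}
	if err := labelPod(labels, "deployment", "2d8f1a", updatePod); err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Printf("pod relabeled after %d attempts: %v\n", calls, labels)
}
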
@@ -321,19 +327,23 @@ func addDeploymentKeyToReplicationController(oldRc *api.ReplicationController, c
}
oldRc.Spec.Selector[deploymentKey] = oldHash
if _, err := client.ReplicationControllers(namespace).Update(oldRc); err != nil {
return err
// Update the selector of the rc so it manages all the pods we updated above
if oldRc, err = client.ReplicationControllers(namespace).Update(oldRc); err != nil {
return nil, err
}
// Clean up any orphaned pods that don't have the new label; this can happen if the rc manager
// doesn't see the update to its pod template and creates a new pod with the old labels after
// we've finished re-adopting existing pods to the rc.
podList, err = client.Pods(namespace).List(labels.SelectorFromSet(selectorCopy), fields.Everything())
for ix := range podList.Items {
pod := &podList.Items[ix]
if value, found := pod.Labels[deploymentKey]; !found || value != oldHash {
if err := client.Pods(namespace).Delete(pod.Name); err != nil {
return err
return nil, err
}
}
}
return nil
return oldRc, nil
}
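
The final addition (the second pod list and delete loop) guards against the race called out in the new comment: the rc manager may still create pods from the stale template while the relabeling is in flight, and those pods match the old selector but lack the hash. A standalone sketch of just the selection logic that decides which pods to delete (types and values are illustrative only):

package main

import "fmt"

type pod struct {
	Name   string
	Labels map[string]string
}

// orphans returns the pods that the tightened selector no longer covers: anything
// missing the deployment label, or carrying a different hash, is slated for deletion.
func orphans(pods []pod, deploymentKey, hash string) []string {
	var names []string
	for _, p := range pods {
		if v, ok := p.Labels[deploymentKey]; !ok || v != hash {
			names = append(names, p.Name)
		}
	}
	return names
}

func main() {
	pods := []pod{
		{Name: "frontend-1", Labels: map[string]string{"name": "frontend", "deployment": "2d8f1a"}},
		{Name: "frontend-2", Labels: map[string]string{"name": "frontend", "deployment": "2d8f1a"}},
		{Name: "frontend-3", Labels: map[string]string{"name": "frontend"}}, // created from the stale template
	}
	fmt.Println("pods to delete:", orphans(pods, "deployment", "2d8f1a"))
}
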


@@ -170,7 +170,7 @@ func TestAddDeploymentHash(t *testing.T) {
return
}
if err := addDeploymentKeyToReplicationController(rc, client, "hash", api.NamespaceDefault, buf); err != nil {
if _, err := addDeploymentKeyToReplicationController(rc, client, "hash", api.NamespaceDefault, buf); err != nil {
t.Errorf("unexpected error: %v", err)
}
for _, pod := range podList.Items {


@@ -36,8 +36,8 @@ package version
var (
// TODO: Deprecate gitMajor and gitMinor, use only gitVersion instead.
gitMajor string = "0" // major version, always numeric
gitMinor string = "16.0+" // minor version, numeric possibly followed by "+"
gitVersion string = "v0.16.0-dev" // version from git, output of $(git describe)
gitMinor string = "16.1+" // minor version, numeric possibly followed by "+"
gitVersion string = "v0.16.1-dev" // version from git, output of $(git describe)
gitCommit string = "" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"
)